Code Example #1
  public static void validateKeyRange(KeyRange range) throws InvalidRequestException {
    if ((range.start_key == null) != (range.end_key == null)) {
      throw new InvalidRequestException(
          "start key and end key must either both be non-null, or both be null");
    }
    if ((range.start_token == null) != (range.end_token == null)) {
      throw new InvalidRequestException(
          "start token and end token must either both be non-null, or both be null");
    }
    if ((range.start_key == null) == (range.start_token == null)) {
      throw new InvalidRequestException(
          "exactly one of {start key, end key} or {start token, end token} must be specified");
    }

    if (range.start_key != null) {
      IPartitioner p = StorageService.getPartitioner();
      Token startToken = p.getToken(range.start_key);
      Token endToken = p.getToken(range.end_key);
      if (startToken.compareTo(endToken) > 0 && !endToken.equals(p.getMinimumToken())) {
        if (p instanceof RandomPartitioner)
          throw new InvalidRequestException(
              "start key's md5 sorts after end key's md5.  this is not allowed; you probably should not specify end key at all, under RandomPartitioner");
        else
          throw new InvalidRequestException(
              "start key must sort before (or equal to) finish key in your partitioner!");
      }
    }

    if (range.count <= 0) {
      throw new InvalidRequestException("maxRows must be positive");
    }
  }
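The checks above are easiest to see with concrete inputs. A minimal sketch, assuming the Thrift-generated KeyRange and its chainable setters (values are hypothetical):

  KeyRange byKeys = new KeyRange(100);                              // count must be positive
  byKeys.setStart_key("a".getBytes()).setEnd_key("b".getBytes());   // keys only: passes
  KeyRange byTokens = new KeyRange(100);
  byTokens.setStart_token("0").setEnd_token("42");                  // tokens only: passes
  KeyRange mixed = new KeyRange(100);
  mixed.setStart_key("a".getBytes()).setStart_token("0");           // start_key without end_key: throws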
Code Example #2
  @Test
  public void testExportSuperCf() throws IOException {
    File tempSS = createTemporarySSTable("Keyspace1", "Super4");
    ColumnFamily cfamily = ColumnFamily.create("Keyspace1", "Super4");
    IPartitioner<?> partitioner = DatabaseDescriptor.getPartitioner();
    DataOutputBuffer dob = new DataOutputBuffer();
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, partitioner);

    // Add rowA
    cfamily.addColumn(
        new QueryPath("Super4", "superA".getBytes(), "colA".getBytes()),
        "valA".getBytes(),
        1,
        false);
    ColumnFamily.serializer().serializeWithIndexes(cfamily, dob, false);
    writer.append(partitioner.decorateKey("rowA"), dob);
    dob.reset();
    cfamily.clear();

    // Add rowB
    cfamily.addColumn(
        new QueryPath("Super4", "superB".getBytes(), "colB".getBytes()),
        "valB".getBytes(),
        1,
        false);
    ColumnFamily.serializer().serializeWithIndexes(cfamily, dob, false);
    writer.append(partitioner.decorateKey("rowB"), dob);
    dob.reset();
    cfamily.clear();

    // Add rowExclude
    cfamily.addColumn(
        new QueryPath("Super4", "superX".getBytes(), "colX".getBytes()),
        "valX".getBytes(),
        1,
        false);
    ColumnFamily.serializer().serializeWithIndexes(cfamily, dob, false);
    writer.append(partitioner.decorateKey("rowExclude"), dob);
    dob.reset();
    cfamily.clear();

    SSTableReader reader = writer.closeAndOpenReader();

    // Export to JSON and verify
    File tempJson = File.createTempFile("Super4", ".json");
    SSTableExport.export(reader, new PrintStream(tempJson.getPath()), new String[] {"rowExclude"});

    JSONObject json = (JSONObject) JSONValue.parse(new FileReader(tempJson));

    JSONObject rowA = (JSONObject) json.get("rowA");
    JSONObject superA =
        (JSONObject) rowA.get(cfamily.getComparator().getString("superA".getBytes()));
    JSONArray subColumns = (JSONArray) superA.get("subColumns");
    JSONArray colA = (JSONArray) subColumns.get(0);
    JSONObject rowExclude = (JSONObject) json.get("rowExclude");
    assert Arrays.equals(hexToBytes((String) colA.get(1)), "valA".getBytes());
    assert !(Boolean) colA.get(3);
    assert rowExclude == null;
  }
Code Example #3
    public TokenRange getRange(ByteBuffer key) {
      Token t = partitioner.getToken(key);
      com.datastax.driver.core.Token driverToken =
          metadata.newToken(partitioner.getTokenFactory().toString(t));
      for (TokenRange range : rangeMap.keySet()) {
        if (range.contains(driverToken)) {
          return range;
        }
      }

      throw new RuntimeException(
          "Invalid token information returned by describe_ring: " + rangeMap);
    }
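For context, a hypothetical sketch of how the surrounding rangeMap might be populated from the Java driver's cluster metadata before getRange is called (getTokenRanges and getReplicas are assumed driver calls; keyspace is a field of the enclosing class, ByteBufferUtil.bytes a utility from this codebase):

  // Hypothetical setup: map each driver TokenRange to the hosts replicating it
  for (TokenRange r : metadata.getTokenRanges())
    rangeMap.put(r, metadata.getReplicas(keyspace, r));
  // getRange then resolves a partition key to its owning range by linear scan
  TokenRange owner = getRange(ByteBufferUtil.bytes("rowA"));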
Code Example #4
  @Test
  public void testRoundTripStandardCf() throws IOException, ParseException {
    File tempSS = createTemporarySSTable("Keyspace1", "Standard1");
    ColumnFamily cfamily = ColumnFamily.create("Keyspace1", "Standard1");
    IPartitioner<?> partitioner = DatabaseDescriptor.getPartitioner();
    DataOutputBuffer dob = new DataOutputBuffer();
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, partitioner);

    // Add rowA
    cfamily.addColumn(
        new QueryPath("Standard1", null, "name".getBytes()), "val".getBytes(), 1, false);
    ColumnFamily.serializer().serializeWithIndexes(cfamily, dob, false);
    writer.append(partitioner.decorateKey("rowA"), dob);
    dob.reset();
    cfamily.clear();

    // Add rowExclude
    cfamily.addColumn(
        new QueryPath("Standard1", null, "name".getBytes()), "val".getBytes(), 1, false);
    ColumnFamily.serializer().serializeWithIndexes(cfamily, dob, false);
    writer.append(partitioner.decorateKey("rowExclude"), dob);
    dob.reset();
    cfamily.clear();

    SSTableReader reader = writer.closeAndOpenReader();

    // Export to JSON and verify
    File tempJson = File.createTempFile("Standard1", ".json");
    SSTableExport.export(reader, new PrintStream(tempJson.getPath()), new String[] {"rowExclude"});

    // Import JSON to another SSTable file
    File tempSS2 = createTemporarySSTable("Keyspace1", "Standard1");
    SSTableImport.importJson(tempJson.getPath(), "Keyspace1", "Standard1", tempSS2.getPath());

    reader = SSTableReader.open(tempSS2.getPath(), DatabaseDescriptor.getPartitioner());
    NamesQueryFilter qf =
        new NamesQueryFilter("rowA", new QueryPath("Standard1", null, null), "name".getBytes());
    ColumnFamily cf = qf.getSSTableColumnIterator(reader).getColumnFamily();
    assertTrue(cf != null);
    assertTrue(Arrays.equals(cf.getColumn("name".getBytes()).value(), hexToBytes("76616c")));

    qf =
        new NamesQueryFilter(
            "rowExclude", new QueryPath("Standard1", null, null), "name".getBytes());
    cf = qf.getSSTableColumnIterator(reader).getColumnFamily();
    assert cf == null;
  }
Code Example #5
File: MerkleTree.java Project: franczyk/cassandra
 /**
  * Hash the given range in the tree. The range must have been generated with recursive
  * applications of partitioner.midpoint().
  *
  * <p>NB: Currently does not support wrapping ranges that do not end with
  * partitioner.getMinimumToken().
  *
  * @return Null if any subrange of the range is invalid, or if the exact range cannot be
  *     calculated using this tree.
  */
 public byte[] hash(Range range) {
   Token mintoken = partitioner.getMinimumToken();
   try {
     return hashHelper(root, new Range(mintoken, mintoken), range);
   } catch (StopRecursion e) {
     return null;
   }
 }
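A minimal usage sketch, assuming the MerkleTree(partitioner, hashdepth, maxsize) constructor of this vintage of the class:

  MerkleTree tree = new MerkleTree(partitioner, (byte) 8, 64);
  tree.init();
  Token min = partitioner.getMinimumToken();
  // (min, min] is the full ring, which init() built via recursive midpoint() calls
  byte[] h = tree.hash(new Range(min, min)); // null until the covering leaves are hashed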
Code Example #6
File: MerkleTree.java Project: franczyk/cassandra
 private Hashable initHelper(Token left, Token right, byte depth, byte max) {
   if (depth == max)
     // we've reached the leaves
     return new Leaf();
   Token midpoint = partitioner.midpoint(left, right);
   Hashable lchild = initHelper(left, midpoint, inc(depth), max);
   Hashable rchild = initHelper(midpoint, right, inc(depth), max);
   return new Inner(midpoint, lchild, rchild);
 }
Code Example #7
File: MerkleTree.java Project: franczyk/cassandra
  /**
   * Initializes this tree by splitting it until hashdepth is reached, or until an additional level
   * of splits would violate maxsize.
   *
   * <p>NB: Replaces all nodes in the tree.
   */
  public void init() {
    // determine the depth to which we can safely split the tree
    byte sizedepth = (byte) (Math.log10(maxsize) / Math.log10(2));
    byte depth = (byte) Math.min(sizedepth, hashdepth);

    Token mintoken = partitioner.getMinimumToken();
    root = initHelper(mintoken, mintoken, (byte) 0, depth);
    size = (long) Math.pow(2, depth);
  }
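For example, maxsize = 100 gives sizedepth = (byte) (Math.log10(100) / Math.log10(2)) = (byte) 6.64 = 6; with hashdepth of at least 6, the tree is built to depth 6 and size = 2^6 = 64, the largest power of two that does not exceed maxsize.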
Code Example #8
  private void testRangeSliceCommandWrite() throws IOException {
    IPartitioner part = StorageService.getPartitioner();
    AbstractBounds<RowPosition> bounds =
        new Range<Token>(part.getRandomToken(), part.getRandomToken()).toRowBounds();

    RangeSliceCommand namesCmd =
        new RangeSliceCommand(statics.KS, "Standard1", statics.readTs, namesPred, bounds, 100);
    MessageOut<RangeSliceCommand> namesCmdMsg = namesCmd.createMessage();
    RangeSliceCommand emptyRangeCmd =
        new RangeSliceCommand(statics.KS, "Standard1", statics.readTs, emptyRangePred, bounds, 100);
    MessageOut<RangeSliceCommand> emptyRangeCmdMsg = emptyRangeCmd.createMessage();
    RangeSliceCommand regRangeCmd =
        new RangeSliceCommand(
            statics.KS, "Standard1", statics.readTs, nonEmptyRangePred, bounds, 100);
    MessageOut<RangeSliceCommand> regRangeCmdMsg = regRangeCmd.createMessage();
    RangeSliceCommand namesCmdSup =
        new RangeSliceCommand(statics.KS, "Super1", statics.readTs, namesSCPred, bounds, 100);
    MessageOut<RangeSliceCommand> namesCmdSupMsg = namesCmdSup.createMessage();
    RangeSliceCommand emptyRangeCmdSup =
        new RangeSliceCommand(statics.KS, "Super1", statics.readTs, emptyRangePred, bounds, 100);
    MessageOut<RangeSliceCommand> emptyRangeCmdSupMsg = emptyRangeCmdSup.createMessage();
    RangeSliceCommand regRangeCmdSup =
        new RangeSliceCommand(
            statics.KS, "Super1", statics.readTs, nonEmptyRangeSCPred, bounds, 100);
    MessageOut<RangeSliceCommand> regRangeCmdSupMsg = regRangeCmdSup.createMessage();

    DataOutputStream out = getOutput("db.RangeSliceCommand.bin");
    namesCmdMsg.serialize(out, getVersion());
    emptyRangeCmdMsg.serialize(out, getVersion());
    regRangeCmdMsg.serialize(out, getVersion());
    namesCmdSupMsg.serialize(out, getVersion());
    emptyRangeCmdSupMsg.serialize(out, getVersion());
    regRangeCmdSupMsg.serialize(out, getVersion());
    out.close();

    // test serializedSize
    testSerializedSize(namesCmd, RangeSliceCommand.serializer);
    testSerializedSize(emptyRangeCmd, RangeSliceCommand.serializer);
    testSerializedSize(regRangeCmd, RangeSliceCommand.serializer);
    testSerializedSize(namesCmdSup, RangeSliceCommand.serializer);
    testSerializedSize(emptyRangeCmdSup, RangeSliceCommand.serializer);
    testSerializedSize(regRangeCmdSup, RangeSliceCommand.serializer);
  }
Code Example #9
File: MerkleTree.java Project: franczyk/cassandra
  /**
   * Splits the range containing the given token, if no tree limits would be violated. If the range
   * would be split to a depth below hashdepth, or if the tree already contains maxsize subranges,
   * this operation will fail.
   *
   * @return True if the range was successfully split.
   */
  public boolean split(Token t) {
    if (!(size < maxsize)) return false;

    Token mintoken = partitioner.getMinimumToken();
    try {
      root = splitHelper(root, mintoken, mintoken, (byte) 0, t);
    } catch (StopRecursion.TooDeep e) {
      return false;
    }
    return true;
  }
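A hedged sketch of driving split until a limit trips (getRandomToken is assumed from IPartitioner). Note that, as splitHelper in Code Example #12 shows, a split Leaf is replaced by two fresh unhashed leaves, so the affected range must be re-hashed afterwards:

  // keep splitting at random tokens until maxsize or hashdepth stops us
  while (tree.split(partitioner.getRandomToken()))
    ; // each successful split adds one subrange to size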
Code Example #10
  @Test
  public void testEnumeratekeys() throws IOException {
    File tempSS = createTemporarySSTable("Keyspace1", "Standard1");
    ColumnFamily cfamily = ColumnFamily.create("Keyspace1", "Standard1");
    IPartitioner<?> partitioner = DatabaseDescriptor.getPartitioner();
    DataOutputBuffer dob = new DataOutputBuffer();
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, partitioner);

    // Add rowA
    cfamily.addColumn(
        new QueryPath("Standard1", null, "colA".getBytes()), "valA".getBytes(), 1, false);
    ColumnFamily.serializer().serializeWithIndexes(cfamily, dob, false);
    writer.append(partitioner.decorateKey("rowA"), dob);
    dob.reset();
    cfamily.clear();

    // Add rowB
    cfamily.addColumn(
        new QueryPath("Standard1", null, "colB".getBytes()), "valB".getBytes(), 1, false);
    ColumnFamily.serializer().serializeWithIndexes(cfamily, dob, true);
    writer.append(partitioner.decorateKey("rowB"), dob);
    dob.reset();
    cfamily.clear();

    writer.closeAndOpenReader();

    // Enumerate and verify
    File temp = File.createTempFile("Standard1", ".txt");
    SSTableExport.enumeratekeys(writer.getFilename(), new PrintStream(temp.getPath()));

    FileReader file = new FileReader(temp);
    char[] buf = new char[(int) temp.length()];
    int len = file.read(buf);
    file.close();
    String output = new String(buf, 0, len);

    String sep = System.getProperty("line.separator");
    assert output.equals("rowA" + sep + "rowB" + sep) : output;
  }
Code Example #11
  // binary search is notoriously more difficult to get right than it looks; this is lifted from
  // Harmony's Collections implementation
  public int binarySearch(RowPosition key) {
    int low = 0, mid = keys.length, high = mid - 1, result = -1;

    while (low <= high) {
      mid = (low + high) >> 1;
      result = -partitioner.decorateKey(ByteBuffer.wrap(keys[mid])).compareTo(key);

      if (result > 0) {
        low = mid + 1;
      } else if (result == 0) {
        return mid;
      } else {
        high = mid - 1;
      }
    }

    return -mid - (result < 0 ? 1 : 2);
  }
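The miss case encodes the insertion point exactly as java.util.Arrays.binarySearch does: -mid - 1 when the key sorts before keys[mid], -mid - 2 when it sorts after, both equal to -(insertionPoint) - 1. A caller can decode it the standard way:

  int index = binarySearch(key);
  if (index < 0) {
    int insertionPoint = -(index + 1); // index of the first element greater than key
  }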
Code Example #12
File: MerkleTree.java Project: franczyk/cassandra
  private Hashable splitHelper(Hashable hashable, Token pleft, Token pright, byte depth, Token t)
      throws StopRecursion.TooDeep {
    if (depth >= hashdepth) throw new StopRecursion.TooDeep();

    if (hashable instanceof Leaf) {
      // split
      size++;
      Token midpoint = partitioner.midpoint(pleft, pright);
      return new Inner(midpoint, new Leaf(), new Leaf());
    }
    // else: node.

    // recurse on the matching child
    Inner node = (Inner) hashable;
    if (Range.contains(pleft, node.token, t))
      // left child contains token
      node.lchild(splitHelper(node.lchild, pleft, node.token, inc(depth), t));
    else
      // else: right child contains token
      node.rchild(splitHelper(node.rchild, node.token, pright, inc(depth), t));
    return node;
  }
Code Example #13
  @Test
  public void testDifference() {
    int maxsize = 16;
    mts = new MerkleTrees(partitioner);
    mts.addMerkleTree(32, fullRange());

    MerkleTrees mts2 = new MerkleTrees(partitioner);
    mts2.addMerkleTree(32, fullRange());

    mts.init();
    mts2.init();

    // add dummy hashes to both trees
    for (TreeRange range : mts.invalids()) range.addAll(new HIterator(range.right));
    for (TreeRange range : mts2.invalids()) range.addAll(new HIterator(range.right));

    TreeRange leftmost = null;
    TreeRange middle = null;

    mts.maxsize(fullRange(), maxsize + 2); // give some room for splitting

    // split the leftmost
    Iterator<TreeRange> ranges = mts.invalids();
    leftmost = ranges.next();
    mts.split(leftmost.right);

    // set the hashes for the leaf of the created split
    middle = mts.get(leftmost.right);
    middle.hash("arbitrary!".getBytes());
    mts.get(partitioner.midpoint(leftmost.left, leftmost.right))
        .hash("even more arbitrary!".getBytes());

    // trees should disagree for (leftmost.left, middle.right]
    List<Range<Token>> diffs = MerkleTrees.difference(mts, mts2);
    assertEquals(diffs + " contains wrong number of differences:", 1, diffs.size());
    assertTrue(diffs.contains(new Range<>(leftmost.left, middle.right)));
  }
Code Example #14
  public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();

    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = ConfigHelper.getInputKeyspace(context.getConfiguration());
    cfName = ConfigHelper.getInputColumnFamily(context.getConfiguration());
    partitioner = ConfigHelper.getInputPartitioner(context.getConfiguration());
    logger.debug("partitioner is " + partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
      List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
      KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
      Range<Token> jobRange = null;
      if (jobKeyRange != null) {
        if (jobKeyRange.start_key == null) {
          logger.warn("ignoring jobKeyRange specified without start_key");
        } else {
          if (!partitioner.preservesOrder())
            throw new UnsupportedOperationException(
                "KeyRange based on keys can only be used with an order-preserving partitioner");
          if (jobKeyRange.start_token != null)
            throw new IllegalArgumentException("only start_key supported");
          if (jobKeyRange.end_token != null)
            throw new IllegalArgumentException("only start_key supported");
          jobRange =
              new Range<Token>(
                  partitioner.getToken(jobKeyRange.start_key),
                  partitioner.getToken(jobKeyRange.end_key),
                  partitioner);
        }
      }

      for (TokenRange range : masterRangeNodes) {
        if (jobRange == null) {
          // for each range, pick a live owner and ask it to compute bite-sized splits
          splitfutures.add(executor.submit(new SplitCallable(range, conf)));
        } else {
          Range<Token> dhtRange =
              new Range<Token>(
                  partitioner.getTokenFactory().fromString(range.start_token),
                  partitioner.getTokenFactory().fromString(range.end_token),
                  partitioner);

          if (dhtRange.intersects(jobRange)) {
            for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
              range.start_token = partitioner.getTokenFactory().toString(intersection.left);
              range.end_token = partitioner.getTokenFactory().toString(intersection.right);
              // for each range, pick a live owner and ask it to compute bite-sized splits
              splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            }
          }
        }
      }

      // wait until we have all the results back
      for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
        try {
          splits.addAll(futureInputSplits.get());
        } catch (Exception e) {
          throw new IOException("Could not get input splits", e);
        }
      }
    } finally {
      executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
  }
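A hypothetical job configuration that would satisfy validateConfiguration above, using ConfigHelper setters assumed from this codebase's Hadoop support (host, port, and partitioner values are placeholders):

  Configuration conf = new Configuration();
  ConfigHelper.setInputInitialAddress(conf, "127.0.0.1");
  ConfigHelper.setInputRpcPort(conf, "9160");
  ConfigHelper.setInputColumnFamily(conf, "Keyspace1", "Standard1");
  ConfigHelper.setInputPartitioner(conf, "org.apache.cassandra.dht.RandomPartitioner");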
Code Example #15
 private void resolve(String key, byte[] buffer) {
   columnFamilies.put(partitioner.decorateKey(key), buffer);
   currentSize.addAndGet(buffer.length + key.length());
 }
Code Example #16
File: MerkleTree.java Project: franczyk/cassandra
 /** For testing purposes. Gets the smallest range containing the token. */
 TreeRange get(Token t) {
   Token mintoken = partitioner.getMinimumToken();
   return getHelper(root, mintoken, mintoken, (byte) 0, t);
 }
Code Example #17
File: MerkleTree.java Project: franczyk/cassandra
 /** Invalidates the ranges containing the given token. */
 public void invalidate(Token t) {
   invalidateHelper(root, partitioner.getMinimumToken(), t);
 }
Code Example #18
 public ByteBuffer execute(List<ByteBuffer> parameters) throws InvalidRequestException {
   ColumnNameBuilder builder = cfDef.getKeyNameBuilder();
   for (ByteBuffer bb : parameters) builder.add(bb);
   return partitioner.getTokenFactory().toByteArray(partitioner.getToken(builder.build()));
 }
Code Example #19
 public TokenFct(CFMetaData cfm) {
   super("token", partitioner.getTokenValidator(), getKeyTypes(cfm));
   this.cfDef = cfm.getCfDef();
 }
Code Example #20
 private Range<Token> fullRange() {
   return new Range<>(partitioner.getMinimumToken(), partitioner.getMinimumToken());
 }
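Because both endpoints are the minimum token, the range wraps around and covers the entire ring. A quick property check, assuming Range.contains(Token) from this codebase:

  Range<Token> all = fullRange();
  assert all.contains(partitioner.getRandomToken()); // (min, min] contains every token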