/**
 * Validates a client-supplied KeyRange. A legal range is expressed either as a
 * pair of row keys or as a pair of tokens — never both, never a mix — must be
 * ordered correctly for the configured partitioner, and must request a positive
 * row count.
 *
 * @param range the key range to validate
 * @throws InvalidRequestException if the range is malformed
 */
public static void validateKeyRange(KeyRange range) throws InvalidRequestException {
    boolean hasStartKey = range.start_key != null;
    boolean hasEndKey = range.end_key != null;
    boolean hasStartToken = range.start_token != null;
    boolean hasEndToken = range.end_token != null;

    if (hasStartKey != hasEndKey) {
        throw new InvalidRequestException(
                "start key and end key must either both be non-null, or both be null");
    }
    if (hasStartToken != hasEndToken) {
        throw new InvalidRequestException(
                "start token and end token must either both be non-null, or both be null");
    }
    // Exactly one addressing mode (keys vs. tokens) must be in use.
    if (hasStartKey == hasStartToken) {
        throw new InvalidRequestException(
                "exactly one of {start key, end key} or {start token, end token} must be specified");
    }

    if (hasStartKey) {
        IPartitioner p = StorageService.getPartitioner();
        Token left = p.getToken(range.start_key);
        Token right = p.getToken(range.end_key);
        // A start token sorting after the end token is only legal when the end
        // wraps around to the partitioner's minimum token.
        if (left.compareTo(right) > 0 && !right.equals(p.getMinimumToken())) {
            if (p instanceof RandomPartitioner) {
                throw new InvalidRequestException(
                        "start key's md5 sorts after end key's md5. this is not allowed; you probably should not specify end key at all, under RandomPartitioner");
            }
            throw new InvalidRequestException(
                    "start key must sort before (or equal to) finish key in your partitioner!");
        }
    }

    if (range.count <= 0) {
        throw new InvalidRequestException("maxRows must be positive");
    }
}
/**
 * Returns the TokenRange from {@code rangeMap} that contains the token of the
 * given row key.
 *
 * @param key the row key to locate
 * @return the owning token range
 * @throws RuntimeException if no range in {@code rangeMap} contains the key's
 *         token (i.e. the ring description is incomplete)
 */
public TokenRange getRange(ByteBuffer key) {
    // Convert the Cassandra-side token into the driver's token representation
    // via its canonical string form.
    String tokenString = partitioner.getTokenFactory().toString(partitioner.getToken(key));
    com.datastax.driver.core.Token driverToken = metadata.newToken(tokenString);

    for (TokenRange candidate : rangeMap.keySet()) {
        if (candidate.contains(driverToken)) {
            return candidate;
        }
    }
    throw new RuntimeException(
            "Invalid token information returned by describe_ring: " + rangeMap);
}
/**
 * Assembles a row key from the given key-component values and returns its
 * partitioner token serialized to bytes.
 *
 * @param parameters the key components, in key order
 * @return the serialized token of the assembled key
 * @throws InvalidRequestException declared for interface compatibility
 */
public ByteBuffer execute(List<ByteBuffer> parameters) throws InvalidRequestException {
    ColumnNameBuilder keyBuilder = cfDef.getKeyNameBuilder();
    for (ByteBuffer component : parameters) {
        keyBuilder.add(component);
    }
    Token token = partitioner.getToken(keyBuilder.build());
    return partitioner.getTokenFactory().toByteArray(token);
}
public List<InputSplit> getSplits(JobContext context) throws IOException { Configuration conf = context.getConfiguration(); validateConfiguration(conf); // cannonical ranges and nodes holding replicas List<TokenRange> masterRangeNodes = getRangeMap(conf); keyspace = ConfigHelper.getInputKeyspace(context.getConfiguration()); cfName = ConfigHelper.getInputColumnFamily(context.getConfiguration()); partitioner = ConfigHelper.getInputPartitioner(context.getConfiguration()); logger.debug("partitioner is " + partitioner); // cannonical ranges, split into pieces, fetching the splits in parallel ExecutorService executor = Executors.newCachedThreadPool(); List<InputSplit> splits = new ArrayList<InputSplit>(); try { List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>(); KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf); Range<Token> jobRange = null; if (jobKeyRange != null) { if (jobKeyRange.start_key == null) { logger.warn("ignoring jobKeyRange specified without start_key"); } else { if (!partitioner.preservesOrder()) throw new UnsupportedOperationException( "KeyRange based on keys can only be used with a order preserving paritioner"); if (jobKeyRange.start_token != null) throw new IllegalArgumentException("only start_key supported"); if (jobKeyRange.end_token != null) throw new IllegalArgumentException("only start_key supported"); jobRange = new Range<Token>( partitioner.getToken(jobKeyRange.start_key), partitioner.getToken(jobKeyRange.end_key), partitioner); } } for (TokenRange range : masterRangeNodes) { if (jobRange == null) { // for each range, pick a live owner and ask it to compute bite-sized splits splitfutures.add(executor.submit(new SplitCallable(range, conf))); } else { Range<Token> dhtRange = new Range<Token>( partitioner.getTokenFactory().fromString(range.start_token), partitioner.getTokenFactory().fromString(range.end_token), partitioner); if (dhtRange.intersects(jobRange)) { for (Range<Token> intersection : 
dhtRange.intersectionWith(jobRange)) { range.start_token = partitioner.getTokenFactory().toString(intersection.left); range.end_token = partitioner.getTokenFactory().toString(intersection.right); // for each range, pick a live owner and ask it to compute bite-sized splits splitfutures.add(executor.submit(new SplitCallable(range, conf))); } } } } // wait until we have all the results back for (Future<List<InputSplit>> futureInputSplits : splitfutures) { try { splits.addAll(futureInputSplits.get()); } catch (Exception e) { throw new IOException("Could not get input splits", e); } } } finally { executor.shutdownNow(); } assert splits.size() > 0; Collections.shuffle(splits, new Random(System.nanoTime())); return splits; }