Example #1
0
  @Test
  public void testCompactionLog() throws Exception {
    SystemKeyspace.discardCompactionsInProgress();

    String columnFamily = "Standard4";
    ColumnFamilyStore store = Keyspace.open(KEYSPACE1).getColumnFamilyStore(columnFamily);
    SchemaLoader.insertData(KEYSPACE1, columnFamily, 0, 1);
    store.forceBlockingFlush();

    Collection<SSTableReader> readers = store.getSSTables();
    assertFalse(readers.isEmpty());

    // Collect the generation number of every live sstable.
    Set<Integer> expectedGenerations = new HashSet<>();
    for (SSTableReader reader : readers) {
      expectedGenerations.add(reader.descriptor.generation);
    }

    // Registering a compaction must surface all involved generations as "unfinished".
    UUID compactionId = SystemKeyspace.startCompaction(store, readers);
    Map<Pair<String, String>, Map<Integer, UUID>> unfinished =
        SystemKeyspace.getUnfinishedCompactions();
    Set<Integer> loggedGenerations =
        unfinished.get(Pair.create(KEYSPACE1, columnFamily)).keySet();
    assertTrue(loggedGenerations.containsAll(expectedGenerations));

    // Marking the compaction finished must remove its log entry entirely.
    SystemKeyspace.finishCompaction(compactionId);
    unfinished = SystemKeyspace.getUnfinishedCompactions();
    assertFalse(unfinished.containsKey(Pair.create(KEYSPACE1, columnFamily)));
  }
Example #2
0
  public void udtSerDeserTest(int version) throws Exception {
    // Collection types used by the UDT's last three fields.
    ListType<?> listType = ListType.getInstance(Int32Type.instance, true);
    SetType<?> setType = SetType.getInstance(UTF8Type.instance, true);
    MapType<?, ?> mapType = MapType.getInstance(UTF8Type.instance, LongType.instance, true);

    UserType userType =
        new UserType(
            "ks",
            bb("myType"),
            Arrays.asList(bb("f1"), bb("f2"), bb("f3"), bb("f4")),
            Arrays.asList(LongType.instance, listType, setType, mapType));

    // Literal values for each of the four UDT fields.
    Map<ColumnIdentifier, Term.Raw> fieldValues = new HashMap<>();
    fieldValues.put(ci("f1"), lit(42));
    fieldValues.put(ci("f2"), new Lists.Literal(Arrays.<Term.Raw>asList(lit(3), lit(1))));
    fieldValues.put(ci("f3"), new Sets.Literal(Arrays.<Term.Raw>asList(lit("foo"), lit("bar"))));
    fieldValues.put(
        ci("f4"),
        new Maps.Literal(
            Arrays.<Pair<Term.Raw, Term.Raw>>asList(
                Pair.<Term.Raw, Term.Raw>create(lit("foo"), lit(24)),
                Pair.<Term.Raw, Term.Raw>create(lit("bar"), lit(12)))));

    UserTypes.Literal literal = new UserTypes.Literal(fieldValues);
    Term term = literal.prepare("ks", columnSpec("myValue", userType));

    QueryOptions options = QueryOptions.DEFAULT;
    if (version == 2) {
      options =
          QueryOptions.fromProtocolV2(ConsistencyLevel.ONE, Collections.<ByteBuffer>emptyList());
    } else if (version != 3) {
      throw new AssertionError("Invalid protocol version for test");
    }

    ByteBuffer serialized = term.bindAndGet(options);
    ByteBuffer[] fields = userType.split(serialized);

    assertEquals(4, fields.length);
    assertEquals(bytes(42L), fields[0]);

    // No matter which protocol version bindAndGet used above, collections nested inside a UDT
    // are always serialized with protocol version 3 — hence the hard-coded 3 below instead of
    // 'version'.
    assertEquals(
        Arrays.asList(3, 1), listType.getSerializer().deserializeForNativeProtocol(fields[1], 3));

    LinkedHashSet<String> expectedSet = new LinkedHashSet<>(Arrays.asList("bar", "foo"));
    assertEquals(expectedSet, setType.getSerializer().deserializeForNativeProtocol(fields[2], 3));

    LinkedHashMap<String, Long> expectedMap = new LinkedHashMap<>();
    expectedMap.put("bar", 12L);
    expectedMap.put("foo", 24L);
    assertEquals(expectedMap, mapType.getSerializer().deserializeForNativeProtocol(fields[3], 3));
  }
 /**
  * Pairs each sstable with its minimum row timestamp.
  *
  * @param sstables the sstables to inspect
  * @return a list of (sstable, minTimestamp) pairs, in the iteration order of {@code sstables}
  */
 public static List<Pair<SSTableReader, Long>> createSSTableAndMinTimestampPairs(
     Iterable<SSTableReader> sstables) {
   List<Pair<SSTableReader, Long>> sstableMinTimestampPairs =
       Lists.newArrayListWithCapacity(Iterables.size(sstables));
   for (SSTableReader sstable : sstables)
     sstableMinTimestampPairs.add(Pair.create(sstable, sstable.getMinTimestamp()));
   return sstableMinTimestampPairs;
 }
 /**
  * Pairs each sstable with its on-disk length in bytes.
  *
  * @param collection the sstables to inspect
  * @return a list of (sstable, onDiskLength) pairs, in the iteration order of {@code collection}
  */
 private static List<Pair<SSTableReader, Long>> createSSTableAndLengthPairs(
     Collection<SSTableReader> collection) {
   // Presize to the known element count; diamond operator matches the file's existing style.
   List<Pair<SSTableReader, Long>> tableLengthPairs = new ArrayList<>(collection.size());
   for (SSTableReader table : collection)
     tableLengthPairs.add(Pair.create(table, table.onDiskLength()));
   return tableLengthPairs;
 }
Example #5
0
  public Pair<AbstractBounds<T>, AbstractBounds<T>> split(T position) {
    assert contains(position);

    // Splitting at the right edge would leave the range unchanged: signal "no split" instead.
    if (position.equals(right)) return null;

    AbstractBounds<T> lower = new Bounds<T>(left, position);
    AbstractBounds<T> upper = new Range<T>(position, right);
    return Pair.create(lower, upper);
  }
Example #6
0
  /**
   * Registers an individual ColumnFamily definition in the schema (to make ColumnFamily lookup
   * faster).
   *
   * @param cfm the ColumnFamily definition to register
   * @throws RuntimeException if a definition for the same keyspace/ColumnFamily is already loaded
   */
  public void load(CFMetaData cfm) {
    Pair<String, String> key = Pair.create(cfm.ksName, cfm.cfName);

    // Loading the same definition twice indicates a programming error upstream.
    if (cfIdMap.containsKey(key)) {
      throw new RuntimeException(
          String.format(
              "Attempting to load already loaded column family %s.%s", cfm.ksName, cfm.cfName));
    }

    logger.debug("Adding {} to cfIdMap", cfm);
    cfIdMap.put(key, cfm.cfId);
  }
Example #7
0
  /**
   * Records a new moving endpoint.
   *
   * @param token the token the node is moving to
   * @param endpoint the address of the moving node
   */
  public void addMovingEndpoint(Token token, InetAddress endpoint) {
    assert endpoint != null;

    // Guard the shared movingEndpoints collection with the write lock.
    lock.writeLock().lock();
    try {
      movingEndpoints.add(Pair.create(token, endpoint));
    } finally {
      lock.writeLock().unlock();
    }
  }
Example #8
0
    /** Records the current DC/rack assignment for {@code ep}. */
    protected void addEndpoint(InetAddress ep) {
      IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
      String newDc = snitch.getDatacenter(ep);
      String newRack = snitch.getRack(ep);

      Pair<String, String> previous = currentLocations.get(ep);
      if (previous != null) {
        // Same location as before: nothing to update.
        if (previous.left.equals(newDc) && previous.right.equals(newRack)) return;
        // Endpoint moved: drop the stale DC/rack entries before re-registering below.
        dcRacks.get(previous.left).remove(previous.right, ep);
        dcEndpoints.remove(previous.left, ep);
      }

      dcEndpoints.put(newDc, ep);

      if (!dcRacks.containsKey(newDc)) {
        dcRacks.put(newDc, HashMultimap.<String, InetAddress>create());
      }
      dcRacks.get(newDc).put(newRack, ep);

      currentLocations.put(ep, Pair.create(newDc, newRack));
    }
Example #9
0
  /**
   * Splits this filter into two SliceQueryFilters: one that slices only the static columns, and
   * one that slices the remainder of the normal data.
   *
   * <p>This should only be called when the filter is reversed and the filter is known to cover
   * static columns (through hasStaticSlice()).
   *
   * @return a pair of (static, normal) SliceQueryFilters
   */
  public Pair<SliceQueryFilter, SliceQueryFilter> splitOutStaticSlice(CFMetaData cfm) {
    assert reversed;

    Composite staticSliceEnd = cfm.comparator.staticPrefix().end();

    // Trim any slice overlapping the static prefix so the "normal" filter excludes statics.
    List<ColumnSlice> normalSlices = new ArrayList<>(slices.length);
    for (ColumnSlice slice : slices) {
      normalSlices.add(
          sliceIncludesStatics(slice, cfm)
              ? new ColumnSlice(slice.start, staticSliceEnd)
              : slice);
    }

    SliceQueryFilter staticFilter =
        new SliceQueryFilter(staticSliceEnd, Composites.EMPTY, true, count, compositesToGroup);
    SliceQueryFilter normalFilter =
        new SliceQueryFilter(
            normalSlices.toArray(new ColumnSlice[normalSlices.size()]),
            true,
            count,
            compositesToGroup);
    return Pair.create(staticFilter, normalFilter);
  }
  /**
   * Validates this DELETE statement against the schema and resolves the columns it touches.
   *
   * <p>Side effects: sets {@code type} (COUNTER vs LOGGED) and {@code cfDef}, fills {@code
   * boundNames} with the specification of each bind marker, and records each (column, key) pair
   * to delete in {@code toRemove}.
   *
   * @param boundNames output array, indexed by bind-marker position
   * @return the prepared statement wrapper
   * @throws InvalidRequestException if the column family is invalid, an identifier is unknown, a
   *     PRIMARY KEY part is listed for deletion, or an element key is given for a column that is
   *     neither a list nor a map
   */
  public ParsedStatement.Prepared prepare(ColumnSpecification[] boundNames)
      throws InvalidRequestException {
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace(), columnFamily());
    // Counter column families require a COUNTER batch type; everything else is LOGGED.
    type = metadata.getDefaultValidator().isCommutative() ? Type.COUNTER : Type.LOGGED;

    cfDef = metadata.getCfDef();
    // Resolves the WHERE clause key restrictions and binds any markers they contain.
    UpdateStatement.processKeys(cfDef, whereClause, processedKeys, boundNames);

    for (Selector column : columns) {
      CFDefinition.Name name = cfDef.get(column.id());
      if (name == null)
        throw new InvalidRequestException(String.format("Unknown identifier %s", column));

      // For compact, we only have one value except the key, so the only form of DELETE that make
      // sense is without a column
      // list. However, we support having the value name for coherence with the static/sparse case
      if (name.kind != CFDefinition.Name.Kind.COLUMN_METADATA
          && name.kind != CFDefinition.Name.Kind.VALUE_ALIAS)
        throw new InvalidRequestException(
            String.format(
                "Invalid identifier %s for deletion (should not be a PRIMARY KEY part)", column));

      // An element key (e.g. DELETE m['k']) only makes sense for lists (by index) and maps (by
      // key); bind markers used as the element key get their spec recorded here.
      if (column.key() != null) {
        if (name.type instanceof ListType) {
          if (column.key().isBindMarker())
            boundNames[column.key().bindIndex] = ListOperation.indexSpecOf(name);
        } else if (name.type instanceof MapType) {
          if (column.key().isBindMarker())
            boundNames[column.key().bindIndex] = MapOperation.keySpecOf(name, (MapType) name.type);
        } else {
          throw new InvalidRequestException(
              String.format(
                  "Invalid selection %s since %s is neither a list or a map", column, column.id()));
        }
      }

      toRemove.add(Pair.create(name, column.key()));
    }

    return new ParsedStatement.Prepared(this, Arrays.<ColumnSpecification>asList(boundNames));
  }
  @Test
  public void testGetNewNames() throws IOException {
    Descriptor desc =
        Descriptor.fromFilename(
            new File("Keyspace1", "Keyspace1-Standard1-ia-500-Data.db").toString());
    // assert !desc.isLatestVersion; // minimum compatible version -- for now it is the latest as
    // well
    PendingFile inContext =
        new PendingFile(
            null, desc, "Data.db", Arrays.asList(Pair.create(0L, 1L)), OperationType.BOOTSTRAP);

    PendingFile outContext = StreamIn.getContextMapping(inContext);

    // Filename and generation are expected to have changed. Use a JUnit assertion instead of a
    // bare 'assert' so the check still runs when the JVM is started without -ea.
    assertFalse(inContext.getFilename().equals(outContext.getFilename()));

    // nothing else should
    assertEquals(inContext.component, outContext.component);
    assertEquals(desc.ksname, outContext.desc.ksname);
    assertEquals(desc.cfname, outContext.desc.cfname);
    assertEquals(desc.version, outContext.desc.version);
  }
Example #12
0
 /**
  * Evicts a ColumnFamily definition from the schema.
  *
  * @param cfm the ColumnFamily definition to evict
  */
 public void purge(CFMetaData cfm) {
   Pair<String, String> key = Pair.create(cfm.ksName, cfm.cfName);
   cfIdMap.remove(key);
 }
Example #13
0
 /**
  * Looks up the identifier of a keyspace/ColumnFamily combination.
  *
  * @param ksName the keyspace name
  * @param cfName the ColumnFamily name
  * @return the id for the given (ksname,cfname) pair, or null if it has been dropped
  */
 public UUID getId(String ksName, String cfName) {
   Pair<String, String> key = Pair.create(ksName, cfName);
   return cfIdMap.get(key);
 }
 public Pair<AbstractBounds<T>, AbstractBounds<T>> split(T position) {
   assert contains(position);
   // The lower half keeps the split position (Bounds is inclusive on the right); the upper half
   // excludes it (ExcludingBounds is exclusive on the left).
   AbstractBounds<T> lower = new Bounds<T>(left, position, partitioner);
   AbstractBounds<T> upper = new ExcludingBounds<T>(position, right, partitioner);
   return Pair.create(lower, upper);
 }