@Test
  public void testRecoverCounter() throws IOException, ExecutionException, InterruptedException {
    Table table1 = Table.open("Keyspace1");

    RowMutation rm;
    DecoratedKey dk = Util.dk("key");
    ColumnFamily cf;

    for (int i = 0; i < 10; ++i) {
      rm = new RowMutation("Keyspace1", dk.key);
      cf = ColumnFamily.create("Keyspace1", "Counter1");
      cf.addColumn(new CounterColumn(ByteBufferUtil.bytes("col"), 1L, 1L));
      rm.add(cf);
      rm.apply();
    }

    table1.getColumnFamilyStore("Counter1").clearUnsafe();

    CommitLog.instance.resetUnsafe(); // disassociate segments from live CL
    CommitLog.instance.recover();

    cf = Util.getColumnFamily(table1, dk, "Counter1");

    assert cf.getColumnCount() == 1;
    Column c = cf.getColumn(ByteBufferUtil.bytes("col"));

    assert c != null;
    assert ((CounterColumn) c).total() == 10L;
  }
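  // Verifies that truncating all hints removes the single stored hint.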
  @Test(timeout = 5000)
  public void testTruncateHints() throws Exception {
    Keyspace systemKeyspace = Keyspace.open("system");
    ColumnFamilyStore hintStore = systemKeyspace.getColumnFamilyStore(SystemKeyspace.HINTS_CF);
    hintStore.clearUnsafe();

    // insert 1 hint
    RowMutation rm = new RowMutation(KEYSPACE4, ByteBufferUtil.bytes(1));
    rm.add(
        STANDARD1_CF,
        ByteBufferUtil.bytes(String.valueOf(COLUMN1)),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        System.currentTimeMillis());

    HintedHandOffManager.instance
        .hintFor(
            rm,
            System.currentTimeMillis(),
            HintedHandOffManager.calculateHintTTL(rm),
            UUID.randomUUID())
        .apply();

    assert getNoOfHints() == 1;

    HintedHandOffManager.instance.truncateAllHints();

    while (getNoOfHints() > 0) {
      Thread.sleep(100);
    }

    assert getNoOfHints() == 0;
  }
  // Returns every permission on the resource granted to the user.
  public Set<Permission> authorize(AuthenticatedUser user, IResource resource) {
    if (user.isSuper()) return Permission.ALL;

    UntypedResultSet result;
    try {
      ResultMessage.Rows rows =
          authorizeStatement.execute(
              QueryState.forInternalCalls(),
              new QueryOptions(
                  ConsistencyLevel.ONE,
                  Lists.newArrayList(
                      ByteBufferUtil.bytes(user.getName()),
                      ByteBufferUtil.bytes(resource.getName()))));
      result = UntypedResultSet.create(rows.result);
    } catch (RequestValidationException e) {
      throw new AssertionError(e); // not supposed to happen
    } catch (RequestExecutionException e) {
      logger.warn("CassandraAuthorizer failed to authorize {} for {}", user, resource);
      return Permission.NONE;
    }

    if (result.isEmpty() || !result.one().has(PERMISSIONS)) return Permission.NONE;

    Set<Permission> permissions = EnumSet.noneOf(Permission.class);
    for (String perm : result.one().getSet(PERMISSIONS, UTF8Type.instance))
      permissions.add(Permission.valueOf(perm));
    return permissions;
  }
  // Tests compaction of the hints column family: compaction shouldn't remove all of its columns.
  @Test
  public void testCompactionOfHintsCF() throws Exception {
    // prepare hints column family
    Table systemTable = Table.open("system");
    ColumnFamilyStore hintStore = systemTable.getColumnFamilyStore(SystemTable.HINTS_CF);
    hintStore.clearUnsafe();
    hintStore.metadata.gcGraceSeconds(36000); // 10 hours
    hintStore.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getCanonicalName());
    hintStore.disableAutoCompaction();

    // insert 1 hint
    RowMutation rm = new RowMutation(TABLE4, ByteBufferUtil.bytes(1));
    rm.add(
        new QueryPath(STANDARD1_CF, null, ByteBufferUtil.bytes(String.valueOf(COLUMN1))),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        System.currentTimeMillis());

    RowMutation.hintFor(rm, UUID.randomUUID()).apply();

    // flush data to disk
    hintStore.forceBlockingFlush();
    assertEquals(1, hintStore.getSSTables().size());

    // submit compaction
    FBUtilities.waitOnFuture(HintedHandOffManager.instance.compact());
    while (CompactionManager.instance.getPendingTasks() > 0
        || CompactionManager.instance.getActiveCompactions() > 0) TimeUnit.SECONDS.sleep(1);

    // the single row should not be removed, because gc_grace_seconds
    // is 10 hours and there are no tombstones in the sstable
    assertEquals(1, hintStore.getSSTables().size());
  }
  /**
   * Tests altering the type of a column, including one in the primary key (#4041). Migrated from
   * cql_tests.py:TestCQL.update_type_test().
   */
  @Test
  public void testUpdateColumnType() throws Throwable {
    createTable("CREATE TABLE %s (k text, c text, s set <text>, v text, PRIMARY KEY(k, c))");

    // using a UTF-8 character so that we can see the transition to BytesType
    execute("INSERT INTO %s (k, c, v, s) VALUES ('ɸ', 'ɸ', 'ɸ', {'ɸ'})");

    assertRows(execute("SELECT * FROM %s"), row("ɸ", "ɸ", set("ɸ"), "ɸ"));

    execute("ALTER TABLE %s ALTER v TYPE blob");
    assertRows(execute("SELECT * FROM %s"), row("ɸ", "ɸ", set("ɸ"), ByteBufferUtil.bytes("ɸ")));

    execute("ALTER TABLE %s ALTER k TYPE blob");
    assertRows(
        execute("SELECT * FROM %s"),
        row(ByteBufferUtil.bytes("ɸ"), "ɸ", set("ɸ"), ByteBufferUtil.bytes("ɸ")));

    execute("ALTER TABLE %s ALTER c TYPE blob");
    assertRows(
        execute("SELECT * FROM %s"),
        row(
            ByteBufferUtil.bytes("ɸ"),
            ByteBufferUtil.bytes("ɸ"),
            set("ɸ"),
            ByteBufferUtil.bytes("ɸ")));

    execute("ALTER TABLE %s ALTER s TYPE set<blob>");
    assertRows(
        execute("SELECT * FROM %s"),
        row(
            ByteBufferUtil.bytes("ɸ"),
            ByteBufferUtil.bytes("ɸ"),
            set(ByteBufferUtil.bytes("ɸ")),
            ByteBufferUtil.bytes("ɸ")));
  }
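  // Writes each field of the JSON object as a column of the given row via batch_mutate and,
  // when enabled, indexes the document.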
  @PooledConnection
  public void setColumn(
      String keyspace,
      String column_family,
      String key,
      JSONObject json,
      ConsistencyLevel consistency_level,
      boolean index,
      long timestamp)
      throws InvalidRequestException, UnavailableException, TimedOutException, TException,
          HttpException, IOException {
    List<Mutation> slice = new ArrayList<Mutation>();
    for (Object field : json.keySet()) {
      String name = (String) field;
      String value = (String) json.get(name);
      Column c = new Column();
      c.setName(ByteBufferUtil.bytes(name));
      c.setValue(ByteBufferUtil.bytes(value));
      c.setTimestamp(timestamp);

      Mutation m = new Mutation();
      ColumnOrSuperColumn cc = new ColumnOrSuperColumn();
      cc.setColumn(c);
      m.setColumn_or_supercolumn(cc);
      slice.add(m);
    }
    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
        new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    Map<String, List<Mutation>> cfMutations = new HashMap<String, List<Mutation>>();
    cfMutations.put(column_family, slice);
    mutationMap.put(ByteBufferUtil.bytes(key), cfMutations);
    getConnection(keyspace).batch_mutate(mutationMap, consistency_level);

    if (config.isIndexingEnabled() && index) indexer.index(column_family, key, json);
  }
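  // Removes a single column from the row via the Thrift remove() call and, when index purging is
  // requested, re-indexes the document without that field.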
  @PooledConnection
  public void deleteColumn(
      String keyspace,
      String column_family,
      String key,
      String column,
      ConsistencyLevel consistency_level,
      boolean purgeIndex)
      throws InvalidRequestException, UnavailableException, TimedOutException, TException,
          HttpException, IOException {
    ColumnPath path = new ColumnPath(column_family);
    path.setColumn(ByteBufferUtil.bytes(column));
    getConnection(keyspace)
        .remove(
            ByteBufferUtil.bytes(key), path, System.currentTimeMillis() * 1000, consistency_level);

    // TODO: Revisit deleting a single field, because it requires a fetch first.
    // Evidently it is impossible to remove just a field from a document in Solr:
    // http://stackoverflow.com/questions/4802620/can-you-delete-a-field-from-a-document-in-solr-index
    if (config.isIndexingEnabled() && purgeIndex) {
      indexer.delete(column_family, key);
      JSONObject json = this.getSlice(keyspace, column_family, key, consistency_level);
      json.remove(column);
      indexer.index(column_family, key, json);
    }
  }
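  // Deletes a row after it has been flushed and checks that it is marked for delete and that its
  // columns are no longer visible.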
  @Test
  public void testRemoveColumnFamilyWithFlush1() {
    Keyspace keyspace = Keyspace.open("Keyspace1");
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
    Mutation rm;
    DecoratedKey dk = Util.dk("key1");

    // add data
    rm = new Mutation("Keyspace1", dk.key);
    rm.add("Standard1", Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
    rm.add("Standard1", Util.cellname("Column2"), ByteBufferUtil.bytes("asdf"), 0);
    rm.apply();
    store.forceBlockingFlush();

    // remove
    rm = new Mutation("Keyspace1", dk.key);
    rm.delete("Standard1", 1);
    rm.apply();

    ColumnFamily retrieved =
        store.getColumnFamily(
            QueryFilter.getIdentityFilter(dk, "Standard1", System.currentTimeMillis()));
    assert retrieved.isMarkedForDelete();
    assertNull(retrieved.getColumn(Util.cellname("Column1")));
    assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE));
  }
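  // Builds an IndexClause from the query's equality predicates, runs get_indexed_slices, and
  // marshals the matching rows to JSON.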
  @PooledConnection
  public JSONArray getRowsWithQuery(
      String keyspace, String columnFamily, String queryStr, ConsistencyLevel consistencyLevel)
      throws InvalidRequestException, UnavailableException, TimedOutException, TException,
          CharacterCodingException {
    Query query = QueryParser.parse(queryStr);
    SlicePredicate predicate = new SlicePredicate();
    SliceRange range =
        new SliceRange(ByteBufferUtil.bytes(""), ByteBufferUtil.bytes(""), false, MAX_COLUMNS);
    predicate.setSlice_range(range);

    ColumnParent parent = new ColumnParent(columnFamily);

    IndexClause indexClause = new IndexClause();
    indexClause.setCount(MAX_ROWS);
    indexClause.setStart_key(new byte[0]);
    for (String keyName : query.getEqStmt().keySet()) {
      indexClause.addToExpressions(
          new IndexExpression(
              ByteBufferUtil.bytes(keyName),
              IndexOperator.EQ,
              ByteBufferUtil.bytes(query.getEqStmt().get(keyName))));
    }

    List<KeySlice> rows =
        getConnection(keyspace)
            .get_indexed_slices(parent, indexClause, predicate, consistencyLevel);
    return JsonMarshaller.marshallRows(rows, true);
  }
  @Test
  public void testEchoedRow() throws IOException, ExecutionException, InterruptedException {
    // This test checks that EchoedRow doesn't skip rows: see CASSANDRA-2653

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    // Insert 4 keys in two sstables. The sstables need to have at least 2 rows
    // each to trigger what was causing CASSANDRA-2653
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      RowMutation rm = new RowMutation(KEYSPACE1, key.key);
      rm.add(
          "Standard2",
          ByteBufferUtil.bytes(String.valueOf(i)),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          i);
      rm.apply();

      if (i % 2 == 0) cfs.forceBlockingFlush();
    }
    Collection<SSTableReader> toCompact = cfs.getSSTables();
    assert toCompact.size() == 2;

    // Reinsert the same keys. We will compact only the previous sstables, but we need these new
    // ones to make sure EchoedRow is used; otherwise it won't be, because purging can be done.
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      RowMutation rm = new RowMutation(KEYSPACE1, key.key);
      rm.add(
          "Standard2",
          ByteBufferUtil.bytes(String.valueOf(i)),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          i);
      rm.apply();
    }
    cfs.forceBlockingFlush();
    SSTableReader tmpSSTable = null;
    for (SSTableReader sstable : cfs.getSSTables())
      if (!toCompact.contains(sstable)) tmpSSTable = sstable;
    assert tmpSSTable != null;

    // Force compaction of the first sstables. Since each row is in only one of them, EchoedRow
    // will be used.
    Util.compact(cfs, toCompact);
    assertEquals(2, cfs.getSSTables().size());

    // Now, we remove the sstable that was just created to force the use of EchoedRow (so that it
    // doesn't hide the problem)
    cfs.markObsolete(Collections.singleton(tmpSSTable), OperationType.UNKNOWN);
    assertEquals(1, cfs.getSSTables().size());

    // Now assert we do have the 4 keys
    assertEquals(4, Util.getRangeSlice(cfs).size());
  }
  @Test
  public void simpleQueryWithRangeTombstoneTest() throws Exception {
    Table table = Table.open(KSNAME);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(CFNAME);

    // Inserting data
    String key = "k1";
    RowMutation rm;
    ColumnFamily cf;

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    for (int i = 0; i < 40; i += 2) add(rm, i, 0);
    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    cf = rm.addOrGet(CFNAME);
    delete(cf, 10, 22, 1);
    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    for (int i = 1; i < 40; i += 2) add(rm, i, 2);
    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    cf = rm.addOrGet(CFNAME);
    delete(cf, 19, 27, 3);
    rm.apply();
    // We don't flush: that way we test a range tombstone both in the memtable and in an sstable

    QueryPath path = new QueryPath(CFNAME);

    // Queries by name
    int[] live = new int[] {4, 9, 11, 17, 28};
    int[] dead = new int[] {12, 19, 21, 24, 27};
    SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(cfs.getComparator());
    for (int i : live) columns.add(b(i));
    for (int i : dead) columns.add(b(i));
    cf = cfs.getColumnFamily(QueryFilter.getNamesFilter(dk(key), path, columns));

    for (int i : live) assert isLive(cf, cf.getColumn(b(i))) : "Column " + i + " should be live";
    for (int i : dead)
      assert !isLive(cf, cf.getColumn(b(i))) : "Column " + i + " shouldn't be live";

    // Queries by slices
    cf =
        cfs.getColumnFamily(
            QueryFilter.getSliceFilter(dk(key), path, b(7), b(30), false, Integer.MAX_VALUE));

    for (int i : new int[] {7, 8, 9, 11, 13, 15, 17, 28, 29, 30})
      assert isLive(cf, cf.getColumn(b(i))) : "Column " + i + " should be live";
    for (int i : new int[] {10, 12, 14, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27})
      assert !isLive(cf, cf.getColumn(b(i))) : "Column " + i + " shouldn't be live";
  }
  @Test
  public void overlappingRangeTest() throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(KSNAME);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(CFNAME);

    // Inserting data
    String key = "k2";
    RowMutation rm;
    ColumnFamily cf;

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    for (int i = 0; i < 20; i++) add(rm, i, 0);
    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    cf = rm.addOrGet(CFNAME);
    delete(cf, 5, 15, 1);
    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    cf = rm.addOrGet(CFNAME);
    delete(cf, 5, 10, 1);
    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
    cf = rm.addOrGet(CFNAME);
    delete(cf, 5, 8, 2);
    rm.apply();
    cfs.forceBlockingFlush();

    QueryPath path = new QueryPath(CFNAME);
    cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(dk(key), path));

    for (int i = 0; i < 5; i++)
      assert isLive(cf, cf.getColumn(b(i))) : "Column " + i + " should be live";
    for (int i = 16; i < 20; i++)
      assert isLive(cf, cf.getColumn(b(i))) : "Column " + i + " should be live";
    for (int i = 5; i <= 15; i++)
      assert !isLive(cf, cf.getColumn(b(i))) : "Column " + i + " shouldn't be live";

    // Compact everything and re-test
    CompactionManager.instance.performMaximal(cfs);
    cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(dk(key), path));

    for (int i = 0; i < 5; i++)
      assert isLive(cf, cf.getColumn(b(i))) : "Column " + i + " should be live";
    for (int i = 16; i < 20; i++)
      assert isLive(cf, cf.getColumn(b(i))) : "Column " + i + " should be live";
    for (int i = 5; i <= 15; i++)
      assert !isLive(cf, cf.getColumn(b(i))) : "Column " + i + " shouldn't be live";
  }
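  // Shared test fixtures: sample standard and super column families, named column sets, and rows.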
  private static class Statics {
    private final String KS = "Keyspace1";
    private final ByteBuffer Key = ByteBufferUtil.bytes("Key01");
    private final SortedSet<ByteBuffer> NamedCols =
        new TreeSet<ByteBuffer>(BytesType.instance) {
          {
            add(ByteBufferUtil.bytes("AAA"));
            add(ByteBufferUtil.bytes("BBB"));
            add(ByteBufferUtil.bytes("CCC"));
          }
        };
    private final ByteBuffer SC = ByteBufferUtil.bytes("SCName");
    private final SortedSet<ByteBuffer> NamedSCCols =
        new TreeSet<ByteBuffer>(BytesType.instance) {
          {
            add(CompositeType.build(SC, ByteBufferUtil.bytes("AAA")));
            add(CompositeType.build(SC, ByteBufferUtil.bytes("BBB")));
            add(CompositeType.build(SC, ByteBufferUtil.bytes("CCC")));
          }
        };
    private final String StandardCF = "Standard1";
    private final String SuperCF = "Super1";

    private final long readTs = 1369935512292L;

    private final ColumnFamily StandardCf =
        TreeMapBackedSortedColumns.factory.create(KS, StandardCF);
    private final ColumnFamily SuperCf = TreeMapBackedSortedColumns.factory.create(KS, SuperCF);

    private final Row StandardRow = new Row(Util.dk("key0"), StandardCf);
    private final Row SuperRow = new Row(Util.dk("key1"), SuperCf);
    private final Row NullRow = new Row(Util.dk("key2"), null);

    private Statics() {
      StandardCf.addColumn(new Column(bb("aaaa")));
      StandardCf.addColumn(new Column(bb("bbbb"), bb("bbbbb-value")));
      StandardCf.addColumn(new Column(bb("cccc"), bb("ccccc-value"), 1000L));
      StandardCf.addColumn(new DeletedColumn(bb("dddd"), 500, 1000));
      StandardCf.addColumn(new DeletedColumn(bb("eeee"), bb("eeee-value"), 1001));
      StandardCf.addColumn(new ExpiringColumn(bb("ffff"), bb("ffff-value"), 2000, 1000));
      StandardCf.addColumn(new ExpiringColumn(bb("gggg"), bb("gggg-value"), 2001, 1000, 2002));

      SuperCf.addColumn(new Column(CompositeType.build(SC, bb("aaaa"))));
      SuperCf.addColumn(new Column(CompositeType.build(SC, bb("bbbb")), bb("bbbbb-value")));
      SuperCf.addColumn(new Column(CompositeType.build(SC, bb("cccc")), bb("ccccc-value"), 1000L));
      SuperCf.addColumn(new DeletedColumn(CompositeType.build(SC, bb("dddd")), 500, 1000));
      SuperCf.addColumn(
          new DeletedColumn(CompositeType.build(SC, bb("eeee")), bb("eeee-value"), 1001));
      SuperCf.addColumn(
          new ExpiringColumn(CompositeType.build(SC, bb("ffff")), bb("ffff-value"), 2000, 1000));
      SuperCf.addColumn(
          new ExpiringColumn(
              CompositeType.build(SC, bb("gggg")), bb("gggg-value"), 2001, 1000, 2002));
    }
  }
 @Test
 public void testBytes() {
   BytesType comparator = new BytesType();
   assert comparator.compare(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.bytes("asdf")) < 0;
   assert comparator.compare(ByteBufferUtil.bytes("asdf"), ByteBufferUtil.EMPTY_BYTE_BUFFER) > 0;
   assert comparator.compare(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER)
       == 0;
   assert comparator.compare(ByteBufferUtil.bytes("z"), ByteBufferUtil.bytes("a")) > 0;
   assert comparator.compare(ByteBufferUtil.bytes("a"), ByteBufferUtil.bytes("z")) < 0;
   assert comparator.compare(ByteBufferUtil.bytes("asdf"), ByteBufferUtil.bytes("asdf")) == 0;
   assert comparator.compare(ByteBufferUtil.bytes("asdz"), ByteBufferUtil.bytes("asdf")) > 0;
 }
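 // Adds a subcolumn (named by a long) under the given super column to the RowMutation.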
 public static void addMutation(
     RowMutation rm,
     String columnFamilyName,
     String superColumnName,
     long columnName,
     String value,
     long timestamp) {
   rm.add(
       new QueryPath(
           columnFamilyName, ByteBufferUtil.bytes(superColumnName), getBytes(columnName)),
       ByteBufferUtil.bytes(value),
       timestamp);
 }
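 // Same as above for the CellName-based API: builds a simple dense name, or a composite dense
 // name when a super column name is given.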
 public static void addMutation(
     Mutation rm,
     String columnFamilyName,
     String superColumnName,
     long columnName,
     String value,
     long timestamp) {
   CellName cname =
       superColumnName == null
           ? CellNames.simpleDense(getBytes(columnName))
           : CellNames.compositeDense(ByteBufferUtil.bytes(superColumnName), getBytes(columnName));
   rm.add(columnFamilyName, cname, ByteBufferUtil.bytes(value), timestamp);
 }
  private void testDontPurgeAccidentaly(String k, String cfname)
      throws IOException, ExecutionException, InterruptedException {
    // This test catches the regression of CASSANDRA-2786
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);

    // disable compaction while flushing
    cfs.clearUnsafe();
    cfs.disableAutoCompaction();

    // Add test row
    DecoratedKey key = Util.dk(k);
    RowMutation rm = new RowMutation(KEYSPACE1, key.key);
    rm.add(
        cfname,
        CompositeType.build(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        0);
    rm.apply();

    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesBefore = cfs.getSSTables();

    QueryFilter filter = QueryFilter.getIdentityFilter(key, cfname, System.currentTimeMillis());
    assert !(cfs.getColumnFamily(filter).getColumnCount() == 0);

    // Remove key
    rm = new RowMutation(KEYSPACE1, key.key);
    rm.delete(cfname, 2);
    rm.apply();

    ColumnFamily cf = cfs.getColumnFamily(filter);
    assert cf == null || cf.getColumnCount() == 0 : "should be empty: " + cf;

    // Sleep one second so that the removal is indeed purgeable even with gcgrace == 0
    Thread.sleep(1000);

    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesAfter = cfs.getSSTables();
    Collection<SSTableReader> toCompact = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstablesAfter)
      if (!sstablesBefore.contains(sstable)) toCompact.add(sstable);

    Util.compact(cfs, toCompact);

    cf = cfs.getColumnFamily(filter);
    assert cf == null || cf.getColumnCount() == 0 : "should be empty: " + cf;
  }
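  // Builds a Thrift Mutation that sets a single column to the given value, timestamped with the
  // current time.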
  private static Mutation getMutation(String colName, String value) {

    Column c = new Column();

    c.setName(ByteBufferUtil.bytes(colName));

    c.setValue(ByteBufferUtil.bytes(value));
    c.setTimestamp(System.currentTimeMillis());

    Mutation m = new Mutation();
    m.setColumn_or_supercolumn(new ColumnOrSuperColumn());
    m.column_or_supercolumn.setColumn(c);
    return m;
  }
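    // Hadoop reducer: turns delimited input lines into super-column updates and sends the
    // serialized mutation directly to the row's natural endpoints.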
    public void reduce(
        Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter)
        throws IOException {
      ColumnFamily columnFamily;
      String keyspace = "Keyspace1";
      String cfName = "Super1";
      Message message;
      List<ColumnFamily> columnFamilies;
      columnFamilies = new LinkedList<ColumnFamily>();
      String line;

      /* Create a column family */
      columnFamily = ColumnFamily.create(keyspace, cfName);
      while (values.hasNext()) {
        // Split the value (a line) based on your own delimiter
        line = values.next().toString();
        String[] fields = line.split("\1");
        String SuperColumnName = fields[1];
        String ColumnName = fields[2];
        String ColumnValue = fields[3];
        int timestamp = 0;
        columnFamily.addColumn(
            new QueryPath(
                cfName, ByteBufferUtil.bytes(SuperColumnName), ByteBufferUtil.bytes(ColumnName)),
            ByteBufferUtil.bytes(ColumnValue),
            timestamp);
      }

      columnFamilies.add(columnFamily);

      /* Get serialized message to send to cluster */
      message = createMessage(keyspace, key.getBytes(), cfName, columnFamilies);
      List<IAsyncResult> results = new ArrayList<IAsyncResult>();
      for (InetAddress endpoint :
          StorageService.instance.getNaturalEndpoints(
              keyspace, ByteBufferUtil.bytes(key.toString()))) {
        /* Send message to end point */
        results.add(MessagingService.instance().sendRR(message, endpoint));
      }
      /* wait for acks */
      for (IAsyncResult result : results) {
        try {
          result.get(DatabaseDescriptor.getRpcTimeout(), TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
          // you should probably add retry logic here
          throw new RuntimeException(e);
        }
      }

      output.collect(key, new Text(" inserted into Cassandra node(s)"));
    }
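 // Reads a single column through the Thrift get() call and returns its value as a UTF-8 string.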
 @PooledConnection
 public String getColumn(
     String keyspace,
     String columnFamily,
     String key,
     String column,
     ConsistencyLevel consistencyLevel)
     throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException,
         TException, UnsupportedEncodingException {
   ColumnPath path = new ColumnPath(columnFamily);
   path.setColumn(ByteBufferUtil.bytes(column));
   ColumnOrSuperColumn column_result =
       getConnection(keyspace).get(ByteBufferUtil.bytes(key), path, consistencyLevel);
   return new String(column_result.getColumn().getValue(), "UTF8");
 }
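  // Applies cache-store modifications: stores and removes are collected into one batch_mutate
  // call, while clears are applied immediately.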
  @Override
  protected void applyModifications(List<? extends Modification> mods) throws CacheLoaderException {
    Cassandra.Client cassandraClient = null;

    try {
      cassandraClient = dataSource.getConnection();
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
          new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

      for (Modification m : mods) {
        switch (m.getType()) {
          case STORE:
            store0(((Store) m).getStoredEntry(), mutationMap);
            break;
          case CLEAR:
            clear();
            break;
          case REMOVE:
            remove0(ByteBufferUtil.bytes(hashKey(((Remove) m).getKey())), mutationMap);
            break;
          default:
            throw new AssertionError();
        }
      }

      cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel);

    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }
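  // A column value larger than 64k passes row-index validation but fails column-index
  // validation; a small value passes both.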
  @Test
  public void test64kColumn() {
    // a byte buffer of more than 64k
    ByteBuffer buffer = ByteBuffer.allocate(1024 * 65);
    buffer.clear();

    // fill the buffer with more than 64k of data
    for (int i = 0; i < 1024 * 64 / 4 + 1; i++) buffer.putInt(0);

    // flip the buffer for reading
    buffer.flip();
    Column column = new Column(ByteBufferUtil.bytes("test"), buffer, 0);

    SecondaryIndexColumnSizeTest.MockRowIndex mockRowIndex =
        new SecondaryIndexColumnSizeTest.MockRowIndex();
    SecondaryIndexColumnSizeTest.MockColumnIndex mockColumnIndex =
        new SecondaryIndexColumnSizeTest.MockColumnIndex();

    assertTrue(mockRowIndex.validate(column));
    assertFalse(mockColumnIndex.validate(column));

    // test a value smaller than 64k
    buffer.flip();
    buffer.clear();
    buffer.putInt(20);
    buffer.flip();

    assertTrue(mockRowIndex.validate(column));
    assertTrue(mockColumnIndex.validate(column));
  }
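 // Fetches all columns of a row via get_slice and marshals them to JSON; returns null for an
 // empty row.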
 @PooledConnection
 public JSONObject getSlice(
     String keyspace, String columnFamily, String key, ConsistencyLevel consistencyLevel)
     throws InvalidRequestException, UnavailableException, TimedOutException, TException,
         UnsupportedEncodingException {
   SlicePredicate predicate = new SlicePredicate();
   SliceRange range =
       new SliceRange(ByteBufferUtil.bytes(""), ByteBufferUtil.bytes(""), false, MAX_COLUMNS);
   predicate.setSlice_range(range);
   ColumnParent parent = new ColumnParent(columnFamily);
   List<ColumnOrSuperColumn> slice =
       getConnection(keyspace)
           .get_slice(ByteBufferUtil.bytes(key), parent, predicate, consistencyLevel);
   if (slice.size() > 0) return JsonMarshaller.marshallSlice(slice);
   else return null;
 }
  @Test
  public void testCompactions() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();

    // this test inserts enough rows to force multiple block indexes to be used
    Table table = Table.open(TABLE1);
    ColumnFamilyStore store = table.getColumnFamilyStore("Standard1");

    final int ROWS_PER_SSTABLE = 10;
    Set<DecoratedKey> inserted = new HashSet<DecoratedKey>();
    for (int j = 0; j < (DatabaseDescriptor.getIndexInterval() * 3) / ROWS_PER_SSTABLE; j++) {
      for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
        DecoratedKey key = Util.dk(String.valueOf(i % 2));
        RowMutation rm = new RowMutation(TABLE1, key.key);
        rm.add(
            new QueryPath("Standard1", null, ByteBufferUtil.bytes(String.valueOf(i / 2))),
            ByteBufferUtil.EMPTY_BYTE_BUFFER,
            j * ROWS_PER_SSTABLE + i);
        rm.apply();
        inserted.add(key);
      }
      store.forceBlockingFlush();
      assertEquals(inserted.toString(), inserted.size(), Util.getRangeSlice(store).size());
    }
    while (true) {
      Future<Integer> ft = CompactionManager.instance.submitMinorIfNeeded(store);
      if (ft.get() == 0) break;
    }
    if (store.getSSTables().size() > 1) {
      CompactionManager.instance.performMajor(store);
    }
    assertEquals(inserted.size(), Util.getRangeSlice(store).size());
  }
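  // Persists a LogEntry as a single row of columns (core fields, any errors, and optionally the
  // per-column operation types) via batch_mutate.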
  public void writeLogEntry(LogEntry logEntry) throws Throwable {
    List<Mutation> slice = new ArrayList<Mutation>();
    slice.add(getMutation(LogEntryColumns.KS.toString(), logEntry.getKeyspace()));
    slice.add(getMutation(LogEntryColumns.CF.toString(), logEntry.getColumnFamily()));
    slice.add(getMutation(LogEntryColumns.ROW.toString(), logEntry.getRowKey()));
    slice.add(getMutation(LogEntryColumns.STATUS.toString(), logEntry.getStatus().toString()));
    slice.add(
        getMutation(LogEntryColumns.TIMESTAMP.toString(), Long.toString(logEntry.getTimestamp())));
    slice.add(getMutation(LogEntryColumns.HOST.toString(), logEntry.getHost()));
    if (logEntry.hasErrors()) {
      for (String errorKey : logEntry.getErrors().keySet()) {
        slice.add(getMutation(errorKey, logEntry.getErrors().get(errorKey)));
      }
    }

    if (ConfigurationStore.getStore().shouldWriteColumns()) {
      for (ColumnOperation operation : logEntry.getOperations()) {
        if (operation.isDelete()) {
          slice.add(getMutation(operation.getName(), OperationType.DELETE));
        } else {
          slice.add(getMutation(operation.getName(), OperationType.UPDATE));
        }
      }
    }
    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
        new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    Map<String, List<Mutation>> cfMutations = new HashMap<String, List<Mutation>>();
    cfMutations.put(COLUMN_FAMILY, slice);

    ByteBuffer rowKey = ByteBufferUtil.bytes(logEntry.getUuid());
    mutationMap.put(rowKey, cfMutations);
    getConnection(KEYSPACE).batch_mutate(mutationMap, logEntry.getConsistencyLevel());
  }
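  // Initializes the data source, consistency levels, column paths, and key mapper from the
  // configuration, then purges expired entries.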
  @Override
  public void start() throws CacheLoaderException {

    try {
      dataSource = new DataSource(config.getPoolProperties());
      readConsistencyLevel = ConsistencyLevel.valueOf(config.readConsistencyLevel);
      writeConsistencyLevel = ConsistencyLevel.valueOf(config.writeConsistencyLevel);
      entryColumnPath =
          new ColumnPath(config.entryColumnFamily)
              .setColumn(ENTRY_COLUMN_NAME.getBytes(UTF8Charset));
      entryColumnParent = new ColumnParent(config.entryColumnFamily);
      entryKeyPrefix = ENTRY_KEY_PREFIX + (config.isSharedKeyspace() ? cacheName + "_" : "");
      expirationColumnParent = new ColumnParent(config.expirationColumnFamily);
      expirationKey =
          ByteBufferUtil.bytes(EXPIRATION_KEY + (config.isSharedKeyspace() ? "_" + cacheName : ""));
      keyMapper = (TwoWayKey2StringMapper) Util.getInstance(config.getKeyMapper());
    } catch (Exception e) {
      throw new ConfigurationException(e);
    }

    log.debug("cleaning up expired entries...");
    purgeInternal();

    log.debug("started");
    super.start();
  }
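 // Records the key under its expiration time in the expiration column family; failures are
 // deliberately swallowed.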
 private void addExpiryEntry(
     String cassandraKey,
     long expiryTime,
     Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap) {
   try {
     addMutation(
         mutationMap,
         expirationKey,
         config.expirationColumnFamily,
         ByteBufferUtil.bytes(expiryTime),
         ByteBufferUtil.bytes(cassandraKey),
         ByteBufferUtil.EMPTY_BYTE_BUFFER);
   } catch (Exception e) {
     // Should not happen
   }
 }
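  // An sstable holding live data for a key should be reported as blocking the drop of the later,
  // fully expired sstables for that key.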
  @Test
  public void testCheckForExpiredSSTableBlockers() throws InterruptedException {
    String KEYSPACE1 = "Keyspace1";
    ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    cfs.metadata.gcGraceSeconds(0);

    RowMutation rm = new RowMutation(KEYSPACE1, Util.dk("test").key);
    rm.add(
        "Standard1",
        ByteBufferUtil.bytes("col1"),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        System.currentTimeMillis());
    rm.applyUnsafe();
    cfs.forceBlockingFlush();
    SSTableReader blockingSSTable = cfs.getSSTables().iterator().next();
    for (int i = 0; i < 10; i++) {
      rm = new RowMutation(KEYSPACE1, Util.dk("test").key);
      rm.delete("Standard1", System.currentTimeMillis());
      rm.applyUnsafe();
      cfs.forceBlockingFlush();
    }
    Multimap<SSTableReader, SSTableReader> blockers =
        SSTableExpiredBlockers.checkForExpiredSSTableBlockers(
            cfs.getSSTables(), (int) (System.currentTimeMillis() / 1000) + 100);
    assertEquals(1, blockers.keySet().size());
    assertTrue(blockers.keySet().contains(blockingSSTable));
    assertEquals(10, blockers.get(blockingSSTable).size());
  }
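  // Round-trips a CounterColumn through the column serializer, both locally and as a remote node
  // would see it (with deltas cleared).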
  @Test
  public void testSerializeDeserialize() throws IOException {
    CounterContext.ContextState state = CounterContext.ContextState.allocate(4, 2);
    state.writeElement(NodeId.fromInt(1), 4L, 4L);
    state.writeElement(NodeId.fromInt(2), 4L, 4L, true);
    state.writeElement(NodeId.fromInt(3), 4L, 4L);
    state.writeElement(NodeId.fromInt(4), 4L, 4L, true);

    CounterColumn original = new CounterColumn(ByteBufferUtil.bytes("x"), state.context, 1L);
    DataOutputBuffer bufOut = new DataOutputBuffer();
    Column.serializer().serialize(original, bufOut);
    byte[] serialized = bufOut.getData();

    ByteArrayInputStream bufIn = new ByteArrayInputStream(serialized, 0, serialized.length);
    CounterColumn deserialized =
        (CounterColumn) Column.serializer().deserialize(new DataInputStream(bufIn));
    assert original.equals(deserialized);

    bufIn = new ByteArrayInputStream(serialized, 0, serialized.length);
    CounterColumn deserializedOnRemote =
        (CounterColumn) Column.serializer().deserialize(new DataInputStream(bufIn), null, true);
    assert deserializedOnRemote.name().equals(original.name());
    assert deserializedOnRemote.total() == original.total();
    assert deserializedOnRemote.value().equals(cc.clearAllDelta(original.value()));
    assert deserializedOnRemote.timestamp() == deserialized.timestamp();
    assert deserializedOnRemote.timestampOfLastDelete() == deserialized.timestampOfLastDelete();
  }
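 // Converts bind parameters to ByteBuffers; only ByteBuffer and Long values are supported.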
 private static List<ByteBuffer> toByteBufferParams(List<Object> params) {
   List<ByteBuffer> r = new ArrayList<>();
   for (Object param : params) {
     if (param instanceof ByteBuffer) r.add((ByteBuffer) param);
     else if (param instanceof Long) r.add(ByteBufferUtil.bytes((Long) param));
     else throw new AssertionError();
   }
   return r;
 }