Example #1
    public void serialize(DropKeyspace dropKeyspace, DataOutputStream dos) throws IOException {
      dos.write(UUIDGen.decompose(dropKeyspace.newVersion));
      dos.write(UUIDGen.decompose(dropKeyspace.lastVersion));
      RowMutation.serializer().serialize(dropKeyspace.rm, dos);

      dos.writeUTF(dropKeyspace.name);
    }
Example #2
 public CFRowAdder addListEntry(String cql3ColumnName, Object value) {
   ColumnDefinition def = getDefinition(cql3ColumnName);
   assert def.type instanceof ListType;
   CellName name =
       cf.getComparator().create(prefix, def, ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes()));
   return add(name, def, value);
 }
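For context, a hedged usage sketch (not part of the original example; it assumes the 2.x CFRowAdder(ColumnFamily, Composite, long) constructor and hypothetical cf/prefix values already in scope):

  // Each call mints a fresh time-UUID cell name, so successive list entries keep insertion order.
  CFRowAdder adder = new CFRowAdder(cf, prefix, FBUtilities.timestampMicros());
  adder.addListEntry("tags", "red")
       .addListEntry("tags", "blue"); // two distinct cells under the same CQL3 list column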
Example #3
  @Test
  public void should_initialize_lazy_properties() throws Exception {
    Tweet tweet = new Tweet();
    tweet.setId(UUIDGen.getTimeUUID());
    tweet.setContent("welcome");

    CompleteBean entity =
        CompleteBeanTestBuilder.builder()
            .randomId()
            .name("name")
            .label("label")
            .age(45L)
            .addFriends("foo", "bar")
            .welcomeTweet(tweet)
            .version(CounterBuilder.incr(11L))
            .buid();

    manager.insert(entity);

    CompleteBean foundEntity = manager.find(CompleteBean.class, entity.getId());

    CompleteBean rawEntity = manager.initAndRemoveProxy(foundEntity);

    assertThat(rawEntity.getName()).isEqualTo("name");
    assertThat(rawEntity.getLabel()).isEqualTo("label");
    assertThat(rawEntity.getAge()).isEqualTo(45L);
    assertThat(rawEntity.getFriends()).containsExactly("foo", "bar");
    assertThat(rawEntity.getWelcomeTweet().getContent()).isEqualTo("welcome");
    assertThat(rawEntity.getVersion()).isInstanceOf(InternalCounterImpl.class);
    assertThat(rawEntity.getVersion().get()).isEqualTo(11L);
  }
Example #4
  @Test
  public void canAddRecipeAndIngredients() throws Exception {
    UUID uuid = UUIDGen.getTimeUUID();

    String json =
        "{\"recipe\":{\"name\":\"Spaghetti Bolognaise\",\"id\":\""
            + uuid
            + "\"},\"ingredients\":["
            + "{\"recipeId\":\""
            + uuid
            + "\",\"name\":\"tomatoes\",\"quantity\":10,\"unit\":\"items\"},"
            + "{\"recipeId\":\""
            + uuid
            + "\",\"name\":\"mince meat\",\"quantity\":500,\"unit\":\"grams\"}]}";

    Response result =
        target("recipe/" + uuid)
            .request()
            .accept(APPLICATION_JSON_TYPE)
            .put(Entity.entity(json, MediaType.APPLICATION_JSON_TYPE));

    assertThat(result.getStatusInfo().getFamily(), equalTo(SUCCESSFUL));
    verify(mockRecipeDao).persist(new Recipe(uuid, "Spaghetti Bolognaise"));
    verify(mockIngredientDao).persist(new Ingredient(uuid, "tomatoes", 10, "items"));
    verify(mockIngredientDao).persist(new Ingredient(uuid, "mince meat", 500, "grams"));
  }
Example #5
  @Test
  public void canRetrieveRecipe() throws Exception {
    UUID uuid = UUIDGen.getTimeUUID();

    when(mockRecipeDao.retrieve(uuid)).thenReturn(of(new Recipe(uuid, "Spaghetti Bolognaise")));
    when(mockIngredientDao.ingredientsForRecipe(uuid))
        .thenReturn(
            asList(
                new Ingredient(uuid, "tomatoes", 10, "items"),
                new Ingredient(uuid, "mince meat", 500, "grams")));

    String responseMsg =
        target("recipe/" + uuid).request().accept(APPLICATION_JSON_TYPE).get(String.class);

    assertThat(
        responseMsg,
        equalTo(
            "{\"recipe\":{\"name\":\"Spaghetti Bolognaise\",\"id\":\""
                + uuid
                + "\"},\"ingredients\":["
                + "{\"recipeId\":\""
                + uuid
                + "\",\"name\":\"tomatoes\",\"quantity\":10,\"unit\":\"items\"},"
                + "{\"recipeId\":\""
                + uuid
                + "\",\"name\":\"mince meat\",\"quantity\":500,\"unit\":\"grams\"}]}"));
  }
Example #6
 public RepairSession(
     Range<Token> range, String tablename, boolean isSequential, String... cfnames) {
   this(
       UUIDGen.makeType1UUIDFromHost(FBUtilities.getBroadcastAddress()).toString(),
       range,
       tablename,
       isSequential,
       cfnames);
 }
Example #7
 public DropKeyspace(String name, boolean blockOnFileDeletion)
     throws ConfigurationException, IOException {
   super(
       UUIDGen.makeType1UUIDFromHost(FBUtilities.getLocalAddress()),
       DatabaseDescriptor.getDefsVersion());
   this.name = name;
   this.blockOnFileDeletion = blockOnFileDeletion;
   KSMetaData ksm = DatabaseDescriptor.getTableDefinition(name);
   if (ksm == null) throw new ConfigurationException("Keyspace does not exist.");
   rm = makeDefinitionMutation(null, ksm, newVersion);
 }
Example #8
  @Override
  public ByteBuffer fromString(String source) throws MarshalException {
    // Return an empty ByteBuffer for an empty string.
    if (source.isEmpty()) return ByteBufferUtil.EMPTY_BYTE_BUFFER;

    // ffffffff-ffff-ffff-ffff-ffffffffffff
    if (TimeUUIDType.regexPattern.matcher(source).matches()) {
      try {
        return ByteBuffer.wrap(UUIDGen.decompose(UUID.fromString(source)));
      } catch (IllegalArgumentException e) {
        throw new MarshalException(String.format("unable to make UUID from '%s'", source), e);
      }
    }

    throw new MarshalException(String.format("unable to coerce '%s' to version 1 UUID", source));
  }
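A minimal round-trip sketch (not from the original listing; it assumes org.apache.cassandra.utils.UUIDGen and org.apache.cassandra.db.marshal.TimeUUIDType from the same tree):

  UUID id = UUIDGen.getTimeUUID();                                  // fresh version 1 (time-based) UUID
  ByteBuffer raw = TimeUUIDType.instance.fromString(id.toString()); // accepted by the regex branch above
  assert UUIDGen.getUUID(raw).equals(id);                           // getUUID is the inverse of decompose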
Example #9
  public DropColumnFamily(String tableName, String cfName)
      throws ConfigurationException, IOException {
    super(
        UUIDGen.makeType1UUIDFromHost(FBUtilities.getLocalAddress()),
        DatabaseDescriptor.getDefsVersion());
    this.tableName = tableName;
    this.cfName = cfName;

    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(tableName);
    if (ksm == null) throw new ConfigurationException("Keyspace does not already exist.");
    else if (!ksm.cfMetaData().containsKey(cfName))
      throw new ConfigurationException("CF is not defined in that keyspace.");

    KSMetaData newKsm = makeNewKeyspaceDefinition(ksm);
    rm = Migration.makeDefinitionMutation(newKsm, null, newVersion);
  }
Example #10
  /**
   * Determines the tracing context from a message. Does NOT set the threadlocal state.
   *
   * @param message The internode message
   */
  public TraceState initializeFromMessage(final MessageIn<?> message) {
    final byte[] sessionBytes = message.parameters.get(Tracing.TRACE_HEADER);

    if (sessionBytes == null) return null;

    assert sessionBytes.length == 16;
    UUID sessionId = UUIDGen.getUUID(ByteBuffer.wrap(sessionBytes));
    TraceState ts = sessions.get(sessionId);
    if (ts != null) return ts;

    if (message.verb == MessagingService.Verb.REQUEST_RESPONSE) {
      // received a message for a session we've already closed out.  see CASSANDRA-5668
      return new ExpiredTraceState(sessionId);
    } else {
      ts = new TraceState(message.from, sessionId);
      sessions.put(sessionId, ts);
      return ts;
    }
  }
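A hypothetical caller sketch (not in the original): because initializeFromMessage does not bind any thread-local, a verb handler would look the state up and trace against it explicitly.

  void traceIncoming(MessageIn<?> message) {
    TraceState state = Tracing.instance.initializeFromMessage(message);
    if (state != null)
      state.trace("Processing {} from {}", message.verb, message.from); // slf4j-style placeholders
  }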
Example #11
  @Test
  public void removeTransferLog() throws Exception {

    TransferLogSerialization logSerialization =
        getInjector().getInstance(TransferLogSerialization.class);

    CassandraClient cassandraClient = getInjector().getInstance(CassandraClientImpl.class);

    String queueName = "tlst_queue_" + RandomStringUtils.randomAlphanumeric(15);
    String source = RandomStringUtils.randomAlphanumeric(15);
    String dest = RandomStringUtils.randomAlphanumeric(15);

    UUID messageId = UUIDGen.getTimeUUID();
    logSerialization.recordTransferLog(queueName, source, dest, messageId);

    List<TransferLog> allLogs = getTransferLogs(logSerialization);

    // we only want entities for our queue
    List<TransferLog> logs =
        allLogs
            .stream()
            .filter(log -> log.getQueueName().equals(queueName))
            .collect(Collectors.toList());
    Assert.assertEquals(1, logs.size());

    logSerialization.removeTransferLog(queueName, source, dest, messageId);

    List<TransferLog> all = getTransferLogs(logSerialization);
    logs =
        all.stream()
            .filter(log -> log.getQueueName().equals(queueName))
            .collect(Collectors.toList());
    Assert.assertEquals(0, logs.size());

    try {
      logSerialization.removeTransferLog(queueName, source, dest, messageId);
      Assert.fail("Removing non-existent log should throw exception");

    } catch (QakkaException expected) {
      // success!
    }
  }
Example #12
  @Test
  public void recordTransferLog() throws Exception {

    TransferLogSerialization logSerialization =
        getInjector().getInstance(TransferLogSerialization.class);

    CassandraClient cassandraClient = getInjector().getInstance(CassandraClientImpl.class);

    String queueName = "tlst_queue_" + RandomStringUtils.randomAlphanumeric(15);
    String source = RandomStringUtils.randomAlphanumeric(15);
    String dest = RandomStringUtils.randomAlphanumeric(15);

    int numLogs = 100;

    for (int i = 0; i < numLogs; i++) {
      logSerialization.recordTransferLog(queueName, source, dest, UUIDGen.getTimeUUID());
    }

    int count = 0;
    int fetchCount = 0;
    PagingState pagingState = null;
    while (true) {

      Result<TransferLog> all = logSerialization.getAllTransferLogs(pagingState, 10);

      // we only want entities for our queue
      List<TransferLog> logs =
          all.getEntities()
              .stream()
              .filter(log -> log.getQueueName().equals(queueName))
              .collect(Collectors.toList());

      count += logs.size();
      fetchCount++;
      if (all.getPagingState() == null) {
        break;
      }
      pagingState = all.getPagingState();
    }

    Assert.assertEquals(numLogs, count);
  }
Example #13
  public Message.Response execute(QueryState state) {
    try {
      if (options.getPageSize() == 0) throw new ProtocolException("The page size cannot be 0");

      UUID tracingId = null;
      if (isTracingRequested()) {
        tracingId = UUIDGen.getTimeUUID();
        state.prepareTracingSession(tracingId);
      }

      if (state.traceNextQuery()) {
        state.createTracingSession();

        ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
        builder.put("query", query);
        if (options.getPageSize() > 0)
          builder.put("page_size", Integer.toString(options.getPageSize()));

        Tracing.instance.begin("Execute CQL3 query", builder.build());
      }

      Message.Response response =
          state.getClientState().getCQLQueryHandler().process(query, state, options);
      if (options.skipMetadata() && response instanceof ResultMessage.Rows)
        ((ResultMessage.Rows) response).result.metadata.setSkipMetadata();

      if (tracingId != null) response.setTracingId(tracingId);

      return response;
    } catch (Exception e) {
      if (!((e instanceof RequestValidationException) || (e instanceof RequestExecutionException)))
        logger.error("Unexpected error during query", e);
      return ErrorMessage.fromException(e);
    } finally {
      Tracing.instance.stopSession();
    }
  }
Example #14
  public Message.Response execute(QueryState state) {
    try {
      UUID tracingId = null;
      if (isTracingRequested()) {
        tracingId = UUIDGen.getTimeUUID();
        state.prepareTracingSession(tracingId);
      }

      if (state.traceNextQuery()) {
        state.createTracingSession();
        Tracing.instance().begin("Preparing CQL3 query", ImmutableMap.of("query", query));
      }

      Message.Response response = QueryProcessor.prepare(query, state.getClientState(), false);

      if (tracingId != null) response.setTracingId(tracingId);

      return response;
    } catch (Exception e) {
      return ErrorMessage.fromException(e);
    } finally {
      Tracing.instance().stopSession();
    }
  }
Example #15
  /**
   * For internal use and testing only. The rest of the system should go through the submit*
   * methods, which are properly serialized. Caller is in charge of marking/unmarking the sstables
   * as compacting.
   */
  protected void runMayThrow() throws Exception {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    assert transaction != null;

    if (transaction.originals().isEmpty()) return;

    // Note that the current compaction strategy is not necessarily the one this task was created
    // under.
    // This should be harmless; see comments to CFS.maybeReloadCompactionStrategy.
    AbstractCompactionStrategy strategy = cfs.getCompactionStrategy();

    if (DatabaseDescriptor.isSnapshotBeforeCompaction())
      cfs.snapshotWithoutFlush(System.currentTimeMillis() + "-compact-" + cfs.name);

    // Note that we need a rough early estimate of whether the compaction fits on disk. This is
    // pessimistic, but since we might remove sstables from the compaction in
    // checkAvailableDiskSpace, it needs to be done here.
    long expectedWriteSize =
        cfs.getExpectedCompactedFileSize(transaction.originals(), compactionType);
    long earlySSTableEstimate = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
    checkAvailableDiskSpace(earlySSTableEstimate, expectedWriteSize);

    // sanity check: all sstables must belong to the same cfs
    assert !Iterables.any(
        transaction.originals(),
        new Predicate<SSTableReader>() {
          @Override
          public boolean apply(SSTableReader sstable) {
            return !sstable.descriptor.cfname.equals(cfs.name);
          }
        });

    UUID taskId = SystemKeyspace.startCompaction(cfs, transaction.originals());

    // New sstables from flush can be added during a compaction, but only the compaction can remove
    // them, so in our single-threaded compaction world this is a valid way of determining whether
    // we're compacting all the sstables that existed when we started.
    StringBuilder ssTableLoggerMsg = new StringBuilder("[");
    for (SSTableReader sstr : transaction.originals()) {
      ssTableLoggerMsg.append(
          String.format("%s:level=%d, ", sstr.getFilename(), sstr.getSSTableLevel()));
    }
    ssTableLoggerMsg.append("]");
    String taskIdLoggerMsg = taskId == null ? UUIDGen.getTimeUUID().toString() : taskId.toString();
    logger.info("Compacting ({}) {}", taskIdLoggerMsg, ssTableLoggerMsg);

    long start = System.nanoTime();

    long totalKeysWritten = 0;

    long estimatedKeys = 0;
    try (CompactionController controller = getCompactionController(transaction.originals())) {
      Set<SSTableReader> actuallyCompact =
          Sets.difference(transaction.originals(), controller.getFullyExpiredSSTables());

      SSTableFormat.Type sstableFormat = getFormatType(transaction.originals());

      List<SSTableReader> newSStables;
      AbstractCompactionIterable ci;

      // SSTableScanners need to be closed before markCompactedSSTablesReplaced call as scanners
      // contain references
      // to both ifile and dfile and SSTR will throw deletion errors on Windows if it tries to
      // delete before scanner is closed.
      // See CASSANDRA-8019 and CASSANDRA-8399
      try (Refs<SSTableReader> refs = Refs.ref(actuallyCompact);
          AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(actuallyCompact)) {
        ci =
            new CompactionIterable(
                compactionType, scanners.scanners, controller, sstableFormat, taskId);
        try (CloseableIterator<AbstractCompactedRow> iter = ci.iterator()) {
          if (collector != null) collector.beginCompaction(ci);
          long lastCheckObsoletion = start;

          if (!controller.cfs.getCompactionStrategy().isActive)
            throw new CompactionInterruptedException(ci.getCompactionInfo());

          try (CompactionAwareWriter writer =
              getCompactionAwareWriter(cfs, transaction, actuallyCompact)) {
            estimatedKeys = writer.estimatedKeys();
            while (iter.hasNext()) {
              if (ci.isStopRequested())
                throw new CompactionInterruptedException(ci.getCompactionInfo());

              try (AbstractCompactedRow row = iter.next()) {
                if (writer.append(row)) totalKeysWritten++;

                if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                  controller.maybeRefreshOverlaps();
                  lastCheckObsoletion = System.nanoTime();
                }
              }
            }

            // don't replace old sstables yet, as we need to mark the compaction finished in the
            // system table
            newSStables = writer.finish();
          } finally {
            // point of no return -- the new sstables are live on disk; next we'll start deleting
            // the old ones
            // (in replaceCompactedSSTables)
            if (taskId != null) SystemKeyspace.finishCompaction(taskId);

            if (collector != null) collector.finishCompaction(ci);
          }
        }
      }

      // log a bunch of statistics about the result and save to system table compaction_history
      long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
      long startsize = SSTableReader.getTotalBytes(transaction.originals());
      long endsize = SSTableReader.getTotalBytes(newSStables);
      double ratio = (double) endsize / (double) startsize;

      StringBuilder newSSTableNames = new StringBuilder();
      for (SSTableReader reader : newSStables)
        newSSTableNames.append(reader.descriptor.baseFilename()).append(",");

      double mbps = dTime > 0 ? (double) endsize / (1024 * 1024) / ((double) dTime / 1000) : 0;
      long totalSourceRows = 0;
      String mergeSummary =
          updateCompactionHistory(
              cfs.keyspace.getName(), cfs.getColumnFamilyName(), ci, startsize, endsize);
      logger.info(
          String.format(
              "Compacted (%s) %d sstables to [%s] to level=%d.  %,d bytes to %,d (~%d%% of original) in %,dms = %fMB/s.  %,d total partitions merged to %,d.  Partition merge counts were {%s}",
              taskIdLoggerMsg,
              transaction.originals().size(),
              newSSTableNames.toString(),
              getLevel(),
              startsize,
              endsize,
              (int) (ratio * 100),
              dTime,
              mbps,
              totalSourceRows,
              totalKeysWritten,
              mergeSummary));
      logger.debug(
          String.format(
              "CF Total Bytes Compacted: %,d", CompactionTask.addToTotalBytesCompacted(endsize)));
      logger.debug(
          "Actual #keys: {}, Estimated #keys:{}, Err%: {}",
          totalKeysWritten,
          estimatedKeys,
          ((double) (totalKeysWritten - estimatedKeys) / totalKeysWritten));

      if (offline) Refs.release(Refs.selfRefs(newSStables));
    }
  }
Example #16
/**
 * {@link StreamPlan} is a helper class that builds a StreamOperation with a given configuration.
 *
 * <p>This is the class to use for building a streaming plan and starting streaming.
 */
public class StreamPlan {
  private final UUID planId = UUIDGen.getTimeUUID();
  private final String description;
  private final List<StreamEventHandler> handlers = new ArrayList<>();
  private final long repairedAt;
  private final StreamCoordinator coordinator;

  private boolean flushBeforeTransfer = true;

  /**
   * Start building stream plan.
   *
   * @param description Stream type that describes this StreamPlan
   */
  public StreamPlan(String description) {
    this(description, ActiveRepairService.UNREPAIRED_SSTABLE, 1, false, false);
  }

  public StreamPlan(String description, boolean keepSSTableLevels) {
    this(description, ActiveRepairService.UNREPAIRED_SSTABLE, 1, keepSSTableLevels, false);
  }

  public StreamPlan(
      String description,
      long repairedAt,
      int connectionsPerHost,
      boolean keepSSTableLevels,
      boolean isIncremental) {
    this.description = description;
    this.repairedAt = repairedAt;
    this.coordinator =
        new StreamCoordinator(
            connectionsPerHost, keepSSTableLevels, isIncremental, new DefaultConnectionFactory());
  }

  /**
   * Request data in {@code keyspace} and {@code ranges} from specific node.
   *
   * @param from endpoint address to fetch data from.
   * @param connecting Actual connecting address for the endpoint
   * @param keyspace name of keyspace
   * @param ranges ranges to fetch
   * @return this object for chaining
   */
  public StreamPlan requestRanges(
      InetAddress from, InetAddress connecting, String keyspace, Collection<Range<Token>> ranges) {
    return requestRanges(from, connecting, keyspace, ranges, new String[0]);
  }

  /**
   * Request data in {@code columnFamilies} under {@code keyspace} and {@code ranges} from specific
   * node.
   *
   * @param from endpoint address to fetch data from.
   * @param connecting Actual connecting address for the endpoint
   * @param keyspace name of keyspace
   * @param ranges ranges to fetch
   * @param columnFamilies specific column families
   * @return this object for chaining
   */
  public StreamPlan requestRanges(
      InetAddress from,
      InetAddress connecting,
      String keyspace,
      Collection<Range<Token>> ranges,
      String... columnFamilies) {
    StreamSession session = coordinator.getOrCreateNextSession(from, connecting);
    session.addStreamRequest(keyspace, ranges, Arrays.asList(columnFamilies), repairedAt);
    return this;
  }

  /**
   * Add transfer task to send data of specific {@code columnFamilies} under {@code keyspace} and
   * {@code ranges}.
   *
   * @see #transferRanges(java.net.InetAddress, java.net.InetAddress, String, java.util.Collection,
   *     String...)
   */
  public StreamPlan transferRanges(
      InetAddress to, String keyspace, Collection<Range<Token>> ranges, String... columnFamilies) {
    return transferRanges(to, to, keyspace, ranges, columnFamilies);
  }

  /**
   * Add transfer task to send data of specific keyspace and ranges.
   *
   * @param to endpoint address of receiver
   * @param connecting Actual connecting address of the endpoint
   * @param keyspace name of keyspace
   * @param ranges ranges to send
   * @return this object for chaining
   */
  public StreamPlan transferRanges(
      InetAddress to, InetAddress connecting, String keyspace, Collection<Range<Token>> ranges) {
    return transferRanges(to, connecting, keyspace, ranges, new String[0]);
  }

  /**
   * Add transfer task to send data of specific {@code columnFamilies} under {@code keyspace} and
   * {@code ranges}.
   *
   * @param to endpoint address of receiver
   * @param connecting Actual connecting address of the endpoint
   * @param keyspace name of keyspace
   * @param ranges ranges to send
   * @param columnFamilies specific column families
   * @return this object for chaining
   */
  public StreamPlan transferRanges(
      InetAddress to,
      InetAddress connecting,
      String keyspace,
      Collection<Range<Token>> ranges,
      String... columnFamilies) {
    StreamSession session = coordinator.getOrCreateNextSession(to, connecting);
    session.addTransferRanges(
        keyspace, ranges, Arrays.asList(columnFamilies), flushBeforeTransfer, repairedAt);
    return this;
  }

  /**
   * Add transfer task to send given SSTable files.
   *
   * @param to endpoint address of receiver
   * @param sstableDetails sstables with file positions and estimated key count. this collection
   *     will be modified to remove those files that are successfully handed off
   * @return this object for chaining
   */
  public StreamPlan transferFiles(
      InetAddress to, Collection<StreamSession.SSTableStreamingSections> sstableDetails) {
    coordinator.transferFiles(to, sstableDetails);
    return this;
  }

  public StreamPlan listeners(StreamEventHandler handler, StreamEventHandler... handlers) {
    this.handlers.add(handler);
    if (handlers != null) Collections.addAll(this.handlers, handlers);
    return this;
  }

  /**
   * Set custom StreamConnectionFactory to be used for establishing connection
   *
   * @param factory StreamConnectionFactory to use
   * @return self
   */
  public StreamPlan connectionFactory(StreamConnectionFactory factory) {
    this.coordinator.setConnectionFactory(factory);
    return this;
  }

  /** @return true if this plan has nothing to execute */
  public boolean isEmpty() {
    return !coordinator.hasActiveSessions();
  }

  /**
   * Execute this {@link StreamPlan} asynchronously.
   *
   * @return Future {@link StreamState} that you can use to listen on progress of streaming.
   */
  public StreamResultFuture execute() {
    return StreamResultFuture.init(planId, description, handlers, coordinator);
  }

  /**
   * Set flushBeforeTransfer option. When it's true, will flush before streaming ranges. (Default:
   * true)
   *
   * @param flushBeforeTransfer set to true when the node should flush before transfer
   * @return this object for chaining
   */
  public StreamPlan flushBeforeTransfer(boolean flushBeforeTransfer) {
    this.flushBeforeTransfer = flushBeforeTransfer;
    return this;
  }
}
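As a usage illustration (not from the original listing; the helper name, endpoint, and keyspace are placeholders, and it assumes the 2.x-era DatabaseDescriptor.getPartitioner() plus the streaming classes above are on the classpath):

  // Sketch: request a keyspace's full token range from one peer and stream it asynchronously.
  public StreamResultFuture streamAllFrom(InetAddress peer, String keyspace) {
    IPartitioner p = DatabaseDescriptor.getPartitioner();
    Range<Token> fullRing = new Range<>(p.getMinimumToken(), p.getMinimumToken());
    return new StreamPlan("Rebuild")                 // description only, shows up in streaming status
        .requestRanges(peer, peer, keyspace, Collections.singleton(fullRing))
        .execute();                                  // asynchronous; attach listeners(...) to observe progress
  }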
Example #17
 private DropKeyspace(DataInputStream din) throws IOException {
   super(UUIDGen.makeType1UUID(din), UUIDGen.makeType1UUID(din));
   rm = RowMutation.serializer().deserialize(din);
   name = din.readUTF();
 }
Example #18
 public UUID newSession() {
   return newSession(TimeUUIDType.instance.compose(ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes())));
 }
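A hedged observation, not part of the original: composing a fresh getTimeUUIDBytes() through TimeUUIDType is equivalent to passing the UUID directly, as other examples in this listing do.

  return newSession(UUIDGen.getTimeUUID()); // same effect: a new time-based (version 1) session id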
Example #19
  protected void run(CQLQueryExecutor executor) throws IOException {
    if (session.getColumnFamilyType() == ColumnFamilyType.Super)
      throw new RuntimeException("Super columns are not implemented for CQL");

    if (values == null) values = generateValues();

    // Construct a query string once.
    if (cqlQuery == null) {
      StringBuilder query =
          new StringBuilder("UPDATE ").append(wrapInQuotesIfRequired("Standard1"));

      if (session.cqlVersion.startsWith("2"))
        query.append(" USING CONSISTENCY ").append(session.getConsistencyLevel().toString());

      query.append(" SET ");

      for (int i = 0; i < session.getColumnsPerKey(); i++) {
        if (i > 0) query.append(',');

        if (session.timeUUIDComparator) {
          if (session.cqlVersion.startsWith("3"))
            throw new UnsupportedOperationException("Cannot use UUIDs in column names with CQL3");

          query.append(wrapInQuotesIfRequired(UUIDGen.getTimeUUID().toString())).append(" = ?");
        } else {
          query.append(wrapInQuotesIfRequired("C" + i)).append(" = ?");
        }
      }

      query.append(" WHERE KEY=?");
      cqlQuery = query.toString();
    }

    List<String> queryParms = new ArrayList<String>();
    for (int i = 0; i < session.getColumnsPerKey(); i++) {
      // Column value
      queryParms.add(
          getUnQuotedCqlBlob(
              values.get(i % values.size()).array(), session.cqlVersion.startsWith("3")));
    }

    String key = String.format("%0" + session.getTotalKeysLength() + "d", index);
    queryParms.add(getUnQuotedCqlBlob(key, session.cqlVersion.startsWith("3")));

    TimerContext context = session.latency.time();

    boolean success = false;
    String exceptionMessage = null;

    for (int t = 0; t < session.getRetryTimes(); t++) {
      if (success) break;

      try {
        success = executor.execute(cqlQuery, queryParms);
      } catch (Exception e) {
        exceptionMessage = getExceptionMessage(e);
        success = false;
      }
    }

    if (!success) {
      error(
          String.format(
              "Operation [%d] retried %d times - error inserting key %s %s%n with query %s",
              index,
              session.getRetryTimes(),
              key,
              (exceptionMessage == null) ? "" : "(" + exceptionMessage + ")",
              cqlQuery));
    }

    session.operations.getAndIncrement();
    session.keys.getAndIncrement();
    context.stop();
  }
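For orientation (illustrative only; assumes two columns per key, consistency level ONE, and that wrapInQuotesIfRequired quotes identifiers only for CQL3), the builder above yields statements of roughly this shape:

  // CQL2: UPDATE Standard1 USING CONSISTENCY ONE SET C0 = ?,C1 = ? WHERE KEY=?
  // CQL3: UPDATE "Standard1" SET "C0" = ?,"C1" = ? WHERE KEY=?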