private Statement createDeleteStatementFor(String userId, String applicationId) {
    UUID appUuid = UUID.fromString(applicationId);
    UUID userUuid = UUID.fromString(userId);

    BatchStatement batch = new BatchStatement();

    Statement deleteFromAppFollowersTable =
        QueryBuilder.delete()
            .all()
            .from(Follow.TABLE_NAME_APP_FOLLOWERS)
            .where(eq(APP_ID, appUuid))
            .and(eq(USER_ID, userUuid));

    batch.add(deleteFromAppFollowersTable);

    Statement deleteFromUserFollowingsTable =
        QueryBuilder.delete()
            .all()
            .from(Follow.TABLE_NAME_USER_FOLLOWING)
            .where(eq(APP_ID, appUuid))
            .and(eq(USER_ID, userUuid));

    batch.add(deleteFromUserFollowingsTable);

    return batch;
  }
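A minimal caller sketch, assuming the enclosing DAO holds a DataStax driver 3.x Session field named session; the method name unfollow and its signature are hypothetical, not taken from the original code:

  // Hypothetical caller: runs the logged batch so both follow tables are
  // updated atomically (assumes a Session field named "session" on this class).
  public void unfollow(String userId, String applicationId) {
    Statement delete = createDeleteStatementFor(userId, applicationId);
    session.execute(delete);
  }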
  @Override
  public void processBatch(Collection<T> tuples) {
    BatchStatement batchCommand = store.getBatchCommand();
    for (T tuple : tuples) {
      batchCommand.add(getUpdateStatement(tuple));
    }
  }
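The getUpdateStatement helper is not shown above; a hedged sketch of what it could look like with the 3.x QueryBuilder, where the table name, column names, and the CountTuple accessor methods are assumptions for illustration only:

  // Hypothetical mapping from a tuple to an UPDATE; "metrics", "counts",
  // getId() and getCount() are placeholders, not taken from the original code.
  private Statement getUpdateStatement(CountTuple tuple) {
    return QueryBuilder.update("metrics", "counts")
        .with(QueryBuilder.set("count", tuple.getCount()))
        .where(QueryBuilder.eq("id", tuple.getId()));
  }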
  private Statement createStatementToSaveFollowing(User user, Application app) {
    UUID userId = UUID.fromString(user.userId);
    UUID appId = UUID.fromString(app.applicationId);

    BatchStatement batch = new BatchStatement();

    Statement insertIntoAppFollowersTable =
        QueryBuilder.insertInto(Follow.TABLE_NAME_APP_FOLLOWERS)
            .value(APP_ID, appId)
            .value(USER_ID, userId)
            .value(APP_NAME, app.name)
            .value(USER_FIRST_NAME, user.firstName)
            .value(TIME_OF_FOLLOW, Instant.now().toEpochMilli());

    batch.add(insertIntoAppFollowersTable);

    Statement insertIntoUserFollowingsTable =
        QueryBuilder.insertInto(Follow.TABLE_NAME_USER_FOLLOWING)
            .value(APP_ID, appId)
            .value(USER_ID, userId)
            .value(APP_NAME, app.name)
            .value(USER_FIRST_NAME, user.firstName)
            .value(TIME_OF_FOLLOW, Instant.now().toEpochMilli());

    batch.add(insertIntoUserFollowingsTable);

    return batch;
  }
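Both inserts duplicate the same columns into app_followers and user_following, the usual Cassandra pattern of one table per query. A hedged read-side sketch for the second table, assuming user_following is partitioned by USER_ID and that a session field is available:

  // Hypothetical read: list the applications a user follows. Assumes the
  // user_following table is partitioned by user_id.
  private ResultSet findFollowedApps(UUID userUuid) {
    Statement select =
        QueryBuilder.select()
            .all()
            .from(Follow.TABLE_NAME_USER_FOLLOWING)
            .where(eq(USER_ID, userUuid));
    return session.execute(select);
  }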
  @SuppressWarnings("unchecked")
  @Override
  public void multiPut(List<List<Object>> keys, List<T> values) {
    LOG.debug("Putting the following keys: {} with values: {}", keys, values);
    try {
      List<Statement> statements = new ArrayList<Statement>();

      // Retrieve the mapping statement for the key,val pair
      for (int i = 0; i < keys.size(); i++) {
        List<Object> key = keys.get(i);
        T val = values.get(i);
        Statement retrievedStatement = mapper.map(key, val);
        // Allows for BatchStatements to be returned by the mapper.
        if (retrievedStatement instanceof BatchStatement) {
          BatchStatement batchedStatement = (BatchStatement) retrievedStatement;
          statements.addAll(batchedStatement.getStatements());
        } else {
          statements.add(retrievedStatement);
        }
      }

      // Execute all the statements as a batch.
      BatchStatement batch = new BatchStatement(batchType);
      batch.addAll(statements);
      session.execute(batch);

      _mwrites.incrBy(statements.size());
    } catch (Exception e) {
      checkCassandraException(e);
      LOG.error("Exception {} caught.", e);
    }
  }
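A hedged sketch of a mapper whose map() returns a BatchStatement, which is what the instanceof branch above unpacks so each inner write is counted individually in _mwrites and executed inside the single outer batch; the interface name, generic parameters, and table layout here are assumptions, not the project's actual mapper contract:

  // Hypothetical mapper: one key/value pair fans out to two tables, so map()
  // returns a BatchStatement that multiPut() flattens into its own batch.
  public class FollowMapper implements Mapper<List<Object>, Follow> {
    @Override
    public Statement map(List<Object> key, Follow value) {
      BatchStatement batch = new BatchStatement();
      batch.add(
          QueryBuilder.insertInto("app_followers")
              .value("app_id", key.get(0))
              .value("user_id", key.get(1)));
      batch.add(
          QueryBuilder.insertInto("user_following")
              .value("app_id", key.get(0))
              .value("user_id", key.get(1)));
      return batch;
    }
  }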
Example #5
  private void addToBatch(Metric metric) {
    batch.add(
        statement.bind(
            metric.getRollup() * metric.getPeriod(),
            Collections.singletonList(metric.getValue()),
            metric.getTenant(),
            metric.getRollup(),
            metric.getPeriod(),
            metric.getPath(),
            metric.getTimestamp()));

    // todo: interval via config?
    if (batch.size() >= batchSize
        || (lastFlushTimestamp < System.currentTimeMillis() / 1000L - 60)) {
      flush();
      lastFlushTimestamp = System.currentTimeMillis() / 1000L;
    }
  }
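The PreparedStatement behind statement is not shown; a hedged guess at its CQL, inferred only from the bind() argument order above (TTL, data list, tenant, rollup, period, path, time). The table and column names are assumptions:

  // Hypothetical preparation of the bound statement used in addToBatch();
  // column names are guessed from the bind() order, not taken from the source.
  PreparedStatement statement =
      session.prepare(
          "UPDATE metric USING TTL ? SET data = data + ? "
              + "WHERE tenant = ? AND rollup = ? AND period = ? AND path = ? AND time = ?;");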
Example #6
  @Override
  public void run() {
    while (!shutdown) {
      Metric metric = metrics.poll();
      if (metric != null) {
        addToBatch(metric);
      } else {
        try {
          Thread.sleep(100);
        } catch (InterruptedException ignored) {
          // keep polling; the shutdown flag controls when the loop exits
        }
      }
    }

    if (batch.size() > 0) {
      flush();
    }
  }
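The poll/sleep loop above wakes every 100 ms even when the queue is empty. An alternative sketch (not the original design) using a BlockingQueue with a timed poll, which reacts to new metrics immediately and still honours the shutdown flag; it assumes metrics is a java.util.concurrent.BlockingQueue<Metric>:

    // Alternative sketch, assuming metrics is a BlockingQueue<Metric>
    // and java.util.concurrent.TimeUnit is imported.
    while (!shutdown) {
      try {
        Metric metric = metrics.poll(100, TimeUnit.MILLISECONDS);
        if (metric != null) {
          addToBatch(metric);
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
      }
    }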
Example #7
  private void flush() {
    final int batchSize = batch.size();
    ResultSetFuture future = session.executeAsync(batch);
    Futures.addCallback(
        future,
        new FutureCallback<ResultSet>() {
          @Override
          public void onSuccess(ResultSet result) {
            bus.post(new StoreSuccessEvent(batchSize)).now();
          }

          @Override
          public void onFailure(Throwable t) {
            bus.post(new StoreErrorEvent(batchSize)).now();
            logger.error(t);
          }
        },
        executor);

    batch = new BatchStatement();
  }
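Note that flush() hands the old batch to executeAsync() and then assigns a fresh BatchStatement rather than calling clear(), so the in-flight asynchronous write keeps its statements while new metrics accumulate in the replacement batch.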
  public static void main(String[] args) {
    String[] calles_28001 = {"Alcala", "Preciados", "Gran Via", "Princesa"};
    String[] calles_28002 = {"Castellana", "Goya", "Serrano", "Velazquez"};

    String address = "Alcala";
    String cpCode = "28001";

    int index_28001 = 0;
    int index_28002 = 0;

    List<User> users = new ArrayList<User>();

    for (int i = 0; i < 2000; i++) {

      String id = (i + 1) + "";
      String email = "user" + id + "@void.com";
      String nombre = "nombre_" + id;
      String cp;
      String calle;
      if (i % 2 == 0) {
        cp = "28001";
        calle = calles_28001[index_28001];
        index_28001++;
        index_28001 = index_28001 % 4;
      } else {
        cp = "28002";
        calle = calles_28002[index_28002];
        index_28002++;
        index_28002 = index_28002 % 4;
      }

      User user = new User(id, email, nombre, cp, calle);
      users.add(user);
    }

    // connect and create the column family
    Cluster cluster;
    String node = "127.0.0.1";

    cluster = Cluster.builder().addContactPoint(node).build();

    Session session;
    session = cluster.connect();

    session.execute("drop keyspace utad_cql");

    session.execute(
        "CREATE KEYSPACE utad_cql WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};");

    // Create the table with a compound primary key of cp, calle and id_usuario,
    // to make sure every record inserted is unique (discriminated by
    // id_usuario).
    session.execute(
        "CREATE TABLE utad_cql.usersByCPAddress (id_usuario int, cp int, nombre text, email text, calle text, primary key(cp, calle, id_usuario));");

    PreparedStatement ps1 =
        session.prepare(
            "Insert into utad_cql.usersByCPAddress(id_usuario, cp, nombre, email, calle) values (?, ?, ?, ?, ?)");

    BatchStatement batch = new BatchStatement();

    // Perform the inserts into the database
    for (User user : users) {
      int id = Integer.parseInt(user.id);
      int cp = Integer.parseInt(user.cp);
      String nombre = user.nombre;
      String email = user.email;
      String calle = user.calle;

      batch.add(ps1.bind(id, cp, nombre, email, calle));
    }

    session.execute(batch);
    System.out.println("\nRegistros insertados en Cassandra");

    // Print the matching result. The fields that are not requested in the
    // output are commented out; they can be uncommented to confirm the data is
    // correct. The result sample is limited to 20 rows.
    StringBuilder sbQuery = new StringBuilder();
    sbQuery.append("select id_usuario, nombre, email ");
    // sbQuery.append(",cp, calle  ");
    sbQuery.append("from utad_cql.usersByCPAddress where calle='").append(addres);
    sbQuery.append("' and cp=").append(cpCode);
    sbQuery.append(" limit 20");

    ResultSet results = session.execute(sbQuery.toString());

    // Read the retrieved rows.
    for (Row row : results) {
      System.out.println("\nid : " + row.getInt("id_usuario"));
      System.out.println("nombre : " + row.getString("nombre"));
      System.out.println("email : " + row.getString("email"));
      //			System.out.println("cp : " + row.getInt("cp"));
      //			System.out.println("calle : " + row.getString("calle"));
    }

    cluster.close();
    System.out.println("\nFin de la ejecución");
  }
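The single batch above carries 2000 bound statements, which can exceed Cassandra's batch_size_warn_threshold_in_kb and batch_size_fail_threshold_in_kb settings. A hedged alternative (not part of the original exercise) that flushes the inserts in smaller unlogged chunks:

    // Alternative sketch: flush the inserts in chunks so a large load does not
    // trip the server-side batch size thresholds.
    int chunkSize = 100; // assumption: a conservative chunk size
    BatchStatement chunk = new BatchStatement(BatchStatement.Type.UNLOGGED);
    for (User user : users) {
      chunk.add(
          ps1.bind(
              Integer.parseInt(user.id),
              Integer.parseInt(user.cp),
              user.nombre,
              user.email,
              user.calle));
      if (chunk.size() >= chunkSize) {
        session.execute(chunk);
        chunk.clear();
      }
    }
    if (chunk.size() > 0) {
      session.execute(chunk);
    }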