public BoundStatementWrapper bindForSimpleCounterIncrementDecrement(
      PersistentStateHolder context,
      PreparedStatement ps,
      PropertyMeta pm,
      Long increment,
      ConsistencyLevel consistencyLevel) {

    EntityMeta entityMeta = context.getEntityMeta();
    Object primaryKey = context.getPrimaryKey();

    log.trace(
        "Bind prepared statement {} for simple counter increment of {} using primary key {} and value {}",
        ps.getQueryString(),
        pm,
        primaryKey,
        increment);
    Object[] boundValues =
        ArrayUtils.add(
            extractValuesForSimpleCounterBinding(entityMeta, pm, primaryKey), 0, increment);

    BoundStatement bs = ps.bind(boundValues);
    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        boundValues,
        getCQLLevel(consistencyLevel),
        NO_LISTENER,
        NO_SERIAL_CONSISTENCY);
  }
  public BoundStatementWrapper bindForClusteredCounterIncrementDecrement(
      PersistentStateHolder context,
      PreparedStatement ps,
      PropertyMeta counterMeta,
      Long increment) {

    EntityMeta entityMeta = context.getEntityMeta();
    Object primaryKey = context.getPrimaryKey();

    log.trace(
        "Bind prepared statement {} for clustered counter increment/decrement for {} using primary key {} and value {}",
        ps.getQueryString(),
        entityMeta,
        primaryKey,
        increment);

    ConsistencyLevel consistencyLevel = overrider.getWriteLevel(context);

    List<Object> primaryKeys =
        bindPrimaryKey(
            primaryKey, entityMeta.getIdMeta(), counterMeta.structure().isStaticColumn());
    Object[] keys = addAll(new Object[] {increment}, primaryKeys.toArray());

    BoundStatement bs = ps.bind(keys);

    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        keys,
        getCQLLevel(consistencyLevel),
        NO_LISTENER,
        NO_SERIAL_CONSISTENCY);
  }
  public BoundStatementWrapper bindForClusteredCounterSelect(
      PersistentStateHolder context,
      PreparedStatement ps,
      boolean onlyStaticColumns,
      ConsistencyLevel consistencyLevel) {
    EntityMeta entityMeta = context.getEntityMeta();
    Object primaryKey = context.getPrimaryKey();

    log.trace(
        "Bind prepared statement {} for clustered counter read for {} using primary key {}",
        ps.getQueryString(),
        entityMeta,
        primaryKey);

    List<Object> primaryKeys =
        bindPrimaryKey(primaryKey, entityMeta.getIdMeta(), onlyStaticColumns);
    Object[] boundValues = primaryKeys.toArray();

    BoundStatement bs = ps.bind(boundValues);
    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        boundValues,
        getCQLLevel(consistencyLevel),
        NO_LISTENER,
        NO_SERIAL_CONSISTENCY);
  }
Example #4
  public Optional<Auction> getAuction(String auctionName) {
    BoundStatement auctionBoundStatement = getAuction.bind(auctionName);
    Row auction = session.execute(auctionBoundStatement).one();

    LOGGER.debug("Getting auction information for auction {} rows {}", auctionName, auction);

    // If the auction does not exist, return an empty Optional instead of failing on the null row
    if (auction == null) {
      return Optional.empty();
    }

    BoundStatement bidsBound = getAuctionBids.bind(auctionName);
    List<BidVo> bids =
        session
            .execute(bidsBound)
            .all()
            .stream()
            .map(
                row ->
                    new BidVo(
                        row.getString("bid_user"),
                        row.getLong("bid_amount"),
                        UUIDs.unixTimestamp(row.getUUID("bid_time"))))
            .collect(Collectors.toList());

    return Optional.of(
        new Auction(
            auction.getString("name"),
            Instant.ofEpochMilli(auction.getLong("ends")),
            bids,
            auction.getString("owner")));
  }
  public BoundStatementWrapper bindForUpdate(
      PersistentStateHolder context, PreparedStatement ps, List<PropertyMeta> pms) {
    EntityMeta entityMeta = context.getEntityMeta();
    Object entity = context.getEntity();

    log.trace(
        "Bind prepared statement {} for properties {} update of entity {}",
        ps.getQueryString(),
        pms,
        entity);

    ConsistencyLevel consistencyLevel = overrider.getWriteLevel(context);

    List<Object> values = new ArrayList<>();

    final int staticColumnsCount =
        FluentIterable.from(pms).filter(PropertyMeta.STATIC_COLUMN_FILTER).size();
    final boolean onlyStaticColumns = staticColumnsCount > 0 && pms.size() == staticColumnsCount;

    values.addAll(fetchTTLAndTimestampValues(context));
    values.addAll(fetchPropertiesValues(pms, entity));
    values.addAll(fetchPrimaryKeyValues(entityMeta, entity, onlyStaticColumns));
    values.addAll(fetchCASConditionsValues(context, entityMeta));
    BoundStatement bs = ps.bind(values.toArray());

    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        values.toArray(),
        getCQLLevel(consistencyLevel),
        context.getCASResultListener(),
        context.getSerialConsistencyLevel());
  }
Example #6
  public void execute(Tuple tuple) {
    LOG.info("Story found");

    String tweet = tuple.getString(0);
    outputCollector.ack(tuple);
    PreparedStatement preparedStatement =
        session.prepare("INSERT INTO tweets (seen, tweet) VALUES (?, ?)");
    session.execute(preparedStatement.bind(new Timestamp(System.currentTimeMillis()), tweet));
  }
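The bolt in the example above prepares the INSERT statement inside execute(), so every incoming tuple pays an extra prepare round trip. Below is a minimal sketch of the prepare-once pattern using a hypothetical TweetWriter helper; the class and method names are illustrative and not taken from the original bolt.
import java.util.Date;

import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;

// Sketch only: prepare the statement once, then bind fresh values per insert.
public class TweetWriter {

  private final Session session;
  private final PreparedStatement insertTweet;

  public TweetWriter(Session session) {
    this.session = session;
    // Preparing once avoids re-sending the PREPARE request for every call
    this.insertTweet = session.prepare("INSERT INTO tweets (seen, tweet) VALUES (?, ?)");
  }

  public void write(String tweet) {
    session.execute(insertTweet.bind(new Date(), tweet));
  }
}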
  @Override
  public Company getCompany(final String firstName, final String lastName) {
    final String cql = "select * from company where first_name = ? and last_name = ?";
    final PreparedStatement cachedPreparedStatement = getCachedPreparedStatement(cql);
    final BoundStatement statement = cachedPreparedStatement.bind(firstName, lastName);

    final ResultSet resultSet = cassandraOperations.getSession().execute(statement);
    final Row row = resultSet.one();
    return toCompany(row);
  }
  private PreparedStatement getCachedPreparedStatement(final String cql) {
    final CachedPreparedStatementCreator cachedPreparedStatementCreator =
        new CachedPreparedStatementCreator(cql);
    final PreparedStatement preparedStatement =
        cachedPreparedStatementCreator.createPreparedStatement(cassandraTemplate.getSession());

    preparedStatement.setConsistencyLevel(ConsistencyLevel.ONE);

    return preparedStatement;
  }
  /*
   * (non-Javadoc)
   *
   * @see dal.SchoolDAL#delete(models.School)
   */
  @Override
  public int delete(School school) {

    final PreparedStatement statement = session.prepare("DELETE FROM school WHERE school_id = ?");
    final BoundStatement boundStatement = statement.bind(school.getSchoolId());

    try {
      session.executeAsync(boundStatement);
    } catch (Exception e) {
      return 0;
    }

    return 1;
  }
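Note that the try/catch in delete(School) above only catches errors thrown while submitting the query; because executeAsync() is non-blocking, a delete that later fails on the cluster still returns 1. Below is a hedged sketch of a blocking variant that makes the return value meaningful (it reuses the same session field as the example above and is not the original DAO code):
  @Override
  public int delete(School school) {
    final PreparedStatement statement = session.prepare("DELETE FROM school WHERE school_id = ?");
    final BoundStatement boundStatement = statement.bind(school.getSchoolId());
    try {
      // Block until the coordinator acknowledges the delete, so failures are actually observed here
      session.executeAsync(boundStatement).getUninterruptibly();
      return 1;
    } catch (Exception e) {
      return 0;
    }
  }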
  /*
   * (non-Javadoc)
   *
   * @see dal.SchoolDAL#getById(java.lang.String)
   */
  @Override
  public School getById(String id) {

    final PreparedStatement statement = session.prepare("SELECT * FROM school WHERE school_id = ?");
    final BoundStatement boundedStatement = statement.bind(id);

    final ResultSetFuture schoolFuture = session.executeAsync(boundedStatement);
    final Row schoolRow = schoolFuture.getUninterruptibly().one();

    School school;
    if (schoolRow != null) {
      school = createSchoolForRow(schoolRow);
    } else {
      school = null;
    }

    return school;
  }
Example #11
  public static void waitForSchemaVersionsToCoalesce(
      String encapsulatingOperationDescription, CQLKeyValueService kvs) {
    PreparedStatement peerInfoQuery =
        kvs.getPreparedStatement(
            CassandraConstants.NO_TABLE,
            "select peer, schema_version from system.peers;",
            kvs.session);
    peerInfoQuery.setConsistencyLevel(ConsistencyLevel.ALL);

    Multimap<UUID, InetAddress> peerInfo = ArrayListMultimap.create();
    long start = System.currentTimeMillis();
    long sleepTime = 100;
    do {
      peerInfo.clear();
      for (Row row : kvs.session.execute(peerInfoQuery.bind()).all()) {
        peerInfo.put(row.getUUID("schema_version"), row.getInet("peer"));
      }

      if (peerInfo.keySet().size() <= 1) { // full schema agreement
        return;
      }
      try {
        // Back off before polling system.peers again instead of spinning in a tight loop
        Thread.sleep(sleepTime);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
      }
      sleepTime = Math.min(sleepTime * 2, 5000);
    } while (System.currentTimeMillis()
        < start + CassandraConstants.SECONDS_WAIT_FOR_VERSIONS * 1000);

    StringBuilder sb = new StringBuilder();
    sb.append(
        String.format(
            "Cassandra cluster cannot come to agreement on schema versions, during operation: %s.",
            encapsulatingOperationDescription));

    for (Entry<UUID, Collection<InetAddress>> versionToPeer : peerInfo.asMap().entrySet()) {
      sb.append(String.format("\nAt schema version %s:", versionToPeer.getKey()));
      for (InetAddress peer : versionToPeer.getValue()) {
        sb.append(String.format("\n\tNode: %s", peer));
      }
    }
    sb.append(
        "\nFind the nodes above that diverge from the majority schema "
            + "(or have schema 'UNKNOWN', which likely means they are down/unresponsive) "
            + "and examine their logs to determine the issue. Fixing the underlying issue and restarting Cassandra "
            + "should resolve the problem. You can quick-check this with 'nodetool describecluster'.");
    throw new IllegalStateException(sb.toString());
  }
Example #12
  private void migrate(Set<Integer> scheduleIds, PreparedStatement query, final Bucket bucket) {
    log.info("Migrating " + bucket + " data for " + scheduleIds.size() + " schedules");

    CountDownLatch latch = new CountDownLatch(scheduleIds.size());
    MigrationProgressLogger progressLogger = new MigrationProgressLogger(bucket, latch);
    File logFile = new File(dataDir, bucket + "_migration.log");
    MigrationLog migrationLog = null;
    try {
      migrationLog = new MigrationLog(logFile);
      Set<Integer> migratedScheduleIds = migrationLog.read();
      threadPool.submit(progressLogger);
      for (Integer scheduleId : scheduleIds) {
        if (migratedScheduleIds.contains(scheduleId)) {
          log.debug(
              bucket
                  + " data for schedule id "
                  + scheduleId
                  + " has already been migrated. It will "
                  + "be skipped.");
          latch.countDown();
        } else {
          readPermits.acquire();
          ResultSet resultSet = session.execute(query.bind(scheduleId));
          ListenableFuture<Integer> migrationFuture =
              threadPool.submit(new MetricsWriter(scheduleId, bucket, resultSet));
          Futures.addCallback(
              migrationFuture, migrationFinished(scheduleId, bucket, latch, migrationLog));
        }
      }
      latch.await();
      log.info("Finished migrating " + bucket + " data");
    } catch (InterruptedException e) {
      threadPool.shutdownNow();
      throw new RuntimeException(
          "Migration of "
              + bucket
              + " data did not complete due to an interrupt. The "
              + "upgrade will have to be run again to finish the migration",
          e);
    } catch (IOException e) {
      throw new RuntimeException(
          "Migration of "
              + bucket
              + " data did not complete due to an I/O error. The "
              + "upgrade will have to be run again to finish the migration",
          e);
    } finally {
      progressLogger.finished();
      try {
        // migrationLog may still be null if its constructor threw before assignment
        if (migrationLog != null) {
          migrationLog.close();
        }
      } catch (IOException e) {
        log.warn("There was an error closing " + logFile.getAbsolutePath(), e);
      }
    }
  }
Example #13
  private void insert(Session session) {
    try {
      PreparedStatement pstmt =
          session.prepare(
              "insert into sens_by_day(s_id, s_date, s_time, s_val)  values(?, ?, ?, ?);");
      BoundStatement boundStatement = new BoundStatement(pstmt);

      // Convert the date string to a java.util.Date
      SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ssZ");
      Date formatDate = sdf.parse("2016-07-25 10:00:00JST");

      // Bind
      boundStatement.bind("s001.home", "2016-07-25", formatDate, "28.24");
      System.out.println("cql=" + pstmt.getQueryString());

      // Insert one record into the sens_by_day table
      session.execute(boundStatement);
    } catch (ParseException e) {
      e.printStackTrace();
    }
  }
Example #14
  public static Set<Peer> getPeers(Session session) {
    PreparedStatement selectPeerInfo =
        session.prepare(
            "select peer, data_center, rack, release_version, rpc_address, schema_version, tokens from system.peers;");

    Set<Peer> peers = Sets.newHashSet();

    for (Row row : session.execute(selectPeerInfo.bind()).all()) {
      Peer peer = new Peer();
      peer.peer = row.getInet("peer");
      peer.data_center = row.getString("data_center");
      peer.rack = row.getString("rack");
      peer.release_version = row.getString("release_version");
      peer.rpc_address = row.getInet("rpc_address");
      peer.schema_version = row.getUUID("schema_version");
      peer.tokens = row.getSet("tokens", String.class);
      peers.add(peer);
    }

    return peers;
  }
  /*
   * (non-Javadoc)
   *
   * @see dal.SchoolDAL#save(models.School)
   */
  @Override
  public School save(School school) {

    final PreparedStatement statement =
        session.prepare(
            "INSERT INTO student_info.school (school_id,name,address,email,gender_type) "
                + "VALUES (?,?,?,?,?)");
    final BoundStatement boundStatement =
        statement.bind(
            school.getSchoolId(),
            school.getName(),
            school.getAddress(),
            school.getEmail(),
            school.getGenderType());

    final ResultSetFuture savedSchoolFuture = session.executeAsync(boundStatement);

    // final Row response = savedSchoolFuture.getUninterruptibly().one();

    return school;
  }
  public BoundStatementWrapper bindStatementWithOnlyPKInWhereClause(
      PersistentStateHolder context,
      PreparedStatement ps,
      boolean onlyStaticColumns,
      ConsistencyLevel consistencyLevel) {

    Object primaryKey = context.getPrimaryKey();

    log.trace("Bind prepared statement {} with primary key {}", ps.getQueryString(), primaryKey);

    PropertyMeta idMeta = context.getIdMeta();
    List<Object> values = bindPrimaryKey(primaryKey, idMeta, onlyStaticColumns);

    BoundStatement bs = ps.bind(values.toArray());
    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        values.toArray(),
        getCQLLevel(consistencyLevel),
        context.getCASResultListener(),
        context.getSerialConsistencyLevel());
  }
Example #17
 public List<Auction> getAllAuctionsSparse() {
   BoundStatement bound = getAllAuctionSparse.bind();
   return session
       .execute(bound)
       .all()
       .stream()
       .map(
           row ->
               new Auction(
                   row.getString("name"),
                   row.getString("owner"),
                   Instant.ofEpochMilli(row.getLong("ends"))))
       .collect(Collectors.toList());
 }
  public BoundStatementWrapper bindForSimpleCounterDelete(
      PersistentStateHolder context, PreparedStatement ps, PropertyMeta pm) {
    EntityMeta entityMeta = context.getEntityMeta();
    Object primaryKey = context.getPrimaryKey();

    log.trace(
        "Bind prepared statement {} for simple counter delete for {} using primary key {}",
        ps.getQueryString(),
        pm,
        primaryKey);

    ConsistencyLevel consistencyLevel = overrider.getWriteLevel(context);

    Object[] boundValues = extractValuesForSimpleCounterBinding(entityMeta, pm, primaryKey);
    BoundStatement bs = ps.bind(boundValues);
    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        boundValues,
        getCQLLevel(consistencyLevel),
        NO_LISTENER,
        NO_SERIAL_CONSISTENCY);
  }
Example #19
    @Override
    public void indexTimeseriesId(
        final LabelId metric, final List<LabelId> tags, final ByteBuffer timeSeriresId) {
      final long longMetric = toLong(metric);
      final Map<Long, Long> longTags = toMap(tags);

      session.executeAsync(
          insertTagsStatement
              .bind()
              .setLong(0, longMetric)
              .setString(1, LabelType.METRIC.toValue())
              .setBytesUnsafe(2, timeSeriresId)
              .setLong(3, longMetric)
              .setMap(4, longTags));

      final Iterator<LabelId> tagIterator = tags.iterator();

      while (tagIterator.hasNext()) {
        session.executeAsync(
            insertTagsStatement
                .bind()
                .setLong(0, toLong(tagIterator.next()))
                .setString(1, LabelType.TAGK.toValue())
                .setBytesUnsafe(2, timeSeriresId)
                .setLong(3, longMetric)
                .setMap(4, longTags));

        session.executeAsync(
            insertTagsStatement
                .bind()
                .setLong(0, toLong(tagIterator.next()))
                .setString(1, LabelType.TAGV.toValue())
                .setBytesUnsafe(2, timeSeriresId)
                .setLong(3, longMetric)
                .setMap(4, longTags));
      }
    }
  public BoundStatementWrapper bindForClusteredCounterDelete(
      PersistentStateHolder context, PreparedStatement ps) {
    EntityMeta entityMeta = context.getEntityMeta();
    Object primaryKey = context.getPrimaryKey();

    log.trace(
        "Bind prepared statement {} for simple counter delete for {} using primary key {}",
        ps.getQueryString(),
        entityMeta,
        primaryKey);

    ConsistencyLevel consistencyLevel = overrider.getWriteLevel(context);
    List<Object> primaryKeys = bindPrimaryKey(primaryKey, entityMeta.getIdMeta(), false);
    Object[] boundValues = primaryKeys.toArray(new Object[primaryKeys.size()]);
    BoundStatement bs = ps.bind(boundValues);

    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        boundValues,
        getCQLLevel(consistencyLevel),
        NO_LISTENER,
        NO_SERIAL_CONSISTENCY);
  }
  public BoundStatementWrapper bindForInsert(
      PersistentStateHolder context, PreparedStatement ps, List<PropertyMeta> pms) {

    EntityMeta entityMeta = context.getEntityMeta();
    Object entity = context.getEntity();

    log.trace("Bind prepared statement {} for insert of entity {}", ps.getQueryString(), entity);

    ConsistencyLevel consistencyLevel = overrider.getWriteLevel(context);

    List<Object> values = new ArrayList<>();
    values.addAll(fetchPrimaryKeyValues(entityMeta, entity, false));
    values.addAll(fetchPropertiesValues(pms, entity));
    values.addAll(fetchTTLAndTimestampValues(context));

    BoundStatement bs = ps.bind(values.toArray());
    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        values.toArray(),
        getCQLLevel(consistencyLevel),
        context.getCASResultListener(),
        context.getSerialConsistencyLevel());
  }
 public List<ProjectRegionMilestone> findAll() {
   List<ProjectRegionMilestone> projectRegionMilestones = new ArrayList<>();
   BoundStatement stmt = findAllStmt.bind();
   session
       .execute(stmt)
       .all()
       .stream()
       .map(
           row -> {
             ProjectRegionMilestone projectRegionMilestone = new ProjectRegionMilestone();
             projectRegionMilestone.setId(row.getUUID("id"));
             projectRegionMilestone.setPlanned(row.getDate("planned"));
             projectRegionMilestone.setActual(row.getDate("actual"));
             return projectRegionMilestone;
           })
       .forEach(projectRegionMilestones::add);
   return projectRegionMilestones;
 }
  public BoundStatementWrapper bindForCollectionAndMapUpdate(
      PersistentStateHolder context, PreparedStatement ps, DirtyCheckChangeSet changeSet) {
    EntityMeta entityMeta = context.getEntityMeta();
    Object entity = context.getEntity();

    log.trace(
        "Bind prepared statement {} for collection/map update of entity {}",
        ps.getQueryString(),
        entity);

    ConsistencyLevel consistencyLevel = overrider.getWriteLevel(context);

    List<Object> values = new ArrayList<>();
    final CollectionAndMapChangeType changeType = changeSet.getChangeType();

    values.addAll(fetchTTLAndTimestampValues(context));

    switch (changeType) {
      case ASSIGN_VALUE_TO_LIST:
        values.add(changeSet.getEncodedListChanges());
        break;
      case ASSIGN_VALUE_TO_SET:
        values.add(changeSet.getEncodedSetChanges());
        break;
      case ASSIGN_VALUE_TO_MAP:
        values.add(changeSet.getEncodedMapChanges());
        break;
      case REMOVE_COLLECTION_OR_MAP:
        values.add(null);
        break;
      case ADD_TO_SET:
      case REMOVE_FROM_SET:
        values.add(changeSet.getEncodedSetChanges());
        break;
      case APPEND_TO_LIST:
      case PREPEND_TO_LIST:
      case REMOVE_FROM_LIST:
        values.add(changeSet.getEncodedListChanges());
        break;
      case SET_TO_LIST_AT_INDEX:
        // No prepared statement for set list element at index
        throw new IllegalStateException("Cannot bind statement to set element at index for list");
      case REMOVE_FROM_LIST_AT_INDEX:
        // No prepared statement for removing a list element at index
        throw new IllegalStateException(
            "Cannot bind statement to remove element at index for list");
      case ADD_TO_MAP:
        values.add(changeSet.getEncodedMapChanges());
        break;
      case REMOVE_FROM_MAP:
        values.add(changeSet.getEncodedMapChanges().keySet().iterator().next());
        values.add(null);
        break;
    }

    values.addAll(
        fetchPrimaryKeyValues(
            entityMeta, entity, changeSet.getPropertyMeta().structure().isStaticColumn()));
    values.addAll(fetchCASConditionsValues(context, entityMeta));
    BoundStatement bs = ps.bind(values.toArray());

    return new BoundStatementWrapper(
        context.getEntityClass(),
        bs,
        values.toArray(),
        getCQLLevel(consistencyLevel),
        context.getCASResultListener(),
        context.getSerialConsistencyLevel());
  }
Example #24
 public UUID placeBid(String auctionName, String user, Long amount) {
   UUID uuid = UUIDs.timeBased();
   BoundStatement bound = storeBid.bind(auctionName, uuid, amount, user);
   session.execute(bound);
   return uuid;
 }
  public static void main(String args[]) {
    String[] calles_28001 = {"Alcala", "Preciados", "Gran Via", "Princesa"};
    String[] calles_28002 = {"Castellana", "Goya", "Serrano", "Velazquez"};

    String addres = "Alcala";
    String cpCode = "28001";

    int index_28001 = 0;
    int index_28002 = 0;

    List<User> users = new ArrayList<User>();

    for (int i = 0; i < 2000; i++) {

      String id = (i + 1) + "";
      String email = "user" + id + "@void.com";
      String nombre = "nombre_" + id;
      String cp;
      String calle;
      if (i % 2 == 0) {
        cp = "28001";
        calle = calles_28001[index_28001];
        index_28001++;
        index_28001 = index_28001 % 4;
      } else {
        cp = "28002";
        calle = calles_28002[index_28002];
        index_28002++;
        index_28002 = index_28002 % 4;
      }

      User user = new User(id, email, nombre, cp, calle);
      users.add(user);
    }

    // Connect and create the column family
    Cluster cluster;
    String node = "127.0.0.1";

    cluster = Cluster.builder().addContactPoint(node).build();

    Session session;
    session = cluster.connect();

    session.execute("drop keyspace utad_cql");

    session.execute(
        "CREATE KEYSPACE utad_cql WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};");

    // Create the table with a primary key composed of cp, calle and id_usuario,
    // to make sure each inserted record is unique (discriminated by id_usuario)
    session.execute(
        "CREATE TABLE utad_cql.usersByCPAddress (id_usuario int, cp int, nombre text, email text, calle text, primary key(cp, calle, id_usuario));");

    PreparedStatement ps1 =
        session.prepare(
            "Insert into utad_cql.usersByCPAddress(id_usuario, cp, nombre, email, calle) values (?, ?, ?, ?, ?)");

    BatchStatement batch = new BatchStatement();

    // Perform the inserts into the database
    for (User user : users) {
      int id = Integer.parseInt(user.id);
      int cp = Integer.parseInt(user.cp);
      String nombre = user.nombre;
      String email = user.email;
      String calle = user.calle;

      batch.add(ps1.bind(id, cp, nombre, email, calle));
    }

    session.execute(batch);
    System.out.println("\nRegistros insertados en Cassandra");

    // Show the corresponding result; the fields not requested in the output are
    // commented out, so they can be uncommented to confirm the data is correct.
    // The result sample is limited to 20 records.
    StringBuilder sbQuery = new StringBuilder();
    sbQuery.append("select id_usuario, nombre, email ");
    // sbQuery.append(",cp, calle  ");
    sbQuery.append("from utad_cql.usersByCPAddress where calle='").append(addres);
    sbQuery.append("' and cp=").append(cpCode);
    sbQuery.append(" limit 20");

    ResultSet results = session.execute(sbQuery.toString());

    // Read the retrieved rows.
    for (Row row : results) {
      System.out.println("\nid : " + row.getInt("id_usuario"));
      System.out.println("nombre : " + row.getString("nombre"));
      System.out.println("email : " + row.getString("email"));
      //			System.out.println("cp : " + row.getInt("cp"));
      //			System.out.println("calle : " + row.getString("calle"));
    }

    cluster.close();
    System.out.println("\nFin de la ejecución");
  }
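The main() example above sends all 2,000 inserts in a single BatchStatement. Large multi-partition batches put extra load on the coordinator and can exceed Cassandra's batch_size_warn_threshold_in_kb / batch_size_fail_threshold_in_kb settings. Below is a hedged sketch of the same insert loop flushed in smaller chunks, reusing users, ps1 and session from the example above; the chunk size of 100 is illustrative.
    // Sketch: flush the batch every 100 statements instead of sending one huge batch
    BatchStatement batch = new BatchStatement();
    int pending = 0;
    for (User user : users) {
      batch.add(ps1.bind(
          Integer.parseInt(user.id),
          Integer.parseInt(user.cp),
          user.nombre,
          user.email,
          user.calle));
      if (++pending == 100) {
        session.execute(batch);
        batch.clear();
        pending = 0;
      }
    }
    if (pending > 0) {
      session.execute(batch); // flush the remainder
    }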
 public void deleteAll() {
   BoundStatement stmt = truncateStmt.bind();
   session.execute(stmt);
 }
Example #27
 public void createAuction(Auction auction) {
   BoundStatement bound =
       createAuction.bind(auction.getName(), auction.getOwner(), auction.getEnds().toEpochMilli());
   session.execute(bound);
 }