/** {@inheritDoc} */
@Override public void txEnd(GridCacheTx tx, boolean commit) throws GridException {
    init();

    Session ses = tx.removeMeta(ATTR_SES);

    if (ses != null) {
        Transaction hTx = ses.getTransaction();

        if (hTx != null) {
            try {
                if (commit) {
                    ses.flush();

                    hTx.commit();
                }
                else
                    hTx.rollback();

                if (log.isDebugEnabled())
                    log.debug("Transaction ended [xid=" + tx.xid() + ", commit=" + commit + ']');
            }
            catch (HibernateException e) {
                throw new GridException("Failed to end transaction [xid=" + tx.xid() +
                    ", commit=" + commit + ']', e);
            }
            finally {
                ses.close();
            }
        }
    }
}
/**
 * Gets Hibernate session.
 *
 * @param tx Cache transaction.
 * @return Session.
 */
Session session(@Nullable GridCacheTx tx) {
    Session ses;

    if (tx != null) {
        ses = tx.meta(ATTR_SES);

        if (ses == null) {
            ses = sesFactory.openSession();

            ses.beginTransaction();

            // Store session in transaction metadata, so it can be accessed
            // for other operations on the same transaction.
            tx.addMeta(ATTR_SES, ses);

            if (log.isDebugEnabled())
                log.debug("Hibernate session open [ses=" + ses + ", tx=" + tx.xid() + "]");
        }
    }
    else {
        ses = sesFactory.openSession();

        ses.beginTransaction();
    }

    return ses;
}
/** {@inheritDoc} */ @SuppressWarnings({"unchecked", "RedundantTypeArguments"}) @Override public V load(@Nullable GridCacheTx tx, K key) throws GridException { init(); if (log.isDebugEnabled()) log.debug("Store load [key=" + key + ", tx=" + tx + ']'); Session ses = session(tx); try { GridCacheHibernateBlobStoreEntry entry = (GridCacheHibernateBlobStoreEntry) ses.get(GridCacheHibernateBlobStoreEntry.class, toBytes(key)); if (entry == null) return null; return fromBytes(entry.getValue()); } catch (HibernateException e) { rollback(ses, tx); throw new GridException("Failed to load value from cache store with key: " + key, e); } finally { end(ses, tx); } }
/** {@inheritDoc} */
@Override public void put(@Nullable GridCacheTx tx, K key, @Nullable V val) throws GridException {
    init();

    if (log.isDebugEnabled())
        log.debug("Store put [key=" + key + ", val=" + val + ", tx=" + tx + ']');

    if (val == null) {
        remove(tx, key);

        return;
    }

    Session ses = session(tx);

    try {
        GridCacheHibernateBlobStoreEntry entry =
            new GridCacheHibernateBlobStoreEntry(toBytes(key), toBytes(val));

        ses.saveOrUpdate(entry);
    }
    catch (HibernateException e) {
        rollback(ses, tx);

        throw new GridException("Failed to put value to cache store [key=" + key + ", val=" + val + "]", e);
    }
    finally {
        end(ses, tx);
    }
}
/**
 * Ends Hibernate session.
 *
 * @param ses Hibernate session.
 * @param tx Cache ongoing transaction.
 */
private void end(Session ses, GridCacheTx tx) {
    // Commit only if there is no cache transaction,
    // otherwise txEnd() will do all required work.
    if (tx == null) {
        Transaction hTx = ses.getTransaction();

        if (hTx != null && hTx.isActive())
            hTx.commit();

        ses.close();
    }
}
private void refreshEndpointMap()
{
    // Rebuild the token range -> replica endpoints mapping from the current cluster metadata.
    String keyspace = ConfigHelper.getOutputKeyspace(conf);
    try (Session session = CqlConfigHelper.getOutputCluster(ConfigHelper.getOutputInitialAddress(conf), conf)
                                          .connect(keyspace))
    {
        rangeMap = new HashMap<>();
        metadata = session.getCluster().getMetadata();
        Set<TokenRange> ranges = metadata.getTokenRanges();
        for (TokenRange range : ranges)
        {
            rangeMap.put(range, metadata.getReplicas(keyspace, range));
        }
    }
}
private DarkBotMCSpambot(MinecraftBotData data, String owner) throws AuthenticationException, UnsupportedProtocolException {
    super(data);

    synchronized (bots) {
        bots.add(this);
    }

    addOwner(owner);
    addBackend(new ChatBackend(this));

    // Register background tasks.
    TaskManager taskManager = bot.getTaskManager();
    taskManager.registerTask(new FallTask(bot));
    taskManager.registerTask(new FollowTask(bot));
    taskManager.registerTask(new DefendTask(bot));
    taskManager.registerTask(new AttackTask(bot));
    taskManager.registerTask(new HostileTask(bot));
    taskManager.registerTask(new EatTask(bot));

    // Register chat commands.
    commandManager.register(new AttackAllCommand(this));
    commandManager.register(new AttackCommand(this));
    commandManager.register(new CalcCommand(this));
    commandManager.register(new ChatDelayCommand(this));
    commandManager.register(new DropAllCommand(this));
    commandManager.register(new DropCommand(this));
    commandManager.register(new DropIdCommand(this));
    commandManager.register(new EquipCommand(this));
    commandManager.register(new FollowCommand(this));
    commandManager.register(new InteractCommand(this));
    commandManager.register(new OwnerCommand(this));
    commandManager.register(new QuitCommand(this));
    commandManager.register(new SayCommand(this));
    commandManager.register(new SetWalkCommand(this));
    commandManager.register(new SpamCommand(this));
    commandManager.register(new StatusCommand(this));
    commandManager.register(new StopCommand(this));
    commandManager.register(new SwitchCommand(this));
    commandManager.register(new ToolCommand(this));
    commandManager.register(new WalkCommand(this));

    connectionHandler = bot.getConnectionHandler();

    Session session = bot.getSession();
    System.out.println("[" + session.getUsername() + "] Done! (" + bots.size() + ")");
}
/** {@inheritDoc} */ @SuppressWarnings({"JpaQueryApiInspection", "JpaQlInspection"}) @Override public void remove(@Nullable GridCacheTx tx, K key) throws GridException { init(); if (log.isDebugEnabled()) log.debug("Store remove [key=" + key + ", tx=" + tx + ']'); Session ses = session(tx); try { Object obj = ses.get(GridCacheHibernateBlobStoreEntry.class, toBytes(key)); if (obj != null) ses.delete(obj); } catch (HibernateException e) { rollback(ses, tx); throw new GridException("Failed to remove value from cache store with key: " + key, e); } finally { end(ses, tx); } }
/** Gets the prepared statement for this writer's CQL query from the cache, otherwise prepares it against the Cassandra server. */
private PreparedStatement preparedStatement(Session client)
{
    PreparedStatement statement = preparedStatements.get(client);
    if (statement == null)
    {
        PreparedStatement result;
        try
        {
            result = client.prepare(cql);
        }
        catch (NoHostAvailableException e)
        {
            throw new RuntimeException("failed to prepare cql query " + cql, e);
        }

        // Another thread may have prepared the statement concurrently; keep whichever was cached first.
        PreparedStatement previousId = preparedStatements.putIfAbsent(client, result);
        statement = previousId == null ? result : previousId;
    }
    return statement;
}
CqlRecordWriter(Configuration conf)
{
    this.conf = conf;
    this.queueSize = conf.getInt(ColumnFamilyOutputFormat.QUEUE_SIZE, 32 * FBUtilities.getAvailableProcessors());
    batchThreshold = conf.getLong(ColumnFamilyOutputFormat.BATCH_THRESHOLD, 32);
    this.clients = new HashMap<>();

    try
    {
        String keyspace = ConfigHelper.getOutputKeyspace(conf);
        try (Session client = CqlConfigHelper.getOutputCluster(ConfigHelper.getOutputInitialAddress(conf), conf)
                                             .connect(keyspace))
        {
            ringCache = new NativeRingCache(conf);
            if (client != null)
            {
                TableMetadata tableMetadata = client.getCluster()
                                                    .getMetadata()
                                                    .getKeyspace(client.getLoggedKeyspace())
                                                    .getTable(ConfigHelper.getOutputColumnFamily(conf));

                clusterColumns = tableMetadata.getClusteringColumns();
                partitionKeyColumns = tableMetadata.getPartitionKey();

                String cqlQuery = CqlConfigHelper.getOutputCql(conf).trim();
                if (cqlQuery.toLowerCase().startsWith("insert"))
                    throw new UnsupportedOperationException("INSERT with CqlRecordWriter is not supported, please use UPDATE/DELETE statement");

                cql = appendKeyWhereClauses(cqlQuery);
            }
            else
            {
                throw new IllegalArgumentException("Invalid configuration specified " + conf);
            }
        }
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }
}
public String[] mobTimes() {
    long totalMOBMillis = 0;
    long topMOBMillis = 0;
    String topMOBClient = "";

    for (Session S : CMLib.sessions().toArray()) {
        totalMOBMillis += S.getTotalMillis();

        if (S.getTotalMillis() > topMOBMillis) {
            topMOBMillis = S.getTotalMillis();

            MOB M = S.mob();
            topMOBClient = (M == null) ? S.getAddress() : M.name();
        }
    }

    return new String[] {
        CMLib.english().returnTime(totalMOBMillis, 0),
        CMLib.english().returnTime(topMOBMillis, 0),
        topMOBClient
    };
}
protected void closeInternal()
{
    if (client != null)
    {
        client.close();
    }
}
/** Loops collecting CQL bound variable values from the queue and sending them to Cassandra. */
public void run()
{
    outer:
    while (run || !queue.isEmpty())
    {
        List<ByteBuffer> bindVariables;
        try
        {
            bindVariables = queue.take();
        }
        catch (InterruptedException e)
        {
            // re-check loop condition after interrupt
            continue;
        }

        ListIterator<InetAddress> iter = endpoints.listIterator();
        while (true)
        {
            // send the mutation to the last-used endpoint. first time through, this will NPE harmlessly.
            try
            {
                int i = 0;
                PreparedStatement statement = preparedStatement(client);
                while (bindVariables != null)
                {
                    BoundStatement boundStatement = new BoundStatement(statement);
                    for (int columnPosition = 0; columnPosition < bindVariables.size(); columnPosition++)
                    {
                        boundStatement.setBytesUnsafe(columnPosition, bindVariables.get(columnPosition));
                    }
                    client.execute(boundStatement);
                    i++;

                    if (i >= batchThreshold)
                        break;

                    bindVariables = queue.poll();
                }
                break;
            }
            catch (Exception e)
            {
                closeInternal();
                if (!iter.hasNext())
                {
                    lastException = new IOException(e);
                    break outer;
                }
            }

            // attempt to connect to a different endpoint
            try
            {
                InetAddress address = iter.next();
                String host = address.getHostName();
                client = CqlConfigHelper.getOutputCluster(host, conf).connect();
            }
            catch (Exception e)
            {
                // If connection died due to Interrupt, just try connecting to the endpoint again.
                // There are too many ways for the Thread.interrupted() state to be cleared, so
                // we can't rely on that here. Until the java driver gives us a better way of knowing
                // that this exception came from an InterruptedException, this is the best solution.
                if (canRetryDriverConnection(e))
                {
                    iter.previous();
                }
                closeInternal();

                // Most exceptions mean something unexpected went wrong to that endpoint, so
                // we should try again to another. Other exceptions (auth or invalid request) are fatal.
                if ((e instanceof AuthenticationException || e instanceof InvalidQueryException) || !iter.hasNext())
                {
                    lastException = new IOException(e);
                    break outer;
                }
            }
        }
    }

    // close all our connections once we are done.
    closeInternal();
}