public void prepare(String query, InetAddress toExclude) throws InterruptedException {
    for (Map.Entry<Host, HostConnectionPool> entry : pools.entrySet()) {
        if (entry.getKey().getAddress().equals(toExclude))
            continue;

        // Let's not wait too long if we can't get a connection. Things
        // will fix themselves once the user tries a query anyway.
        Connection c = null;
        try {
            c = entry.getValue().borrowConnection(200, TimeUnit.MILLISECONDS);
            c.write(new PrepareMessage(query)).get();
        } catch (ConnectionException e) {
            // Again, not being able to prepare the query right now is no big deal, so just ignore
        } catch (BusyConnectionException e) {
            // Same as above
        } catch (TimeoutException e) {
            // Same as above
        } catch (ExecutionException e) {
            // We shouldn't really get an exception while preparing a
            // query, so log it (but otherwise ignore it, as it's not a big deal)
            logger.error(String.format("Unexpected error while preparing query (%s) on %s", query, entry.getKey()), e);
        } finally {
            if (c != null)
                entry.getValue().returnConnection(c);
        }
    }
}
public void sendResponse(ClientEndpoint endpoint, Object response) {
    if (response instanceof Throwable) {
        response = ClientExceptionConverters.get(endpoint.getClientType()).convert((Throwable) response);
    }
    final Data resultData = response != null ? serializationService.toData(response) : NULL;
    Connection conn = endpoint.getConnection();
    conn.write(new DataAdapter(resultData, serializationService.getSerializationContext()));
}
void bind(final ClientEndpoint endpoint) {
    final Connection conn = endpoint.getConnection();
    if (conn instanceof TcpIpConnection) {
        Address address = new Address(conn.getRemoteSocketAddress());
        TcpIpConnectionManager connectionManager = (TcpIpConnectionManager) node.getConnectionManager();
        connectionManager.bind((TcpIpConnection) conn, address, null, false);
    }
    sendClientEvent(endpoint);
}
private boolean discardAvailableConnections(long timeout, TimeUnit unit) throws InterruptedException {
    long start = System.nanoTime();
    boolean success = true;
    for (Connection connection : connections) {
        success &= connection.close(timeout - Cluster.timeSince(start, unit), unit);
        open.decrementAndGet();
    }
    return success;
}
public void connectionRemoved(Connection connection) {
    if (connection.isClient() && connection instanceof TcpIpConnection && nodeEngine.isActive()) {
        final ClientEndpoint endpoint = endpoints.get(connection);
        if (endpoint != null && node.getLocalMember().getUuid().equals(endpoint.getPrincipal().getOwnerUuid())) {
            removeEndpoint(connection, true);
            if (!endpoint.isFirstConnection()) {
                return;
            }
            NodeEngine nodeEngine = node.nodeEngine;
            final Collection<MemberImpl> memberList = nodeEngine.getClusterService().getMemberList();
            for (MemberImpl member : memberList) {
                final ClientDisconnectionOperation op = new ClientDisconnectionOperation(endpoint.getUuid());
                op.setNodeEngine(nodeEngine)
                        .setServiceName(SERVICE_NAME)
                        .setService(this)
                        .setResponseHandler(ResponseHandlerFactory.createEmptyResponseHandler());
                if (member.localMember()) {
                    nodeEngine.getOperationService().runOperation(op);
                } else {
                    nodeEngine.getOperationService().send(op, member.getAddress());
                }
            }
        }
    }
}
public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException {
    if (isShutdown.get())
        // Note: throwing a ConnectionException is probably fine in practice as it will trigger the
        // creation of a new host.
        // That being said, maybe having a specific exception could be cleaner.
        throw new ConnectionException(host.getAddress(), "Pool is shutdown");

    if (connections.isEmpty()) {
        for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++) {
            // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to
            // protect against creating connection in excess of core too quickly
            scheduledForCreation.incrementAndGet();
            manager.executor().submit(newConnectionTask);
        }
        Connection c = waitForConnection(timeout, unit);
        c.setKeyspace(manager.poolsState.keyspace);
        return c;
    }

    int minInFlight = Integer.MAX_VALUE;
    Connection leastBusy = null;
    for (Connection connection : connections) {
        int inFlight = connection.inFlight.get();
        if (inFlight < minInFlight) {
            minInFlight = inFlight;
            leastBusy = connection;
        }
    }

    if (minInFlight >= options().getMaxSimultaneousRequestsPerConnectionThreshold(hostDistance)
            && connections.size() < options().getMaxConnectionsPerHost(hostDistance))
        maybeSpawnNewConnection();

    while (true) {
        int inFlight = leastBusy.inFlight.get();
        if (inFlight >= Connection.MAX_STREAM_PER_CONNECTION) {
            leastBusy = waitForConnection(timeout, unit);
            break;
        }
        if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1))
            break;
    }
    leastBusy.setKeyspace(manager.poolsState.keyspace);
    return leastBusy;
}
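// Illustrative only - a minimal caller-side sketch of the borrow/return contract above,
// mirroring the pattern already used in prepare(): borrow with a short timeout, use the
// connection, and always return it in a finally block. The pool and query arguments are assumptions.
void runOnPool(HostConnectionPool pool, String query) throws InterruptedException {
    Connection c = null;
    try {
        c = pool.borrowConnection(200, TimeUnit.MILLISECONDS);
        c.write(new PrepareMessage(query)).get();
    } catch (ConnectionException e) {
        // transient: the caller may retry on another host
    } catch (BusyConnectionException e) {
        // transient: the pool was saturated
    } catch (TimeoutException e) {
        // transient: no connection became available within the timeout
    } catch (ExecutionException e) {
        // the request itself failed; log or rethrow as appropriate
    } finally {
        if (c != null)
            pool.returnConnection(c); // decrements inFlight and may trash an idle excess connection
    }
}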
/**
 * Removes the RabbitReconnector from the connection if it's not null (to prevent a reconnect),
 * then closes the connection if it's not null. IOExceptions from close() are swallowed.
 *
 * @param conn the connection to close; may be null
 * @param reconnector the shutdown listener to remove from the connection; may be null
 */
public static void closeConnectionAndRemoveReconnector(Connection conn, RabbitReconnector reconnector) {
    if (conn == null) {
        return;
    }
    if (reconnector != null) {
        conn.removeShutdownListener(reconnector);
    }
    try {
        conn.close();
    } catch (IOException e) {
        e.printStackTrace();
        // throw new RuntimeException(e);
    }
}
private Connection _newConnection(int numThreads) throws IOException {
    ConnectionFactory connFactory = new ConnectionFactory();
    ThreadFactory threadFactory = DaemonThreadFactory.getInstance("RabbitMQ-ConsumerThread", true);
    final ExecutorService executor = Executors.newFixedThreadPool(numThreads, threadFactory);
    Address[] array = addresses.toArray(new Address[0]);
    Connection conn = connFactory.newConnection(executor, array);
    conn.addShutdownListener(new ShutdownListener() {
        @Override
        public void shutdownCompleted(ShutdownSignalException sse) {
            // Shut down the consumer executor once the connection is gone so its threads don't leak.
            executor.shutdown();
        }
    });
    return conn;
}
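// Illustrative only - a hedged sketch of how a caller might consume messages on a connection
// created by _newConnection(). The queue name "work-queue" and the handler body are assumptions;
// createChannel/queueDeclare/basicConsume are standard RabbitMQ Java client calls, and deliveries
// are dispatched on the ExecutorService passed to newConnection above.
private void startConsumer(Connection conn) throws IOException {
    Channel channel = conn.createChannel();
    channel.queueDeclare("work-queue", true, false, false, null);
    channel.basicConsume("work-queue", true, new DefaultConsumer(channel) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope,
                                   AMQP.BasicProperties properties, byte[] body) throws IOException {
            // process body; autoAck=true, so no explicit basicAck is needed
        }
    });
}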
public void shutdown() {
    for (ClientEndpoint endpoint : endpoints.values()) {
        try {
            endpoint.destroy();
        } catch (LoginException e) {
            logger.finest(e.getMessage());
        }
        try {
            final Connection conn = endpoint.getConnection();
            if (conn.live()) {
                conn.close();
            }
        } catch (Exception e) {
            logger.finest(e);
        }
    }
    endpoints.clear();
}
public void returnConnection(Connection connection) {
    int inFlight = connection.inFlight.decrementAndGet();

    if (connection.isDefunct()) {
        if (host.getMonitor().signalConnectionFailure(connection.lastException()))
            shutdown();
        else
            replace(connection);
    } else {
        // If the connection was trashed while it was borrowed, close it once it becomes idle.
        if (trash.contains(connection) && inFlight == 0) {
            if (trash.remove(connection))
                close(connection);
            return;
        }
        if (connections.size() > options().getCoreConnectionsPerHost(hostDistance)
                && inFlight <= options().getMinSimultaneousRequestsPerConnectionThreshold(hostDistance)) {
            trashConnection(connection);
        } else {
            signalAvailableConnection();
        }
    }
}
private void destroyEndpoint(ClientEndpoint endpoint, boolean closeImmediately) {
    if (endpoint != null) {
        logger.info("Destroying " + endpoint);
        try {
            endpoint.destroy();
        } catch (LoginException e) {
            logger.warning(e);
        }
        final Connection connection = endpoint.getConnection();
        if (closeImmediately) {
            try {
                connection.close();
            } catch (Throwable e) {
                logger.warning("While closing client connection: " + connection, e);
            }
        } else {
            nodeEngine.getExecutionService().schedule(new Runnable() {
                public void run() {
                    if (connection.live()) {
                        try {
                            connection.close();
                        } catch (Throwable e) {
                            logger.warning("While closing client connection: " + e.toString());
                        }
                    }
                }
            }, 1111, TimeUnit.MILLISECONDS);
        }
        sendClientEvent(endpoint);
    }
}
public ClientEndpoint createNew(Connection conn) {
    return new ClientEndpoint(ClientEngineImpl.this, conn, UuidUtil.createClientUuid(conn.getEndPoint()));
}
public void loadTables(String tableNames, String procNames)
        throws SQLException, IOException, InterruptedException, ExecutionException {
    String[] tableNameArray = tableNames != null && !"".equals(tableNames) ? tableNames.split(",") : null;
    String[] procNameArray = procNames != null && !"".equals(procNames) ? procNames.split(",") : null;

    if (tableNameArray == null) {
        // Nothing to load. The original null check sat after the array had already been
        // dereferenced, so it could never prevent the NullPointerException; guard up front instead.
        return;
    }

    ExecutorService executor = Executors.newFixedThreadPool(tableNameArray.length * 3);
    CompletionService completion = new ExecutorCompletionService(executor);

    for (int j = 0; j < tableNameArray.length; j++) {
        String tableName = tableNameArray[j];
        String procName = procNameArray != null ? procNameArray[j] : "";

        // if procName not provided, use the default VoltDB TABLENAME.insert procedure
        if (procName.length() == 0) {
            if (tableName.contains("..")) {
                procName = tableName.split("\\.\\.")[1].toUpperCase() + ".insert";
            } else {
                procName = tableName.toUpperCase() + ".insert";
            }
        }

        // query the table
        String jdbcSelect = "SELECT * FROM " + tableName + ";";
        // create query to find count
        String countquery = jdbcSelect.replace("*", "COUNT(*)");
        int pages = 1;
        String base = "";

        if (config.srisvoltdb) {
            if (config.isPaginated) {
                try {
                    // find count; remove any <column> marker from the query before counting
                    if (countquery.contains("<") || countquery.contains(">")) {
                        int bracketOpen = countquery.indexOf("<");
                        int bracketClose = countquery.indexOf(">");
                        String orderCol = countquery.substring(bracketOpen + 1, bracketClose);
                        countquery = countquery.replace("<" + orderCol + ">", "");
                    }
                    VoltTable vcount = client.callProcedure("@AdHoc", countquery).getResults()[0];
                    // COUNT(*) returns a single scalar; parsing vcount.toString() would not yield a number
                    int count = (int) vcount.asScalarLong();
                    // determine number of pages from total data and page size
                    pages = (int) Math.ceil((double) count / config.pageSize);
                    System.out.println(pages);
                } catch (Exception e) {
                    System.out.println("Count formation failure!");
                }
            }
        } else {
            // find count via JDBC
            Connection conn = DriverManager.getConnection(config.jdbcurl, config.jdbcuser, config.jdbcpassword);
            base = conn.getMetaData().getDatabaseProductName().toLowerCase();
            Statement jdbcStmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
            if (countquery.contains("<") || countquery.contains(">")) {
                int bracketOpen = countquery.indexOf("<");
                int bracketClose = countquery.indexOf(">");
                String orderCol = countquery.substring(bracketOpen + 1, bracketClose);
                countquery = countquery.replace("<" + orderCol + ">", "");
            }
            ResultSet rcount = jdbcStmt.executeQuery(countquery);
            // determine number of pages from total data and page size
            if (base.contains("postgres") && config.isPaginated) {
                // advance to the single COUNT(*) row and read the scalar column
                rcount.next();
                int count = rcount.getInt(1);
                pages = (int) Math.ceil((double) count / config.pageSize);
            }
        }

        // establish new SourceReaders and DestinationWriters for pages
        SourceReader[] sr = new SourceReader[pages];
        DestinationWriter[] cr = new DestinationWriter[pages];
        for (int i = 0; i < pages; i++) {
            sr[i] = new SourceReader();
            cr[i] = new DestinationWriter();
        }
        Controller processor = new Controller<ArrayList<Object[]>>(client, sr, cr, jdbcSelect, procName, config, pages, base);
        completion.submit(processor);
    }

    // wait for all tasks to complete.
    for (int i = 0; i < tableNameArray.length; ++i) {
        // take() blocks until the next sub-task has completed
        logger.info("****************" + completion.take().get() + " completed *****************");
    }
    executor.shutdown();
}
public void load(String queryFile, String modules, String tables)
        throws SQLException, IOException, InterruptedException, ExecutionException {
    Properties properties = new Properties();
    properties.load(new FileInputStream(queryFile));
    Collection<String> keys = properties.stringPropertyNames();

    // Filtering by validating if property starts with any of the module names
    if (!Config.ALL.equalsIgnoreCase(modules)) {
        keys = Util.filter(keys, "^(" + modules.replaceAll(Config.COMMA_SEPARATOR, Config.MODULE_SUFFIX) + ")");
    }
    // Filtering by table names
    if (!Config.ALL.equalsIgnoreCase(tables)) {
        keys = Util.filter(keys, "(" + tables.replaceAll(Config.COMMA_SEPARATOR, Config.TABLE_SUFFIX) + ")$");
    }
    logger.info("The final modules and tables that are being considered: " + keys.toString());

    ExecutorService executor = Executors.newFixedThreadPool(keys.size() * 3);
    CompletionService completion = new ExecutorCompletionService(executor);

    for (String key : keys) {
        String query = properties.getProperty(key);
        key = (key.contains(Config.DOT_SEPARATOR) ? key.substring(key.indexOf(Config.DOT_SEPARATOR) + 1) : key);

        // Substitute [:param] placeholders in the query with the corresponding property values
        while (query.contains("[:")) {
            String param = query.substring(query.indexOf("[:") + 2, query.indexOf("]"));
            query = query.replaceFirst("\\[\\:" + param + "\\]", properties.getProperty(param));
        }

        int pages = 1;
        String base = "";
        if (config.srisvoltdb) {
            if (config.isPaginated) {
                try {
                    // find count; remove any <column> marker from the query before counting
                    String countquery = query;
                    if (countquery.contains("<") || countquery.contains(">")) {
                        int bracketOpen = countquery.indexOf("<");
                        int bracketClose = countquery.indexOf(">");
                        String orderCol = countquery.substring(bracketOpen + 1, bracketClose);
                        countquery = countquery.replace("<" + orderCol + ">", "");
                    }
                    VoltTable vcount = client.callProcedure("@AdHoc", countquery).getResults()[0];
                    // the count is the scalar result of the query, not the number of result rows
                    int count = (int) vcount.asScalarLong();
                    pages = (int) Math.ceil((double) count / config.pageSize);
                } catch (Exception e) {
                    System.out.println("Count formation failure!");
                }
            }
            // set up data in order
        } else {
            // find count
            String countquery = query.replace("*", "COUNT(*)");
            Connection conn = DriverManager.getConnection(config.jdbcurl, config.jdbcuser, config.jdbcpassword);
            base = conn.getMetaData().getDatabaseProductName().toLowerCase();
            System.out.println("BASE: " + base);
            Statement jdbcStmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
            if (countquery.contains("<") || countquery.contains(">")) {
                int bracketOpen = countquery.indexOf("<");
                int bracketClose = countquery.indexOf(">");
                String orderCol = countquery.substring(bracketOpen + 1, bracketClose);
                countquery = countquery.replace("<" + orderCol + ">", "");
            }
            ResultSet rcount = jdbcStmt.executeQuery(countquery);
            rcount.next();
            // read the scalar COUNT(*) column directly; getArray(1) is not valid for a scalar column
            int count = rcount.getInt(1);
            // pagination is currently only applied when the JDBC source reports itself as Postgres
            if (base.contains("postgres") && config.isPaginated) {
                pages = (int) Math.ceil((double) count / config.pageSize);
            }
            // set up data in order
        }

        // establish new SourceReaders and DestinationWriters for pages
        SourceReader[] sr = new SourceReader[pages];
        DestinationWriter[] cr = new DestinationWriter[pages];
        for (int i = 0; i < pages; i++) {
            sr[i] = new SourceReader();
            cr[i] = new DestinationWriter();
        }
        Controller processor = new Controller<ArrayList<Object[]>>(client, sr, cr, query, key.toUpperCase() + ".insert", config, pages, base);
        completion.submit(processor);
    }

    // wait for all tasks to complete.
    for (int i = 0; i < keys.size(); ++i) {
        // take() blocks until the next sub-task has completed
        logger.info("****************" + completion.take().get() + " completed *****************");
    }
    executor.shutdown();
}
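// Illustrative only - a minimal, self-contained sketch of the java.util.concurrent
// ExecutorCompletionService pattern used by loadTables() and load() above: submit one Callable
// per unit of work, then drain results with take().get(), which blocks until the next submitted
// task finishes. The method, table names, and call() body are hypothetical.
void demoCompletionService() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    CompletionService<String> completion = new ExecutorCompletionService<String>(executor);
    List<String> tables = Arrays.asList("table_a", "table_b", "table_c");
    for (final String table : tables) {
        completion.submit(new Callable<String>() {
            public String call() {
                // real work would read a page from the source and write it to the destination
                return table;
            }
        });
    }
    for (int i = 0; i < tables.size(); i++) {
        // take() blocks until some task completes; get() rethrows any exception it threw
        logger.info(completion.take().get() + " completed");
    }
    executor.shutdown();
}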