public static void main(String[] args) throws Exception { VoltDB.setDefaultTimezone(); config = new SchemaChangeConfig(); config.parse("SchemaChangeClient", args); ClientConfig clientConfig = new ClientConfig(); clientConfig.setProcedureCallTimeout(30 * 60 * 1000); // 30 min client = ClientFactory.createClient(clientConfig); String[] servers = config.servers.split(","); for (String server : servers) { server = server.trim(); client.createConnection(server); } // get the topo topo = getCluterTopology(client); // kick this off with a random schema VoltTable t = catalogChange(null, true); for (int i = 0; i < 50; i++) { // make sure the table is full and mess around with it loadTable(t); for (int j = 0; j < 50; j++) { String tableName = TableHelper.getTableName(t); // deterministically sample some rows VoltTable preT = sample(t); // System.out.printf("First sample:\n%s\n", preT.toFormattedString()); // move to an entirely new table or migrated schema t = catalogChange(t, (j == 0) && (rand.nextInt(5) == 0)); // if the table has been migrated, check the data if (TableHelper.getTableName(t).equals(tableName)) { VoltTable guessT = t.clone(4096 * 1024); // System.out.printf("Empty clone:\n%s\n", guessT.toFormattedString()); TableHelper.migrateTable(preT, guessT); // System.out.printf("Java migration:\n%s\n", guessT.toFormattedString()); // deterministically sample the same rows VoltTable postT = sample(t); // System.out.printf("Second sample:\n%s\n", postT.toFormattedString()); postT.resetRowPosition(); preT.resetRowPosition(); StringBuilder sb = new StringBuilder(); if (!TableHelper.deepEqualsWithErrorMsg(postT, guessT, sb)) { System.err.println(sb.toString()); assert (false); } } } } client.close(); }
public Client getClient(long timeout, ClientAuthHashScheme scheme, boolean useAdmin) throws IOException { final Random r = new Random(); String listener = null; if (useAdmin) { listener = m_config.getAdminAddress(r.nextInt(m_config.getListenerCount())); } else { listener = m_config.getListenerAddress(r.nextInt(m_config.getListenerCount())); } ClientConfig config = new ClientConfigForTest(m_username, m_password, scheme); config.setConnectionResponseTimeout(timeout); config.setProcedureCallTimeout(timeout); final Client client = ClientFactory.createClient(config); // Use the port generated by LocalCluster if applicable try { client.createConnection(listener); } // retry once catch (ConnectException e) { if (useAdmin) { listener = m_config.getAdminAddress(r.nextInt(m_config.getListenerCount())); } else { listener = m_config.getListenerAddress(r.nextInt(m_config.getListenerCount())); } client.createConnection(listener); } m_clients.add(client); return client; }
/** * Get a VoltClient instance connected to a specific server driven by the VoltServerConfig * instance. Find the server by the config's HostId. * * @return A VoltClient instance connected to the server driven by the VoltServerConfig instance. */ public Client getClientToHostId(int hostId, long timeout) throws IOException { final String listener = m_config.getListenerAddress(hostId); ClientConfig config = new ClientConfigForTest(m_username, m_password); config.setConnectionResponseTimeout(timeout); config.setProcedureCallTimeout(timeout); final Client client = ClientFactory.createClient(config); try { client.createConnection(listener); } // retry once catch (ConnectException e) { client.createConnection(listener); } m_clients.add(client); return client; }
/**
 * Connects a bulk-load client to the configured VoltDB servers, builds either a
 * per-tuple (stored-procedure) or bulk (table) CSV loader, then consumes Kafka
 * messages from the configured topic until the consumer executor terminates.
 *
 * NOTE(review): on any consumer error this logs and calls System.exit(-1), so
 * close() is only reached after a clean executor termination — confirm that the
 * abrupt-exit path intentionally skips resource cleanup.
 *
 * @throws Exception on connection or loader/consumer setup failure
 */
public void processKafkaMessages() throws Exception {
    // Split server list
    final String[] serverlist = m_config.servers.split(",");

    // Create connection
    final ClientConfig c_config = new ClientConfig(m_config.user, m_config.password);
    c_config.setProcedureCallTimeout(0); // Set procedure all to infinite
    m_client = getClient(c_config, serverlist, m_config.port);

    // Route rows through a user-supplied procedure, or bulk-insert straight into a table.
    if (m_config.useSuppliedProcedure) {
        m_loader = new CSVTupleDataLoader(
            (ClientImpl) m_client, m_config.procedure, new KafkaBulkLoaderCallback());
    } else {
        m_loader = new CSVBulkDataLoader(
            (ClientImpl) m_client, m_config.table, m_config.batch, new KafkaBulkLoaderCallback());
    }
    m_loader.setFlushInterval(m_config.flush, m_config.flush);

    // Consumer group name is keyed by the load target (procedure or table).
    m_consumer = new KafkaConsumerConnector(
        m_config.zookeeper,
        m_config.useSuppliedProcedure ? m_config.procedure : m_config.table);
    try {
        m_es = getConsumerExecutor(m_consumer, m_loader);
        if (m_config.useSuppliedProcedure) {
            m_log.info(
                "Kafka Consumer from topic: " + m_config.topic
                    + " Started using procedure: " + m_config.procedure);
        } else {
            m_log.info(
                "Kafka Consumer from topic: " + m_config.topic
                    + " Started for table: " + m_config.table);
        }
        // Block effectively forever while the consumer threads run.
        m_es.awaitTermination(365, TimeUnit.DAYS);
    } catch (Exception ex) {
        m_log.error("Error in Kafka Consumer", ex);
        System.exit(-1);
    }
    close();
}
public Client getFullyConnectedClient(long timeout) throws IOException { final List<String> listeners = m_config.getListenerAddresses(); final Random r = new Random(); ClientConfig config = new ClientConfigForTest(m_username, m_password); config.setConnectionResponseTimeout(timeout); config.setProcedureCallTimeout(timeout); final Client client = ClientFactory.createClient(config); for (String listener : listeners) { // Use the port generated by LocalCluster if applicable try { client.createConnection(listener); } // retry once catch (ConnectException e) { listener = listeners.get(r.nextInt(listeners.size())); client.createConnection(listener); } } m_clients.add(client); return client; }