protected static void verifyProcFails(
        Client client, String expectedPattern, String storedProc, Object... args)
        throws IOException {
    String what;
    if ("@AdHoc".equals(storedProc)) {
        what = "the statement \"" + args[0] + "\"";
    } else {
        what = "the stored procedure \"" + storedProc + "\"";
    }

    try {
        client.callProcedure(storedProc, args);
    } catch (ProcCallException pce) {
        String msg = pce.getMessage();
        String diagnostic =
            "Expected " + what + " to throw an exception matching the pattern \""
                + expectedPattern + "\", but instead it threw an exception containing \""
                + msg + "\".";
        Pattern pattern = Pattern.compile(expectedPattern, Pattern.MULTILINE);
        assertTrue(diagnostic, pattern.matcher(msg).find());
        return;
    }

    String diagnostic =
        "Expected " + what + " to throw an exception matching the pattern \""
            + expectedPattern + "\", but instead it threw nothing.";
    fail(diagnostic);
}
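// Hedged usage sketch (not part of the original suite): shows how a test built on the
// helper above might assert that a duplicate insert fails. The FOO table, its default
// FOO.insert procedure, and the "(?i)constraint" pattern are assumptions for
// illustration only; getClient() is the usual RegressionSuite accessor.
public void testDuplicateInsertFails() throws Exception {
    Client client = getClient();
    client.callProcedure("FOO.insert", 1, 10);
    // A second insert with the same primary key should be rejected.
    verifyProcFails(client, "(?i)constraint", "FOO.insert", 1, 10);
    // The helper also handles ad hoc SQL; the pattern is matched against the
    // message of the ProcCallException thrown for the failed statement.
    verifyProcFails(client, "(?i)constraint", "@AdHoc", "INSERT INTO FOO VALUES (1, 10)");
}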
public void testInvalidCalls() throws Exception {
    System.out.println("\n\nTESTING INVALID CALLS\n\n\n");
    Client client = getFullyConnectedClient();

    //
    // invalid selector
    //
    try {
        // No selector at all.
        client.callProcedure("@Statistics");
        fail();
    } catch (ProcCallException ex) {
        // All badness gets turned into ProcCallExceptions, so we need
        // to check specifically for this error, otherwise things that
        // crash the cluster also turn into ProcCallExceptions and don't
        // trigger failure (ENG-2347)
        assertEquals(
            "Incorrect number of arguments to @Statistics (expects 2, received 0)",
            ex.getMessage());
    }

    try {
        // extra stuff
        client.callProcedure("@Statistics", "table", 0, "OHHAI");
        fail();
    } catch (ProcCallException ex) {
        assertEquals(
            "Incorrect number of arguments to @Statistics (expects 2, received 3)",
            ex.getMessage());
    }

    try {
        // Invalid selector
        client.callProcedure("@Statistics", "garbage", 0);
        fail();
    } catch (ProcCallException ex) {
    }
}
public void testGiantDelete() throws IOException, ProcCallException {
    /*
     * Times out with valgrind
     */
    if (isValgrind()) {
        return;
    }

    Client client = getClient(1000 * 60 * 10);
    for (int i = 0; i < 100; i++) {
        client.callProcedure("InsertBatch", 200000, 0, i * 200000);
    }
    try {
        client.callProcedure("Delete");
    } catch (ProcCallException pce) {
        pce.printStackTrace();
        fail("Expected to be able to delete large batch but failed");
    }

    // Test repeatability
    for (int i = 0; i < 100; i++) {
        client.callProcedure("InsertBatch", 200000, 0, i * 200000);
    }
    try {
        client.callProcedure("Delete");
    } catch (ProcCallException pce) {
        pce.printStackTrace();
        fail("Expected to be able to delete large batch but failed");
    }
}
private void runAndCheck(boolean expectException) throws Exception {
    try {
        VoltTable[] results =
            m_env.m_client.callProcedure("@AdHoc", StringUtils.join(m_queries, "; ")).getResults();
        int i = 0;
        assertEquals(m_expectedCounts.size(), results.length);
        for (String query : m_queries) {
            int expectedCount = m_expectedCounts.get(i);
            if (expectedCount >= 0) {
                String s = query.toLowerCase().trim();
                if (!s.isEmpty()) {
                    if (s.startsWith("insert") || s.startsWith("update") || s.startsWith("delete")) {
                        assertEquals(
                            String.format("%s (row count):", query), 1, results[i].getRowCount());
                        assertEquals(
                            String.format("%s (result count):", query),
                            expectedCount,
                            results[i].asScalarLong());
                    } else {
                        assertEquals(
                            String.format("%s (row count):", query),
                            expectedCount,
                            results[i].getRowCount());
                    }
                    i++;
                }
            }
        }
    } catch (ProcCallException e) {
        assertTrue("Unexpected exception for batch: " + e.getMessage(), expectException);
    } finally {
        m_queries.clear();
        m_expectedCounts.clear();
    }
}
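// Illustrative driver (not from the original source) showing how the m_queries and
// m_expectedCounts fields consumed by runAndCheck might be populated. The FOO table and
// the specific statements are placeholders. For DML the expected count is the
// modified-row count reported by the statement; for queries it is the number of rows
// returned.
private void runSampleBatch() throws Exception {
    m_queries.add("INSERT INTO FOO VALUES (1, 10)");
    m_expectedCounts.add(1);   // one row inserted
    m_queries.add("SELECT * FROM FOO WHERE ID = 1");
    m_expectedCounts.add(1);   // one row expected back
    m_queries.add("DELETE FROM FOO WHERE ID = 1");
    m_expectedCounts.add(1);   // one row deleted
    runAndCheck(false);        // no exception expected for this batch
}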
void migrateSchemaWithDataExpectFail(String schema1, String schema2, String pattern)
        throws Exception {
    try {
        migrateSchema(schema1, schema2);
        fail();
    } catch (ProcCallException e) {
        ClientResponseImpl cri = (ClientResponseImpl) e.getClientResponse();
        assertEquals(ClientResponse.GRACEFUL_FAILURE, cri.getStatus());
        assertTrue(cri.getStatusString().contains(pattern));
    } catch (Exception e) {
        e.printStackTrace();
        fail("Expected ProcCallException but got: " + e);
    }
}
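// Hypothetical call site (not in the original source) illustrating the shape of the
// helper above: migrate from one schema to another while data is present and expect a
// graceful failure whose status string contains the given fragment. The two schemas and
// the expected-message fragment are placeholders only.
public void testMigrationRejectedWithData() throws Exception {
    migrateSchemaWithDataExpectFail(
        "create table T (ID integer not null, NAME varchar(64));",
        "create table T (ID integer not null, NAME varchar(4));",
        "NAME");
}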
/**
 * Print planner and cache statistics.
 *
 * @throws IOException
 * @throws NoConnectionsException
 */
public void printPlannerStatistics() throws IOException, NoConnectionsException {
    try {
        VoltTable result = client.callProcedure("@Statistics", "PLANNER", 0).getResults()[0];
        while (result.advanceRow()) {
            String hostname = result.getString("HOSTNAME");
            long siteId = result.getLong("SITE_ID");
            long partitionId = result.getLong("PARTITION_ID");
            long hits1 = result.getLong("CACHE1_HITS");
            long hits2 = result.getLong("CACHE2_HITS");
            long level1 = result.getLong("CACHE1_LEVEL");
            long level2 = result.getLong("CACHE2_LEVEL");
            long misses = result.getLong("CACHE_MISSES");
            long total = hits1 + hits2 + misses;
            double hitpc1 = (100.0 * hits1) / total;
            double hitpc2 = (100.0 * hits2) / total;
            double planTimeMin = result.getLong("PLAN_TIME_MIN") / 1000000.0;
            double planTimeMax = result.getLong("PLAN_TIME_MAX") / 1000000.0;
            double planTimeAvg = result.getLong("PLAN_TIME_AVG") / 1000000.0;
            long failures = result.getLong("FAILURES");

            // Global stats
            System.out.printf("          HOSTNAME: %s\n", hostname);
            if (siteId == -1) {
                System.out.printf("              SITE: (global)\n");
            } else {
                System.out.printf("              SITE: %d\n", siteId);
                System.out.printf("         PARTITION: %d\n", partitionId);
            }
            System.out.printf("       TOTAL PLANS: %d\n", total);
            System.out.printf("      CACHE MISSES: %d\n", misses);
            if (siteId == -1) {
                System.out.printf("LEVEL 1 CACHE HITS: %d (%.1f%%)\n", hits1, hitpc1);
                System.out.printf("LEVEL 2 CACHE HITS: %d (%.1f%%)\n", hits2, hitpc2);
                System.out.printf("LEVEL 1 CACHE SIZE: %d\n", level1);
                System.out.printf("LEVEL 2 CACHE SIZE: %d\n", level2);
            } else {
                System.out.printf("   PLAN CACHE HITS: %d (%.1f%%)\n", hits1, hitpc1);
                System.out.printf("   PLAN CACHE SIZE: %d\n", level1);
            }
            System.out.printf("     PLAN TIME MIN: %6.2f ms\n", planTimeMin);
            System.out.printf("     PLAN TIME MAX: %6.2f ms\n", planTimeMax);
            System.out.printf("     PLAN TIME AVG: %6.2f ms\n", planTimeAvg);
            System.out.printf("          FAILURES: %d\n\n", failures);
        }
    } catch (ProcCallException e) {
        e.printStackTrace();
    }
}
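// Hedged usage sketch (not part of the original file): the `client` field is the same
// one printPlannerStatistics reads from; the host name, the FOO table, and the ad hoc
// statements are placeholders, issued only so the planner statistics have something to
// report.
public void connectAndReportPlannerStats() throws Exception {
    client = ClientFactory.createClient(new ClientConfig());
    client.createConnection("localhost");
    // Run a handful of ad hoc statements to populate the planner caches.
    for (int i = 0; i < 10; i++) {
        client.callProcedure("@AdHoc", "SELECT COUNT(*) FROM FOO WHERE ID = " + i);
    }
    printPlannerStatistics();
    client.close();
}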
public void testBasicCreateStatementProc() throws Exception {
    String pathToCatalog = Configuration.getPathToCatalogForTest("adhocddl.jar");
    String pathToDeployment = Configuration.getPathToCatalogForTest("adhocddl.xml");

    VoltProjectBuilder builder = new VoltProjectBuilder();
    builder.addLiteralSchema(
        "create table FOO ("
            + "ID integer not null,"
            + "VAL bigint, "
            + "constraint PK_TREE primary key (ID)"
            + ");\n"
            + "create table FOO_R ("
            + "ID integer not null,"
            + "VAL bigint, "
            + "constraint PK_TREE_R primary key (ID)"
            + ");\n");
    builder.addPartitionInfo("FOO", "ID");
    builder.setUseDDLSchema(true);
    boolean success = builder.compile(pathToCatalog, 2, 1, 0);
    assertTrue("Schema compilation failed", success);
    MiscUtils.copyFile(builder.getPathToDeployment(), pathToDeployment);

    VoltDB.Configuration config = new VoltDB.Configuration();
    config.m_pathToCatalog = pathToCatalog;
    config.m_pathToDeployment = pathToDeployment;

    try {
        startSystem(config);

        // Procedure shouldn't exist
        boolean threw = false;
        assertFalse(findProcedureInSystemCatalog("FOOCOUNT"));
        try {
            m_client.callProcedure("FOOCOUNT", 1000L);
        } catch (ProcCallException pce) {
            assertTrue(pce.getMessage().contains("Procedure FOOCOUNT was not found"));
            threw = true;
        }
        assertTrue("FOOCOUNT procedure shouldn't exist", threw);

        try {
            m_client.callProcedure(
                "@AdHoc", "create procedure FOOCOUNT as select * from FOO where ID=?;");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to create statement procedure");
        }
        assertTrue(findProcedureInSystemCatalog("FOOCOUNT"));
        assertFalse(verifySinglePartitionProcedure("FOOCOUNT"));

        // Make sure we can call it
        try {
            m_client.callProcedure("FOOCOUNT", 1000L);
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to call procedure FOOCOUNT");
        }

        // partition that sucker
        try {
            m_client.callProcedure(
                "@AdHoc", "partition procedure FOOCOUNT on table FOO column ID parameter 0;");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to partition the procedure FOOCOUNT");
        }

        // Make sure we can call it
        assertTrue(verifySinglePartitionProcedure("FOOCOUNT"));
        try {
            m_client.callProcedure("FOOCOUNT", 1000L);
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to call procedure FOOCOUNT");
        }

        // now drop it
        try {
            m_client.callProcedure("@AdHoc", "drop procedure FOOCOUNT");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to drop procedure FOOCOUNT");
        }
        assertFalse(findProcedureInSystemCatalog("FOOCOUNT"));

        // Can't drop it twice
        threw = false;
        try {
            m_client.callProcedure("@AdHoc", "drop procedure FOOCOUNT");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            threw = true;
        }
        assertTrue("Can't vanilla drop procedure FOOCOUNT twice", threw);

        // unless we use if exists
        try {
            m_client.callProcedure("@AdHoc", "drop procedure FOOCOUNT if exists");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to drop procedure FOOCOUNT twice with if exists");
        }

        // Create it again so we can destroy it with drop with if exists, just to be sure
        try {
            m_client.callProcedure(
                "@AdHoc", "create procedure FOOCOUNT as select * from FOO where ID=?;");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to create statement procedure");
        }
        assertTrue(findProcedureInSystemCatalog("FOOCOUNT"));

        // now drop it
        try {
            m_client.callProcedure("@AdHoc", "drop procedure FOOCOUNT if exists");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to drop procedure FOOCOUNT");
        }
        assertFalse(findProcedureInSystemCatalog("FOOCOUNT"));
    } finally {
        teardownSystem();
    }
}
/**
 * Takes a snapshot of all the tables in the database now and checks all the rows in each table
 * to see if they satisfy the constraints. The constraints should be added with the table name
 * and table id 0.
 *
 * <p>Since the snapshot files reside on the servers, we have to copy them over to the client in
 * order to check. This might be overkill, but the alternative is to ask the user to write a
 * stored procedure for each table and execute them on all nodes. That's not significantly
 * better, either.
 *
 * <p>This function blocks. Should only be run at the end.
 *
 * @return true if all tables passed the test, false otherwise.
 */
protected boolean checkTables() {
    String dir = "/tmp";
    String nonce = "data_verification";
    ClientConfig clientConfig = new ClientConfig(m_username, m_password);
    clientConfig.setExpectedOutgoingMessageSize(getExpectedOutgoingMessageSize());
    clientConfig.setHeavyweight(false);
    Client client = ClientFactory.createClient(clientConfig);

    // Host ID to IP mappings
    LinkedHashMap<Integer, String> hostMappings = new LinkedHashMap<Integer, String>();
    /*
     * The key is the table name. The first one in the pair is the hostname,
     * the second one is the file name.
     */
    LinkedHashMap<String, Pair<String, String>> snapshotMappings =
        new LinkedHashMap<String, Pair<String, String>>();
    boolean isSatisfied = true;

    // Load the native library for loading table from snapshot file
    org.voltdb.EELibraryLoader.loadExecutionEngineLibrary(true);

    try {
        boolean keepTrying = true;
        VoltTable[] response = null;

        client.createConnection(m_host);
        // Only initiate the snapshot if it's the first client
        while (m_id == 0) {
            // Take a snapshot of the database. This call is blocking.
            response = client.callProcedure("@SnapshotSave", dir, nonce, 1).getResults();
            if (response.length != 1
                || !response[0].advanceRow()
                || !response[0].getString("RESULT").equals("SUCCESS")) {
                if (keepTrying && response[0].getString("ERR_MSG").contains("ALREADY EXISTS")) {
                    client.callProcedure("@SnapshotDelete", new String[] {dir}, new String[] {nonce});
                    keepTrying = false;
                    continue;
                }
                System.err.println("Failed to take snapshot");
                return false;
            }
            break;
        }

        // Clients other than the one that initiated the snapshot
        // have to check if the snapshot has completed
        if (m_id > 0) {
            int maxTry = 10;
            while (maxTry-- > 0) {
                boolean found = false;
                response = client.callProcedure("@SnapshotStatus").getResults();
                if (response.length != 2) {
                    System.err.println("Failed to get snapshot status");
                    return false;
                }
                while (response[0].advanceRow()) {
                    if (response[0].getString("NONCE").equals(nonce)) {
                        found = true;
                        break;
                    }
                }

                if (found) {
                    // This probably means the snapshot is done
                    if (response[0].getLong("END_TIME") > 0) break;
                }

                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                    return false;
                }
            }
        }

        // Get host ID to hostname mappings
        response = client.callProcedure("@SystemInformation").getResults();
        if (response.length != 1) {
            System.err.println("Failed to get host ID to IP address mapping");
            return false;
        }
        while (response[0].advanceRow()) {
            if (!response[0].getString("KEY").equals("HOSTNAME")) {
                continue;
            }
            hostMappings.put(
                (Integer) response[0].get("HOST_ID", VoltType.INTEGER),
                response[0].getString("VALUE"));
        }

        /* DUMP THE HOST MAPPINGS:
        System.err.println("\n\nhostMappings: ");
        for (Integer i : hostMappings.keySet()) {
            System.err.println("\tkey: " + i + " value: " + hostMappings.get(i));
        }
        */

        // Do a scan to get all the file names and table names
        response = client.callProcedure("@SnapshotScan", dir).getResults();
        if (response.length != 3) {
            System.err.println("Failed to get snapshot filenames");
            return false;
        }

        // Only copy the snapshot files we just created
        while (response[0].advanceRow()) {
            if (!response[0].getString("NONCE").equals(nonce)) continue;

            String[] tables = response[0].getString("TABLES_REQUIRED").split(",");
            for (String t : tables) {
                snapshotMappings.put(t, null);
            }
            break;
        }

        /* DUMP THE SNAPSHOT MAPPINGS:
        System.err.println("\n\nsnapshotMappings: ");
        for (String i : snapshotMappings.keySet()) {
            System.err.println("\tkey: " + i + " value: " + snapshotMappings.get(i));
        }
        */

        while (response[2].advanceRow()) {
            int id = (Integer) response[2].get("HOST_ID", VoltType.INTEGER);
            String tableName = response[2].getString("TABLE");
            if (!snapshotMappings.containsKey(tableName) || !hostMappings.containsKey(id)) {
                System.err.println("FAILED configuring snapshotMappings for: ");
                System.err.println(
                    "snapshotMappings[" + tableName + "] " + snapshotMappings.get(tableName));
                System.err.println("hostMappings[" + id + "] " + hostMappings.get(id));
                continue;
            }
            snapshotMappings.put(
                tableName, Pair.of(hostMappings.get(id), response[2].getString("NAME")));
        }
    } catch (NoConnectionsException e) {
        e.printStackTrace();
        return false;
    } catch (ProcCallException e) {
        e.printStackTrace();
        return false;
    } catch (UnknownHostException e) {
        e.printStackTrace();
        return false;
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    }

    // Iterate through all the tables
    System.err.println("Checking " + m_tableCheckOrder.size() + " table constraints");
    for (String tableName : m_tableCheckOrder) {
        Pair<String, String> value = snapshotMappings.get(tableName);
        if (value == null) {
            System.err.println("No snapshot mapping for table: " + tableName);
            continue;
        }

        String hostName = value.getFirst();
        File file = new File(dir, value.getSecond());
        FileInputStream inputStream = null;
        TableSaveFile saveFile = null;
        long rowCount = 0;

        Pair<String, Integer> key = Pair.of(tableName, 0);
        if (!m_constraints.containsKey(key) || hostName == null) {
            System.err.println("No constraint for : " + tableName);
            continue;
        }

        // Copy the file over
        String localhostName = ConnectionUtil.getHostnameOrAddress();
        final SSHTools ssh = new SSHTools(m_username);
        if (!hostName.equals("localhost") && !hostName.equals(localhostName)) {
            if (!ssh.copyFromRemote(file.getPath(), hostName, file.getPath())) {
                System.err.println(
                    "Failed to copy the snapshot file " + file.getPath()
                        + " from host " + hostName);
                return false;
            }
        }

        if (!file.exists()) {
            System.err.println(
                "Snapshot file " + file.getPath()
                    + " cannot be copied from " + hostName + " to localhost");
            return false;
        }

        try {
            try {
                inputStream = new FileInputStream(file);
                saveFile = new TableSaveFile(inputStream.getChannel(), 3, null);

                // Get chunks from table
                while (isSatisfied && saveFile.hasMoreChunks()) {
                    final BBContainer chunk = saveFile.getNextChunk();
                    VoltTable table = null;

                    // This probably should not happen
                    if (chunk == null) continue;

                    table = PrivateVoltTableFactory.createVoltTableFromBuffer(chunk.b, true);
                    // Now, check each row
                    while (isSatisfied && table.advanceRow()) {
                        isSatisfied = Verification.checkRow(m_constraints.get(key), table);
                        rowCount++;
                    }
                    // Release the memory of the chunk we just examined, be good
                    chunk.discard();
                }
            } finally {
                if (saveFile != null) {
                    saveFile.close();
                }
                if (inputStream != null) inputStream.close();
                if (!hostName.equals("localhost")
                    && !hostName.equals(localhostName)
                    && !file.delete())
                    System.err.println("Failed to delete snapshot file " + file.getPath());
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            return false;
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }

        if (isSatisfied) {
            System.err.println("Table " + tableName + " with " + rowCount + " rows passed check");
        } else {
            System.err.println("Table " + tableName + " failed check");
            break;
        }
    }

    // Clean up the snapshot we made
    try {
        if (m_id == 0) {
            client
                .callProcedure("@SnapshotDelete", new String[] {dir}, new String[] {nonce})
                .getResults();
        }
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ProcCallException e) {
        e.printStackTrace();
    }

    System.err.println(
        "Table checking finished " + (isSatisfied ? "successfully" : "with failures"));

    return isSatisfied;
}
public void testBasic() throws Exception {
    System.out.println("\n\n-----\n testBasic \n-----\n\n");

    String pathToCatalog = Configuration.getPathToCatalogForTest("adhocddl.jar");
    String pathToDeployment = Configuration.getPathToCatalogForTest("adhocddl.xml");

    VoltProjectBuilder builder = new VoltProjectBuilder();
    // Need to parallel dbuilder as we modify builder
    DeploymentBuilder dbuilder = new DeploymentBuilder(2, 1, 0);
    builder.addLiteralSchema(
        "create table FOO ("
            + "ID integer not null,"
            + "VAL bigint, "
            + "constraint PK_TREE primary key (ID)"
            + ");\n"
            + "create table FOO_R ("
            + "ID integer not null,"
            + "VAL bigint, "
            + "constraint PK_TREE_R primary key (ID)"
            + ");\n");
    builder.addPartitionInfo("FOO", "ID");
    dbuilder.setUseDDLSchema(true);
    // Use random caps in role names to check case-insensitivity
    dbuilder.addUsers(
        new DeploymentBuilder.UserInfo[] {
            new DeploymentBuilder.UserInfo("admin", "admin", new String[] {"Administrator"})
        });
    dbuilder.setSecurityEnabled(true);
    dbuilder.setEnableCommandLogging(false);
    boolean success = builder.compile(pathToCatalog, 2, 1, 0);
    assertTrue("Schema compilation failed", success);
    dbuilder.writeXML(pathToDeployment);
    // MiscUtils.copyFile(builder.getPathToDeployment(), pathToDeployment);

    VoltDB.Configuration config = new VoltDB.Configuration();
    config.m_pathToCatalog = pathToCatalog;
    config.m_pathToDeployment = pathToDeployment;

    try {
        startServer(config);

        ClientConfig adminConfig = new ClientConfig("admin", "admin");
        Client adminClient = ClientFactory.createClient(adminConfig);
        ClientConfig userConfig = new ClientConfig("user", "user");
        Client userClient = ClientFactory.createClient(userConfig);

        adminClient.createConnection("localhost");
        // Can't connect a user which doesn't exist
        boolean threw = false;
        try {
            userClient.createConnection("localhost");
        } catch (IOException ioe) {
            threw = true;
            assertTrue(ioe.getMessage().contains("Authentication rejected"));
        }
        assertTrue("Connecting bad user should have failed", threw);

        // Add the user with the new role
        dbuilder.addUsers(new UserInfo[] {new UserInfo("user", "user", new String[] {"NEWROLE"})});
        dbuilder.writeXML(pathToDeployment);
        try {
            adminClient.updateApplicationCatalog(null, new File(pathToDeployment));
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to add a user even with a role that doesn't exist");
        }
        // Check that we can connect the new user
        try {
            userClient.createConnection("localhost");
        } catch (IOException ioe) {
            ioe.printStackTrace();
            fail("Should have been able to connect 'user'");
        }
        // Make sure the user doesn't actually have DEFAULTPROC permissions yet
        threw = false;
        try {
            userClient.callProcedure("FOO.insert", 0, 0);
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            threw = true;
        }
        assertTrue("'user' shouldn't be able to call procedures yet", threw);

        // Okay, it's showtime.  Let's add the role through live DDL
        try {
            adminClient.callProcedure("@AdHoc", "create role NEWROLE with DEFAULTPROC");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Creating role should have succeeded");
        }
        try {
            adminClient.updateApplicationCatalog(null, new File(pathToDeployment));
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Adding 'user' should have succeeded this time");
        }
        // Make sure the user now has DEFAULTPROC permissions
        try {
            userClient.callProcedure("FOO.insert", 0, 0);
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("'user' should be able to call default procs now");
        }

        threw = false;
        try {
            adminClient.callProcedure("@AdHoc", "create role NEWROLE with ALLPROC");
        } catch (ProcCallException pce) {
            assertTrue(pce.getMessage().contains("already exists"));
            threw = true;
        }
        assertTrue("Shouldn't be able to 'create' same role twice", threw);

        threw = false;
        try {
            // Use random caps in role names to check case-insensitivity
            adminClient.callProcedure("@AdHoc", "create role aDministrator with ALLPROC");
        } catch (ProcCallException pce) {
            assertTrue(pce.getMessage().contains("already exists"));
            threw = true;
        }
        assertTrue("Shouldn't be able to 'create' ADMINISTRATOR role", threw);

        threw = false;
        try {
            adminClient.callProcedure("@AdHoc", "create role USER with ALLPROC");
        } catch (ProcCallException pce) {
            assertTrue(pce.getMessage().contains("already exists"));
            threw = true;
        }
        assertTrue("Shouldn't be able to 'create' USER role", threw);

        try {
            adminClient.callProcedure("@AdHoc", "drop role NEWROLE;");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to drop role NEWROLE");
        }

        // Can't drop twice
        threw = false;
        try {
            adminClient.callProcedure("@AdHoc", "drop role NEWROLE;");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            threw = true;
        }
        assertTrue("Can't vanilla DROP a role which doesn't exist", threw);

        // unless you use IF EXISTS
        try {
            adminClient.callProcedure("@AdHoc", "drop role NEWROLE if exists;");
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            fail("Should be able to drop role NEWROLE if exists");
        }

        // Make sure the user doesn't actually have DEFAULTPROC permissions any more
        threw = false;
        try {
            userClient.callProcedure("FOO.insert", 0, 0);
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            threw = true;
        }
        assertTrue("'user' shouldn't be able to call procedures any more", threw);

        threw = false;
        try {
            adminClient.callProcedure("@AdHoc", "drop role USER;");
        } catch (ProcCallException pce) {
            threw = true;
            assertTrue(pce.getMessage().contains("You may not drop the built-in role"));
            pce.printStackTrace();
        }
        assertTrue("Shouldn't be able to drop role USER", threw);

        // Check the administrator error message; there should end up being multiple
        // reasons why we can't get rid of this role (like, we will require you to always
        // have a user with this role)
        threw = false;
        try {
            // Use random caps in role names to check case-insensitivity
            adminClient.callProcedure("@AdHoc", "drop role adMinistrator;");
        } catch (ProcCallException pce) {
            threw = true;
            assertTrue(pce.getMessage().contains("You may not drop the built-in role"));
            pce.printStackTrace();
        }
        assertTrue("Shouldn't be able to drop role ADMINISTRATOR", threw);

        // Make sure that we can't get rid of the administrator user
        dbuilder.removeUser("admin");
        dbuilder.writeXML(pathToDeployment);
        threw = false;
        try {
            adminClient.updateApplicationCatalog(null, new File(pathToDeployment));
        } catch (ProcCallException pce) {
            pce.printStackTrace();
            threw = true;
        }
        assertTrue("Shouldn't be able to remove the last remaining ADMINISTRATOR user", threw);
    } finally {
        teardownSystem();
    }
}