@Test
public void testQuery() throws Throwable {
  QNodeHandler handler = new QNodeHandler();
  handler.init(SploutConfiguration.getTestConfig());
  SploutConfiguration config = SploutConfiguration.getTestConfig();
  DNodeHandler dHandler = new DNodeHandler();
  DNode dnode = TestUtils.getTestDNode(config, dHandler, "dnode-" + this.getClass().getName() + "-2");

  try {
    ReplicationEntry repEntry = new ReplicationEntry(0, dnode.getAddress());
    Tablespace tablespace1 = new Tablespace(PartitionMap.oneShardOpenedMap(),
        new ReplicationMap(Arrays.asList(repEntry)), 0L, 0L);
    handler.getContext().getTablespaceVersionsMap()
        .put(new TablespaceVersion("tablespace1", 0L), tablespace1);
    handler.getContext().getCurrentVersionsMap().put("tablespace1", 0L);

    // Query key 2 (> 1 < 10)
    QueryStatus qStatus = handler.query("tablespace1", "2", "SELECT 1;", null);
    Assert.assertEquals(new Integer(0), qStatus.getShard());
    Assert.assertEquals("[1]", qStatus.getResult().toString());
  } finally {
    handler.close();
    dnode.stop();
    Hazelcast.shutdownAll();
  }
}
@Test
public void testDeployEnding() throws Throwable {
  // Test what happens when DNodes complete the deploy process
  final QNodeHandler handler = new QNodeHandler();
  SploutConfiguration config = SploutConfiguration.getTestConfig();
  DNodeHandler dHandler = new DNodeHandler();
  DNode dnode = TestUtils.getTestDNode(config, dHandler, "dnode-" + this.getClass().getName() + "-1");

  try {
    handler.init(config);

    DeployRequest deployRequest1 = new DeployRequest();
    deployRequest1.setTablespace("partition1");
    deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest1.setReplicationMap(ReplicationMap.oneToOneMap(dnode.getAddress()).getReplicationEntries());

    File fakeDeployFolder = new File(FAKE_DEPLOY_FOLDER);
    fakeDeployFolder.mkdir();
    File deployData = new File(fakeDeployFolder, "0.db");
    deployData.createNewFile();
    deployRequest1.setData_uri(fakeDeployFolder.toURI().toString());

    List<DeployRequest> l = new ArrayList<DeployRequest>();
    l.add(deployRequest1);
    handler.deploy(l);

    new TestUtils.NotWaitingForeverCondition() {
      @Override
      public boolean endCondition() {
        boolean cond1 = handler.getContext().getTablespaceVersionsMap().values().size() == 1;
        boolean cond2 = handler.getContext().getCurrentVersionsMap().get("partition1") != null;
        return cond1 && cond2;
      }
    }.waitAtMost(5000);

    assertEquals(
        (long) handler.getContext().getTablespaceVersionsMap().keySet().iterator().next().getVersion(),
        (long) handler.getContext().getCurrentVersionsMap().values().iterator().next()); // everything OK
  } finally {
    handler.close();
    dnode.stop();
    Hazelcast.shutdownAll();
  }
}
@After
@Before
public void cleanUp() throws IOException {
  FileUtils.deleteDirectory(new File(FAKE_DEPLOY_FOLDER));
  TestUtils.cleanUpTmpFolders(this.getClass().getName(), 7);
}
@Test
public void testDeployFiring() throws Throwable {
  // Test the business logic that produces the firing of the deployment (not the continuation of it).
  // For that, we use dummy DNodeHandlers.
  QNodeHandler handler = new QNodeHandler();
  SploutConfiguration config = SploutConfiguration.getTestConfig();
  DNode dnode = TestUtils.getTestDNode(config, new IDNodeHandler() {
    @Override
    public void init(SploutConfiguration config) throws Exception {
    }

    @Override
    public String sqlQuery(String tablespace, long version, int partition, String query) throws DNodeException {
      return null;
    }

    @Override
    public String deploy(List<DeployAction> deployActions, long version) throws DNodeException {
      Assert.assertEquals(1, deployActions.size());
      Assert.assertEquals("hdfs://foo/bar/0.db", deployActions.get(0).getDataURI());
      Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
      Assert.assertTrue(version >= 0); // TODO Is this the right checking here?
      return "FOO";
    }

    @Override
    public String rollback(List<RollbackAction> rollbackActions, String ignoreMe) throws DNodeException {
      return null;
    }

    @Override
    public String status() throws DNodeException {
      return null;
    }

    @Override
    public void stop() throws Exception {
    }

    @Override
    public void giveGreenLigth() {
    }

    @Override
    public String abortDeploy(long version) throws DNodeException {
      return null;
    }

    @Override
    public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions) throws DNodeException {
      return null;
    }

    @Override
    public String testCommand(String command) throws DNodeException {
      return null;
    }
  }, "dnode-" + this.getClass().getName() + "-5");

  try {
    handler.init(config);

    ReplicationEntry repEntry = new ReplicationEntry(0, dnode.getAddress());
    DeployRequest deployRequest = new DeployRequest();
    deployRequest.setTablespace("partition1");
    deployRequest.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest.setReplicationMap(Arrays.asList(repEntry));
    deployRequest.setData_uri("hdfs://foo/bar");

    List<DeployRequest> l = new ArrayList<DeployRequest>();
    l.add(deployRequest);
    handler.deploy(l);
  } finally {
    handler.close();
    dnode.stop();
    Hazelcast.shutdownAll();
  }
}
@Test
public void testMultiDeployFiring() throws Throwable {
  // Same as testDeployFiring, but with more than one DNode and different deploy actions
  SploutConfiguration config1 = SploutConfiguration.getTestConfig();
  DNode dnode1 = TestUtils.getTestDNode(config1, new IDNodeHandler() {
    @Override
    public void init(SploutConfiguration config) throws Exception {
    }

    @Override
    public String sqlQuery(String tablespace, long version, int partition, String query) throws DNodeException {
      return null;
    }

    @Override
    public String deploy(List<DeployAction> deployActions, long distributedBarrier) throws DNodeException {
      /*
       * DNode1 asserts
       */
      Assert.assertEquals(2, deployActions.size());
      Assert.assertEquals("hdfs://foo/bar1/0.db", deployActions.get(0).getDataURI());
      Assert.assertEquals("hdfs://foo/bar2/0.db", deployActions.get(1).getDataURI());
      Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
      Assert.assertEquals("partition2", deployActions.get(1).getTablespace());
      return "FOO";
    }

    @Override
    public String rollback(List<RollbackAction> rollbackActions, String ignoreMe) throws DNodeException {
      return null;
    }

    @Override
    public String status() throws DNodeException {
      return null;
    }

    @Override
    public void stop() throws Exception {
    }

    @Override
    public void giveGreenLigth() {
    }

    @Override
    public String abortDeploy(long version) throws DNodeException {
      return null;
    }

    @Override
    public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions) throws DNodeException {
      return null;
    }

    @Override
    public String testCommand(String command) throws DNodeException {
      return null;
    }
  }, "dnode-" + this.getClass().getName() + "-3");

  SploutConfiguration config2 = SploutConfiguration.getTestConfig();
  DNode dnode2 = TestUtils.getTestDNode(config2, new IDNodeHandler() {
    @Override
    public void init(SploutConfiguration config) throws Exception {
    }

    @Override
    public String sqlQuery(String tablespace, long version, int partition, String query) throws DNodeException {
      return null;
    }

    @Override
    public String deploy(List<DeployAction> deployActions, long distributedBarrier) throws DNodeException {
      /*
       * DNode2 asserts
       */
      Assert.assertEquals(1, deployActions.size());
      Assert.assertEquals("hdfs://foo/bar1/0.db", deployActions.get(0).getDataURI());
      Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
      return "FOO";
    }

    @Override
    public String rollback(List<RollbackAction> rollbackActions, String ignoreMe) throws DNodeException {
      return null;
    }

    @Override
    public String status() throws DNodeException {
      return null;
    }

    @Override
    public void stop() throws Exception {
    }

    @Override
    public void giveGreenLigth() {
    }

    @Override
    public String abortDeploy(long version) throws DNodeException {
      return null;
    }

    @Override
    public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions) throws DNodeException {
      return null;
    }

    @Override
    public String testCommand(String command) throws DNodeException {
      return null;
    }
  }, "dnode-" + this.getClass().getName() + "-4");

  QNodeHandler handler = new QNodeHandler();
  try {
    handler.init(config1);

    ReplicationEntry repEntry1 = new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress());
    ReplicationEntry repEntry2 = new ReplicationEntry(0, dnode1.getAddress());

    DeployRequest deployRequest1 = new DeployRequest();
    deployRequest1.setTablespace("partition1");
    deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest1.setReplicationMap(Arrays.asList(repEntry1));
    deployRequest1.setData_uri("hdfs://foo/bar1");

    DeployRequest deployRequest2 = new DeployRequest();
    deployRequest2.setTablespace("partition2");
    deployRequest2.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest2.setReplicationMap(Arrays.asList(repEntry2));
    deployRequest2.setData_uri("hdfs://foo/bar2");

    List<DeployRequest> l = new ArrayList<DeployRequest>();
    l.add(deployRequest1);
    l.add(deployRequest2);
    handler.deploy(l);
  } finally {
    handler.close();
    dnode1.stop();
    dnode2.stop();
    Hazelcast.shutdownAll();
  }
}
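// The anonymous IDNodeHandler stubs in testDeployFiring and testMultiDeployFiring repeat the
// same no-op methods. One possible cleanup is a shared adapter that each test extends, overriding
// only deploy(). This is a sketch only: NoOpDNodeHandler is hypothetical and not part of the
// Splout codebase; the method signatures are copied from IDNodeHandler as used above.
private abstract static class NoOpDNodeHandler implements IDNodeHandler {
  @Override
  public void init(SploutConfiguration config) throws Exception {
  }

  @Override
  public String sqlQuery(String tablespace, long version, int partition, String query) throws DNodeException {
    return null;
  }

  @Override
  public String rollback(List<RollbackAction> rollbackActions, String ignoreMe) throws DNodeException {
    return null;
  }

  @Override
  public String status() throws DNodeException {
    return null;
  }

  @Override
  public void stop() throws Exception {
  }

  @Override
  public void giveGreenLigth() {
  }

  @Override
  public String abortDeploy(long version) throws DNodeException {
    return null;
  }

  @Override
  public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions) throws DNodeException {
    return null;
  }

  @Override
  public String testCommand(String command) throws DNodeException {
    return null;
  }

  // deploy() is intentionally left unimplemented so each test supplies its own assertions.
}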