@Test
public void testQuery() throws Throwable {
  QNodeHandler handler = new QNodeHandler();
  handler.init(SploutConfiguration.getTestConfig());
  SploutConfiguration config = SploutConfiguration.getTestConfig();
  DNodeHandler dHandler = new DNodeHandler(); // handler backing the test DNode below
  DNode dnode = TestUtils.getTestDNode(config, dHandler, "dnode-" + this.getClass().getName() + "-2");

  try {
    ReplicationEntry repEntry = new ReplicationEntry(0, dnode.getAddress());
    Tablespace tablespace1 = new Tablespace(PartitionMap.oneShardOpenedMap(),
        new ReplicationMap(Arrays.asList(repEntry)), 0L, 0L);
    handler.getContext().getTablespaceVersionsMap()
        .put(new TablespaceVersion("tablespace1", 0L), tablespace1);
    handler.getContext().getCurrentVersionsMap().put("tablespace1", 0L);

    // Query key 2 (> 1, < 10)
    QueryStatus qStatus = handler.query("tablespace1", "2", "SELECT 1;", null);
    Assert.assertEquals(new Integer(0), qStatus.getShard());
    Assert.assertEquals("[1]", qStatus.getResult().toString());
  } finally {
    handler.close();
    dnode.stop();
    Hazelcast.shutdownAll();
  }
}
@Test
public void testDNodeDownAndUp() throws Throwable {
  SploutConfiguration config = SploutConfiguration.getTestConfig();
  QNodeHandler handler = new QNodeHandler();
  HazelcastInstance hz = Hazelcast.newHazelcastInstance(HazelcastConfigBuilder.build(config));

  try {
    CoordinationStructures coord = new CoordinationStructures(hz);

    SploutConfiguration dNodeConfig = SploutConfiguration.getTestConfig();
    dNodeConfig.setProperty(DNodeProperties.PORT, 1000);
    coord.getDNodes().put("/localhost:1000", new DNodeInfo(dNodeConfig));

    try {
      handler.init(config);
    } catch (Exception e) {
      // since the handler will try to connect to "localhost:1000" we skip the Exception and continue:
      // the things we want to assert should be present anyway.
    }

    Assert.assertEquals(handler.getContext().getCoordinationStructures().getDNodes().values().size(), 1);

    coord.getDNodes().remove(coord.getDNodes().entrySet().iterator().next().getKey());
    Thread.sleep(100);
    Assert.assertEquals(handler.getContext().getCoordinationStructures().getDNodes().values().size(), 0);

    dNodeConfig = SploutConfiguration.getTestConfig();
    dNodeConfig.setProperty(DNodeProperties.PORT, 1001);
    coord.getDNodes().put("/localhost:1001", new DNodeInfo(dNodeConfig));
    Thread.sleep(100);
    Assert.assertEquals(handler.getContext().getCoordinationStructures().getDNodes().values().size(), 1);

    dNodeConfig = SploutConfiguration.getTestConfig();
    dNodeConfig.setProperty(DNodeProperties.PORT, 1000);
    coord.getDNodes().put("/localhost:1000", new DNodeInfo(dNodeConfig));
    Thread.sleep(100);
    Assert.assertEquals(handler.getContext().getCoordinationStructures().getDNodes().values().size(), 2);

    coord.getDNodes().remove(coord.getDNodes().entrySet().iterator().next().getKey());
    coord.getDNodes().remove(coord.getDNodes().entrySet().iterator().next().getKey());
    Thread.sleep(100);
    Assert.assertEquals(handler.getContext().getCoordinationStructures().getDNodes().values().size(), 0);
  } finally {
    handler.close();
    Hazelcast.shutdownAll();
  }
}
@Test
public void testDeployEnding() throws Throwable {
  // Test what happens when DNodes complete the deploy process
  final QNodeHandler handler = new QNodeHandler();
  SploutConfiguration config = SploutConfiguration.getTestConfig();
  DNodeHandler dHandler = new DNodeHandler();
  DNode dnode = TestUtils.getTestDNode(config, dHandler, "dnode-" + this.getClass().getName() + "-1");

  try {
    handler.init(config);

    DeployRequest deployRequest1 = new DeployRequest();
    deployRequest1.setTablespace("partition1");
    deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest1.setReplicationMap(ReplicationMap.oneToOneMap(dnode.getAddress()).getReplicationEntries());

    File fakeDeployFolder = new File(FAKE_DEPLOY_FOLDER);
    fakeDeployFolder.mkdir();
    File deployData = new File(fakeDeployFolder, "0.db");
    deployData.createNewFile();
    deployRequest1.setData_uri(fakeDeployFolder.toURI().toString());

    List<DeployRequest> l = new ArrayList<DeployRequest>();
    l.add(deployRequest1);
    handler.deploy(l);

    new TestUtils.NotWaitingForeverCondition() {
      @Override
      public boolean endCondition() {
        boolean cond1 = handler.getContext().getTablespaceVersionsMap().values().size() == 1;
        boolean cond2 = handler.getContext().getCurrentVersionsMap().get("partition1") != null;
        return cond1 && cond2;
      }
    }.waitAtMost(5000);

    assertEquals(
        (long) handler.getContext().getTablespaceVersionsMap().keySet().iterator().next().getVersion(),
        (long) handler.getContext().getCurrentVersionsMap().values().iterator().next()); // everything OK
  } finally {
    handler.close();
    dnode.stop();
    Hazelcast.shutdownAll();
  }
}
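/*
 * The tests in this class rely on TestUtils.NotWaitingForeverCondition to poll for an
 * asynchronous condition instead of sleeping for a fixed amount of time. The sketch below shows
 * how such a helper can be implemented; the class name, poll interval and the exception thrown on
 * timeout are illustrative assumptions, not the actual TestUtils code.
 */
public abstract class PollingCondition {

  /** Returns true once the asynchronous state the test is waiting for has been reached. */
  public abstract boolean endCondition();

  /** Polls endCondition() until it holds, or fails after timeoutMillis milliseconds. */
  public void waitAtMost(long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!endCondition()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("Condition not met after " + timeoutMillis + " ms");
      }
      Thread.sleep(50); // illustrative poll interval
    }
  }
}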
/** Gets the Splout configuration using double configuration: defaults + custom. */
public static SploutConfiguration get(String rootDir) {
  SploutConfiguration properties = new SploutConfiguration();

  PropertiesConfiguration config = load(rootDir, SPLOUT_PROPERTIES, false);
  if (config != null) {
    properties.addConfiguration(config);
  }
  config = load(rootDir, SPLOUT_PROPERTIES + ".default", true);
  properties.addConfiguration(config);

  // The following lines replace the default "localhost" by the local IP for convenience:
  String myIp = "localhost";
  try {
    Collection<InetAddress> iNetAddresses = GetIPAddresses.getAllLocalIPs();
    // but only if there is Internet connectivity!
    if (iNetAddresses != null) {
      Iterator<InetAddress> it = iNetAddresses.iterator();
      if (it.hasNext()) {
        InetAddress address = it.next();
        if (address.getHostAddress() != null) {
          myIp = address.getHostAddress();
        }
      }
    }
  } catch (IOException e) {
    throw new RuntimeException(e);
  }

  if (config.getString(QNodeProperties.HOST) != null
      && config.getString(QNodeProperties.HOST).equals("localhost")) {
    config.setProperty(QNodeProperties.HOST, myIp);
  }
  if (config.getString(DNodeProperties.HOST) != null
      && config.getString(DNodeProperties.HOST).equals("localhost")) {
    config.setProperty(DNodeProperties.HOST, myIp);
  }

  return properties;
}
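/*
 * The "double configuration" above relies on composite-configuration semantics: the configuration
 * added first takes precedence on lookups, so the user-provided splout.properties (added before
 * the .default file) overrides the shipped defaults. The sketch below illustrates that behaviour
 * with Apache Commons Configuration directly; it assumes SploutConfiguration behaves like a
 * CompositeConfiguration, and the property keys and values used here are illustrative only.
 */
import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.CompositeConfiguration;

public class CompositePrecedenceSketch {

  public static void main(String[] args) {
    BaseConfiguration custom = new BaseConfiguration();
    custom.setProperty("qnode.port", 5555); // would come from the user-provided splout.properties

    BaseConfiguration defaults = new BaseConfiguration();
    defaults.setProperty("qnode.port", 4412);        // would come from splout.properties.default
    defaults.setProperty("qnode.host", "localhost"); // only present in the defaults

    CompositeConfiguration composite = new CompositeConfiguration();
    composite.addConfiguration(custom);   // added first: wins lookups
    composite.addConfiguration(defaults); // fallback values

    System.out.println(composite.getInt("qnode.port"));    // prints 5555 (custom wins)
    System.out.println(composite.getString("qnode.host")); // prints "localhost" (from defaults)
  }
}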
@Test
public void testUniqueIdGenerator() throws HazelcastConfigBuilderException {
  try {
    SploutConfiguration config = SploutConfiguration.getTestConfig();
    HazelcastInstance hz = Hazelcast.newHazelcastInstance(HazelcastConfigBuilder.build(config));
    CoordinationStructures cS = new CoordinationStructures(hz);
    for (int i = 0; i < 1000; i++) {
      long version1 = cS.uniqueVersionId();
      long version2 = cS.uniqueVersionId();
      assertTrue(version2 > version1);
    }
  } finally {
    Hazelcast.shutdownAll();
  }
}
/** Gets only the default values and adds some desirable properties for testing. */
public static SploutConfiguration getTestConfig() {
  SploutConfiguration properties = new SploutConfiguration();
  PropertiesConfiguration config = load("", SPLOUT_PROPERTIES + ".default", true);
  properties.addConfiguration(config);

  // Activate replica balancing for tests
  properties.setProperty(QNodeProperties.REPLICA_BALANCE_ENABLE, true);
  // Disable wait for testing speedup.
  properties.setProperty(HazelcastProperties.DISABLE_WAIT_WHEN_JOINING, true);
  // Reduce warming up to only one second:
  // that's enough since Hazelcast joining is by far slower
  properties.setProperty(QNodeProperties.WARMING_TIME, 1);
  // Disable HZ state storage
  properties.clearProperty(HazelcastProperties.HZ_PERSISTENCE_FOLDER);

  return properties;
}
public Fetcher(SploutConfiguration config) {
  tempDir = new File(config.getString(FetcherProperties.TEMP_DIR));
  accessKey = config.getString(FetcherProperties.S3_ACCESS_KEY, null);
  secretKey = config.getString(FetcherProperties.S3_SECRET_KEY, null);
  downloadBufferSize = config.getInt(FetcherProperties.DOWNLOAD_BUFFER);
  bytesPerSecThrottle = config.getInt(FetcherProperties.BYTES_PER_SEC_THROTTLE);
  bytesToReportProgress = config.getLong(FetcherProperties.BYTES_TO_REPORT_PROGRESS);
  String fsName = config.getString(FetcherProperties.HADOOP_FS_NAME);

  hadoopConf = new Configuration();
  if (fsName != null) {
    hadoopConf.set("fs.default.name", fsName);
  }

  log.info("Created " + Fetcher.class + " with tempDir = " + tempDir);
  if (bytesPerSecThrottle > 0) {
    log.info("Throttling at: " + bytesPerSecThrottle + " bytes per sec.");
  } else {
    log.warn("No throttling. Fetched data will be transferred at full speed. This may affect query servicing.");
  }
}
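/*
 * A minimal sketch of how a bytes-per-second throttle like the one configured above is commonly
 * implemented: after each copied chunk, the copy loop sleeps long enough to keep the observed
 * rate at or below the configured limit. This illustrates the technique only; it is not the
 * actual Fetcher download code, which is not shown in this snippet.
 */
public class RateLimiterSketch {

  private final long bytesPerSec;
  private long startTimeMillis;
  private long bytesCopied;

  public RateLimiterSketch(long bytesPerSec) {
    this.bytesPerSec = bytesPerSec;
  }

  /** Call once before starting a download. */
  public void start() {
    startTimeMillis = System.currentTimeMillis();
    bytesCopied = 0;
  }

  /** Call after copying a chunk; sleeps if we are ahead of the allowed rate. */
  public void limit(long newBytes) throws InterruptedException {
    if (bytesPerSec <= 0) {
      return; // throttling disabled
    }
    bytesCopied += newBytes;
    // Time the transfer should have taken so far at the configured rate:
    long expectedMillis = (bytesCopied * 1000L) / bytesPerSec;
    long elapsedMillis = System.currentTimeMillis() - startTimeMillis;
    if (elapsedMillis < expectedMillis) {
      Thread.sleep(expectedMillis - elapsedMillis);
    }
  }
}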
@Test
public void testInitVersionListAndVersionChange() throws Throwable {
  final QNodeHandler handler = new QNodeHandler();
  SploutConfiguration config = SploutConfiguration.getTestConfig();

  try {
    HazelcastInstance hz = Hazelcast.newHazelcastInstance(HazelcastConfigBuilder.build(config));
    CoordinationStructures coord = new CoordinationStructures(hz);

    handler.init(config);

    Map<String, Long> versionsBeingServed = new HashMap<String, Long>();
    versionsBeingServed.put("t1", 0L);
    coord.getVersionsBeingServed().put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

    versionsBeingServed.put("t2", 1L);
    coord.getVersionsBeingServed().put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

    new TestUtils.NotWaitingForeverCondition() {
      @Override
      public boolean endCondition() {
        return handler.getContext().getCurrentVersionsMap().get("t1") != null
            && handler.getContext().getCurrentVersionsMap().get("t1") == 0L
            && handler.getContext().getCurrentVersionsMap().get("t2") != null
            && handler.getContext().getCurrentVersionsMap().get("t2") == 1L;
      }
    }.waitAtMost(5000);

    versionsBeingServed.put("t2", 0L);
    versionsBeingServed.put("t1", 1L);
    coord.getVersionsBeingServed().put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

    new TestUtils.NotWaitingForeverCondition() {
      @Override
      public boolean endCondition() {
        return handler.getContext().getCurrentVersionsMap().get("t1") != null
            && handler.getContext().getCurrentVersionsMap().get("t1") == 1L
            && handler.getContext().getCurrentVersionsMap().get("t2") != null
            && handler.getContext().getCurrentVersionsMap().get("t2") == 0L;
      }
    }.waitAtMost(5000);

    versionsBeingServed.put("t2", 1L);
    versionsBeingServed.put("t1", 0L);
    coord.getVersionsBeingServed().put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

    Thread.sleep(100);
    Assert.assertEquals(0L, (long) handler.getContext().getCurrentVersionsMap().get("t1"));
    Assert.assertEquals(1L, (long) handler.getContext().getCurrentVersionsMap().get("t2"));
  } finally {
    handler.close();
    Hazelcast.shutdownAll();
  }
}
@Test
public void testDeployFiring() throws Throwable {
  // Test the business logic that produces the firing of the deployment (not the continuation of it).
  // For that, we will use dummy DNodeHandlers.
  QNodeHandler handler = new QNodeHandler();
  SploutConfiguration config = SploutConfiguration.getTestConfig();
  DNode dnode = TestUtils.getTestDNode(config, new IDNodeHandler() {

    @Override
    public void init(SploutConfiguration config) throws Exception {}

    @Override
    public String sqlQuery(String tablespace, long version, int partition, String query) throws DNodeException {
      return null;
    }

    @Override
    public String deploy(List<DeployAction> deployActions, long version) throws DNodeException {
      Assert.assertEquals(1, deployActions.size());
      Assert.assertEquals("hdfs://foo/bar/0.db", deployActions.get(0).getDataURI());
      Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
      Assert.assertTrue(version >= 0); // TODO Is this the right checking here?
      return "FOO";
    }

    @Override
    public String rollback(List<RollbackAction> rollbackActions, String ignoreMe) throws DNodeException {
      return null;
    }

    @Override
    public String status() throws DNodeException {
      return null;
    }

    @Override
    public void stop() throws Exception {}

    @Override
    public void giveGreenLigth() {}

    @Override
    public String abortDeploy(long version) throws DNodeException {
      return null;
    }

    @Override
    public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions) throws DNodeException {
      return null;
    }

    @Override
    public String testCommand(String command) throws DNodeException {
      // TODO Auto-generated method stub
      return null;
    }
  }, "dnode-" + this.getClass().getName() + "-5");

  try {
    handler.init(config);

    ReplicationEntry repEntry = new ReplicationEntry(0, dnode.getAddress());
    DeployRequest deployRequest = new DeployRequest();
    deployRequest.setTablespace("partition1");
    deployRequest.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest.setReplicationMap(Arrays.asList(repEntry));
    deployRequest.setData_uri("hdfs://foo/bar");

    List<DeployRequest> l = new ArrayList<DeployRequest>();
    l.add(deployRequest);
    handler.deploy(l);
  } finally {
    handler.close();
    dnode.stop();
    Hazelcast.shutdownAll();
  }
}
@Test
public void testMultiDeployFiring() throws Throwable {
  // Same as test deploy firing, but with more than one DNode and different deploy actions
  SploutConfiguration config1 = SploutConfiguration.getTestConfig();
  DNode dnode1 = TestUtils.getTestDNode(config1, new IDNodeHandler() {

    @Override
    public void init(SploutConfiguration config) throws Exception {}

    @Override
    public String sqlQuery(String tablespace, long version, int partition, String query) throws DNodeException {
      return null;
    }

    @Override
    public String deploy(List<DeployAction> deployActions, long distributedBarrier) throws DNodeException {
      /*
       * DNode1 asserts
       */
      Assert.assertEquals(2, deployActions.size());
      Assert.assertEquals("hdfs://foo/bar1/0.db", deployActions.get(0).getDataURI());
      Assert.assertEquals("hdfs://foo/bar2/0.db", deployActions.get(1).getDataURI());
      Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
      Assert.assertEquals("partition2", deployActions.get(1).getTablespace());
      return "FOO";
    }

    @Override
    public String rollback(List<RollbackAction> rollbackActions, String ignoreMe) throws DNodeException {
      return null;
    }

    @Override
    public String status() throws DNodeException {
      return null;
    }

    @Override
    public void stop() throws Exception {}

    @Override
    public void giveGreenLigth() {}

    @Override
    public String abortDeploy(long version) throws DNodeException {
      return null;
    }

    @Override
    public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions) throws DNodeException {
      return null;
    }

    @Override
    public String testCommand(String command) throws DNodeException {
      // TODO Auto-generated method stub
      return null;
    }
  }, "dnode-" + this.getClass().getName() + "-3");

  SploutConfiguration config2 = SploutConfiguration.getTestConfig();
  DNode dnode2 = TestUtils.getTestDNode(config2, new IDNodeHandler() {

    @Override
    public void init(SploutConfiguration config) throws Exception {}

    @Override
    public String sqlQuery(String tablespace, long version, int partition, String query) throws DNodeException {
      return null;
    }

    @Override
    public String deploy(List<DeployAction> deployActions, long distributedBarrier) throws DNodeException {
      /*
       * DNode2 asserts
       */
      Assert.assertEquals(1, deployActions.size());
      Assert.assertEquals("hdfs://foo/bar1/0.db", deployActions.get(0).getDataURI());
      Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
      return "FOO";
    }

    @Override
    public String rollback(List<RollbackAction> rollbackActions, String ignoreMe) throws DNodeException {
      return null;
    }

    @Override
    public String status() throws DNodeException {
      return null;
    }

    @Override
    public void stop() throws Exception {}

    @Override
    public void giveGreenLigth() {}

    @Override
    public String abortDeploy(long version) throws DNodeException {
      return null;
    }

    @Override
    public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions) throws DNodeException {
      return null;
    }

    @Override
    public String testCommand(String command) throws DNodeException {
      // TODO Auto-generated method stub
      return null;
    }
  }, "dnode-" + this.getClass().getName() + "-4");

  QNodeHandler handler = new QNodeHandler();
  try {
    handler.init(config1);

    ReplicationEntry repEntry1 = new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress());
    ReplicationEntry repEntry2 = new ReplicationEntry(0, dnode1.getAddress());

    DeployRequest deployRequest1 = new DeployRequest();
    deployRequest1.setTablespace("partition1");
    deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest1.setReplicationMap(Arrays.asList(repEntry1));
    deployRequest1.setData_uri("hdfs://foo/bar1");

    DeployRequest deployRequest2 = new DeployRequest();
    deployRequest2.setTablespace("partition2");
    deployRequest2.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
    deployRequest2.setReplicationMap(Arrays.asList(repEntry2));
    deployRequest2.setData_uri("hdfs://foo/bar2");

    List<DeployRequest> l = new ArrayList<DeployRequest>();
    l.add(deployRequest1);
    l.add(deployRequest2);
    handler.deploy(l);
  } finally {
    handler.close();
    dnode1.stop();
    dnode2.stop();
    Hazelcast.shutdownAll();
  }
}