@Test
  public void testQuery() throws Throwable {
    QNodeHandler handler = new QNodeHandler();
    handler.init(SploutConfiguration.getTestConfig());

    SploutConfiguration config = SploutConfiguration.getTestConfig();
    DNodeHandler dHandler = new DNodeHandler();
    DNode dnode =
        TestUtils.getTestDNode(config, dHandler, "dnode-" + this.getClass().getName() + "-2");
    try {
      ReplicationEntry repEntry = new ReplicationEntry(0, dnode.getAddress());
      Tablespace tablespace1 =
          new Tablespace(
              PartitionMap.oneShardOpenedMap(),
              new ReplicationMap(Arrays.asList(repEntry)),
              0l,
              0l);
      handler
          .getContext()
          .getTablespaceVersionsMap()
          .put(new TablespaceVersion("tablespace1", 0l), tablespace1);
      handler.getContext().getCurrentVersionsMap().put("tablespace1", 0l);

      // Query key 2 (> 1 < 10)
      QueryStatus qStatus = handler.query("tablespace1", "2", "SELECT 1;", null);
      Assert.assertEquals(Integer.valueOf(0), qStatus.getShard());
      Assert.assertEquals("[1]", qStatus.getResult().toString());
    } finally {
      handler.close();
      dnode.stop();
      Hazelcast.shutdownAll();
    }
  }
  @Test
  public void testInitDNodeList() throws Throwable {
    SploutConfiguration config = SploutConfiguration.getTestConfig();
    QNodeHandler handler = new QNodeHandler();
    try {
      HazelcastInstance hz = Hazelcast.newHazelcastInstance(HazelcastConfigBuilder.build(config));
      CoordinationStructures coord = new CoordinationStructures(hz);

      SploutConfiguration dNodeConfig = SploutConfiguration.getTestConfig();
      dNodeConfig.setProperty(DNodeProperties.PORT, 1000);

      coord.getDNodes().put("/localhost:1000", new DNodeInfo(dNodeConfig));

      dNodeConfig = SploutConfiguration.getTestConfig();
      dNodeConfig.setProperty(DNodeProperties.PORT, 1001);

      coord.getDNodes().put("/localhost:1001", new DNodeInfo(dNodeConfig));

      try {
        handler.init(config);
      } catch (Exception e) {
        // The handler will try to connect to "localhost:1000", which isn't running,
        // so we swallow the exception and continue: the structures we want to assert
        // on should be present anyway.
      }
      Assert.assertEquals(
          2, handler.getContext().getCoordinationStructures().getDNodes().values().size());
    } finally {
      handler.close();
      Hazelcast.shutdownAll();
    }
  }
  @Test
  public void testDeployEnding() throws Throwable {
    // Test what happens when DNodes complete the deploy process
    final QNodeHandler handler = new QNodeHandler();
    SploutConfiguration config = SploutConfiguration.getTestConfig();
    DNodeHandler dHandler = new DNodeHandler();
    DNode dnode =
        TestUtils.getTestDNode(config, dHandler, "dnode-" + this.getClass().getName() + "-1");

    try {
      handler.init(config);

      DeployRequest deployRequest1 = new DeployRequest();
      deployRequest1.setTablespace("partition1");
      deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
      deployRequest1.setReplicationMap(
          ReplicationMap.oneToOneMap(dnode.getAddress()).getReplicationEntries());

      File fakeDeployFolder = new File(FAKE_DEPLOY_FOLDER);
      fakeDeployFolder.mkdir();
      File deployData = new File(fakeDeployFolder, "0.db");
      deployData.createNewFile();
      deployRequest1.setData_uri(fakeDeployFolder.toURI().toString());

      List<DeployRequest> l = new ArrayList<DeployRequest>();
      l.add(deployRequest1);

      handler.deploy(l);

      new TestUtils.NotWaitingForeverCondition() {
        @Override
        public boolean endCondition() {
          boolean cond1 = handler.getContext().getTablespaceVersionsMap().values().size() == 1;
          boolean cond2 = handler.getContext().getCurrentVersionsMap().get("partition1") != null;
          return cond1 && cond2;
        }
      }.waitAtMost(5000);

      assertEquals(
          (long)
              handler
                  .getContext()
                  .getTablespaceVersionsMap()
                  .keySet()
                  .iterator()
                  .next()
                  .getVersion(),
          (long) handler.getContext().getCurrentVersionsMap().values().iterator().next());
      // everything OK
    } finally {
      handler.close();
      dnode.stop();
      Hazelcast.shutdownAll();
    }
  }
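  // Hedged sketch (an assumption; Splout's real TestUtils.NotWaitingForeverCondition may
  // differ): a polling helper like the one used in the test above, which re-evaluates
  // endCondition() until it holds or the timeout expires.
  public abstract static class WaitCondition {
    public abstract boolean endCondition() throws Exception;

    public void waitAtMost(long timeoutMillis) throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      while (!endCondition()) {
        if (System.currentTimeMillis() > deadline) {
          throw new AssertionError("Condition not met within " + timeoutMillis + " ms");
        }
        Thread.sleep(50); // poll every 50 ms
      }
    }
  }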
    public void handle(HttpExchange t) throws IOException {

      Hazelcast.shutdownAll();

      String response = "Shutting down";
      t.sendResponseHeaders(200, response.length());
      OutputStream os = t.getResponseBody();
      os.write(response.getBytes());
      os.close();

      System.exit(-1);
    }
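  // Hedged sketch (not part of the original snippet): how a shutdown handler like the one
  // above is typically registered with the JDK's built-in com.sun.net.httpserver.HttpServer.
  // The port (8089) and the "/shutdown" path are illustrative assumptions only.
  public static void startShutdownEndpoint(HttpHandler shutdownHandler) throws IOException {
    HttpServer server = HttpServer.create(new InetSocketAddress(8089), 0);
    server.createContext("/shutdown", shutdownHandler);
    server.setExecutor(null); // null means requests are handled on the calling thread
    server.start();
  }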
  @Before
  public void setUp() {
    Hazelcast.shutdownAll();

    instanceFactory = createHazelcastInstanceFactory(10000);
    config = new Config();
    config.getGroupConfig().setName(generateRandomString(10));
    MapConfig mapConfig = new MapConfig("map");
    // mapConfig.setBackupCount(0);
    config.addMapConfig(mapConfig);
    hz = createHazelcastInstance();

    for (int i = 0; i < INITIAL_MEMBER_COUNT; i++) {
      queue.add(createHazelcastInstance());
    }
  }
  @Test
  public void testUniqueIdGenerator() throws HazelcastConfigBuilderException {
    try {
      SploutConfiguration config = SploutConfiguration.getTestConfig();
      HazelcastInstance hz = Hazelcast.newHazelcastInstance(HazelcastConfigBuilder.build(config));
      CoordinationStructures cS = new CoordinationStructures(hz);

      for (int i = 0; i < 1000; i++) {
        long version1 = cS.uniqueVersionId();
        long version2 = cS.uniqueVersionId();
        assertTrue(version2 > version1);
      }

    } finally {
      Hazelcast.shutdownAll();
    }
  }
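  // Hedged sketch (an assumption, not Splout's actual CoordinationStructures code): a
  // monotonically increasing, cluster-wide version id like the one asserted above can be
  // backed by a Hazelcast 3.x IAtomicLong. The structure name "version-generator" is
  // illustrative only.
  public static long nextVersionId(HazelcastInstance hz) {
    IAtomicLong generator = hz.getAtomicLong("version-generator");
    return generator.incrementAndGet();
  }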
 @After
 public void teardown() {
   Hazelcast.shutdownAll();
 }
 @BeforeClass
 public static void init() throws Exception {
   System.setProperty(GroupProperties.PROP_WAIT_SECONDS_BEFORE_JOIN, "1");
   System.setProperty(GroupProperties.PROP_VERSION_CHECK_ENABLED, "false");
   Hazelcast.shutdownAll();
 }
 @After
 @Before
 public void cleanup() throws Exception {
   Hazelcast.shutdownAll();
   HazelcastClient.shutdownAll();
 }
 @BeforeClass
 public static void init() throws Exception {
   Hazelcast.shutdownAll();
 }
 @BeforeClass
 @AfterClass
 public static void start() {
   Hazelcast.shutdownAll();
 }
 @After
 public final void cleanup() {
   Hazelcast.shutdownAll();
 }
  @Test
  public void testDeployFiring() throws Throwable {
    // Test the business logic that fires the deployment (not its continuation).
    // For that we use dummy DNodeHandlers.
    QNodeHandler handler = new QNodeHandler();
    SploutConfiguration config = SploutConfiguration.getTestConfig();

    DNode dnode =
        TestUtils.getTestDNode(
            config,
            new IDNodeHandler() {
              @Override
              public void init(SploutConfiguration config) throws Exception {}

              @Override
              public String sqlQuery(String tablespace, long version, int partition, String query)
                  throws DNodeException {
                return null;
              }

              @Override
              public String deploy(List<DeployAction> deployActions, long version)
                  throws DNodeException {
                Assert.assertEquals(1, deployActions.size());
                Assert.assertEquals("hdfs://foo/bar/0.db", deployActions.get(0).getDataURI());
                Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
                Assert.assertTrue(version >= 0); // TODO Is this the right check here?
                return "FOO";
              }

              @Override
              public String rollback(List<RollbackAction> rollbackActions, String ignoreMe)
                  throws DNodeException {
                return null;
              }

              @Override
              public String status() throws DNodeException {
                return null;
              }

              @Override
              public void stop() throws Exception {}

              @Override
              public void giveGreenLigth() {}

              @Override
              public String abortDeploy(long version) throws DNodeException {
                return null;
              }

              @Override
              public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions)
                  throws DNodeException {
                return null;
              }

              @Override
              public String testCommand(String command) throws DNodeException {
                // TODO Auto-generated method stub
                return null;
              }
            },
            "dnode-" + this.getClass().getName() + "-5");

    try {
      handler.init(config);

      ReplicationEntry repEntry = new ReplicationEntry(0, dnode.getAddress());

      DeployRequest deployRequest = new DeployRequest();
      deployRequest.setTablespace("partition1");
      deployRequest.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
      deployRequest.setReplicationMap(Arrays.asList(repEntry));
      deployRequest.setData_uri("hdfs://foo/bar");

      List<DeployRequest> l = new ArrayList<DeployRequest>();
      l.add(deployRequest);
      handler.deploy(l);
    } finally {
      handler.close();
      dnode.stop();
      Hazelcast.shutdownAll();
    }
  }
  @Test
  public void testMultiDeployFiring() throws Throwable {
    // Same as test deploy firing, but with more than one DNode and different deploy actions
    SploutConfiguration config1 = SploutConfiguration.getTestConfig();

    DNode dnode1 =
        TestUtils.getTestDNode(
            config1,
            new IDNodeHandler() {
              @Override
              public void init(SploutConfiguration config) throws Exception {}

              @Override
              public String sqlQuery(String tablespace, long version, int partition, String query)
                  throws DNodeException {
                return null;
              }

              @Override
              public String deploy(List<DeployAction> deployActions, long distributedBarrier)
                  throws DNodeException {
                /*
                 * DNode1 asserts
                 */
                Assert.assertEquals(2, deployActions.size());
                Assert.assertEquals("hdfs://foo/bar1/0.db", deployActions.get(0).getDataURI());
                Assert.assertEquals("hdfs://foo/bar2/0.db", deployActions.get(1).getDataURI());
                Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
                Assert.assertEquals("partition2", deployActions.get(1).getTablespace());
                return "FOO";
              }

              @Override
              public String rollback(List<RollbackAction> rollbackActions, String ignoreMe)
                  throws DNodeException {
                return null;
              }

              @Override
              public String status() throws DNodeException {
                return null;
              }

              @Override
              public void stop() throws Exception {}

              @Override
              public void giveGreenLigth() {}

              @Override
              public String abortDeploy(long version) throws DNodeException {
                return null;
              }

              @Override
              public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions)
                  throws DNodeException {
                return null;
              }

              @Override
              public String testCommand(String command) throws DNodeException {
                // TODO Auto-generated method stub
                return null;
              }
            },
            "dnode-" + this.getClass().getName() + "-3");

    SploutConfiguration config2 = SploutConfiguration.getTestConfig();
    DNode dnode2 =
        TestUtils.getTestDNode(
            config2,
            new IDNodeHandler() {
              @Override
              public void init(SploutConfiguration config) throws Exception {}

              @Override
              public String sqlQuery(String tablespace, long version, int partition, String query)
                  throws DNodeException {
                return null;
              }

              @Override
              public String deploy(List<DeployAction> deployActions, long distributedBarrier)
                  throws DNodeException {
                /*
                 * DNode2 asserts
                 */
                Assert.assertEquals(1, deployActions.size());
                Assert.assertEquals("hdfs://foo/bar1/0.db", deployActions.get(0).getDataURI());
                Assert.assertEquals("partition1", deployActions.get(0).getTablespace());
                return "FOO";
              }

              @Override
              public String rollback(List<RollbackAction> rollbackActions, String ignoreMe)
                  throws DNodeException {
                return null;
              }

              @Override
              public String status() throws DNodeException {
                return null;
              }

              @Override
              public void stop() throws Exception {}

              @Override
              public void giveGreenLigth() {}

              @Override
              public String abortDeploy(long version) throws DNodeException {
                return null;
              }

              @Override
              public String deleteOldVersions(List<com.splout.db.thrift.TablespaceVersion> versions)
                  throws DNodeException {
                return null;
              }

              @Override
              public String testCommand(String command) throws DNodeException {
                // TODO Auto-generated method stub
                return null;
              }
            },
            "dnode-" + this.getClass().getName() + "-4");

    QNodeHandler handler = new QNodeHandler();
    try {
      handler.init(config1);
      ReplicationEntry repEntry1 =
          new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress());
      ReplicationEntry repEntry2 = new ReplicationEntry(0, dnode1.getAddress());

      DeployRequest deployRequest1 = new DeployRequest();
      deployRequest1.setTablespace("partition1");
      deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
      deployRequest1.setReplicationMap(Arrays.asList(repEntry1));
      deployRequest1.setData_uri("hdfs://foo/bar1");

      DeployRequest deployRequest2 = new DeployRequest();
      deployRequest2.setTablespace("partition2");
      deployRequest2.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
      deployRequest2.setReplicationMap(Arrays.asList(repEntry2));
      deployRequest2.setData_uri("hdfs://foo/bar2");

      List<DeployRequest> l = new ArrayList<DeployRequest>();
      l.add(deployRequest1);
      l.add(deployRequest2);

      handler.deploy(l);
    } finally {
      handler.close();
      dnode1.stop();
      dnode2.stop();
      Hazelcast.shutdownAll();
    }
  }
  @Override
  public void start() throws Exception {
    System.out.println("Elapsed time: " + sw); // if(true) return;
    EventBus eb = vertx.eventBus();

    //    eb.addInterceptor(sc->{
    //      System.out.println("----- "+sc.message());
    //      sc.next();
    //    });

    // Send a message every second

    AtomicInteger count = new AtomicInteger(0);
    // vertx.setPeriodic(1000, v -> { sendPing(eb, count); });
    // sendPing(eb, count);
    for (int i = 1; i < 5; ++i) {
      vertx.setTimer(
          1000 * i,
          v -> {
            sendPing(eb, count);
          });
    }

    //    vertx.setTimer(1000*6, v -> { throw new RuntimeException("test"); });

    if (!true) {
      // This works!!
      vertx.setTimer(
          1000 * 7,
          v -> {
            System.out.println("Calling vertx.close()");
            log.info("Calling vertx.close()");
            vertx.close(
                c -> {
                  System.out.println("Callback to vertx.close() " + c);
                  System.err.println("Callback to vertx.close() " + c);
                  log.info("Callback to vertx.close() " + c);
                });
            try {
              System.out.println("Sleeping");
              Thread.sleep(2000);
              System.out.println("Waking");
            } catch (Exception e) {
              e.printStackTrace();
            }
          });
    } else {
      Thread thread =
          new Thread(
              () -> {
                System.out.println("calling vertx.close()");
                if (false)
                  vertx.close(
                      c -> {
                        // this executes only if I hit Ctrl-C (perhaps because Vertx already has a
                        // shutdownHook!)
                        log.info("callback to vertx.close() " + c);
                        System.out.println("vertx closed");
                        // Runner.mgr.leave(r->{});
                        Hazelcast.getAllHazelcastInstances()
                            .forEach(
                                h -> {
                                  System.err.println("calling h.close() " + h);
                                  h.getLifecycleService().shutdown();
                                });
                        System.err.println("calling Hazelcast.shutdownAll() ");
                        Hazelcast.shutdownAll();
                      });

                try {
                  int sec = 0;
                  log.info(
                      "Sleeping {} seconds to allow Vertx's shutdown hook to finish completely...",
                      sec);
                  Thread.sleep(sec * 1000);
                  System.out.println("Waking");
                } catch (Exception e) {
                  e.printStackTrace();
                }
              },
              "vertx-shutdown-hook");
      // thread.setDaemon(false);
      Runtime.getRuntime().addShutdownHook(thread);
    }

    vertx.setTimer(
        1000 * 7,
        v -> {
          System.exit(0);
        });
  }
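  // Note (an assumption based on the inline comments above, not verified behavior): in a
  // clustered Vert.x deployment backed by Hazelcast, vertx.close() normally shuts the
  // cluster manager down as well; the explicit Hazelcast.shutdownAll() above acts as a
  // fallback for the case where the close callback never runs, e.g. when closing from
  // inside a JVM shutdown hook as this verticle does.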
 @After
 public void reset() {
   HazelcastClient.shutdownAll();
   Hazelcast.shutdownAll();
 }
 @BeforeClass
 @AfterClass
 public static void tearUpAndDown() {
   Hazelcast.shutdownAll();
 }
  @Test
  public void testInitVersionListAndVersionChange() throws Throwable {
    final QNodeHandler handler = new QNodeHandler();
    SploutConfiguration config = SploutConfiguration.getTestConfig();
    try {

      HazelcastInstance hz = Hazelcast.newHazelcastInstance(HazelcastConfigBuilder.build(config));
      CoordinationStructures coord = new CoordinationStructures(hz);

      handler.init(config);

      Map<String, Long> versionsBeingServed = new HashMap<String, Long>();
      versionsBeingServed.put("t1", 0l);
      coord
          .getVersionsBeingServed()
          .put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

      versionsBeingServed.put("t2", 1l);
      coord
          .getVersionsBeingServed()
          .put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

      new TestUtils.NotWaitingForeverCondition() {

        @Override
        public boolean endCondition() {
          return handler.getContext().getCurrentVersionsMap().get("t1") != null
              && handler.getContext().getCurrentVersionsMap().get("t1") == 0l
              && handler.getContext().getCurrentVersionsMap().get("t2") != null
              && handler.getContext().getCurrentVersionsMap().get("t2") == 1l;
        }
      }.waitAtMost(5000);

      versionsBeingServed.put("t2", 0l);
      versionsBeingServed.put("t1", 1l);
      coord
          .getVersionsBeingServed()
          .put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

      new TestUtils.NotWaitingForeverCondition() {

        @Override
        public boolean endCondition() {
          return handler.getContext().getCurrentVersionsMap().get("t1") != null
              && handler.getContext().getCurrentVersionsMap().get("t1") == 1l
              && handler.getContext().getCurrentVersionsMap().get("t2") != null
              && handler.getContext().getCurrentVersionsMap().get("t2") == 0l;
        }
      }.waitAtMost(5000);

      versionsBeingServed.put("t2", 1l);
      versionsBeingServed.put("t1", 0l);
      coord
          .getVersionsBeingServed()
          .put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);

      Thread.sleep(100);

      Assert.assertEquals(0l, (long) handler.getContext().getCurrentVersionsMap().get("t1"));
      Assert.assertEquals(1l, (long) handler.getContext().getCurrentVersionsMap().get("t2"));
    } finally {
      handler.close();
      Hazelcast.shutdownAll();
    }
  }
 @PreDestroy
 public void destroy() {
   log.info("Closing Cache Manager");
   Hazelcast.shutdownAll();
 }
 @AfterClass
 public static void destroy() {
   hz.getLifecycleService().shutdown();
   Hazelcast.shutdownAll();
 }
 public boolean shutdownAll() {
   LOG.info("Shutting down the cluster.");
   Hazelcast.shutdownAll();
   return true;
 }
 public void tearDown() {
   Hazelcast.shutdownAll();
 }
 @After
 public void cleanup() {
   HazelcastClient.shutdownAll();
   Hazelcast.shutdownAll();
 }
 @BeforeClass
 public static void beforeClass() {
   Hazelcast.shutdownAll();
   serverInstance = TestUtils.newServerInstance();
   clientInstance = TestUtils.newClientInstance();
 }
 @After
 public void cleanup() throws Exception {
   Hazelcast.shutdownAll();
 }
 @AfterClass
 public static void afterClass() {
   Hazelcast.shutdownAll();
 }
 @AfterClass
 public static void destroy() {
   hz.shutdown();
   Hazelcast.shutdownAll();
 }
 @AfterClass
 public static void tearDown() {
   HazelcastClient.shutdownAll();
   Hazelcast.shutdownAll();
 }
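  // Note (general Hazelcast practice, not from the original snippets): when a test starts
  // both embedded members and clients, shutting HazelcastClient instances down before
  // calling Hazelcast.shutdownAll(), as the teardown methods above do, keeps clients from
  // retrying connections against a cluster that is already gone.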