@Test
public void testCountDown() throws IOException {
  // Arm the latch with a single count so one CountDownRequest drains it.
  ICountDownLatch latch = getCountDownLatch();
  latch.trySetCount(1);

  // Fire the count-down through the client protocol and wait for the ack.
  final SimpleClient protocolClient = getClient();
  protocolClient.send(new CountDownRequest(name));
  protocolClient.receive();

  // The single count must have been consumed.
  assertEquals(0, latch.getCount());
}
@Test
public void testHazelcastInstances() {
  // Every injected Hazelcast structure must have been wired up.
  assertNotNull(map1);
  assertNotNull(map2);
  assertNotNull(multiMap);
  assertNotNull(replicatedMap);
  assertNotNull(queue);
  assertNotNull(topic);
  assertNotNull(set);
  assertNotNull(list);
  assertNotNull(executorService);
  assertNotNull(idGenerator);
  assertNotNull(atomicLong);
  assertNotNull(atomicReference);
  assertNotNull(countDownLatch);
  assertNotNull(semaphore);
  assertNotNull(lock);

  // Each structure must also carry the distributed name it was registered under.
  assertEquals("map1", map1.getName());
  assertEquals("map2", map2.getName());
  assertEquals("testMultimap", multiMap.getName());
  assertEquals("replicatedMap", replicatedMap.getName());
  assertEquals("testQ", queue.getName());
  assertEquals("testTopic", topic.getName());
  assertEquals("set", set.getName());
  assertEquals("list", list.getName());
  assertEquals("idGenerator", idGenerator.getName());
  assertEquals("atomicLong", atomicLong.getName());
  assertEquals("atomicReference", atomicReference.getName());
  assertEquals("countDownLatch", countDownLatch.getName());
  assertEquals("semaphore", semaphore.getName());
}
/** * Call this method for starting an asynchronous deployment given a proper deploy request - proxy * method for {@link QNodeHandler}. Returns a {@link QueryStatus} with the status of the request. */ public DeployInfo deploy(List<DeployRequest> deployRequests) { // A new unique version number is generated. long version = context.getCoordinationStructures().uniqueVersionId(); // Generate the list of actions per DNode Map<String, List<DeployAction>> actionsPerDNode = generateDeployActionsPerDNode(deployRequests, version); // Starting the countdown latch. ICountDownLatch countDownLatchForDeploy = context.getCoordinationStructures().getCountDownLatchForDeploy(version); Set<String> dnodesInvolved = actionsPerDNode.keySet(); countDownLatchForDeploy.setCount(dnodesInvolved.size()); // Sending deploy signals to each DNode for (Map.Entry<String, List<DeployAction>> actionPerDNode : actionsPerDNode.entrySet()) { DNodeService.Client client = null; try { client = context.getDNodeClient(actionPerDNode.getKey(), false); client.deploy(actionPerDNode.getValue(), version); } catch (Exception e) { log.error("Error sending deploy actions to DNode [" + actionPerDNode.getKey() + "]", e); abortDeploy(new ArrayList<String>(actionsPerDNode.keySet()), version); DeployInfo errDeployInfo = new DeployInfo(); errDeployInfo.setError("Error connecting to DNode " + actionPerDNode.getKey()); return errDeployInfo; } finally { client.getOutputProtocol().getTransport().close(); } } // Initiating an asynchronous process to manage the deployment deployThread.execute( new ManageDeploy( new ArrayList(actionsPerDNode.keySet()), deployRequests, version, context.getConfig().getLong(QNodeProperties.DEPLOY_TIMEOUT, -1), context.getConfig().getLong(QNodeProperties.DEPLOY_SECONDS_TO_CHECK_ERROR))); DeployInfo deployInfo = new DeployInfo(); deployInfo.setVersion(version); deployInfo.setStartedAt(SimpleDateFormat.getInstance().format(new Date())); return deployInfo; }
@Override public void run() { log.info( context.getConfig().getProperty(QNodeProperties.PORT) + " Executing deploy for version [" + version + "]"); CoordinationStructures.DEPLOY_IN_PROGRESS.incrementAndGet(); try { long waitSeconds = 0; ICountDownLatch countDownLatchForDeploy = context.getCoordinationStructures().getCountDownLatchForDeploy(version); boolean finished; do { finished = countDownLatchForDeploy.await(secondsToCheckFailureOrTimeout, TimeUnit.SECONDS); waitSeconds += secondsToCheckFailureOrTimeout; if (!finished) { // If any of the DNodes failed, then we cancel the deployment. if (checkForFailure()) { explainErrors(); abortDeploy(dnodes, version); return; } // Let's see if we reached the timeout. // Negative timeoutSeconds => waits forever if (waitSeconds > timeoutSeconds && timeoutSeconds >= 0) { log.warn( "Deploy of version [" + version + "] timed out. Reached [" + waitSeconds + "] seconds."); abortDeploy(dnodes, version); return; } } } while (!finished); log.info( "All DNodes performed the deploy of version [" + version + "]. Publishing tablespaces..."); // We finish by publishing the versions table with the new versions. try { switchVersions(switchActions()); } catch (UnexistingVersion e) { throw new RuntimeException( "Unexisting version after deploying this version. Sounds like a bug.", e); } log.info("Deploy of version [" + version + "] Finished PROPERLY. :-)"); // After a deploy we must synchronize tablespace versions to see if we have to remove some. 
context.synchronizeTablespaceVersions(); CoordinationStructures.DEPLOY_IN_PROGRESS.decrementAndGet(); } catch (MemberLeftException e) { log.error("Error while deploying version [" + version + "]", e); abortDeploy(dnodes, version); } catch (InstanceDestroyedException e) { log.error("Error while deploying version [" + version + "]", e); abortDeploy(dnodes, version); } catch (InterruptedException e) { log.error("Error while deploying version [" + version + "]", e); abortDeploy(dnodes, version); } catch (Throwable t) { t.printStackTrace(); throw new RuntimeException(t); } }
public void timeStep() {
  // Destroy a latch whose name is the base name suffixed with a random int
  // (the suffixed latch may or may not exist; destroy() is issued regardless).
  String latchName = name + random.nextInt();
  hzInstance.getCountDownLatch(latchName).destroy();
}