/**
 * Stop this TachyonWorker. Stop all the threads belonging to this TachyonWorker.
 *
 * @throws IOException
 * @throws InterruptedException
 */
public void stop() throws IOException, InterruptedException {
  mStop = true;
  mWorkerStorage.stop();
  mDataServer.close();
  mServer.stop();
  mServerTNonblockingServerSocket.close();
  mExecutorService.shutdown();
  while (!mDataServer.isClosed() || mServer.isServing() || mHeartbeatThread.isAlive()) {
    // TODO We stop and close again here to work around some issues in Thrift.
    mServer.stop();
    mServerTNonblockingServerSocket.close();
    CommonUtils.sleepMs(null, 100);
  }
  mHeartbeatThread.join();
}
public synchronized void stop() {
  final TServer thriftServer = this.thriftServer;
  if (thriftServer != null) {
    this.service.stop();
    thriftServer.stop();
    final ThreadPoolExecutor connExecutor = this.thriftThreadPerConnExecutor;
    if (connExecutor != null) {
      connExecutor.shutdown();
    }
    this.thriftExecutor.shutdown();
    try {
      this.thriftMainThread.join(5000L);
      // force stop the executor if required
      if (this.thriftMainThread.isAlive()) {
        if (connExecutor != null) {
          connExecutor.shutdownNow();
        }
        this.thriftExecutor.shutdownNow();
        this.thriftMainThread.join();
      }
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
  }
}
public void run() {
  if (server != null) {
    server.stop();
  }
  if (transport != null) {
    transport.interrupt();
    transport.close();
  }
}
public void shutdown() {
  this.interrupt();
  if (mMasterServiceServer != null) {
    mMasterServiceServer.stop();
  }
  if (mServerTNonblockingServerSocket != null) {
    mServerTNonblockingServerSocket.close();
  }
}
@Override
public void process(WatchedEvent event) {
  log.debug("event " + event.getPath() + " " + event.getType() + " " + event.getState());
  if (event.getState() == KeeperState.Expired) {
    log.warn("Trace server lost zookeeper registration at " + event.getPath());
    server.stop();
  } else if (event.getType() == EventType.NodeDeleted) {
    log.warn("Trace server zookeeper entry lost " + event.getPath());
    server.stop();
  }
  if (event.getPath() != null) {
    try {
      if (ZooReaderWriter.getInstance().exists(event.getPath(), this))
        return;
    } catch (Exception ex) {
      log.error(ex, ex);
    }
    log.warn("Trace server unable to reset watch on zookeeper registration");
    server.stop();
  }
}
public void downServer() {
  server.stop();
  try {
    serverThread.join();
  } catch (InterruptedException e) {
    // we're probably shutting down
    LOG.debug("Interrupted waiting for server thread to exit.", e);
  }
  server = null;
  serverThread = null;
}
// Existing connections will keep our thread running: reach in with reflection and insist that
// they shut down.
public static void stopTServer(TServer s) {
  if (s == null)
    return;
  s.stop();
  try {
    Field f = s.getClass().getDeclaredField("executorService_");
    f.setAccessible(true);
    ExecutorService es = (ExecutorService) f.get(s);
    es.shutdownNow();
  } catch (Exception e) {
    TServerUtils.log.error("Unable to call shutdownNow", e);
  }
}
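The snippet above reaches into the server's private executorService_ field via reflection. A less brittle approach, when you control how the server is built, is to pass in an ExecutorService you own so it can be shut down directly. The following is a minimal sketch, assuming a Thrift version whose TThreadPoolServer.Args exposes executorService(...); the class and parameter names are illustrative.

import java.util.concurrent.ExecutorService;

import org.apache.thrift.TProcessor;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerTransport;

public final class OwnedExecutorThriftServer {

  // Build a TThreadPoolServer around an executor the caller owns, so shutdown needs no reflection.
  // "processor" and "serverTransport" are assumed to be supplied by the caller.
  public static TServer build(TProcessor processor, TServerTransport serverTransport,
                              ExecutorService workers) {
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
        .processor(processor)
        .executorService(workers);
    return new TThreadPoolServer(args);
  }

  // Stop accepting new work, then interrupt the connection handler threads directly.
  public static void stop(TServer server, ExecutorService workers) {
    server.stop();
    workers.shutdownNow();
  }
}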
public void stop() {
  if (server != null && server.isServing()) {
    server.stop();
  }
  servingExecutor.shutdown();
  try {
    if (!servingExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
      servingExecutor.shutdownNow();
    }
  } catch (InterruptedException e) {
    throw new FlumeException("Interrupted while waiting for server to be shut down.");
  }
  sourceCounter.stop();
  // Thrift will shut down the executor passed to it.
  super.stop();
}
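The pattern used here and in the earlier synchronized stop() (stop the server, then give its executor a bounded grace period before forcing shutdown) can be factored into a small helper. This is only a sketch under the assumption that the caller owns both the TServer and the ExecutorService; the class and parameter names are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.thrift.server.TServer;

final class ThriftShutdown {

  private ThriftShutdown() {
  }

  // Stop serving, then give in-flight handlers a bounded grace period before forcing shutdown.
  static void stopGracefully(TServer server, ExecutorService executor, long grace, TimeUnit unit) {
    if (server != null && server.isServing()) {
      server.stop();
    }
    executor.shutdown();
    try {
      if (!executor.awaitTermination(grace, unit)) {
        executor.shutdownNow();
      }
    } catch (InterruptedException e) {
      executor.shutdownNow();
      Thread.currentThread().interrupt(); // preserve the interrupt for the caller
    }
  }
}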
public void close() {
  server.stop();
}
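A close() like this only has something to unblock if serve() was started on a separate thread, since serve() blocks until stop() is called. Below is a minimal start/stop sketch; the class and thread names are illustrative, and the TProcessor is assumed to come from generated Thrift code.

import org.apache.thrift.TProcessor;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TSimpleServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportException;

public final class SimpleServerLifecycle {

  private TServer server;
  private Thread serveThread;

  // serve() blocks, so run it on a background thread; stop() is then called from the owner's thread.
  public void start(TProcessor processor, int port) throws TTransportException {
    TServerSocket transport = new TServerSocket(port);
    server = new TSimpleServer(new TServer.Args(transport).processor(processor));
    serveThread = new Thread(server::serve, "thrift-serve");
    serveThread.start();
  }

  // stop() makes the blocking serve() call return; join() waits for the serving thread to exit.
  public void stop() throws InterruptedException {
    if (server != null) {
      server.stop();
      serveThread.join();
    }
  }
}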
@Test
public void testIt() throws Exception {
  int server1Port = 12345;
  int server2Port = 12346;
  int server3Port = 12347;

  // launch server 1
  final MockPartitionServerHandler iface1 = new MockPartitionServerHandler(VALUE_1);
  TNonblockingServerTransport transport1 = createPartitionServerTransport(server1Port);
  final TServer server1 = createPartitionServer(transport1, iface1);
  Thread thread1 = new Thread(new ServerRunnable(server1), "mock partition server thread 1");
  thread1.start();

  // launch server 2
  final MockPartitionServerHandler iface2 = new MockPartitionServerHandler(VALUE_2);
  TNonblockingServerTransport transport2 = createPartitionServerTransport(server2Port);
  final TServer server2 = createPartitionServer(transport2, iface2);
  Thread thread2 = new Thread(new ServerRunnable(server2), "mock partition server thread 2");
  thread2.start();

  // launch server 3
  final MockPartitionServerHandler iface3 = new MockPartitionServerHandler(VALUE_3);
  TNonblockingServerTransport transport3 = createPartitionServerTransport(server3Port);
  final TServer server3 = createPartitionServer(transport3, iface3);
  Thread thread3 = new Thread(new ServerRunnable(server3), "mock partition server thread 3");
  thread3.start();

  final MockDomain existentDomain = new MockDomain("existent_domain", 0, 2,
      new MapPartitioner(KEY_1, 0, KEY_2, 1, KEY_NOT_FOUND, 0), null, null, null);
  final MockDomain newDomain = new MockDomain("new_domain", 1, 1,
      new MapPartitioner(KEY_3, 0), null, null, null);

  final Host host1 = getHost(existentDomain, new PartitionServerAddress("localhost", server1Port), 0);
  final Host host2 = getHost(existentDomain, new PartitionServerAddress("localhost", server2Port), 1);
  final Host host3 = getHost(newDomain, new PartitionServerAddress("localhost", server3Port), 0);

  final Set<Host> mockRingHosts = new HashSet<Host>();
  mockRingHosts.add(host1);
  mockRingHosts.add(host2);

  final MockRing mockRing = new MockRing(null, null, 1) {
    @Override
    public Set<Host> getHosts() {
      return mockRingHosts;
    }
  };

  MockDomainGroup mockDomainGroup = new MockDomainGroup("myDomainGroup") {
    @Override
    public Set<DomainAndVersion> getDomainVersions() {
      return new HashSet<DomainAndVersion>(
          Arrays.asList(new DomainAndVersion(existentDomain, 1)));
    }
  };

  final MockRingGroup mockRingGroup = new MockRingGroup(mockDomainGroup, "myRingGroup", null) {
    @Override
    public Set<Ring> getRings() {
      return Collections.singleton((Ring) mockRing);
    }
  };

  Coordinator mockCoord = new MockCoordinator() {
    @Override
    public RingGroup getRingGroup(String ringGroupName) {
      return mockRingGroup;
    }

    @Override
    public Domain getDomain(String domainName) {
      if (domainName.equals("existent_domain")) {
        return existentDomain;
      } else if (domainName.equals("new_domain")) {
        return newDomain;
      } else {
        return null;
      }
    }
  };

  WaitUntil.orDie(new Condition() {
    @Override
    public boolean test() {
      return server1.isServing() && server2.isServing() && server3.isServing();
    }
  });

  try {
    final HankSmartClient client = new HankSmartClient(mockCoord, "myRingGroup",
        new HankSmartClientOptions().setQueryTimeoutMs(1000));
    final HankSmartClient cachingClient = new HankSmartClient(mockCoord, "myRingGroup",
        new HankSmartClientOptions()
            .setResponseCacheEnabled(true)
            .setResponseCacheNumItemsCapacity(1)
            .setResponseCacheNumBytesCapacity(-1)
            .setResponseCacheExpirationSeconds(1));

    // Test invalid get
    assertEquals(HankResponse.xception(HankException.no_such_domain(true)),
        client.get("nonexistent_domain", null));

    // Test get
    assertEquals(HankResponse.value(VALUE_1), client.get("existent_domain", KEY_1));
    assertEquals(HankResponse.value(VALUE_2), client.get("existent_domain", KEY_2));

    // Test invalid getBulk
    assertEquals(HankBulkResponse.xception(HankException.no_such_domain(true)),
        client.getBulk("nonexistent_domain", null));

    // Test getBulk
    HankBulkResponse bulkResponse1 = HankBulkResponse.responses(new ArrayList<HankResponse>());
    bulkResponse1.get_responses().add(HankResponse.value(VALUE_1));
    bulkResponse1.get_responses().add(HankResponse.value(VALUE_2));
    List<ByteBuffer> bulkRequest1 = new ArrayList<ByteBuffer>();
    bulkRequest1.add(KEY_1);
    bulkRequest1.add(KEY_2);
    assertEquals(bulkResponse1, client.getBulk("existent_domain", bulkRequest1));

    // Test get with null key
    try {
      client.get("existent_domain", null);
      fail("Should throw an exception.");
    } catch (NullKeyException e) {
      // Good
    }

    // Test get with empty key
    try {
      client.get("existent_domain", ByteBuffer.wrap(new byte[0]));
      fail("Should throw an exception.");
    } catch (EmptyKeyException e) {
      // Good
    }

    // Host is not available
    host1.setState(HostState.UPDATING);
    assertEquals(HankResponse.xception(HankException.no_connection_available(true)),
        client.get("existent_domain", KEY_1));

    // Host is offline but it's the only one, use it opportunistically
    host2.setState(HostState.OFFLINE);
    assertEquals(HankResponse.value(VALUE_2), client.get("existent_domain", KEY_2));
    host1.setState(HostState.SERVING);
    host2.setState(HostState.SERVING);

    // Test location changes

    // Add new host that has the new domain
    mockRingHosts.add(host3);

    // Should not be able to query the new domain yet
    assertTrue(client.get("new_domain", KEY_3).get_xception().is_set_no_replica());

    // Notify client of data location change
    client.onDataLocationChange(mockCoord.getRingGroup("myRingGroup"));

    // Should be able to query the new domain once the client has finished updating its cache
    WaitUntil.orDie(new Condition() {
      @Override
      public boolean test() {
        return HankResponse.value(VALUE_3).equals(client.get("new_domain", KEY_3));
      }
    });
    assertEquals(HankResponse.value(VALUE_3), client.get("new_domain", KEY_3));

    // TODO: Test not querying deletable partitions

    // Simulate servers that fail to perform gets
    iface1.setMode(MockPartitionServerHandler.Mode.FAILING);
    iface2.setMode(MockPartitionServerHandler.Mode.FAILING);
    assertTrue(client.get("existent_domain", KEY_1).get_xception().get_failed_retries() > 0);
    assertTrue(client.get("existent_domain", KEY_2).get_xception().get_failed_retries() > 0);

    // Simulate servers that throw an error
    iface1.setMode(MockPartitionServerHandler.Mode.THROWING_ERROR);
    iface2.setMode(MockPartitionServerHandler.Mode.THROWING_ERROR);
    assertTrue(client.get("existent_domain", KEY_1).get_xception().get_failed_retries() > 0);
    assertTrue(client.get("existent_domain", KEY_2).get_xception().get_failed_retries() > 0);

    // Simulate servers that hang
    iface1.setMode(MockPartitionServerHandler.Mode.HANGING);
    iface2.setMode(MockPartitionServerHandler.Mode.HANGING);
    assertTrue(client.get("existent_domain", KEY_1).get_xception().get_failed_retries() > 0);
    assertTrue(client.get("existent_domain", KEY_2).get_xception().get_failed_retries() > 0);

    // Test caching
    iface1.setMode(MockPartitionServerHandler.Mode.NORMAL);
    iface2.setMode(MockPartitionServerHandler.Mode.NORMAL);
    iface3.setMode(MockPartitionServerHandler.Mode.NORMAL);
    iface1.clearNumRequests();

    // One request
    assertEquals(HankResponse.value(VALUE_1), cachingClient.get("existent_domain", KEY_1));
    assertEquals(1, iface1.getNumRequests());

    // One cached request
    assertEquals(HankResponse.value(VALUE_1), cachingClient.get("existent_domain", KEY_1));
    assertEquals(1, iface1.getNumRequests());

    iface1.clearNumRequests();

    // One not found request
    assertEquals(HankResponse.not_found(true), cachingClient.get("existent_domain", KEY_NOT_FOUND));
    assertEquals(1, iface1.getNumRequests());

    // One not found cached request
    assertEquals(HankResponse.not_found(true), cachingClient.get("existent_domain", KEY_NOT_FOUND));
    assertEquals(1, iface1.getNumRequests());

    // Wait for cache to expire
    Thread.sleep(2000);

    // Should not be in the cache anymore
    assertEquals(HankResponse.not_found(true), cachingClient.get("existent_domain", KEY_NOT_FOUND));
    assertEquals(2, iface1.getNumRequests());
  } finally {
    server1.stop();
    server2.stop();
    thread1.join();
    thread2.join();
    transport1.close();
    transport2.close();
  }
}