/**
 * Opens the connection to the worker and starts the heartbeat thread.
 *
 * @throws IOException if the connection to the worker fails
 */
private synchronized void connectOperation() throws IOException {
  if (!mConnected) {
    LOG.info("Connecting to {} worker @ {}", (mIsLocal ? "local" : "remote"), mAddress);

    TProtocol binaryProtocol =
        new TBinaryProtocol(AuthenticationUtils.getClientTransport(mTachyonConf, mAddress));
    mProtocol = new TMultiplexedProtocol(binaryProtocol, getServiceName());
    mClient = new BlockWorkerClientService.Client(mProtocol);

    try {
      mProtocol.getTransport().open();
    } catch (TTransportException e) {
      // Propagate the failure instead of returning silently with mConnected still false.
      LOG.error(e.getMessage(), e);
      throw new IOException(e);
    }
    mConnected = true;

    // Only start the heartbeat thread if the connection succeeded and there is not
    // already another heartbeat thread running.
    if (mHeartbeat == null || mHeartbeat.isCancelled() || mHeartbeat.isDone()) {
      final int interval = mTachyonConf.getInt(Constants.USER_HEARTBEAT_INTERVAL_MS);
      mHeartbeat = mExecutorService.submit(
          new HeartbeatThread(HeartbeatContext.WORKER_CLIENT, mHeartbeatExecutor, interval));
    }
  }
}
public String getSentimentScore(
    String mainText, String title, String middleParas, String lastPara, int diffBlog) {
  // "diffBlog" can be any integer; it is a dummy parameter that exists only to
  // disambiguate this overload of getSentimentScore.
  try {
    SentiRequestObject obj = new SentiRequestObject();
    obj.setMainText(mainText);
    obj.setTextType("blogs_news");
    obj.setTitle(title);
    obj.setMiddleParas(middleParas);
    obj.setLastPara(lastPara);
    int senti = client.getSentimentScore(obj);
    return String.valueOf(senti);
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
  // Reached only on failure; close the transport once here rather than in every catch block.
  transport.close();
  return "Connection to " + this.ip + ":" + this.port + " failed!";
}
public static MasterClientService.Client getConnection(Instance instance) {
  List<String> locations = instance.getMasterLocations();
  if (locations.isEmpty()) {
    log.debug("No masters...");
    return null;
  }
  String master = locations.get(0);
  if (master.endsWith(":0")) {
    return null;
  }
  try {
    // Master requests can take a long time: don't ever time out.
    MasterClientService.Client client =
        ThriftUtil.getClientNoTimeout(
            new MasterClientService.Client.Factory(),
            master,
            ServerConfigurationUtil.getConfiguration(instance));
    return client;
  } catch (TTransportException tte) {
    // getCause() can be null, so use instanceof instead of comparing classes.
    if (tte.getCause() instanceof UnknownHostException) {
      // Do not expect to recover from this.
      throw new RuntimeException(tte);
    }
    log.debug("Failed to connect to master=" + master + ", will retry... ", tte);
    return null;
  }
}
public static void runServer(TProcessor processor, int port, int threads) {
  TNonblockingServerTransport serverTransport;
  TServer server;
  try {
    serverTransport = new TNonblockingServerSocket(port);
    if (threads == 1) {
      server = new TNonblockingServer(processor, serverTransport);
    } else {
      THsHaServer.Options serverOptions = new THsHaServer.Options();
      serverOptions.workerThreads = threads;
      server =
          new THsHaServer(
              new TProcessorFactory(processor),
              serverTransport,
              new TFramedTransport.Factory(),
              new TBinaryProtocol.Factory(),
              serverOptions);
    }
    Runtime.getRuntime()
        .addShutdownHook(
            new Thread(
                new ShutdownListener(server, serverTransport), "Server Shutdown Listener"));
    logger.info("Starting the server on port {} with {} threads", port, threads);
    server.serve();
  } catch (TTransportException e) {
    // Log the full stack trace rather than only the message string.
    logger.error("Thrift transport error", e);
    System.exit(1);
  }
}
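A minimal usage sketch for runServer above, assuming a hypothetical generated EchoService and its handler; those names are illustrative, not part of the snippet.

public static void main(String[] args) {
  // Hypothetical generated service and handler; only runServer above is real here.
  EchoService.Processor<EchoHandler> processor =
      new EchoService.Processor<EchoHandler>(new EchoHandler());
  // threads > 1 exercises the THsHaServer branch with 4 worker threads.
  runServer(processor, 9090, 4);
}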
/**
 * Helper method to create a {@link org.apache.thrift.transport.TServerSocket} for the RPC server.
 *
 * @return a thrift server socket
 */
private TServerSocket createThriftServerSocket() {
  try {
    return new TServerSocket(
        NetworkAddressUtils.getBindAddress(ServiceType.WORKER_RPC, mTachyonConf));
  } catch (TTransportException tte) {
    LOG.error(tte.getMessage(), tte);
    throw Throwables.propagate(tte);
  }
}
@BeforeClass
public static void initClass() {
  try {
    client = new THttpClient("http://localhost:8080/userService");
  } catch (TTransportException e) {
    // Fail the test setup immediately; continuing would NPE on the null client below.
    throw new RuntimeException("Could not create THttpClient", e);
  }
  TProtocol protocol = new TBinaryProtocol(client);
  serviceClient = new UserService.Client(protocol);
}
/**
 * Opens the connection to the worker and starts the heartbeat thread.
 *
 * @return true on success, false otherwise
 * @throws IOException if an error occurs while talking to the master
 */
private synchronized boolean connect() throws IOException {
  if (!mConnected) {
    NetAddress workerNetAddress = null;
    try {
      String localHostName = NetworkAddressUtils.getLocalHostName(mTachyonConf);
      LOG.info("Trying to get local worker host: " + localHostName);
      workerNetAddress = mMasterClient.user_getWorker(false, localHostName);
      mIsLocal =
          workerNetAddress
              .getMHost()
              .equals(InetAddress.getByName(localHostName).getHostAddress());
    } catch (NoWorkerException e) {
      LOG.info(e.getMessage());
      workerNetAddress = null;
    } catch (UnknownHostException e) {
      LOG.info(e.getMessage());
      workerNetAddress = null;
    }

    if (workerNetAddress == null) {
      try {
        workerNetAddress = mMasterClient.user_getWorker(true, "");
      } catch (NoWorkerException e) {
        LOG.info("No worker running in the system: " + e.getMessage());
        mClient = null;
        return false;
      }
    }

    String host = NetworkAddressUtils.getFqdnHost(workerNetAddress);
    int port = workerNetAddress.mPort;
    mWorkerAddress = new InetSocketAddress(host, port);
    mWorkerDataServerAddress = new InetSocketAddress(host, workerNetAddress.mSecondaryPort);
    LOG.info("Connecting " + (mIsLocal ? "local" : "remote") + " worker @ " + mWorkerAddress);

    mProtocol = new TBinaryProtocol(new TFramedTransport(new TSocket(host, port)));
    mClient = new WorkerService.Client(mProtocol);

    mHeartbeatExecutor = new WorkerClientHeartbeatExecutor(this, mMasterClient.getUserId());
    String threadName = "worker-heartbeat-" + mWorkerAddress;
    int interval =
        mTachyonConf.getInt(Constants.USER_HEARTBEAT_INTERVAL_MS, Constants.SECOND_MS);
    mHeartbeat =
        mExecutorService.submit(new HeartbeatThread(threadName, mHeartbeatExecutor, interval));

    try {
      mProtocol.getTransport().open();
    } catch (TTransportException e) {
      LOG.error(e.getMessage(), e);
      return false;
    }
    mConnected = true;
  }
  return mConnected;
}
public static void startSimpleServer(
    AdditionService.Processor<AdditionServiceHandler> processor) {
  try {
    TServerTransport serverTransport = new TServerSocket(9090);
    TServer server = new TSimpleServer(new TServer.Args(serverTransport).processor(processor));
    System.out.println("Starting the simple server......");
    server.serve();
  } catch (TTransportException e) {
    e.printStackTrace();
  }
}
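A matching blocking client for the simple server above might look like the following sketch; the add(int, int) method is an assumption based on the AdditionService name, not a confirmed signature.

public static void main(String[] args) {
  // Minimal client sketch for the TSimpleServer above.
  TTransport transport = new TSocket("localhost", 9090);
  try {
    transport.open();
    TProtocol protocol = new TBinaryProtocol(transport);
    AdditionService.Client client = new AdditionService.Client(protocol);
    // add(int, int) is hypothetical; the generated interface defines the real methods.
    System.out.println(client.add(2, 3));
  } catch (TException e) {
    e.printStackTrace();
  } finally {
    transport.close();
  }
}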
/**
 * @param masterAddress The TachyonMaster's address
 * @param workerAddress This TachyonWorker's address
 * @param dataPort This TachyonWorker's data server's port
 * @param selectorThreads The number of selector threads of the worker's thrift server
 * @param acceptQueueSizePerThreads The accept queue size per thread of the worker's thrift server
 * @param workerThreads The number of threads of the worker's thrift server
 * @param dataFolder This TachyonWorker's local folder's path
 * @param memoryCapacityBytes The maximum memory space this TachyonWorker can use, in bytes
 */
private TachyonWorker(
    InetSocketAddress masterAddress,
    InetSocketAddress workerAddress,
    int dataPort,
    int selectorThreads,
    int acceptQueueSizePerThreads,
    int workerThreads,
    String dataFolder,
    long memoryCapacityBytes) {
  CommonConf.assertValidPort(masterAddress);
  CommonConf.assertValidPort(workerAddress);
  CommonConf.assertValidPort(dataPort);

  mMasterAddress = masterAddress;
  mWorkerStorage =
      new WorkerStorage(mMasterAddress, dataFolder, memoryCapacityBytes, mExecutorService);
  mWorkerServiceHandler = new WorkerServiceHandler(mWorkerStorage);

  // Extract the port from the generated socket. When running tests, it's convenient to use
  // port '0' so the system picks any free port. In a production or other real deployment,
  // port '0' should not be used, as it complicates deployment.
  InetSocketAddress dataAddress = new InetSocketAddress(workerAddress.getHostName(), dataPort);
  BlocksLocker blockLocker = new BlocksLocker(mWorkerStorage, Users.DATASERVER_USER_ID);
  mDataServer = createDataServer(dataAddress, blockLocker);
  mDataPort = mDataServer.getPort();

  mHeartbeatThread = new Thread(this);
  try {
    LOG.info("Tachyon Worker version " + Version.VERSION + " tries to start @ " + workerAddress);
    WorkerService.Processor<WorkerServiceHandler> processor =
        new WorkerService.Processor<WorkerServiceHandler>(mWorkerServiceHandler);
    mServerTNonblockingServerSocket = new TNonblockingServerSocket(workerAddress);
    mPort = NetworkUtils.getPort(mServerTNonblockingServerSocket);
    mServer =
        new TThreadedSelectorServer(
            new TThreadedSelectorServer.Args(mServerTNonblockingServerSocket)
                .processor(processor)
                .selectorThreads(selectorThreads)
                .acceptQueueSizePerThread(acceptQueueSizePerThreads)
                .workerThreads(workerThreads));
  } catch (TTransportException e) {
    LOG.error(e.getMessage(), e);
    throw Throwables.propagate(e);
  }
  mWorkerAddress =
      new NetAddress(workerAddress.getAddress().getCanonicalHostName(), mPort, mDataPort);
  mWorkerStorage.initialize(mWorkerAddress);
}
public synchronized boolean open() {
  if (!mIsConnected) {
    try {
      mProtocol.getTransport().open();
    } catch (TTransportException e) {
      LOG.error(e.getMessage(), e);
      return false;
    }
    mIsConnected = true;
  }
  return mIsConnected;
}
public void start() {
  try {
    TServerSocket serverTransport = new TServerSocket(7911);
    BGService.Processor<BGService.Iface> processor =
        new BGService.Processor<BGService.Iface>(new BGVerticalServiceImpl(options));
    TServer server =
        new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport).processor(processor));
    System.out.println("Starting server on port 7911...");
    server.serve();
  } catch (TTransportException e) {
    e.printStackTrace();
  }
}
public static void main(String[] args) {
  try {
    TServerSocket serverTransport = new TServerSocket(7777);
    Factory proFactory = new TBinaryProtocol.Factory();
    TProcessor processor = new RFIDService.Processor<RFIDServiceImpl>(new RFIDServiceImpl());
    // Wire the protocol factory into the server args; it was previously created but unused.
    TServer server =
        new TThreadPoolServer(
            new TThreadPoolServer.Args(serverTransport)
                .processor(processor)
                .protocolFactory(proFactory));
    System.out.println("Starting server on port 7777...");
    server.serve();
  } catch (TTransportException e) {
    e.printStackTrace();
  }
}
public String getAcronyms(String mainText) {
  try {
    return client.ExAcro(mainText);
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
  // Reached only on failure; close the transport once here rather than in every catch block.
  transport.close();
  return null;
}
public ThriftClient(String s, int p) {
  this.ip = s;
  this.port = p;
  if (transport == null) {
    transport = new TSocket(this.ip, this.port);
  }
  if (protocol == null) {
    protocol = new TBinaryProtocol(transport);
    client = new PyInterface.Client(protocol);
    try {
      transport.open();
    } catch (TTransportException e) {
      e.printStackTrace();
    }
  }
}
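A usage sketch for the ThriftClient wrapper above, assuming the accessor methods that appear elsewhere in this collection (getTopics, getAcronyms) live on the same class; host and port are placeholders.

public static void main(String[] args) {
  // Host and port are placeholders; getTopics is assumed to be a method of this wrapper.
  ThriftClient client = new ThriftClient("127.0.0.1", 9090);
  List<String> topics = client.getTopics("Some input text");
  System.out.println(topics);
}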
public String getEmoticons(String mainText) {
  try {
    return client.ExEmo(mainText);
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
  // Reached only on failure; close the transport once here rather than in every catch block.
  transport.close();
  return null;
}
private void openTransport() throws SQLException {
  while (true) {
    try {
      assumeSubject =
          JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT.equals(
              sessConfMap.get(JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE));
      transport = isHttpTransportMode() ? createHttpTransport() : createBinaryTransport();
      if (!transport.isOpen()) {
        LOG.info("Will try to open client transport with JDBC Uri: " + jdbcUriString);
        transport.open();
      }
      break;
    } catch (TTransportException e) {
      LOG.info("Could not open client transport with JDBC Uri: " + jdbcUriString);
      // We'll retry until we exhaust all HiveServer2 uris from ZooKeeper.
      if ((sessConfMap.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE) != null)
          && (JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER.equalsIgnoreCase(
              sessConfMap.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE)))) {
        try {
          // Update jdbcUriString, host & port variables in connParams.
          // Throw an exception if all HiveServer2 uris have been exhausted,
          // or if we're unable to connect to ZooKeeper.
          Utils.updateConnParamsFromZooKeeper(connParams);
        } catch (ZooKeeperHiveClientException ze) {
          // SQLState "08S01": communication link failure (no leading space).
          throw new SQLException(
              "Could not open client transport for any of the Server URI's in ZooKeeper: "
                  + ze.getMessage(),
              "08S01",
              ze);
        }
        // Update with new values.
        jdbcUriString = connParams.getJdbcUriString();
        host = connParams.getHost();
        port = connParams.getPort();
        LOG.info("Will retry opening client transport");
      } else {
        LOG.info(
            "Transport Used for JDBC connection: "
                + sessConfMap.get(JdbcConnectionParams.TRANSPORT_MODE));
        throw new SQLException(
            "Could not open client transport with JDBC Uri: "
                + jdbcUriString
                + ": "
                + e.getMessage(),
            "08S01",
            e);
      }
    }
  }
}
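For reference, a connection string that exercises the ZooKeeper retry path above might look like the following; the ensemble hosts and namespace are placeholders.

// Hosts and namespace are placeholders for a real HiveServer2 ensemble.
String url =
    "jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/default;"
        + "serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2";
Connection conn = DriverManager.getConnection(url, "user", "password");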
public List<String> getTopics(String mainText) {
  try {
    return client.getTopics(mainText);
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
  // Reached only on failure; close the transport once here rather than in every catch block.
  transport.close();
  return null;
}
public void run() {
  try {
    int port = 9090;
    TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(port);
    VicDataService.Processor<VicDataServiceImpl> processor =
        new VicDataService.Processor<VicDataServiceImpl>(new VicDataServiceImpl());
    TServer server =
        new TNonblockingServer(new TNonblockingServer.Args(serverTransport).processor(processor));
    System.out.println("Starting server on port " + port + " ...");
    server.serve();
  } catch (TTransportException e) {
    e.printStackTrace();
  }
}
public static void main(String[] args) {
  try {
    // Connect to the service on localhost, port 9001.
    TTransport transport = new TSocket("127.0.0.1", 9001);
    transport.open();
    // Use TBinaryProtocol as the wire protocol.
    TProtocol protocol = new TBinaryProtocol(transport);
    baymax.Client client = new baymax.Client(protocol);
    // Call the service's isSick method.
    String[] p = {"72", "144", "82", "36.0", "1.56", "2.41", "48"};
    System.out.println(client.isSick(Arrays.asList(p)));
    transport.close();
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
}
public String getSentimentScore(String mainText) {
  try {
    SentiRequestObject obj = new SentiRequestObject();
    obj.setMainText(mainText);
    int senti = client.getSentimentScore(obj);
    return String.valueOf(senti);
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
  // Reached only on failure; close the transport once here rather than in every catch block.
  transport.close();
  return "Connection to " + this.ip + ":" + this.port + " failed!";
}
public static void main(String[] args) throws IOException, TException {
  OptionParser parser = new OptionParser();
  parser.accepts("c", "configuration file").withRequiredArg().ofType(String.class);
  parser.accepts("help", "print help statement");
  OptionSet options = parser.parse(args);

  if (options.has("help")) {
    parser.printHelpOn(System.out);
    System.exit(-1);
  }

  // Logger configuration: log to the console.
  BasicConfigurator.configure();
  LOG.setLevel(Level.DEBUG);
  LOG.debug("debug logging on");

  Configuration conf = new PropertiesConfiguration();
  if (options.has("c")) {
    String configFile = (String) options.valueOf("c");
    try {
      conf = new PropertiesConfiguration(configFile);
    } catch (ConfigurationException e) {
      // Fall back to the default configuration, but say so instead of failing silently.
      LOG.warn("Could not read configuration file " + configFile + ", using defaults", e);
    }
  }

  // Start backend server.
  BackendService.Processor<BackendService.Iface> processor =
      new BackendService.Processor<BackendService.Iface>(new ProtoBackend());
  int listenPort = conf.getInt("listen_port", DEFAULT_LISTEN_PORT);
  NM_PORT = conf.getInt("node_monitor_port", NodeMonitorThrift.DEFAULT_NM_THRIFT_PORT);
  TServers.launchThreadedThriftServer(listenPort, THRIFT_WORKER_THREADS, processor);

  // Register server.
  client = TClients.createBlockingNmClient(NM_HOST, NM_PORT);
  try {
    client.registerBackend(APP_ID, "localhost:" + listenPort);
    LOG.debug("Client successfully registered");
  } catch (TTransportException e) {
    LOG.debug("Error while registering backend: " + e.getMessage());
  }
}
// @Test
public void testThreadServerStart() {
  try {
    TServerTransport serverTransport = new TServerSocket(port);
    TThreadPoolServer.Args processor =
        new TThreadPoolServer.Args(serverTransport)
            .inputTransportFactory(new TFramedTransport.Factory())
            .outputTransportFactory(new TFramedTransport.Factory())
            .protocolFactory(new TCompactProtocol.Factory())
            .processor(new PingPongService.Processor<>(pingPongService));
    // processor.maxWorkerThreads = 20;
    TThreadPoolServer server = new TThreadPoolServer(processor);
    log.info("Starting the server...");
    server.serve();
  } catch (TTransportException e) {
    e.printStackTrace();
  }
}
public static void main(String[] args)
    throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException,
        InstantiationException, InvocationTargetException, TTransportException {
  // Open the .jar and extract the thrift structures + exceptions + services.
  ParsedThrift parsedThrift = ThriftAnalyzer.findClassesInJar(new JarFile(args[0]));

  // Create a dummy instance of every thrift structure found and store it in one big map.
  HashMap<String, Object> structures = getStructures(parsedThrift);

  // For each service found, create a dummy client and invoke its methods.
  for (String key : parsedThrift.services.keySet()) {
    // Prepare the protocol for invocation.
    TTransport transport = new TSocket("localhost", 9090);
    try {
      transport.open();
      TProtocol protocol = new TBinaryProtocol(transport);
      Class<TServiceClient> clientClass = parsedThrift.clients.get(key);
      TServiceClient client = getClient(clientClass, protocol);
      runClient(client, structures);
    } catch (TTransportException
        | NoSuchMethodException
        | InvocationTargetException
        | InstantiationException
        | IllegalAccessException e) {
      // A failure for one service should not stop the others.
      e.printStackTrace();
    } finally {
      transport.close();
    }
  }
}
@Test
public void client() {
  try {
    // Connect to the service on localhost at PORT.
    TTransport transport = new TSocket("localhost", PORT);
    transport.open();
    // Use TBinaryProtocol as the wire protocol.
    TProtocol protocol = new TBinaryProtocol(transport);
    Hello.Client client = new Hello.Client(protocol);
    // Call the service's helloVoid and helloString methods.
    client.helloVoid();
    String echo = client.helloString("HELLO NIFTY ......");
    logger.debug(echo);
    transport.close();
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
}
/**
 * Starts the Thrift server.
 *
 * @param args
 */
public static void main(String[] args) {
  try {
    // Processors for the Hello and World services.
    TProcessor hello = new Hello.Processor<HelloServiceImpl>(new HelloServiceImpl());
    TProcessor world = new World.Processor<WorldServiceImpl>(new WorldServiceImpl());

    TMultiplexedProcessor processor = new TMultiplexedProcessor();
    processor.registerProcessor("HelloService", hello);
    processor.registerProcessor("WorldService", world);

    // Server transport layer; TServer.Args matches the TSimpleServer used below.
    TServerTransport serverTransport = new TServerSocket(9090);
    TServer server = new TSimpleServer(new TServer.Args(serverTransport).processor(processor));
    System.out.println("Starting server on port 9090...");
    server.serve();
  } catch (TTransportException e) {
    e.printStackTrace();
  }
}
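A client for this multiplexed server needs one TMultiplexedProtocol per service, keyed by the same names registered above; a minimal sketch (the service method calls themselves depend on the generated interfaces and are not shown).

public static void main(String[] args) {
  try {
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    TProtocol protocol = new TBinaryProtocol(transport);
    // One TMultiplexedProtocol per service, using the names registered on the server.
    Hello.Client hello = new Hello.Client(new TMultiplexedProtocol(protocol, "HelloService"));
    World.Client world = new World.Client(new TMultiplexedProtocol(protocol, "WorldService"));
    // Invoke methods on hello/world as defined by the generated service interfaces.
    transport.close();
  } catch (TException e) {
    e.printStackTrace();
  }
}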
public String getSentimentScore(
    String mainText, String title, String topDomain, String subDomain) {
  try {
    SentiRequestObject obj = new SentiRequestObject();
    obj.setMainText(mainText);
    obj.setTextType("reviews");
    obj.setTitle(title);
    obj.setTopDomain(topDomain);
    obj.setSubDomain(subDomain);
    int senti = client.getSentimentScore(obj);
    return String.valueOf(senti);
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  }
  // Reached only on failure; close the transport once here rather than in every catch block.
  transport.close();
  return "Connection to " + this.ip + ":" + this.port + " failed!";
}
protected LogsResp sendFlag(LogsRequ logsRequ) {
  TTransport transport = this.getTTransport();
  try {
    transport.open();
    // Use the compact binary protocol.
    TProtocol protocol = new TCompactProtocol(transport);
    LogsService.Client client = new LogsService.Client(protocol);
    return client.logByThrift(logsRequ);
  } catch (TException e) {
    // TTransportException is a subclass of TException, so one catch block suffices.
    e.printStackTrace();
  } finally {
    transport.close();
  }
  return null;
}
public static void startCassandraInstance(String pathToDataDir)
    throws TTransportException, IOException, InterruptedException, SecurityException,
        IllegalArgumentException, NoSuchMethodException, IllegalAccessException,
        InvocationTargetException {
  if (cassandraStarted) {
    return;
  }
  try {
    FileUtils.deleteRecursive(new File(pathToDataDir));
  } catch (AssertionError e) {
    // Ignore: the data directory may not exist yet.
  } catch (IOException e) {
    // Ignore: a stale data directory is not fatal for startup.
  }
  CassandraServiceDataCleaner cleaner = new CassandraServiceDataCleaner();
  cleaner.prepare();
  EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
  try {
    cassandra.init();
  } catch (TTransportException e) {
    System.out.println("Could not initialize Cassandra");
    e.printStackTrace();
    throw e;
  }
  cassandraStarted = true;
  Thread t = new Thread(cassandra);
  t.setName(cassandra.getClass().getSimpleName());
  t.setDaemon(true);
  t.start();
  System.out.println("Successfully started Cassandra");
}
/**
 * Wraps a {@link TTransportException} in an {@link IOException} and throws it. The declared
 * return type lets callers write {@code throw wrap(e);} so the compiler treats the statement
 * as terminal, even though this method never actually returns.
 */
public IOException wrap(TTransportException e) throws IOException {
  throw new IOException(
      "Error reading from thrift transport " + transport() + ": " + e.getMessage(), e);
}
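A call-site sketch for the wrap helper above; the surrounding read method and the buffer field are assumptions for illustration.

// Hypothetical call site: `throw wrap(e);` terminates the method in the compiler's eyes,
// so no missing-return error arises on the catch path.
public int readByte() throws IOException {
  try {
    return transport().read(buffer, 0, 1);
  } catch (TTransportException e) {
    throw wrap(e);
  }
}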
public static TServer factory(ProcessorFactory processorFactory) {
  loadProperties();
  try {
    validate();
  } catch (Exception e) {
    e.printStackTrace();
  }

  TProtocolFactory tProtocolFactory = getTProtocolFactory();
  TTransportFactory tTransportFactory = getTTransportFactory();
  TServer serverEngine;
  TMultiplexedProcessor tProcessor = processorFactory.getProcessor();

  if (server_type.equals("nonblocking") || server_type.equals("threaded-selector")) {
    // Nonblocking servers.
    TNonblockingServerSocket tNonblockingServerSocket;
    try {
      tNonblockingServerSocket =
          new TNonblockingServerSocket(
              new TNonblockingServerSocket.NonblockingAbstractServerSocketArgs().port(port));
    } catch (TTransportException e) {
      // Swallowing this would only produce a NullPointerException further down.
      throw new RuntimeException("Could not open nonblocking server socket on port " + port, e);
    }
    if (server_type.equals("nonblocking")) {
      // Nonblocking Server.
      TNonblockingServer.Args tNonblockingServerArgs =
          new TNonblockingServer.Args(tNonblockingServerSocket);
      tNonblockingServerArgs.processor(tProcessor);
      tNonblockingServerArgs.protocolFactory(tProtocolFactory);
      tNonblockingServerArgs.transportFactory(tTransportFactory);
      serverEngine = new TNonblockingServer(tNonblockingServerArgs);
    } else { // server_type.equals("threaded-selector")
      // ThreadedSelector Server.
      TThreadedSelectorServer.Args tThreadedSelectorServerArgs =
          new TThreadedSelectorServer.Args(tNonblockingServerSocket);
      tThreadedSelectorServerArgs.processor(tProcessor);
      tThreadedSelectorServerArgs.protocolFactory(tProtocolFactory);
      tThreadedSelectorServerArgs.transportFactory(tTransportFactory);
      serverEngine = new TThreadedSelectorServer(tThreadedSelectorServerArgs);
    }
  } else {
    // Blocking servers, with an SSL socket if configured.
    TServerSocket tServerSocket;
    try {
      tServerSocket =
          ssl
              ? TSSLTransportFactory.getServerSocket(port, 0)
              : new TServerSocket(new TServerSocket.ServerSocketTransportArgs().port(port));
    } catch (TTransportException e) {
      // Swallowing this would only produce a NullPointerException further down.
      throw new RuntimeException("Could not open server socket on port " + port, e);
    }
    if (server_type.equals("simple")) {
      // Simple Server.
      TServer.Args tServerArgs = new TServer.Args(tServerSocket);
      tServerArgs.processor(tProcessor);
      tServerArgs.protocolFactory(tProtocolFactory);
      tServerArgs.transportFactory(tTransportFactory);
      serverEngine = new TSimpleServer(tServerArgs);
    } else { // server_type.equals("threadpool")
      // ThreadPool Server.
      TThreadPoolServer.Args tThreadPoolServerArgs = new TThreadPoolServer.Args(tServerSocket);
      tThreadPoolServerArgs.processor(tProcessor);
      tThreadPoolServerArgs.protocolFactory(tProtocolFactory);
      tThreadPoolServerArgs.transportFactory(tTransportFactory);
      serverEngine = new TThreadPoolServer(tThreadPoolServerArgs);
    }
  }

  // Set the server event handler.
  serverEngine.setServerEventHandler(new TServerEventHandlerImpl());
  return serverEngine;
}
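A usage sketch for the factory above; it assumes ProcessorFactory is a single-method interface (so a lambda works), and the registered service/handler names are placeholders.

public static void main(String[] args) {
  // Hypothetical wiring: ProcessorFactory is the snippet's own abstraction, and
  // EchoService/EchoHandler are placeholder names for illustration.
  ProcessorFactory processorFactory = () -> {
    TMultiplexedProcessor processor = new TMultiplexedProcessor();
    processor.registerProcessor("EchoService", new EchoService.Processor<>(new EchoHandler()));
    return processor;
  };
  TServer server = factory(processorFactory);
  server.serve(); // blocks until the server is stopped
}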