/**
 * Creates a <code>TachyonFS</code> handler for the given hostname, port, and ZooKeeper mode.
 *
 * @param masterHost the master hostname
 * @param masterPort the port the master listens on
 * @param zkMode whether to use ZooKeeper
 * @return the corresponding TachyonFS handler
 */
public static synchronized TachyonFS get(String masterHost, int masterPort, boolean zkMode) {
  TachyonConf tachyonConf = new TachyonConf();
  tachyonConf.set(Constants.MASTER_HOSTNAME, masterHost);
  tachyonConf.set(Constants.MASTER_PORT, Integer.toString(masterPort));
  tachyonConf.set(Constants.USE_ZOOKEEPER, Boolean.toString(zkMode));
  return get(tachyonConf);
}
/**
 * Creates a <code>TachyonFS</code> handler for the given Tachyon URI and configuration.
 *
 * @param tachyonURI a Tachyon URI indicating the master address, e.g. tachyon://localhost:19998
 *        or tachyon://localhost:19998/ab/c.txt
 * @param tachyonConf the TachyonConf instance
 * @return the corresponding TachyonFS handler
 */
public static synchronized TachyonFS get(final TachyonURI tachyonURI, TachyonConf tachyonConf) {
  Preconditions.checkArgument(tachyonConf != null, "TachyonConf cannot be null.");
  Preconditions.checkArgument(tachyonURI != null,
      "Tachyon URI cannot be null. Use " + Constants.HEADER + "host:port/ or "
          + Constants.HEADER_FT + "host:port/.");
  String scheme = tachyonURI.getScheme();
  Preconditions.checkNotNull(scheme, "Tachyon scheme cannot be null. Use " + Constants.SCHEME
      + " or " + Constants.SCHEME_FT + ".");
  Preconditions.checkNotNull(tachyonURI.getHost(), "Tachyon hostname cannot be null.");
  Preconditions.checkState(tachyonURI.getPort() != -1, "Tachyon URI must have a port number.");
  Preconditions.checkState(
      (Constants.SCHEME.equals(scheme) || Constants.SCHEME_FT.equals(scheme)),
      "Tachyon scheme must be either " + Constants.SCHEME + " or " + Constants.SCHEME_FT + ".");
  boolean useZookeeper = scheme.equals(Constants.SCHEME_FT);
  tachyonConf.set(Constants.USE_ZOOKEEPER, Boolean.toString(useZookeeper));
  tachyonConf.set(Constants.MASTER_HOSTNAME, tachyonURI.getHost());
  tachyonConf.set(Constants.MASTER_PORT, Integer.toString(tachyonURI.getPort()));
  return get(tachyonConf);
}
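// Illustrative usage only (not from the source): obtaining a handler through either factory
// method above. The hostname, port, and URI string are placeholders, and it is assumed that
// TachyonURI can be constructed directly from a URI string.
TachyonConf exampleConf = new TachyonConf();
TachyonFS byHostPort = TachyonFS.get("localhost", 19998, false);
TachyonFS byUri = TachyonFS.get(new TachyonURI("tachyon://localhost:19998"), exampleConf);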
@Before
public final void before() throws Exception {
  TachyonConf conf = WorkerContext.getConf();
  conf.set(Constants.WORKER_DATA_SERVER, mDataServerClass);
  conf.set(Constants.WORKER_NETWORK_NETTY_FILE_TRANSFER_TYPE, mNettyTransferType);
  conf.set(Constants.USER_REMOTE_BLOCK_READER, mRemoteReaderClass);
  conf.set(Constants.USER_REMOTE_READ_BUFFER_SIZE_BYTE, "100");

  mLocalTachyonCluster = new LocalTachyonCluster(Constants.GB, Constants.KB, Constants.GB);
  mLocalTachyonCluster.start();
  mTachyonConf = mLocalTachyonCluster.getMasterTachyonConf();
  mTfs = mLocalTachyonCluster.getClient();

  mWriteTachyon = new OutStreamOptions.Builder(mTachyonConf)
      .setTachyonStorageType(TachyonStorageType.STORE)
      .setUnderStorageType(UnderStorageType.NO_PERSIST).build();
  mWriteUnderStore = new OutStreamOptions.Builder(mTachyonConf)
      .setTachyonStorageType(TachyonStorageType.NO_STORE)
      .setUnderStorageType(UnderStorageType.PERSIST).build();
  mReadCache = new InStreamOptions.Builder(mTachyonConf)
      .setTachyonStorageType(TachyonStorageType.STORE).build();
  mReadNoCache = new InStreamOptions.Builder(mTachyonConf)
      .setTachyonStorageType(TachyonStorageType.NO_STORE).build();
}
/**
 * Tests that allocation succeeds when the requested size is smaller than the capacity of the
 * RAM, SSD, and HDD tiers.
 *
 * @throws Exception if a block cannot be allocated
 */
@Test
public void shouldAllocateTest() throws Exception {
  TachyonConf conf = WorkerContext.getConf();
  for (String strategyName : mStrategies) {
    conf.set(Constants.WORKER_ALLOCATOR_CLASS, strategyName);
    resetManagerView();
    Allocator tierAllocator = Allocator.Factory.create(conf, getManagerView());
    for (int i = 0; i < DEFAULT_RAM_NUM; i++) {
      assertTempBlockMeta(tierAllocator, mAnyDirInTierLoc1, DEFAULT_RAM_SIZE - 1, true);
    }
    for (int i = 0; i < DEFAULT_SSD_NUM; i++) {
      assertTempBlockMeta(tierAllocator, mAnyDirInTierLoc2, DEFAULT_SSD_SIZE - 1, true);
    }
    for (int i = 0; i < DEFAULT_HDD_NUM; i++) {
      assertTempBlockMeta(tierAllocator, mAnyDirInTierLoc3, DEFAULT_HDD_SIZE - 1, true);
    }

    resetManagerView();
    Allocator anyAllocator = Allocator.Factory.create(conf, getManagerView());
    for (int i = 0; i < DEFAULT_RAM_NUM; i++) {
      assertTempBlockMeta(anyAllocator, mAnyTierLoc, DEFAULT_RAM_SIZE - 1, true);
    }
    for (int i = 0; i < DEFAULT_SSD_NUM; i++) {
      assertTempBlockMeta(anyAllocator, mAnyTierLoc, DEFAULT_SSD_SIZE - 1, true);
    }
    for (int i = 0; i < DEFAULT_HDD_NUM; i++) {
      assertTempBlockMeta(anyAllocator, mAnyTierLoc, DEFAULT_HDD_SIZE - 1, true);
    }
  }
}
/**
 * @param master the master to apply the journal entries to
 * @param journal the journal to tail
 */
public JournalTailerThread(Master master, Journal journal) {
  mMaster = Preconditions.checkNotNull(master);
  mJournal = Preconditions.checkNotNull(journal);
  TachyonConf conf = MasterContext.getConf();
  mShutdownQuietWaitTimeMs =
      conf.getInt(Constants.MASTER_JOURNAL_TAILER_SHUTDOWN_QUIET_WAIT_TIME_MS);
  mJournalTailerSleepTimeMs = conf.getInt(Constants.MASTER_JOURNAL_TAILER_SLEEP_TIME_MS);
}
/**
 * Creates a <code>MetricsSystem</code> using the default metrics config.
 *
 * @param instance the instance name
 * @param tachyonConf the {@link TachyonConf} instance for configuration properties
 */
public MetricsSystem(String instance, TachyonConf tachyonConf) {
  mInstance = instance;
  mTachyonConf = tachyonConf;
  String metricsConfFile = null;
  if (mTachyonConf.containsKey(Constants.METRICS_CONF_FILE)) {
    metricsConfFile = mTachyonConf.get(Constants.METRICS_CONF_FILE);
  }
  mMetricsConfig = new MetricsConfig(metricsConfFile);
}
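// Illustrative sketch only, pieced together from the worker constructor and start() method later
// in this section: a MetricsSystem is created per instance name, a source is registered, and the
// system is started. "workerSource" stands in for any registered Source.
MetricsSystem metricsSystem = new MetricsSystem("worker", WorkerContext.getConf());
metricsSystem.registerSource(workerSource);
metricsSystem.start();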
/** Sets the volume and the mount directory before a test runs. */
@Before
public final void before() {
  mTachyonConf = new TachyonConf();
  if (mTachyonConf.containsKey(Constants.UNDERFS_GLUSTERFS_MR_DIR)) {
    mMount = mTachyonConf.get(Constants.UNDERFS_GLUSTERFS_MR_DIR);
  }
  if (mTachyonConf.containsKey(Constants.UNDERFS_GLUSTERFS_VOLUMES)) {
    mVolume = mTachyonConf.get(Constants.UNDERFS_GLUSTERFS_VOLUMES);
  }
}
@Test
public void freeTest() throws IOException, TException {
  TachyonFile file = TachyonFSTestUtils.createByteFile(mTfs, "/testFile",
      TachyonStorageType.STORE, UnderStorageType.NO_PERSIST, 10);
  mFsShell.run(new String[] {"free", "/testFile"});
  TachyonConf tachyonConf = mLocalTachyonCluster.getMasterTachyonConf();
  CommonUtils.sleepMs(tachyonConf.getInt(Constants.WORKER_TO_MASTER_HEARTBEAT_INTERVAL_MS));
  Assert.assertFalse(mTfs.getInfo(file).getInMemoryPercentage() == 100);
}
@Override
public void close() throws IOException {
  int quietPeriodSecs =
      mTachyonConf.getInt(Constants.WORKER_NETWORK_NETTY_SHUTDOWN_QUIET_PERIOD);
  int timeoutSecs = mTachyonConf.getInt(Constants.WORKER_NETWORK_NETTY_SHUTDOWN_TIMEOUT);
  // TODO(binfan): investigate whether this still completes successfully when timeoutSecs is
  // zero (e.g., as set in integration tests).
  mChannelFuture.channel().close().awaitUninterruptibly(timeoutSecs, TimeUnit.SECONDS);
  mBootstrap.group().shutdownGracefully(quietPeriodSecs, timeoutSecs, TimeUnit.SECONDS);
  mBootstrap.childGroup().shutdownGracefully(quietPeriodSecs, timeoutSecs, TimeUnit.SECONDS);
}
/**
 * Creates a new local Tachyon master with an isolated home directory and port.
 *
 * @return an instance of Tachyon master
 * @throws IOException if a file operation fails or the port cannot be listened on
 */
public static LocalTachyonMaster create() throws IOException {
  final String tachyonHome = uniquePath();
  TachyonConf tachyonConf = MasterContext.getConf();
  UnderFileSystemUtils.deleteDir(tachyonHome, tachyonConf);
  UnderFileSystemUtils.mkdirIfNotExists(tachyonHome, tachyonConf);

  // Update Tachyon home in the passed TachyonConf instance.
  tachyonConf.set(Constants.TACHYON_HOME, tachyonHome);
  return new LocalTachyonMaster(tachyonHome);
}
public S3UnderFileSystem(String bucketName, TachyonConf tachyonConf) throws ServiceException {
  super(tachyonConf);
  Preconditions.checkArgument(tachyonConf.containsKey(Constants.S3_ACCESS_KEY),
      "Property " + Constants.S3_ACCESS_KEY + " is required to connect to S3");
  Preconditions.checkArgument(tachyonConf.containsKey(Constants.S3_SECRET_KEY),
      "Property " + Constants.S3_SECRET_KEY + " is required to connect to S3");
  AWSCredentials awsCredentials = new AWSCredentials(tachyonConf.get(Constants.S3_ACCESS_KEY),
      tachyonConf.get(Constants.S3_SECRET_KEY));
  mBucketName = bucketName;

  Jets3tProperties props = new Jets3tProperties();
  if (tachyonConf.containsKey(Constants.UNDERFS_S3_PROXY_HOST)) {
    props.setProperty("httpclient.proxy-autodetect", "false");
    props.setProperty("httpclient.proxy-host", tachyonConf.get(Constants.UNDERFS_S3_PROXY_HOST));
    props.setProperty("httpclient.proxy-port", tachyonConf.get(Constants.UNDERFS_S3_PROXY_PORT));
  }
  if (tachyonConf.containsKey(Constants.UNDERFS_S3_PROXY_HTTPS_ONLY)) {
    props.setProperty("s3service.https-only",
        Boolean.toString(tachyonConf.getBoolean(Constants.UNDERFS_S3_PROXY_HTTPS_ONLY)));
  }
  LOG.debug("Initializing S3 underFs with properties: " + props.getProperties());
  mClient = new RestS3Service(awsCredentials, null, null, props);
  mBucketPrefix = Constants.HEADER_S3N + mBucketName + PATH_SEPARATOR;
}
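// Illustrative sketch only: the constructor above requires the S3 credential properties to be
// set before it is called, otherwise the preconditions fail. The bucket name and key values are
// placeholders, and the ServiceException it declares would need to be handled by the caller.
TachyonConf s3Conf = new TachyonConf();
s3Conf.set(Constants.S3_ACCESS_KEY, "<access-key>");
s3Conf.set(Constants.S3_SECRET_KEY, "<secret-key>");
S3UnderFileSystem s3Ufs = new S3UnderFileSystem("my-bucket", s3Conf);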
@Before
public final void before() throws Exception {
  TachyonConf tachyonConf = MasterContext.getConf();
  tachyonConf.set(Constants.USER_FILE_BUFFER_BYTES, String.valueOf(100));
  mLocalTachyonCluster =
      new LocalTachyonCluster(MEM_CAPACITY_BYTES, USER_QUOTA_UNIT_BYTES, Constants.GB);
  mLocalTachyonCluster.start();
  mTFS = mLocalTachyonCluster.getClient();
  mWorkerConf = mLocalTachyonCluster.getWorkerTachyonConf();
  mWorkerToMasterHeartbeatIntervalMs =
      mWorkerConf.getInt(Constants.WORKER_BLOCK_HEARTBEAT_INTERVAL_MS);
  mSetPinned = new SetStateOptions.Builder(mWorkerConf).setPinned(true).build();
  mSetUnpinned = new SetStateOptions.Builder(mWorkerConf).setPinned(false).build();
}
@Override
public void connectFromWorker(TachyonConf conf, String host) throws IOException {
  String workerKeytab = conf.get(Constants.WORKER_KEYTAB_KEY, null);
  String workerPrincipal = conf.get(Constants.WORKER_PRINCIPAL_KEY, null);
  if (workerKeytab == null || workerPrincipal == null) {
    return;
  }
  login(Constants.WORKER_KEYTAB_KEY, workerKeytab, Constants.WORKER_PRINCIPAL_KEY,
      workerPrincipal, host);
}
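// Illustrative sketch only: the keytab and principal properties must be present in the
// configuration for the login path above to run; otherwise the method returns without logging
// in. The keytab path and principal below are placeholders, not real settings.
TachyonConf kerberosConf = WorkerContext.getConf();
kerberosConf.set(Constants.WORKER_KEYTAB_KEY, "<path-to-worker-keytab>");
kerberosConf.set(Constants.WORKER_PRINCIPAL_KEY, "<worker-principal>");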
/**
 * Sets up all dependencies before a test runs.
 *
 * @throws Exception if setting up the meta manager, the lock manager or the evictor fails
 */
@Before
public final void before() throws Exception {
  File tempFolder = mTestFolder.newFolder();
  mMetaManager = TieredBlockStoreTestUtils.defaultMetadataManager(tempFolder.getAbsolutePath());
  mManagerView = new BlockMetadataManagerView(mMetaManager, Collections.<Long>emptySet(),
      Collections.<Long>emptySet());
  TachyonConf conf = WorkerContext.getConf();
  conf.set(Constants.WORKER_EVICTOR_CLASS, LRFUEvictor.class.getName());
  conf.set(Constants.WORKER_ALLOCATOR_CLASS, MaxFreeAllocator.class.getName());
  mAllocator = Allocator.Factory.create(conf, mManagerView);
  mStepFactor = conf.getDouble(Constants.WORKER_EVICTOR_LRFU_STEP_FACTOR);
  mAttenuationFactor = conf.getDouble(Constants.WORKER_EVICTOR_LRFU_ATTENUATION_FACTOR);
  mEvictor = Evictor.Factory.create(conf, mManagerView, mAllocator);
}
public S3UnderFileSystem(String bucketName, TachyonConf tachyonConf) throws ServiceException {
  super(tachyonConf);
  Preconditions.checkArgument(tachyonConf.containsKey(Constants.S3_ACCESS_KEY),
      "Property " + Constants.S3_ACCESS_KEY + " is required to connect to S3");
  Preconditions.checkArgument(tachyonConf.containsKey(Constants.S3_SECRET_KEY),
      "Property " + Constants.S3_SECRET_KEY + " is required to connect to S3");
  AWSCredentials awsCredentials = new AWSCredentials(tachyonConf.get(Constants.S3_ACCESS_KEY),
      tachyonConf.get(Constants.S3_SECRET_KEY));
  mBucketName = bucketName;
  mClient = new RestS3Service(awsCredentials);
  mBucketPrefix = Constants.HEADER_S3N + mBucketName + PATH_SEPARATOR;
}
/**
 * Helper method to create a {@link org.apache.thrift.server.TThreadPoolServer} for handling
 * incoming RPC requests.
 *
 * @return a thrift server
 */
private TThreadPoolServer createThriftServer() {
  int minWorkerThreads = mTachyonConf.getInt(Constants.WORKER_WORKER_BLOCK_THREADS_MIN);
  int maxWorkerThreads = mTachyonConf.getInt(Constants.WORKER_WORKER_BLOCK_THREADS_MAX);
  BlockWorkerClientService.Processor<BlockWorkerClientServiceHandler> processor =
      new BlockWorkerClientService.Processor<BlockWorkerClientServiceHandler>(mServiceHandler);
  TTransportFactory tTransportFactory;
  try {
    tTransportFactory = AuthenticationUtils.getServerTransportFactory(mTachyonConf);
  } catch (IOException ioe) {
    throw Throwables.propagate(ioe);
  }
  Args args = new TThreadPoolServer.Args(mThriftServerSocket)
      .minWorkerThreads(minWorkerThreads).maxWorkerThreads(maxWorkerThreads)
      .processor(processor).transportFactory(tTransportFactory)
      .protocolFactory(new TBinaryProtocol.Factory(true, true));
  if (WorkerContext.getConf().getBoolean(Constants.IN_TEST_MODE)) {
    args.stopTimeoutVal = 0;
  } else {
    args.stopTimeoutVal = Constants.THRIFT_STOP_TIMEOUT_SECONDS;
  }
  return new TThreadPoolServer(args);
}
public static void assertValidPort(final int port, TachyonConf tachyonConf) {
  Preconditions.checkArgument(port < 65536, "Port must be less than 65536");
  if (!tachyonConf.getBoolean(Constants.IN_TEST_MODE)) {
    Preconditions.checkArgument(port > 0, "Port is only allowed to be zero in test mode.");
  }
}
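// Illustrative usage only: validating a configured port before binding a service socket. The
// choice of Constants.MASTER_PORT here is an assumption for the example, not from the source.
TachyonConf portConf = MasterContext.getConf();
assertValidPort(portConf.getInt(Constants.MASTER_PORT), portConf);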
/**
 * Helper method to create a {@link org.apache.thrift.server.TThreadPoolServer} for handling
 * incoming RPC requests.
 *
 * @return a thrift server
 */
private TThreadPoolServer createThriftServer() {
  int minWorkerThreads = mTachyonConf.getInt(Constants.WORKER_WORKER_BLOCK_THREADS_MIN);
  int maxWorkerThreads = mTachyonConf.getInt(Constants.WORKER_WORKER_BLOCK_THREADS_MAX);
  TMultiplexedProcessor processor = new TMultiplexedProcessor();

  registerServices(processor, mBlockWorker.getServices());
  registerServices(processor, mFileSystemWorker.getServices());
  // Register additional workers for the RPC service.
  for (Worker worker : mAdditionalWorkers) {
    registerServices(processor, worker.getServices());
  }

  // Return a TTransportFactory based on the authentication type.
  TTransportFactory tTransportFactory;
  try {
    tTransportFactory = AuthenticationUtils.getServerTransportFactory(mTachyonConf);
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }
  TThreadPoolServer.Args args = new TThreadPoolServer.Args(mThriftServerSocket)
      .minWorkerThreads(minWorkerThreads).maxWorkerThreads(maxWorkerThreads)
      .processor(processor).transportFactory(tTransportFactory)
      .protocolFactory(new TBinaryProtocol.Factory(true, true));
  if (WorkerContext.getConf().getBoolean(Constants.IN_TEST_MODE)) {
    args.stopTimeoutVal = 0;
  } else {
    args.stopTimeoutVal = Constants.THRIFT_STOP_TIMEOUT_SECONDS;
  }
  return new TThreadPoolServer(args);
}
/**
 * Creates a new instance of {@link DataServerHandler}.
 *
 * @param blockWorker the block worker handle
 * @param tachyonConf Tachyon configuration
 */
public DataServerHandler(final BlockWorker blockWorker, TachyonConf tachyonConf) {
  mBlockWorker = Preconditions.checkNotNull(blockWorker);
  mTachyonConf = Preconditions.checkNotNull(tachyonConf);
  mStorageTierAssoc = new WorkerStorageTierAssoc(mTachyonConf);
  mTransferType = mTachyonConf.getEnum(Constants.WORKER_NETWORK_NETTY_FILE_TRANSFER_TYPE,
      FileTransferType.class);
}
/**
 * Creates a default {@link io.netty.bootstrap.ServerBootstrap} where the channel and groups are
 * preset.
 *
 * @param type the channel type; currently supported channel types are nio and epoll
 * @return an instance of {@code ServerBootstrap}
 */
private ServerBootstrap createBootstrapOfType(final ChannelType type) {
  final ServerBootstrap boot = new ServerBootstrap();
  final int bossThreadCount = mTachyonConf.getInt(Constants.WORKER_NETWORK_NETTY_BOSS_THREADS);
  // If the number of worker threads is 0, Netty creates (#processors * 2) threads by default.
  final int workerThreadCount =
      mTachyonConf.getInt(Constants.WORKER_NETWORK_NETTY_WORKER_THREADS);
  final EventLoopGroup bossGroup =
      NettyUtils.createEventLoop(type, bossThreadCount, "data-server-boss-%d", false);
  final EventLoopGroup workerGroup =
      NettyUtils.createEventLoop(type, workerThreadCount, "data-server-worker-%d", false);
  final Class<? extends ServerChannel> socketChannelClass =
      NettyUtils.getServerChannelClass(type);
  boot.group(bossGroup, workerGroup).channel(socketChannelClass);
  return boot;
}
/** Starts the lineage worker service. */
public void start() {
  mFilePersistenceService = getExecutorService().submit(new HeartbeatThread(
      HeartbeatContext.WORKER_FILESYSTEM_MASTER_SYNC,
      new FileWorkerMasterSyncExecutor(mFileDataManager, mFileSystemMasterWorkerClient),
      mTachyonConf.getInt(Constants.WORKER_FILESYSTEM_HEARTBEAT_INTERVAL_MS)));
}
/**
 * Opens the connection to the worker and starts the heartbeat thread.
 *
 * @return true if the connection succeeds, false otherwise
 * @throws IOException if an I/O error occurs while connecting
 */
private synchronized boolean connect() throws IOException {
  if (!mConnected) {
    NetAddress workerNetAddress = null;
    try {
      String localHostName = NetworkAddressUtils.getLocalHostName(mTachyonConf);
      LOG.info("Trying to get local worker host : " + localHostName);
      workerNetAddress = mMasterClient.user_getWorker(false, localHostName);
      mIsLocal = workerNetAddress.getMHost()
          .equals(InetAddress.getByName(localHostName).getHostAddress());
    } catch (NoWorkerException e) {
      LOG.info(e.getMessage());
      workerNetAddress = null;
    } catch (UnknownHostException e) {
      LOG.info(e.getMessage());
      workerNetAddress = null;
    }

    if (workerNetAddress == null) {
      try {
        workerNetAddress = mMasterClient.user_getWorker(true, "");
      } catch (NoWorkerException e) {
        LOG.info("No worker running in the system: " + e.getMessage());
        mClient = null;
        return false;
      }
    }

    String host = NetworkAddressUtils.getFqdnHost(workerNetAddress);
    int port = workerNetAddress.mPort;
    mWorkerAddress = new InetSocketAddress(host, port);
    mWorkerDataServerAddress = new InetSocketAddress(host, workerNetAddress.mSecondaryPort);
    LOG.info("Connecting " + (mIsLocal ? "local" : "remote") + " worker @ " + mWorkerAddress);

    mProtocol = new TBinaryProtocol(new TFramedTransport(new TSocket(host, port)));
    mClient = new WorkerService.Client(mProtocol);

    mHeartbeatExecutor = new WorkerClientHeartbeatExecutor(this, mMasterClient.getUserId());
    String threadName = "worker-heartbeat-" + mWorkerAddress;
    int interval =
        mTachyonConf.getInt(Constants.USER_HEARTBEAT_INTERVAL_MS, Constants.SECOND_MS);
    mHeartbeat =
        mExecutorService.submit(new HeartbeatThread(threadName, mHeartbeatExecutor, interval));

    try {
      mProtocol.getTransport().open();
    } catch (TTransportException e) {
      LOG.error(e.getMessage(), e);
      return false;
    }
    mConnected = true;
  }
  return mConnected;
}
/**
 * Factory method for {@link Evictor}: instantiates the evictor class named by
 * {@link Constants#WORKER_EVICTOR_CLASS}.
 *
 * @param conf {@link TachyonConf} to determine the {@link Evictor} type
 * @param view {@link BlockMetadataManagerView} to pass to the {@link Evictor}
 * @param allocator an allocation policy
 * @return the generated {@link Evictor}
 */
public static Evictor create(TachyonConf conf, BlockMetadataManagerView view,
    Allocator allocator) {
  try {
    return CommonUtils.createNewClassInstance(
        conf.<Evictor>getClass(Constants.WORKER_EVICTOR_CLASS),
        new Class[] {BlockMetadataManagerView.class, Allocator.class},
        new Object[] {view, allocator});
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
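// Illustrative call only, mirroring the evictor test setup earlier in this section: the
// configured evictor class is instantiated against a metadata manager view and an allocator.
// "managerView" stands in for an existing BlockMetadataManagerView.
TachyonConf evictorConf = WorkerContext.getConf();
Allocator exampleAllocator = Allocator.Factory.create(evictorConf, managerView);
Evictor exampleEvictor = Evictor.Factory.create(evictorConf, managerView, exampleAllocator);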
private void initBlockMetadataManager(TachyonConf tachyonConf) throws AlreadyExistsException,
    IOException, OutOfSpaceException {
  // Initialize storage tiers.
  int totalTiers = tachyonConf.getInt(Constants.WORKER_MAX_TIERED_STORAGE_LEVEL, 1);
  mAliasToTiers = new HashMap<Integer, StorageTier>(totalTiers);
  mTiers = new ArrayList<StorageTier>(totalTiers);
  for (int level = 0; level < totalTiers; level++) {
    StorageTier tier = StorageTier.newStorageTier(tachyonConf, level);
    mTiers.add(tier);
    mAliasToTiers.put(tier.getTierAlias(), tier);
  }
}
/** Constructs a {@link TachyonWorker}. */
public TachyonWorker() {
  try {
    mStartTimeMs = System.currentTimeMillis();
    mTachyonConf = WorkerContext.getConf();

    mBlockWorker = new BlockWorker();
    mFileSystemWorker = new FileSystemWorker(mBlockWorker);
    mAdditionalWorkers = Lists.newArrayList();
    List<? extends Worker> workers = Lists.newArrayList(mBlockWorker, mFileSystemWorker);
    // Discover and register the available worker factories.
    // NOTE: the ClassLoader is explicitly specified so we don't need to set the
    // ContextClassLoader.
    ServiceLoader<WorkerFactory> discoveredWorkerFactories =
        ServiceLoader.load(WorkerFactory.class, WorkerFactory.class.getClassLoader());
    for (WorkerFactory factory : discoveredWorkerFactories) {
      Worker worker = factory.create(workers);
      if (worker != null) {
        mAdditionalWorkers.add(worker);
      }
    }

    // Set up the metrics collection system.
    mWorkerMetricsSystem = new MetricsSystem("worker", mTachyonConf);
    WorkerSource workerSource = WorkerContext.getWorkerSource();
    workerSource.registerGauges(mBlockWorker);
    mWorkerMetricsSystem.registerSource(workerSource);

    // Set up the web server.
    mWebServer = new WorkerUIWebServer(ServiceType.WORKER_WEB,
        NetworkAddressUtils.getBindAddress(ServiceType.WORKER_WEB, mTachyonConf), mBlockWorker,
        NetworkAddressUtils.getConnectAddress(ServiceType.WORKER_RPC, mTachyonConf),
        mStartTimeMs, mTachyonConf);

    // Set up the Thrift server.
    mThriftServerSocket = createThriftServerSocket();
    mRPCPort = NetworkAddressUtils.getThriftPort(mThriftServerSocket);
    // Reset the worker RPC port based on the assigned port number.
    mTachyonConf.set(Constants.WORKER_RPC_PORT, Integer.toString(mRPCPort));
    mThriftServer = createThriftServer();
    mWorkerAddress = NetworkAddressUtils
        .getConnectAddress(NetworkAddressUtils.ServiceType.WORKER_RPC, mTachyonConf);
  } catch (Exception e) {
    LOG.error("Failed to initialize {}", this.getClass().getName(), e);
    System.exit(-1);
  }
}
@Before
public final void before() throws Exception {
  TachyonConf tachyonConf = new TachyonConf();
  tachyonConf.set(Constants.USER_FILE_BUFFER_BYTES, String.valueOf(100));

  System.setProperty(Constants.WORKER_DATA_SERVER, mDataServerClass);
  System.setProperty(Constants.WORKER_NETTY_FILE_TRANSFER_TYPE, mNettyTransferType);
  System.setProperty(Constants.USER_REMOTE_BLOCK_READER, mBlockReader);

  mLocalTachyonCluster =
      new LocalTachyonCluster(WORKER_CAPACITY_BYTES, USER_QUOTA_UNIT_BYTES, Constants.GB);
  mLocalTachyonCluster.start(tachyonConf);
  mWorkerTachyonConf = mLocalTachyonCluster.getWorkerTachyonConf();
  mTFS = mLocalTachyonCluster.getClient();
  mBlockMasterClient = new BlockMasterClient(
      new InetSocketAddress(mLocalTachyonCluster.getMasterHostname(),
          mLocalTachyonCluster.getMasterPort()),
      mExecutorService, mWorkerTachyonConf);
}
@Override
public void heartbeat() {
  LOG.debug("System status checking.");
  TachyonConf conf = MasterContext.getConf();

  int masterWorkerTimeoutMs = conf.getInt(Constants.MASTER_WORKER_TIMEOUT_MS);
  synchronized (mBlocks) {
    synchronized (mWorkers) {
      Iterator<MasterWorkerInfo> iter = mWorkers.iterator();
      while (iter.hasNext()) {
        MasterWorkerInfo worker = iter.next();
        final long lastUpdate = CommonUtils.getCurrentMs() - worker.getLastUpdatedTimeMs();
        if (lastUpdate > masterWorkerTimeoutMs) {
          LOG.error("The worker {} timed out!", worker);
          mLostWorkers.add(worker);
          iter.remove();
          processLostWorker(worker);
        }
      }
    }
  }
}
/**
 * Starts the Tachyon worker server.
 *
 * @throws Exception if the workers fail to start
 */
public void start() throws Exception {
  // NOTE: the order in which the services are started matters; change it cautiously.

  // Start the metrics system; this does not block.
  mWorkerMetricsSystem.start();

  // Start the web server; this does not block.
  // Requirement: the metrics system is started so the metrics servlet can be added to the web
  // server.
  // Consequence: the web port is updated when the web server starts.
  mWebServer.addHandler(mWorkerMetricsSystem.getServletHandler());
  mWebServer.startWebServer();

  // Set the updated net address for this worker in the context.
  // Requirement: the RPC, web, and data server ports are updated.
  // Consequence: a NetAddress object is created and set into the WorkerContext.
  mNetAddress = new NetAddress(
      NetworkAddressUtils.getConnectHost(ServiceType.WORKER_RPC, mTachyonConf),
      mTachyonConf.getInt(Constants.WORKER_RPC_PORT), getDataLocalPort(),
      mTachyonConf.getInt(Constants.WORKER_WEB_PORT));
  WorkerContext.setWorkerNetAddress(mNetAddress);

  // Start each worker.
  // Requirement: the NetAddress is set in the WorkerContext, so the block worker can initialize
  // the BlockMasterSync.
  // Consequence: the worker id is granted.
  startWorkers();
  LOG.info("Started worker with id {}", WorkerIdRegistry.getWorkerId());

  mIsServingRPC = true;

  // Start serving RPC; this blocks until the server stops.
  LOG.info("Tachyon Worker version {} started @ {}", Version.VERSION, mWorkerAddress);
  mThriftServer.serve();
  LOG.info("Tachyon Worker version {} ended @ {}", Version.VERSION, mWorkerAddress);
}
/**
 * Adds Aliyun OSS credentials from system properties to the Tachyon configuration if they are
 * not already present.
 *
 * @param tachyonConf the conf to check and add credentials to
 * @return true if the access key, secret key, and endpoint are all present, false otherwise
 */
private boolean addAndCheckOSSCredentials(TachyonConf tachyonConf) {
  String accessKeyConf = Constants.OSS_ACCESS_KEY;
  if (System.getProperty(accessKeyConf) != null && tachyonConf.get(accessKeyConf) == null) {
    tachyonConf.set(accessKeyConf, System.getProperty(accessKeyConf));
  }
  String secretKeyConf = Constants.OSS_SECRET_KEY;
  if (System.getProperty(secretKeyConf) != null && tachyonConf.get(secretKeyConf) == null) {
    tachyonConf.set(secretKeyConf, System.getProperty(secretKeyConf));
  }
  String endPointConf = Constants.OSS_ENDPOINT_KEY;
  if (System.getProperty(endPointConf) != null && tachyonConf.get(endPointConf) == null) {
    tachyonConf.set(endPointConf, System.getProperty(endPointConf));
  }
  return tachyonConf.get(accessKeyConf) != null && tachyonConf.get(secretKeyConf) != null
      && tachyonConf.get(endPointConf) != null;
}
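// Illustrative sketch only: callers are expected to supply the OSS properties, for example as
// JVM system properties, before the check above runs. The values below are placeholders.
System.setProperty(Constants.OSS_ACCESS_KEY, "<access-key>");
System.setProperty(Constants.OSS_SECRET_KEY, "<secret-key>");
System.setProperty(Constants.OSS_ENDPOINT_KEY, "<endpoint>");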
/**
 * Validates the path, verifying that it contains the {@link Constants#HEADER} or
 * {@link Constants#HEADER_FT} and a hostname:port specified.
 *
 * @param path the path to be verified
 * @param tachyonConf the instance of {@link tachyon.conf.TachyonConf} to be used
 * @return the verified path in a form like tachyon://host:port/dir. If only the "/dir" or "dir"
 *         part is provided, the host and port are retrieved from the properties
 *         tachyon.master.hostname and tachyon.master.port, respectively.
 * @throws IOException if the given path is not valid
 */
public static String validatePath(String path, TachyonConf tachyonConf) throws IOException {
  if (path.startsWith(Constants.HEADER) || path.startsWith(Constants.HEADER_FT)) {
    if (!path.contains(":")) {
      throw new IOException("Invalid Path: " + path + ". Use " + Constants.HEADER
          + "host:port/, " + Constants.HEADER_FT + "host:port/, or /file");
    } else {
      return path;
    }
  } else {
    String hostname = NetworkAddressUtils.getConnectHost(ServiceType.MASTER_RPC, tachyonConf);
    int port = tachyonConf.getInt(Constants.MASTER_PORT);
    if (tachyonConf.getBoolean(Constants.ZOOKEEPER_ENABLED)) {
      return PathUtils.concatPath(Constants.HEADER_FT + hostname + ":" + port, path);
    }
    return PathUtils.concatPath(Constants.HEADER + hostname + ":" + port, path);
  }
}
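// Illustrative examples only; the exact expansion depends on the configured master host and
// port, which are assumed here to be localhost and 19998.
//   validatePath("tachyon://localhost:19998/data", conf)  ->  returned unchanged
//   validatePath("/data", conf)  ->  "tachyon://localhost:19998/data" (or a tachyon-ft://
//       prefix when ZooKeeper is enabled)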