Example #1
  private void initStorageTier() throws AlreadyExistsException, IOException, OutOfSpaceException {
    String workerDataFolder =
        WorkerContext.getConf().get(Constants.WORKER_DATA_FOLDER, Constants.DEFAULT_DATA_FOLDER);
    String tierDirPathConf =
        String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_PATH_FORMAT, mTierLevel);
    String[] dirPaths = WorkerContext.getConf().get(tierDirPathConf, "/mnt/ramdisk").split(",");

    // Append the worker data folder to each storage directory; the final path will look like
    // /mnt/ramdisk/tachyonworker
    for (int i = 0; i < dirPaths.length; i++) {
      dirPaths[i] = PathUtils.concatPath(dirPaths[i].trim(), workerDataFolder);
    }

    String tierDirCapacityConf =
        String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_QUOTA_FORMAT, mTierLevel);
    String[] dirQuotas = WorkerContext.getConf().get(tierDirCapacityConf, "0").split(",");

    mDirs = new ArrayList<StorageDir>(dirPaths.length);

    long totalCapacity = 0;
    for (int i = 0; i < dirPaths.length; i++) {
      int index = i >= dirQuotas.length ? dirQuotas.length - 1 : i;
      long capacity = FormatUtils.parseSpaceSize(dirQuotas[index]);
      totalCapacity += capacity;
      mDirs.add(StorageDir.newStorageDir(this, i, capacity, dirPaths[i]));
    }
    mCapacityBytes = totalCapacity;
  }
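
A minimal standalone sketch of the quota handling above, with illustrative values (no Tachyon types; parseSpaceSize stands in for FormatUtils.parseSpaceSize): when fewer quotas than paths are configured, the last quota is reused for the remaining directories.

public class QuotaClampDemo {
  public static void main(String[] args) {
    String[] dirPaths = {"/mnt/ramdisk1", "/mnt/ramdisk2", "/mnt/ramdisk3"};
    String[] dirQuotas = {"512MB", "256MB"}; // one quota short of the path count
    long totalCapacity = 0;
    for (int i = 0; i < dirPaths.length; i++) {
      // Clamp the index so the last quota applies to any extra directories
      int index = i >= dirQuotas.length ? dirQuotas.length - 1 : i;
      long capacity = parseSpaceSize(dirQuotas[index]);
      totalCapacity += capacity;
      System.out.println(dirPaths[i] + " -> " + capacity + " bytes");
    }
    // 512MB + 256MB + 256MB = 1073741824 bytes
    System.out.println("total: " + totalCapacity + " bytes");
  }

  // Simplified stand-in for FormatUtils.parseSpaceSize; handles only "MB" here
  private static long parseSpaceSize(String size) {
    return Long.parseLong(size.replace("MB", "")) * 1024 * 1024;
  }
}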
Example #2
  /**
   * Runs the block worker. The thread calling this will be blocked until the Thrift server shuts
   * down.
   */
  public void process() {
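    // Start the block master sync thread to send periodic block heartbeats to the master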
    getExecutorService()
        .submit(
            new HeartbeatThread(
                HeartbeatContext.WORKER_BLOCK_SYNC,
                mBlockMasterSync,
                WorkerContext.getConf().getInt(Constants.WORKER_BLOCK_HEARTBEAT_INTERVAL_MS)));

    // Start the pin list syncer to fetch the pin list periodically
    getExecutorService()
        .submit(
            new HeartbeatThread(
                HeartbeatContext.WORKER_PIN_LIST_SYNC,
                mPinListSync,
                WorkerContext.getConf().getInt(Constants.WORKER_BLOCK_HEARTBEAT_INTERVAL_MS)));

    // Start the session cleanup checker to perform periodic cleanup checks
    getExecutorService().submit(mSessionCleanerThread);

    // Start the space reserver
    if (mSpaceReserver != null) {
      getExecutorService().submit(mSpaceReserver);
    }

    mThriftServer.serve();
  }
Example #3
  @Test
  public void reserveTest() throws Exception {
    // Reserve on top tier
    long blockId = 100;
    BlockStoreLocation tier0 = BlockStoreLocation.anyDirInTier(StorageLevelAlias.MEM.getValue());
    for (int i = 0; i < 3; i++) {
      TieredBlockStoreTestUtils.cache(SESSION_ID, blockId++, BLOCK_SIZE, mBlockStore, tier0);
    }
    CommonUtils.sleepMs(
        WorkerContext.getConf().getLong(Constants.WORKER_SPACE_RESERVER_INTERVAL_MS));
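    // The space reserver should have run and moved one block from MEM down to HDD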
    BlockStoreMeta storeMeta = mBlockStore.getBlockStoreMeta();
    Assert.assertEquals(3 * BLOCK_SIZE, storeMeta.getUsedBytes());
    List<Long> usedBytesOnTiers = storeMeta.getUsedBytesOnTiers();
    Assert.assertEquals(
        2 * BLOCK_SIZE, (long) usedBytesOnTiers.get(StorageLevelAlias.MEM.getValue() - 1));
    Assert.assertEquals(
        BLOCK_SIZE, (long) usedBytesOnTiers.get(StorageLevelAlias.HDD.getValue() - 1));

    // Reserve on the lower tier
    for (int i = 0; i < 7; i++) {
      TieredBlockStoreTestUtils.cache(SESSION_ID, blockId++, BLOCK_SIZE, mBlockStore, tier0);
    }
    CommonUtils.sleepMs(
        WorkerContext.getConf().getLong(Constants.WORKER_SPACE_RESERVER_INTERVAL_MS));
    storeMeta = mBlockStore.getBlockStoreMeta();
    Assert.assertEquals(9 * BLOCK_SIZE, storeMeta.getUsedBytes());
    usedBytesOnTiers = storeMeta.getUsedBytesOnTiers();
    Assert.assertEquals(
        2 * BLOCK_SIZE, (long) usedBytesOnTiers.get(StorageLevelAlias.MEM.getValue() - 1));
    Assert.assertEquals(
        7 * BLOCK_SIZE, (long) usedBytesOnTiers.get(StorageLevelAlias.HDD.getValue() - 1));
  }
Example #4
  /**
   * Tests that blocks sized just under the default RAM, SSD, and HDD sizes can be allocated on
   * each tier.
   *
   * @throws Exception if a block cannot be allocated
   */
  @Test
  public void shouldAllocateTest() throws Exception {
    TachyonConf conf = WorkerContext.getConf();
    for (String strategyName : mStrategies) {
      conf.set(Constants.WORKER_ALLOCATOR_CLASS, strategyName);
      resetManagerView();
      Allocator tierAllocator = Allocator.Factory.create(conf, getManagerView());
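      // One near-capacity block should fit in each directory of the tier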
      for (int i = 0; i < DEFAULT_RAM_NUM; i++) {
        assertTempBlockMeta(tierAllocator, mAnyDirInTierLoc1, DEFAULT_RAM_SIZE - 1, true);
      }
      for (int i = 0; i < DEFAULT_SSD_NUM; i++) {
        assertTempBlockMeta(tierAllocator, mAnyDirInTierLoc2, DEFAULT_SSD_SIZE - 1, true);
      }
      for (int i = 0; i < DEFAULT_HDD_NUM; i++) {
        assertTempBlockMeta(tierAllocator, mAnyDirInTierLoc3, DEFAULT_HDD_SIZE - 1, true);
      }

      resetManagerView();
      Allocator anyAllocator = Allocator.Factory.create(conf, getManagerView());
      for (int i = 0; i < DEFAULT_RAM_NUM; i++) {
        assertTempBlockMeta(anyAllocator, mAnyTierLoc, DEFAULT_RAM_SIZE - 1, true);
      }
      for (int i = 0; i < DEFAULT_SSD_NUM; i++) {
        assertTempBlockMeta(anyAllocator, mAnyTierLoc, DEFAULT_SSD_SIZE - 1, true);
      }
      for (int i = 0; i < DEFAULT_HDD_NUM; i++) {
        assertTempBlockMeta(anyAllocator, mAnyTierLoc, DEFAULT_HDD_SIZE - 1, true);
      }
    }
  }
Example #5
 /**
  * Helper method to create a {@link org.apache.thrift.server.TThreadPoolServer} for handling
  * incoming RPC requests.
  *
  * @return a thrift server
  */
 private TThreadPoolServer createThriftServer() {
   int minWorkerThreads = mTachyonConf.getInt(Constants.WORKER_WORKER_BLOCK_THREADS_MIN);
   int maxWorkerThreads = mTachyonConf.getInt(Constants.WORKER_WORKER_BLOCK_THREADS_MAX);
   BlockWorkerClientService.Processor<BlockWorkerClientServiceHandler> processor =
       new BlockWorkerClientService.Processor<BlockWorkerClientServiceHandler>(mServiceHandler);
   TTransportFactory tTransportFactory;
   try {
     tTransportFactory = AuthenticationUtils.getServerTransportFactory(mTachyonConf);
   } catch (IOException ioe) {
     throw Throwables.propagate(ioe);
   }
   Args args =
       new TThreadPoolServer.Args(mThriftServerSocket)
           .minWorkerThreads(minWorkerThreads)
           .maxWorkerThreads(maxWorkerThreads)
           .processor(processor)
           .transportFactory(tTransportFactory)
           .protocolFactory(new TBinaryProtocol.Factory(true, true));
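   // In test mode use a zero stop timeout so the server shuts down immediately in tests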
   if (WorkerContext.getConf().getBoolean(Constants.IN_TEST_MODE)) {
     args.stopTimeoutVal = 0;
   } else {
     args.stopTimeoutVal = Constants.THRIFT_STOP_TIMEOUT_SECONDS;
   }
   return new TThreadPoolServer(args);
 }
Example #6
  @Before
  public final void before() throws Exception {
    TachyonConf conf = WorkerContext.getConf();
    conf.set(Constants.WORKER_DATA_SERVER, mDataServerClass);
    conf.set(Constants.WORKER_NETWORK_NETTY_FILE_TRANSFER_TYPE, mNettyTransferType);
    conf.set(Constants.USER_REMOTE_BLOCK_READER, mRemoteReaderClass);
    conf.set(Constants.USER_REMOTE_READ_BUFFER_SIZE_BYTE, "100");

    mLocalTachyonCluster = new LocalTachyonCluster(Constants.GB, Constants.KB, Constants.GB);
    mLocalTachyonCluster.start();

    mTachyonConf = mLocalTachyonCluster.getMasterTachyonConf();
    mTfs = mLocalTachyonCluster.getClient();
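    // Build write options (Tachyon-only vs. under-store-only) and read options
    // (with and without caching) for the tests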
    mWriteTachyon =
        new OutStreamOptions.Builder(mTachyonConf)
            .setTachyonStorageType(TachyonStorageType.STORE)
            .setUnderStorageType(UnderStorageType.NO_PERSIST)
            .build();
    mWriteUnderStore =
        new OutStreamOptions.Builder(mTachyonConf)
            .setTachyonStorageType(TachyonStorageType.NO_STORE)
            .setUnderStorageType(UnderStorageType.PERSIST)
            .build();
    mReadCache =
        new InStreamOptions.Builder(mTachyonConf)
            .setTachyonStorageType(TachyonStorageType.STORE)
            .build();
    mReadNoCache =
        new InStreamOptions.Builder(mTachyonConf)
            .setTachyonStorageType(TachyonStorageType.NO_STORE)
            .build();
  }
Example #7
  private StorageTier(int tierLevel) {
    mTierLevel = tierLevel;

    String tierLevelAliasProp =
        String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_ALIAS_FORMAT, tierLevel);
    StorageLevelAlias alias =
        WorkerContext.getConf().getEnum(tierLevelAliasProp, StorageLevelAlias.class);
    mTierAlias = alias.getValue();
  }
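
For reference, a small sketch of the per-level property-key pattern used above; the template string below is illustrative (the real template lives in Constants.WORKER_TIERED_STORAGE_LEVEL_ALIAS_FORMAT):

// Hypothetical key template mirroring the %d-formatted constants used throughout
String aliasFormat = "tachyon.worker.tieredstore.level%d.alias";
for (int level = 0; level < 2; level++) {
  // level 0 -> tachyon.worker.tieredstore.level0.alias
  // level 1 -> tachyon.worker.tieredstore.level1.alias
  System.out.println(String.format(aliasFormat, level));
}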
Example #8
 public SpaceReserver(BlockDataManager blockManager) {
   mBlockManager = blockManager;
   mStorageTierAssoc = new WorkerStorageTierAssoc(WorkerContext.getConf());
   Map<String, Long> capOnTiers = blockManager.getStoreMeta().getCapacityBytesOnTiers();
   long lastTierReservedBytes = 0;
   for (int ordinal = 0; ordinal < mStorageTierAssoc.size(); ordinal++) {
     String tierReservedSpaceProp =
         String.format(Constants.WORKER_TIERED_STORE_LEVEL_RESERVED_RATIO_FORMAT, ordinal);
     String tierAlias = mStorageTierAssoc.getAlias(ordinal);
     long reservedSpaceBytes =
         (long)
             (capOnTiers.get(tierAlias)
                 * WorkerContext.getConf().getDouble(tierReservedSpaceProp));
     mBytesToReserveOnTiers.put(tierAlias, reservedSpaceBytes + lastTierReservedBytes);
     lastTierReservedBytes += reservedSpaceBytes;
   }
   mCheckIntervalMs =
       WorkerContext.getConf().getInt(Constants.WORKER_TIERED_STORE_RESERVER_INTERVAL_MS);
   mRunning = true;
 }
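
The reservation is cumulative: each tier reserves its own ratio of its capacity plus everything already reserved by the tiers above it, so evictions can cascade downward. A standalone sketch with made-up numbers:

// Two tiers of 100 MB each with a 0.2 reserved ratio on both (illustrative values)
long[] capacityBytes = {100L * 1024 * 1024, 100L * 1024 * 1024};
double[] reservedRatios = {0.2, 0.2};
long lastTierReservedBytes = 0;
for (int ordinal = 0; ordinal < capacityBytes.length; ordinal++) {
  long reservedSpaceBytes = (long) (capacityBytes[ordinal] * reservedRatios[ordinal]);
  // Tier 0 reserves 20 MB; tier 1 reserves 20 MB + 20 MB = 40 MB
  System.out.println("tier " + ordinal + " reserves "
      + (reservedSpaceBytes + lastTierReservedBytes) + " bytes");
  lastTierReservedBytes += reservedSpaceBytes;
}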
Example #9
  public void stopTFS() throws Exception {
    mClientPool.close();

    mWorker.stop();
    if (LineageUtils.isLineageEnabled(WorkerContext.getConf())) {
      mLineageWorker.stop();
    }
    for (int k = 0; k < mNumOfMasters; k++) {
      mMasters.get(k).stop();
    }
    mCuratorServer.stop();
  }
Example #10
 @Before
 public void before() throws Exception {
   WorkerFileSystemMasterClient workerFileSystemMasterClient =
       PowerMockito.mock(WorkerFileSystemMasterClient.class);
   WorkerSource workerSource = PowerMockito.mock(WorkerSource.class);
   WorkerBlockMasterClient blockMasterClient = PowerMockito.mock(WorkerBlockMasterClient.class);
   String baseDir = mTempFolder.newFolder().getAbsolutePath();
   TieredBlockStoreTestUtils.setupTachyonConfWithMultiTier(
       baseDir, TIER_LEVEL, TIER_ALIAS, TIER_PATH, TIER_CAPACITY_BYTES, null);
   mBlockStore = new TieredBlockStore();
   BlockDataManager blockDataManager =
       new BlockDataManager(
           workerSource, blockMasterClient, workerFileSystemMasterClient, mBlockStore);
   String reserveRatioProp =
       String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_RESERVED_RATIO_FORMAT, 0);
   WorkerContext.getConf().set(reserveRatioProp, "0.2");
   reserveRatioProp =
       String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_RESERVED_RATIO_FORMAT, 1);
   WorkerContext.getConf().set(reserveRatioProp, "0.2");
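   // Reserve 20% of the capacity on both tiers so the reserver has work to do at each level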
   mSpaceReserver = new SpaceReserver(blockDataManager);
   mExecutorService.submit(mSpaceReserver);
 }
Example #11
  /**
   * Creates a new instance of {@link FileSystemWorker}.
   *
   * @param blockDataManager a block data manager handle
   * @throws IOException if an I/O error occurs
   */
  public FileSystemWorker(BlockDataManager blockDataManager) throws IOException {
    super(
        Executors.newFixedThreadPool(
            3, ThreadFactoryUtils.build("file-system-worker-heartbeat-%d", true)));
    Preconditions.checkState(WorkerIdRegistry.getWorkerId() != 0, "Failed to register worker");
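    // The worker id is assigned during registration; a zero id means registration failed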

    mTachyonConf = WorkerContext.getConf();
    mFileDataManager = new FileDataManager(Preconditions.checkNotNull(blockDataManager));

    // Set up the master client
    mFileSystemMasterWorkerClient =
        new FileSystemMasterClient(
            NetworkAddressUtils.getConnectAddress(ServiceType.MASTER_RPC, mTachyonConf),
            mTachyonConf);
  }
Example #12
 /**
  * Sets up all dependencies before a test runs.
  *
  * @throws Exception if setting up the meta manager, the lock manager or the evictor fails
  */
 @Before
 public final void before() throws Exception {
   File tempFolder = mTestFolder.newFolder();
   mMetaManager = TieredBlockStoreTestUtils.defaultMetadataManager(tempFolder.getAbsolutePath());
   mManagerView =
       new BlockMetadataManagerView(
           mMetaManager, Collections.<Long>emptySet(), Collections.<Long>emptySet());
   TachyonConf conf = WorkerContext.getConf();
   conf.set(Constants.WORKER_EVICTOR_CLASS, LRFUEvictor.class.getName());
   conf.set(Constants.WORKER_ALLOCATOR_CLASS, MaxFreeAllocator.class.getName());
   mAllocator = Allocator.Factory.create(conf, mManagerView);
   mStepFactor = conf.getDouble(Constants.WORKER_EVICTOR_LRFU_STEP_FACTOR);
   mAttenuationFactor = conf.getDouble(Constants.WORKER_EVICTOR_LRFU_ATTENUATION_FACTOR);
   mEvictor = Evictor.Factory.create(conf, mManagerView, mAllocator);
 }
Example #13
  public void start() throws IOException {
    int maxLevel = 1;
    mTachyonHome =
        File.createTempFile("Tachyon", "U" + System.currentTimeMillis()).getAbsolutePath();
    mWorkerDataFolder = "/datastore";

    mHostname = NetworkAddressUtils.getLocalHostName(100);

    mMasterConf = MasterContext.getConf();
    mMasterConf.set(Constants.IN_TEST_MODE, "true");
    mMasterConf.set(Constants.TACHYON_HOME, mTachyonHome);
    mMasterConf.set(Constants.USE_ZOOKEEPER, "true");
    mMasterConf.set(Constants.MASTER_HOSTNAME, mHostname);
    mMasterConf.set(Constants.MASTER_BIND_HOST, mHostname);
    mMasterConf.set(Constants.MASTER_PORT, "0");
    mMasterConf.set(Constants.MASTER_WEB_BIND_HOST, mHostname);
    mMasterConf.set(Constants.MASTER_WEB_PORT, "0");
    mMasterConf.set(Constants.ZOOKEEPER_ADDRESS, mCuratorServer.getConnectString());
    mMasterConf.set(Constants.ZOOKEEPER_ELECTION_PATH, "/election");
    mMasterConf.set(Constants.ZOOKEEPER_LEADER_PATH, "/leader");
    mMasterConf.set(Constants.USER_QUOTA_UNIT_BYTES, "10000");
    mMasterConf.set(Constants.USER_DEFAULT_BLOCK_SIZE_BYTE, Integer.toString(mUserBlockSize));

    // Since tests always run on a single host, keep the resolution timeout low; otherwise
    // people running with unusual network configurations will see very slow tests
    mMasterConf.set(Constants.HOST_RESOLUTION_TIMEOUT_MS, "250");

    // Disable hdfs client caching to avoid file system close() affecting other clients
    System.setProperty("fs.hdfs.impl.disable.cache", "true");

    // Re-create the directory so its permissions are set to 777
    deleteDir(mTachyonHome);
    mkdir(mTachyonHome);

    for (int k = 0; k < mNumOfMasters; k++) {
      final LocalTachyonMaster master = LocalTachyonMaster.create(mTachyonHome);
      master.start();
      LOG.info(
          "master NO."
              + k
              + " started, isServing: "
              + master.isServing()
              + ", address: "
              + master.getAddress());
      mMasters.add(master);
      // Each master should generate a new port for binding
      mMasterConf.set(Constants.MASTER_PORT, "0");
    }

    // Create the directories for the data and workers after LocalTachyonMaster construction,
    // because LocalTachyonMaster sets the UNDERFS_DATA_FOLDER and UNDERFS_WORKERS_FOLDER.
    mkdir(mMasterConf.get(Constants.UNDERFS_DATA_FOLDER));
    mkdir(mMasterConf.get(Constants.UNDERFS_WORKERS_FOLDER));

    LOG.info("all " + mNumOfMasters + " masters started.");
    LOG.info("waiting for a leader.");
    boolean hasLeader = false;
    while (!hasLeader) {
      for (int i = 0; i < mMasters.size(); i++) {
        if (mMasters.get(i).isServing()) {
          LOG.info(
              "master NO."
                  + i
                  + " is selected as leader. address: "
                  + mMasters.get(i).getAddress());
          hasLeader = true;
          break;
        }
      }
    }
    // Use first master port
    mMasterConf.set(Constants.MASTER_PORT, getMasterPort() + "");

    CommonUtils.sleepMs(10);

    mWorkerConf = WorkerContext.getConf();
    mWorkerConf.merge(mMasterConf);
    mWorkerConf.set(Constants.WORKER_DATA_FOLDER, mWorkerDataFolder);
    mWorkerConf.set(Constants.WORKER_MEMORY_SIZE, mWorkerCapacityBytes + "");
    mWorkerConf.set(Constants.WORKER_TO_MASTER_HEARTBEAT_INTERVAL_MS, 15 + "");

    // Set up the tiered storage conf for the worker
    mWorkerConf.set(Constants.WORKER_MAX_TIERED_STORAGE_LEVEL, Integer.toString(maxLevel));
    mWorkerConf.set(String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_ALIAS_FORMAT, 0), "MEM");
    mWorkerConf.set(
        String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_PATH_FORMAT, 0),
        mTachyonHome + "/ramdisk");
    mWorkerConf.set(
        String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_QUOTA_FORMAT, 0),
        mWorkerCapacityBytes + "");

    // Since tests always run on a single host, keep the resolution timeout low; otherwise
    // people running with unusual network configurations will see very slow tests
    mWorkerConf.set(Constants.HOST_RESOLUTION_TIMEOUT_MS, "250");

    for (int level = 1; level < maxLevel; level++) {
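      // Prefix each configured tier path with the test's Tachyon home directory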
      String tierLevelDirPath =
          String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_PATH_FORMAT, level);
      String[] dirPaths = mWorkerConf.get(tierLevelDirPath).split(",");
      String newPath = "";
      for (String dirPath : dirPaths) {
        newPath += mTachyonHome + dirPath + ",";
      }
      mWorkerConf.set(
          String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_PATH_FORMAT, level),
          newPath.substring(0, newPath.length() - 1));
    }

    mWorkerConf.set(Constants.WORKER_BIND_HOST, mHostname);
    mWorkerConf.set(Constants.WORKER_PORT, "0");
    mWorkerConf.set(Constants.WORKER_DATA_BIND_HOST, mHostname);
    mWorkerConf.set(Constants.WORKER_DATA_PORT, "0");
    mWorkerConf.set(Constants.WORKER_WEB_BIND_HOST, mHostname);
    mWorkerConf.set(Constants.WORKER_WEB_PORT, "0");
    mWorkerConf.set(Constants.WORKER_MIN_WORKER_THREADS, "1");
    mWorkerConf.set(Constants.WORKER_MAX_WORKER_THREADS, "100");

    // Perform immediate shutdown of data server. Graceful shutdown is unnecessary and slow
    mWorkerConf.set(Constants.WORKER_NETWORK_NETTY_SHUTDOWN_QUIET_PERIOD, Integer.toString(0));
    mWorkerConf.set(Constants.WORKER_NETWORK_NETTY_SHUTDOWN_TIMEOUT, Integer.toString(0));

    mWorker = new BlockWorker();
    Runnable runWorker =
        new Runnable() {
          @Override
          public void run() {
            try {
              mWorker.process();
            } catch (Exception e) {
              throw new RuntimeException("Failed to start the worker: " + e.getMessage(), e);
            }
          }
        };
    mWorkerThread = new Thread(runWorker);
    mWorkerThread.start();
    // The client context should reflect the updates to the conf.
    if (sReinitializer == null) {
      ClientContext.accessReinitializer(sReinitializerAccesser);
    }
    sReinitializer.reinitializeWithConf(mWorkerConf);
  }
Example #14
  /**
   * Creates a new instance of {@link BlockWorker}.
   *
   * @throws ConnectionFailedException if the network connection fails
   * @throws IOException if an I/O error occurs
   */
  public BlockWorker() throws IOException, ConnectionFailedException {
    super(
        Executors.newFixedThreadPool(
            4, ThreadFactoryUtils.build("block-worker-heartbeat-%d", true)));
    mTachyonConf = WorkerContext.getConf();
    mStartTimeMs = System.currentTimeMillis();

    // Set up the master clients
    mBlockMasterClient =
        new BlockMasterClient(
            NetworkAddressUtils.getConnectAddress(ServiceType.MASTER_RPC, mTachyonConf),
            mTachyonConf);

    mFileSystemMasterClient =
        new FileSystemMasterClient(
            NetworkAddressUtils.getConnectAddress(ServiceType.MASTER_RPC, mTachyonConf),
            mTachyonConf);

    // Set up BlockDataManager
    WorkerSource workerSource = new WorkerSource();
    mBlockDataManager =
        new BlockDataManager(
            workerSource, mBlockMasterClient, mFileSystemMasterClient, new TieredBlockStore());

    // Set up metrics collection
    mWorkerMetricsSystem = new MetricsSystem("worker", mTachyonConf);
    workerSource.registerGauges(mBlockDataManager);
    mWorkerMetricsSystem.registerSource(workerSource);

    // Set up the DataServer
    mDataServer =
        DataServer.Factory.create(
            NetworkAddressUtils.getBindAddress(ServiceType.WORKER_DATA, mTachyonConf),
            mBlockDataManager,
            mTachyonConf);
    // Reset data server port
    mTachyonConf.set(Constants.WORKER_DATA_PORT, Integer.toString(mDataServer.getPort()));

    // Set up the RPC server
    mServiceHandler = new BlockWorkerClientServiceHandler(mBlockDataManager);
    mThriftServerSocket = createThriftServerSocket();
    mPort = NetworkAddressUtils.getThriftPort(mThriftServerSocket);
    // Reset worker RPC port
    mTachyonConf.set(Constants.WORKER_RPC_PORT, Integer.toString(mPort));
    mThriftServer = createThriftServer();

    // Set up the web server
    mWebServer =
        new WorkerUIWebServer(
            ServiceType.WORKER_WEB,
            NetworkAddressUtils.getBindAddress(ServiceType.WORKER_WEB, mTachyonConf),
            mBlockDataManager,
            NetworkAddressUtils.getConnectAddress(ServiceType.WORKER_RPC, mTachyonConf),
            mStartTimeMs,
            mTachyonConf);
    mWorkerMetricsSystem.start();
    // Add the metrics servlet to the web server; this must be done after the metrics system starts
    mWebServer.addHandler(mWorkerMetricsSystem.getServletHandler());
    mWebServer.startWebServer();
    int webPort = mWebServer.getLocalPort();

    // Get the worker id
    mWorkerNetAddress =
        new NetAddress(
            NetworkAddressUtils.getConnectHost(ServiceType.WORKER_RPC, mTachyonConf),
            mPort,
            mDataServer.getPort(),
            webPort);
    WorkerIdRegistry.registerWithBlockMaster(mBlockMasterClient, mWorkerNetAddress);

    mBlockMasterSync =
        new BlockMasterSync(mBlockDataManager, mWorkerNetAddress, mBlockMasterClient);

    // Set up the pin list syncer
    mPinListSync = new PinListSync(mBlockDataManager, mFileSystemMasterClient);

    // Set up the session cleaner
    mSessionCleanerThread = new SessionCleaner(mBlockDataManager);

    // Set up the space reserver
    if (mTachyonConf.getBoolean(Constants.WORKER_TIERED_STORE_RESERVER_ENABLED)) {
      mSpaceReserver = new SpaceReserver(mBlockDataManager);
    }
  }
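
Putting Examples #2, #9, and #14 together, a minimal sketch of the worker lifecycle (assuming BlockWorker.stop() behaves as it is used in Example #9; error handling omitted):

public static void main(String[] args) throws Exception {
  final BlockWorker worker = new BlockWorker(); // registers with the master, starts the servers
  Thread workerThread = new Thread(new Runnable() {
    @Override
    public void run() {
      worker.process(); // blocks until the Thrift server shuts down
    }
  });
  workerThread.start();
  // ... serve requests ...
  worker.stop(); // the same shutdown path Example #9 uses via mWorker.stop()
  workerThread.join();
}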