/**
 * Builds a client-facing {@link WorkerInfo} snapshot of this worker's current state.
 *
 * @return Generated {@link WorkerInfo} for this worker
 */
public synchronized WorkerInfo generateClientWorkerInfo() {
  WorkerInfo info = new WorkerInfo();
  info.id = mId;
  info.address = mWorkerAddress;
  // Seconds since the last heartbeat from this worker.
  info.lastContactSec =
      (int) ((CommonUtils.getCurrentMs() - mLastUpdatedTimeMs) / Constants.SECOND_MS);
  info.state = "In Service";
  info.capacityBytes = mCapacityBytes;
  info.usedBytes = mUsedBytes;
  info.startTimeMs = mStartTimeMs;
  return info;
}
/**
 * Creates a narrow dependency whose children are {@code sFiles} part files under
 * {@code sFileFolder}, then logs how long the call took.
 *
 * @throws IOException if the dependency cannot be created
 */
public static void createDependency() throws IOException {
  long startTimeMs = CommonUtils.getCurrentMs();

  // Children are named part-0 .. part-(sFiles-1) under the configured folder.
  List<String> childrenFiles = new ArrayList<String>();
  for (int i = 0; i < sFiles; i++) {
    childrenFiles.add(sFileFolder + "/part-" + i);
  }

  List<ByteBuffer> data = new ArrayList<ByteBuffer>();
  data.add(ByteBuffer.allocate(10));

  int depId =
      sTachyonClient.createDependency(new ArrayList<String>(), childrenFiles, "fake command",
          data, "BasicCheckpoint Dependency", "Tachyon Examples", "0.3",
          DependencyType.Narrow.getValue(), 512 * Constants.MB);

  CommonUtils.printTimeTakenMs(startTimeMs, LOG, "createDependency with depId " + depId);
}
/**
 * Periodic system status check: detects workers whose last heartbeat is older than the
 * configured timeout, moves them to the lost-worker set, and processes their loss.
 */
@Override
public void heartbeat() {
  LOG.debug("System status checking.");
  TachyonConf conf = MasterContext.getConf();
  int masterWorkerTimeoutMs = conf.getInt(Constants.MASTER_WORKER_TIMEOUT_MS);
  // Read the clock once (hoisted out of the loop) so every worker is judged against the
  // same instant, instead of a slightly later time for each subsequent worker.
  final long nowMs = CommonUtils.getCurrentMs();
  synchronized (mBlocks) {
    synchronized (mWorkers) {
      // Iterator.remove is the only safe way to drop elements while iterating.
      Iterator<MasterWorkerInfo> iter = mWorkers.iterator();
      while (iter.hasNext()) {
        MasterWorkerInfo worker = iter.next();
        final long lastUpdate = nowMs - worker.getLastUpdatedTimeMs();
        if (lastUpdate > masterWorkerTimeoutMs) {
          LOG.error("The worker {} got timed out!", worker);
          mLostWorkers.add(worker);
          iter.remove();
          processLostWorker(worker);
        }
      }
    }
  }
}
@Override public void run() { LOG.info("{}: Journal tailer started.", mMaster.getServiceName()); // Continually loop loading the checkpoint file, and then loading all completed files. The loop // only repeats when the checkpoint file is updated after it was read. while (!mInitiateShutdown) { try { // The start time (ms) for the initiated shutdown. long waitForShutdownStart = -1; // Load the checkpoint file. LOG.info("{}: Waiting to load the checkpoint file.", mMaster.getServiceName()); mJournalTailer = new JournalTailer(mMaster, mJournal); while (!mJournalTailer.checkpointExists()) { CommonUtils.sleepMs(LOG, mJournalTailerSleepTimeMs); if (mInitiateShutdown) { LOG.info("Journal tailer has been shutdown while waiting to load the checkpoint file."); mStopped = true; return; } } LOG.info("{}: Start loading the checkpoint file.", mMaster.getServiceName()); mJournalTailer.processJournalCheckpoint(true); LOG.info("{}: Checkpoint file has been loaded.", mMaster.getServiceName()); // Continually process completed log files. while (mJournalTailer.isValid()) { if (mJournalTailer.processNextJournalLogFiles() > 0) { // Reset the shutdown timer. waitForShutdownStart = -1; } else { if (mInitiateShutdown) { if (waitForShutdownStart == -1) { waitForShutdownStart = CommonUtils.getCurrentMs(); } else if ((CommonUtils.getCurrentMs() - waitForShutdownStart) > mShutdownQuietWaitTimeMs) { // There have been no new logs for the quiet period. Shutdown now. LOG.info( "{}: Journal tailer has been shutdown. No new logs for the quiet period.", mMaster.getServiceName()); mStopped = true; return; } } LOG.debug( "{}: The next complete log file does not exist yet. " + "Sleeping and checking again.", mMaster.getServiceName()); CommonUtils.sleepMs(LOG, mJournalTailerSleepTimeMs); } } LOG.info( "{}: The checkpoint is out of date. 
Will reload the checkpoint file.", mMaster.getServiceName()); CommonUtils.sleepMs(LOG, mJournalTailerSleepTimeMs); } catch (IOException ioe) { // Log the error and continue the loop. LOG.error(ioe.getMessage()); } } LOG.info("{}: Journal tailer has been shutdown.", mMaster.getServiceName()); mStopped = true; }
/**
 * Logs, at INFO level, how many milliseconds have elapsed since {@code startTimeMs}.
 *
 * @param startTimeMs the start time in milliseconds to measure the elapsed duration from
 * @param logger the logger to write the timing message to
 * @param message descriptive prefix for the logged line
 */
public static void printTimeTakenMs(long startTimeMs, Logger logger, String message) {
  // Use SLF4J parameterized logging instead of string concatenation, so the message is
  // only assembled when the INFO level is actually enabled. Output text is unchanged.
  logger.info("{} took {} ms.", message, CommonUtils.getCurrentMs() - startTimeMs);
}