@Override
public void run() {
  final long millisBeforeWarning = conf.getTimeInMillis(Property.TSERV_ASSIGNMENT_DURATION_WARNING);
  try {
    long now = System.currentTimeMillis();
    KeyExtent extent;
    RunnableStartedAt runnable;
    for (Entry<KeyExtent,RunnableStartedAt> entry : activeAssignments.entrySet()) {
      extent = entry.getKey();
      runnable = entry.getValue();
      final long duration = now - runnable.getStartTime();

      // Print a warning if an assignment has been running for over the configured time length
      if (duration > millisBeforeWarning) {
        log.warn("Assignment for " + extent + " has been running for at least " + duration + "ms",
            runnable.getTask().getException());
      } else if (log.isTraceEnabled()) {
        log.trace("Assignment for " + extent + " only running for " + duration + "ms");
      }
    }
  } catch (Exception e) {
    log.warn("Caught exception checking active assignments", e);
  } finally {
    // Don't run more often than every 5s
    long delay = Math.max((long) (millisBeforeWarning * 0.5), 5000L);
    if (log.isTraceEnabled()) {
      log.trace("Rescheduling assignment watcher to run in " + delay + "ms");
    }
    timer.schedule(this, delay);
  }
}
public void startProcessing(final Processor processor, ThreadPoolExecutor executorService)
    throws KeeperException, InterruptedException {

  threadPool = (ThreadPoolExecutor) executorService;

  zoo.mkdirs(path);
  zoo.mkdirs(path + "/" + LOCKS_NODE);

  List<String> children = zoo.getChildren(path, new Watcher() {
    @Override
    public void process(WatchedEvent event) {
      switch (event.getType()) {
        case NodeChildrenChanged:
          if (event.getPath().equals(path))
            try {
              lookForWork(processor, zoo.getChildren(path, this));
            } catch (KeeperException e) {
              log.error("Failed to look for work", e);
            } catch (InterruptedException e) {
              log.info("Interrupted looking for work", e);
            }
          else
            log.info("Unexpected path for NodeChildrenChanged event " + event.getPath());
          break;
        case NodeCreated:
        case NodeDataChanged:
        case NodeDeleted:
        case None:
          log.info("Got unexpected zookeeper event: " + event.getType() + " for " + path);
          break;
      }
    }
  });

  lookForWork(processor, children);

  Random r = new Random();
  // Add a little jitter to avoid all the tservers slamming zookeeper at once
  SimpleTimer.getInstance().schedule(new Runnable() {
    @Override
    public void run() {
      try {
        lookForWork(processor, zoo.getChildren(path));
      } catch (KeeperException e) {
        log.error("Failed to look for work", e);
      } catch (InterruptedException e) {
        log.info("Interrupted looking for work", e);
      }
    }
  }, r.nextInt(60 * 1000), 60 * 1000);
}
public void run() throws Exception {
  SimpleTimer.getInstance().schedule(new Runnable() {
    @Override
    public void run() {
      flush();
    }
  }, 1000, 1000);
  server.serve();
}
private ExecutorService addEs(final Property maxThreads, String name, final ThreadPoolExecutor tp) {
  ExecutorService result = addEs(name, tp);
  SimpleTimer.getInstance(tserver.getConfiguration()).schedule(new Runnable() {
    @Override
    public void run() {
      try {
        int max = tserver.getConfiguration().getCount(maxThreads);
        if (tp.getMaximumPoolSize() != max) {
          log.info("Changing " + maxThreads.getKey() + " to " + max);
          tp.setCorePoolSize(max);
          tp.setMaximumPoolSize(max);
        }
      } catch (Throwable t) {
        log.error("Failed to change thread pool size", t);
      }
    }
  }, 1000, 10 * 1000);
  return result;
}
protected void pageStart(HttpServletRequest req, HttpServletResponse resp, StringBuilder sb)
    throws Exception {
  resp.setContentType(DEFAULT_CONTENT_TYPE);
  int refresh = -1;
  Cookie c = getCookie(req, "page.refresh.rate");
  if (c != null && c.getValue() != null) {
    try {
      refresh = Integer.parseInt(c.getValue());
    } catch (NumberFormatException e) {
      // ignore improperly formatted user cookie
    }
  }
  synchronized (BasicServlet.class) {
    // Learn our instance name asynchronously so we don't hang up if zookeeper is down
    if (cachedInstanceName == null) {
      SimpleTimer.getInstance().schedule(new TimerTask() {
        @Override
        public void run() {
          synchronized (BasicServlet.class) {
            if (cachedInstanceName == null) {
              cachedInstanceName = HdfsZooInstance.getInstance().getInstanceName();
            }
          }
        }
      }, 1000);
    }
  }

  // BEGIN PAGE
  sb.append("<html>\n");

  // BEGIN HEADER
  sb.append("<head>\n");
  sb.append("<title>").append(getTitle(req)).append(" - Accumulo ").append(Constants.VERSION)
      .append("</title>\n");
  if ((refresh > 0) && (req.getRequestURI().startsWith("/docs") == false)
      && (req.getRequestURI().startsWith("/vis") == false))
    sb.append("<meta http-equiv='refresh' content='" + refresh + "' />\n");
  sb.append("<meta http-equiv='Content-Type' content='").append(DEFAULT_CONTENT_TYPE)
      .append("' />\n");
  sb.append("<meta http-equiv='Content-Script-Type' content='text/javascript' />\n");
  sb.append("<meta http-equiv='Content-Style-Type' content='text/css' />\n");
  sb.append("<link rel='shortcut icon' type='image/jpg' href='/web/favicon.png' />\n");
  sb.append("<link rel='stylesheet' type='text/css' href='/web/screen.css' media='screen' />\n");
  sb.append("<script src='/web/functions.js' type='text/javascript'></script>\n");
  sb.append("<!--[if lte IE 8]><script language=\"javascript\" type=\"text/javascript\" src=\"/web/flot/excanvas.min.js\"></script><![endif]-->\n");
  sb.append("<script language=\"javascript\" type=\"text/javascript\" src=\"/web/flot/jquery.js\"></script>\n");
  sb.append("<script language=\"javascript\" type=\"text/javascript\" src=\"/web/flot/jquery.flot.js\"></script>\n");
  sb.append("</head>\n");

  // BEGIN BODY OPENING
  sb.append("\n<body>\n");
  sb.append("<div id='content-wrapper'>\n");
  sb.append("<div id='content'>\n");
  sb.append("<div id='header'>");
  if (!bannerText.isEmpty()) {
    sb.append("<div id='banner' style='color:").append(bannerColor).append(";background:")
        .append(bannerBackground).append("'>").append(bannerText).append("</div>\n");
  }
  sb.append("<div id='headertitle'>");
  sb.append("<h1>").append(getTitle(req)).append("</h1></div>\n");
  sb.append("<div id='subheader'>Instance Name: ").append(cachedInstanceName)
      .append(" Version: ").append(Constants.VERSION).append("\n");
  sb.append("<br><span class='smalltext'>Instance ID: ")
      .append(HdfsZooInstance.getInstance().getInstanceID()).append("</span>\n");
  sb.append("<br><span class='smalltext'>").append(new Date().toString().replace(" ", " "))
      .append("</span>");
  sb.append("</div>\n"); // end <div id='subheader'>
  sb.append("</div>\n"); // end <div id='header'>

  // BEGIN LEFT SIDE
  sb.append("<div id='nav'>\n");
  sb.append("<span id='nav-title'><a href='/'>Overview</a></span><br />\n");
  sb.append("<hr />\n");
  sb.append("<a href='/master'>Master Server</a><br />\n");
  sb.append("<a href='/tservers'>Tablet Servers</a><br />\n");
  sb.append("<a href='/loggers'>Logger Servers</a><br />\n");
  sb.append("<a href='/vis'>Server Activity</a><br />\n");
  sb.append("<a href='/gc'>Garbage Collector</a><br />\n");
  sb.append("<a href='/tables'>Tables</a><br />\n");
  sb.append("<a href='/trace/summary?minutes=10'>Recent Traces</a><br />\n");
  sb.append("<a href='/docs'>Documentation</a><br />\n");
  int numLogs = LogService.getInstance().getEvents().size();
  if (numLogs > 0)
    sb.append("<span class='error'><a href='/log'>Recent Logs <span class='smalltext'>(" + numLogs
        + ")</a></span></span><br />\n");
  int numProblems = Monitor.getProblemSummary().entrySet().size();
  if (numProblems > 0)
    sb.append("<span class='error'><a href='/problems'>Table Problems <span class='smalltext'>("
        + numProblems + ")</a></span></span><br />\n");
  sb.append("<hr />\n");
  sb.append("<a href='/xml'>XML</a><hr />\n");
  sb.append("<div class='smalltext'>[<a href='").append("/op?action=refresh&value=")
      .append(refresh < 1 ? "5" : "-1");
  sb.append("&redir=").append(currentPage(req)).append("'>");
  sb.append(refresh < 1 ? "en" : "dis").append("able auto-refresh</a>]</div>\n");
  sb.append("</div>\n"); // end <div id='nav'>
  sb.append("<div id='main'");
  if (bannerText.isEmpty())
    sb.append(" style='bottom:0'");
  sb.append(">\n");
  sb.append("<!-- BEGIN MAIN BODY CONTENT -->\n\n");
}
public static synchronized void startWatching(AccumuloConfiguration config) {
  if (!watching) {
    SimpleTimer.getInstance(config).schedule(new CompactionWatcher(config), 10000, 10000);
    watching = true;
  }
}
public static ServerAddress startHsHaServer(InetSocketAddress address, TProcessor processor,
    final String serverName, String threadName, final int numThreads, long timeBetweenThreadChecks,
    long maxMessageSize) throws TTransportException {
  TNonblockingServerSocket transport = new TNonblockingServerSocket(address);
  // check for the special "bind to everything address"
  if (address.getAddress().getHostAddress().equals("0.0.0.0")) {
    // can't get the address from the bind, so we'll do our best to invent our hostname
    try {
      address = new InetSocketAddress(InetAddress.getLocalHost().getHostName(), address.getPort());
    } catch (UnknownHostException e) {
      throw new TTransportException(e);
    }
  }
  THsHaServer.Args options = new THsHaServer.Args(transport);
  options.protocolFactory(ThriftUtil.protocolFactory());
  options.transportFactory(ThriftUtil.transportFactory(maxMessageSize));
  options.stopTimeoutVal(5);

  /*
   * Create our own very special thread pool.
   */
  final ThreadPoolExecutor pool = new SimpleThreadPool(numThreads, "ClientPool");

  // periodically adjust the number of threads we need by checking how busy our threads are
  SimpleTimer.getInstance().schedule(new Runnable() {
    @Override
    public void run() {
      if (pool.getCorePoolSize() <= pool.getActiveCount()) {
        int larger = pool.getCorePoolSize() + Math.min(pool.getQueue().size(), 2);
        log.info("Increasing server thread pool size on " + serverName + " to " + larger);
        pool.setMaximumPoolSize(larger);
        pool.setCorePoolSize(larger);
      } else {
        if (pool.getCorePoolSize() > pool.getActiveCount() + 3) {
          int smaller = Math.max(numThreads, pool.getCorePoolSize() - 1);
          if (smaller != pool.getCorePoolSize()) {
            // There is a race condition here... the active count could be higher by the time we
            // decrease the core pool size... so the active count could end up higher than the
            // core pool size, in which case everything will be queued... the increase case
            // should handle this and prevent deadlock.
            log.info("Decreasing server thread pool size on " + serverName + " to " + smaller);
            pool.setCorePoolSize(smaller);
          }
        }
      }
    }
  }, timeBetweenThreadChecks, timeBetweenThreadChecks);

  options.executorService(pool);
  options.processorFactory(new TProcessorFactory(processor));
  return new ServerAddress(new THsHaServer(options), address);
}
public TabletServerResourceManager(TabletServer tserver, VolumeManager fs) {
  this.tserver = tserver;
  this.conf = tserver.getServerConfigurationFactory();
  this.fs = fs;
  final AccumuloConfiguration acuConf = conf.getConfiguration();

  long maxMemory = acuConf.getMemoryInBytes(Property.TSERV_MAXMEM);
  boolean usingNativeMap =
      acuConf.getBoolean(Property.TSERV_NATIVEMAP_ENABLED) && NativeMap.isLoaded();

  long blockSize = acuConf.getMemoryInBytes(Property.TSERV_DEFAULT_BLOCKSIZE);
  long dCacheSize = acuConf.getMemoryInBytes(Property.TSERV_DATACACHE_SIZE);
  long iCacheSize = acuConf.getMemoryInBytes(Property.TSERV_INDEXCACHE_SIZE);
  long totalQueueSize = acuConf.getMemoryInBytes(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX);

  _iCache = new LruBlockCache(iCacheSize, blockSize);
  _dCache = new LruBlockCache(dCacheSize, blockSize);

  Runtime runtime = Runtime.getRuntime();
  if (usingNativeMap) {
    // Still check block cache sizes when using native maps.
    if (dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
      throw new IllegalArgumentException(String.format(
          "Block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
          dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
    }
  } else if (maxMemory + dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
    throw new IllegalArgumentException(String.format(
        "Maximum tablet server map memory %,d block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
        maxMemory, dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
  }
  runtime.gc();

  // totalMemory - freeMemory = memory in use
  // maxMemory - memory in use = max available memory
  if (!usingNativeMap
      && maxMemory > runtime.maxMemory() - (runtime.totalMemory() - runtime.freeMemory())) {
    log.warn("In-memory map may not fit into local memory space.");
  }

  minorCompactionThreadPool = createEs(Property.TSERV_MINC_MAXCONCURRENT, "minor compactor");

  // make this thread pool have a priority queue... and execute tablets with the most
  // files first!
  majorCompactionThreadPool = createEs(Property.TSERV_MAJC_MAXCONCURRENT, "major compactor",
      new CompactionQueue().asBlockingQueueOfRunnable());
  rootMajorCompactionThreadPool = createEs(0, 1, 300, "md root major compactor");
  defaultMajorCompactionThreadPool = createEs(0, 1, 300, "md major compactor");

  splitThreadPool = createEs(1, "splitter");
  defaultSplitThreadPool = createEs(0, 1, 60, "md splitter");

  defaultMigrationPool = createEs(0, 1, 60, "metadata tablet migration");
  migrationPool = createEs(Property.TSERV_MIGRATE_MAXCONCURRENT, "tablet migration");

  // Not sure if concurrent assignments can run safely... even if they could, there is probably
  // no benefit at startup because individual tablet servers are already running assignments
  // concurrently... having each individual tablet server run concurrent assignments would put
  // more load on the metadata table at startup.
  assignmentPool = createEs(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "tablet assignment");

  assignMetaDataPool = createEs(0, 1, 60, "metadata tablet assignment");

  activeAssignments = new ConcurrentHashMap<KeyExtent,RunnableStartedAt>();

  readAheadThreadPool = createEs(Property.TSERV_READ_AHEAD_MAXCONCURRENT, "tablet read ahead");
  defaultReadAheadThreadPool =
      createEs(Property.TSERV_METADATA_READ_AHEAD_MAXCONCURRENT, "metadata tablets read ahead");

  int maxOpenFiles = acuConf.getCount(Property.TSERV_SCAN_MAX_OPENFILES);

  fileManager = new FileManager(tserver, fs, maxOpenFiles, _dCache, _iCache);

  memoryManager = Property.createInstanceFromPropertyName(acuConf, Property.TSERV_MEM_MGMT,
      MemoryManager.class, new LargestFirstMemoryManager());
  memoryManager.init(tserver.getServerConfigurationFactory());
  memMgmt = new MemoryManagementFramework();
  memMgmt.startThreads();

  SimpleTimer timer = SimpleTimer.getInstance(tserver.getConfiguration());

  // We can use the same map for both metadata and normal assignments since the keyspace (extent)
  // is guaranteed to be unique. Schedule the task once; the task will reschedule itself.
  timer.schedule(new AssignmentWatcher(acuConf, activeAssignments, timer), 5000);
}
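The AssignmentWatcher scheduled above uses the one-shot schedule(task, delay) form and re-arms itself from the finally block of its run() method (first snippet), in contrast to the fixed-period schedule(task, delay, period) calls used by the other snippets. Below is a minimal, self-contained sketch of that self-rescheduling pattern; it uses plain java.util.Timer instead of Accumulo's SimpleTimer, and the class name, field names, and 5-second delay are illustrative assumptions only.

// Hypothetical sketch of a task that schedules itself again after each run,
// mirroring how AssignmentWatcher re-arms from its finally block.
import java.util.Timer;
import java.util.TimerTask;

public class SelfReschedulingTask extends TimerTask {

  private final Timer timer;
  private final long delayMillis;

  public SelfReschedulingTask(Timer timer, long delayMillis) {
    this.timer = timer;
    this.delayMillis = delayMillis;
  }

  @Override
  public void run() {
    try {
      // periodic check would go here
    } finally {
      // A java.util.TimerTask cannot be scheduled twice, so re-arm with a fresh instance.
      // (Accumulo's SimpleTimer wraps a Runnable, so rescheduling "this" works there.)
      timer.schedule(new SelfReschedulingTask(timer, delayMillis), delayMillis);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Timer timer = new Timer(true);
    // Schedule once; afterwards the task keeps rescheduling itself.
    timer.schedule(new SelfReschedulingTask(timer, 5000L), 5000L);
    Thread.sleep(20000L); // keep the daemon timer alive long enough to observe a few runs
  }
}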