@Before
  public void setUp() {
    volMgr = createMock(VolumeManager.class);
    instance = createMock(Instance.class);
    SiteConfiguration siteConfig = createMock(SiteConfiguration.class);
    expect(instance.getInstanceID()).andReturn("mock").anyTimes();
    expect(instance.getZooKeepers()).andReturn("localhost").anyTimes();
    expect(instance.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();

    opts = new Opts();
    systemConfig = createSystemConfig();
    ServerConfigurationFactory factory = createMock(ServerConfigurationFactory.class);
    expect(factory.getInstance()).andReturn(instance).anyTimes();
    expect(factory.getConfiguration()).andReturn(systemConfig).anyTimes();
    expect(factory.getSiteConfiguration()).andReturn(siteConfig).anyTimes();

    // Just make the SiteConfiguration delegate to our AccumuloConfiguration.
    // Presently, we only need get(Property), getBoolean(Property), and iterator().
    expect(siteConfig.get(EasyMock.anyObject(Property.class)))
        .andAnswer(
            new IAnswer<String>() {
              @Override
              public String answer() {
                Object[] args = EasyMock.getCurrentArguments();
                return systemConfig.get((Property) args[0]);
              }
            })
        .anyTimes();
    expect(siteConfig.getBoolean(EasyMock.anyObject(Property.class)))
        .andAnswer(
            new IAnswer<Boolean>() {
              @Override
              public Boolean answer() {
                Object[] args = EasyMock.getCurrentArguments();
                return systemConfig.getBoolean((Property) args[0]);
              }
            })
        .anyTimes();

    expect(siteConfig.iterator())
        .andAnswer(
            new IAnswer<Iterator<Entry<String, String>>>() {
              @Override
              public Iterator<Entry<String, String>> answer() {
                return systemConfig.iterator();
              }
            })
        .anyTimes();

    replay(instance, factory, siteConfig);

    credentials = SystemCredentials.get(instance);
    gc = new SimpleGarbageCollector(opts, volMgr, factory);
  }
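
  // createSystemConfig() is referenced in setUp() but not shown in this excerpt.
  // A minimal sketch of one plausible implementation, assuming the test only needs
  // a mutable AccumuloConfiguration (here a ConfigurationCopy backed by a map);
  // the specific properties seeded below are illustrative assumptions:
  private AccumuloConfiguration createSystemConfig() {
    Map<String, String> conf = new HashMap<>();
    conf.put(Property.GC_CYCLE_START.getKey(), "1");
    conf.put(Property.GC_CYCLE_DELAY.getKey(), "20");
    conf.put(Property.GC_DELETE_THREADS.getKey(), "2");
    return new ConfigurationCopy(conf);
  }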
  private ExecutorService createEs(Property max, String name, BlockingQueue<Runnable> queue) {
    // Size the pool from the configured property; core == max gives a fixed-size pool.
    int maxThreads = conf.getConfiguration().getCount(max);
    ThreadPoolExecutor tp =
        new ThreadPoolExecutor(
            maxThreads,
            maxThreads,
            0L,
            TimeUnit.MILLISECONDS,
            queue,
            new NamingThreadFactory(name));
    return addEs(max, name, tp);
  }
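
  // The constructor below also calls createEs overloads that are not shown in this
  // excerpt. A sketch of what two of them might look like, assuming an
  // addEs(String, ExecutorService) registration helper analogous to the
  // addEs(Property, String, ThreadPoolExecutor) call above; the signatures are
  // assumptions, and the remaining overloads would follow the same pattern:
  private ExecutorService createEs(Property max, String name) {
    // Unbounded work queue, so the configured pool size alone bounds concurrency.
    return createEs(max, name, new LinkedBlockingQueue<Runnable>());
  }

  private ExecutorService createEs(int min, int max, int timeout, String name) {
    // Small pool whose idle threads expire after the given timeout, in seconds.
    return addEs(
        name,
        new ThreadPoolExecutor(
            min, max, timeout, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new NamingThreadFactory(name)));
  }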
  void waitUntilCommitsAreEnabled() {
    if (holdCommits) {
      long timeout =
          System.currentTimeMillis()
              + conf.getConfiguration().getTimeInMillis(Property.GENERAL_RPC_TIMEOUT);
      synchronized (commitHold) {
        while (holdCommits) {
          try {
            if (System.currentTimeMillis() > timeout) {
              throw new HoldTimeoutException("Commits are held");
            }
            commitHold.wait(1000);
          } catch (InterruptedException e) {
            // Ignore interrupts and keep waiting; the timeout check above bounds
            // the total time spent in this loop.
          }
        }
      }
    }
  }
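
  // For context, the hold/release side of this handshake might look like the
  // sketch below. The names holdAllCommits/resumeAllCommits are assumptions; only
  // waitUntilCommitsAreEnabled() appears in this excerpt.
  void holdAllCommits() {
    synchronized (commitHold) {
      holdCommits = true;
    }
  }

  void resumeAllCommits() {
    synchronized (commitHold) {
      holdCommits = false;
      // Wake every writer blocked in waitUntilCommitsAreEnabled().
      commitHold.notifyAll();
    }
  }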
  public TabletServerResourceManager(TabletServer tserver, VolumeManager fs) {
    this.tserver = tserver;
    this.conf = tserver.getServerConfigurationFactory();
    this.fs = fs;
    final AccumuloConfiguration acuConf = conf.getConfiguration();

    long maxMemory = acuConf.getMemoryInBytes(Property.TSERV_MAXMEM);
    boolean usingNativeMap =
        acuConf.getBoolean(Property.TSERV_NATIVEMAP_ENABLED) && NativeMap.isLoaded();

    long blockSize = acuConf.getMemoryInBytes(Property.TSERV_DEFAULT_BLOCKSIZE);
    long dCacheSize = acuConf.getMemoryInBytes(Property.TSERV_DATACACHE_SIZE);
    long iCacheSize = acuConf.getMemoryInBytes(Property.TSERV_INDEXCACHE_SIZE);
    long totalQueueSize = acuConf.getMemoryInBytes(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX);

    _iCache = new LruBlockCache(iCacheSize, blockSize);
    _dCache = new LruBlockCache(dCacheSize, blockSize);

    Runtime runtime = Runtime.getRuntime();
    if (usingNativeMap) {
      // Still check block cache sizes when using native maps.
      if (dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
        throw new IllegalArgumentException(
            String.format(
                "Block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
                dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
      }
    } else if (maxMemory + dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
      throw new IllegalArgumentException(
          String.format(
              "Maximum tablet server map memory %,d block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
              maxMemory, dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
    }
    runtime.gc();

    // totalMemory - freeMemory = memory in use
    // maxMemory - memory in use = max available memory
    if (!usingNativeMap
        && maxMemory > runtime.maxMemory() - (runtime.totalMemory() - runtime.freeMemory())) {
      log.warn("In-memory map may not fit into local memory space.");
    }
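
    // Worked example with illustrative (non-default) numbers: with a 4096 MiB max
    // heap, a 512 MiB data cache, a 256 MiB index cache, and a 256 MiB mutation
    // queue, the non-native in-memory map has at most
    // 4096 - (512 + 256 + 256) = 3072 MiB available, and less once the JVM is busy.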

    minorCompactionThreadPool = createEs(Property.TSERV_MINC_MAXCONCURRENT, "minor compactor");

    // Use a priority queue for this thread pool so that the tablets with the most
    // files are compacted first.
    majorCompactionThreadPool =
        createEs(
            Property.TSERV_MAJC_MAXCONCURRENT,
            "major compactor",
            new CompactionQueue().asBlockingQueueOfRunnable());
    rootMajorCompactionThreadPool = createEs(0, 1, 300, "md root major compactor");
    defaultMajorCompactionThreadPool = createEs(0, 1, 300, "md major compactor");

    splitThreadPool = createEs(1, "splitter");
    defaultSplitThreadPool = createEs(0, 1, 60, "md splitter");

    defaultMigrationPool = createEs(0, 1, 60, "metadata tablet migration");
    migrationPool = createEs(Property.TSERV_MIGRATE_MAXCONCURRENT, "tablet migration");

    // It is not clear that concurrent assignments can run safely; even if they
    // could, there is probably no benefit at startup, because the tablet servers
    // are already running assignments concurrently with each other. Having each
    // individual tablet server also run concurrent assignments would only put more
    // load on the metadata table at startup.
    assignmentPool = createEs(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "tablet assignment");

    assignMetaDataPool = createEs(0, 1, 60, "metadata tablet assignment");

    activeAssignments = new ConcurrentHashMap<KeyExtent, RunnableStartedAt>();

    readAheadThreadPool = createEs(Property.TSERV_READ_AHEAD_MAXCONCURRENT, "tablet read ahead");
    defaultReadAheadThreadPool =
        createEs(Property.TSERV_METADATA_READ_AHEAD_MAXCONCURRENT, "metadata tablets read ahead");

    int maxOpenFiles = acuConf.getCount(Property.TSERV_SCAN_MAX_OPENFILES);

    fileManager = new FileManager(tserver, fs, maxOpenFiles, _dCache, _iCache);

    memoryManager =
        Property.createInstanceFromPropertyName(
            acuConf, Property.TSERV_MEM_MGMT, MemoryManager.class, new LargestFirstMemoryManager());
    memoryManager.init(tserver.getServerConfigurationFactory());
    memMgmt = new MemoryManagementFramework();
    memMgmt.startThreads();

    SimpleTimer timer = SimpleTimer.getInstance(tserver.getConfiguration());

    // We can use the same map for both metadata and normal assignments since the keyspace (extent)
    // is guaranteed to be unique. Schedule the task once, the task will reschedule itself.
    timer.schedule(new AssignmentWatcher(acuConf, activeAssignments, timer), 5000);
  }
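
  // Per the comment above, AssignmentWatcher reschedules itself rather than using
  // a fixed-rate schedule. A minimal sketch of that pattern, assuming the watcher
  // is a Runnable that keeps a reference to the timer it was created with and that
  // the enclosing class's static log is in scope; the body of run() is
  // illustrative, not the actual warning logic:
  static class AssignmentWatcher implements Runnable {
    private final AccumuloConfiguration conf; // would supply e.g. a warn threshold
    private final Map<KeyExtent, RunnableStartedAt> activeAssignments;
    private final SimpleTimer timer;

    AssignmentWatcher(AccumuloConfiguration conf,
        Map<KeyExtent, RunnableStartedAt> activeAssignments, SimpleTimer timer) {
      this.conf = conf;
      this.activeAssignments = activeAssignments;
      this.timer = timer;
    }

    @Override
    public void run() {
      // Illustrative: inspect in-flight assignments, e.g. to warn about any that
      // have been running for an unexpectedly long time.
      for (Entry<KeyExtent, RunnableStartedAt> entry : activeAssignments.entrySet()) {
        log.trace("Assignment in progress for " + entry.getKey());
      }
      // Schedule the next run of this same task once this one completes.
      timer.schedule(this, 5000);
    }
  }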