  /**
   * Adds a chunk's hash to the table. When {@code persist} is true the full
   * {@link ChunkData} record is stored and the key count is incremented on
   * insert; otherwise only the hash-to-position mapping is recorded. Returns
   * {@code true} if a new entry was added.
   */
  @Override
  public boolean put(ChunkData cm, boolean persist) throws IOException, HashtableFullException {
    if (this.isClosed())
      throw new HashtableFullException("Hashtable " + this.fileName + " is close");
    if (kSz.get() >= this.maxSz) throw new HashtableFullException("maximum size reached");
    boolean added = false;
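    // Take the GC read lock: puts may proceed concurrently with one another
    // but are excluded while claimRecords() holds the write lock.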
    Lock l = gcLock.readLock();
    l.lock();
    ProgressiveFileByteArrayLongMap bm = null;
    try {
      if (this.getReadMap(cm.getHash()) == null) {
        if (persist) {
          try {
            bm = this.getWriteMap();
            added = bm.put(cm);
          } catch (HashtableFullException e) {
            // The active write map is full; retire it and roll over to a new one.
            if (bm != null) bm.setActive(false);
            bm = this.createWriteMap();
            added = bm.put(cm);
          }
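          // Record the hash in the bloom filter and bump the key count only
          // when a new entry was actually inserted.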
          if (added) {
            this.lbf.put(cm.getHash());
            this.kSz.incrementAndGet();
          }
        } else {
          try {
            bm = this.getWriteMap();
            added = bm.put(cm.getHash(), cm.getcPos());
          } catch (HashtableFullException e) {
            if (bm != null) bm.setActive(false);
            bm = this.createWriteMap();
            added = bm.put(cm.getHash(), cm.getcPos());
          }
          // Mirror the persist branch: only record the hash in the bloom
          // filter when a new entry was actually inserted.
          if (added) this.lbf.put(cm.getHash());
        }
      }
    } finally {
      try {
        if (bm != null) {
          bm.setActive(true);
          this.activeWriteMaps.offer(bm);
        }
      } catch (Exception e) {
        SDFSLogger.getLog().debug("unable to return write map to the active queue", e);
      } finally {
        l.unlock();
      }
    }
    return added;
  }
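
  /**
   * Removes records that are no longer referenced by the supplied bloom
   * filter and compacts sparsely populated maps. Each map shard is scanned in
   * parallel, the number of claimed records is subtracted from the key count,
   * and surviving entries are migrated out of maps that are under 40% full
   * before those maps are deleted.
   *
   * @param evt parent event used for progress reporting
   * @param bf bloom filter of hashes that are still referenced
   * @return the number of records removed
   */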
  @Override
  public synchronized long claimRecords(SDFSEvent evt, LargeBloomFilter bf) throws IOException {
    if (this.isClosed()) throw new IOException("Hashtable " + this.fileName + " is closed");
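    // One worker per configured write thread, plus one spare, scans the map
    // shards in parallel at minimum thread priority.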
    executor =
        new ThreadPoolExecutor(
            Main.writeThreads + 1,
            Main.writeThreads + 1,
            10,
            TimeUnit.SECONDS,
            worksQueue,
            new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY),
            executionHandler);
    csz = new AtomicLong(0);
    Lock l = this.gcLock.writeLock();
    l.lock();
    try {
      this.runningGC = true;
      // Swap in a fresh bloom filter; it is repopulated with the hashes that
      // survive this GC pass.
      lbf = new LargeBloomFilter(maxSz, .01);
    } finally {
      l.unlock();
    }
    try {
      SDFSLogger.getLog()
          .info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
      SDFSEvent tEvt =
          SDFSEvent.claimInfoEvent(
              "Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
      tEvt.maxCt = this.maps.size();
      Iterator<ProgressiveFileByteArrayLongMap> iter = maps.iterator();
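      // Dispatch one ClaimShard task per map; each task records the hashes it
      // keeps in the new bloom filter and tallies removed records in csz.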
      while (iter.hasNext()) {
        tEvt.curCt++;
        ProgressiveFileByteArrayLongMap m = null;
        try {
          m = iter.next();
          executor.execute(new ClaimShard(m, bf, lbf, csz));
        } catch (Exception e) {
          tEvt.endEvent(
              "Unable to claim records for " + m + " because : [" + e.toString() + "]",
              SDFSEvent.ERROR);
          SDFSLogger.getLog().error("Unable to claim records for " + m, e);
          throw new IOException(e);
        }
      }
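      // All shards are queued; stop accepting work and wait for the scans to
      // finish before adjusting counters.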
      executor.shutdown();
      try {
        while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
          SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
        }
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
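      // csz now holds the total number of records the shards claimed, so
      // subtract it from the live-key counter.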
      this.kSz.getAndAdd(-1 * csz.get());
      tEvt.endEvent("removed [" + csz.get() + "] records");
      SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
      iter = maps.iterator();
      while (iter.hasNext()) {
        ProgressiveFileByteArrayLongMap m = null;
        try {
          m = iter.next();
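          // Only full, inactive maps are compaction candidates; active maps
          // may still be receiving writes.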
          if (m.isFull() && !m.isActive()) {
            double pf = (double) m.size() / (double) m.maxSize();
            // Compact maps that are under 40% full; Double.isNaN guards the
            // 0/0 case when the map is empty.
            if (pf < .4 || Double.isNaN(pf)) {
              // Migrate surviving entries into an active write map, then
              // delete the sparse map file.
              m.iterInit();
              KVPair p = m.nextKeyValue();
              while (p != null) {
                ProgressiveFileByteArrayLongMap _m = this.getWriteMap();
                try {
                  _m.put(p.key, p.value);
                } catch (HashtableFullException e) {
                  _m.setActive(false);
                  _m = this.createWriteMap();
                  _m.put(p.key, p.value);
                } finally {
                  this.activeWriteMaps.offer(_m);
                }
                p = m.nextKeyValue();
              }
              maps.remove(m);
              m.vanish();
              m = null;
            }
          }
        } catch (Exception e) {
          tEvt.endEvent(
              "Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
          SDFSLogger.getLog().error("to compact " + m, e);
          throw new IOException(e);
        }
      }
      return csz.get();
    } finally {
      l.lock();
      try {
        this.runningGC = false;
      } finally {
        l.unlock();
      }
      executor = null;
    }
  }