Code example #1
    /**
     * Used when the cache is growing past its max size, to clone it in a single pass. Removes the
     * least recently used tables to bring the cache size below its max size by the overage amount.
     */
    public PMetaDataCache cloneMinusOverage(long overage) {
      assert (overage > 0);
      int nToRemove =
          Math.max(
              MIN_REMOVAL_SIZE,
              (int) Math.ceil((currentByteSize - maxByteSize) / ((double) currentByteSize / size()))
                  + 1);
      MinMaxPriorityQueue<PTableRef> toRemove = BUILDER.expectedSize(nToRemove).create();
      PMetaDataCache newCache = new PMetaDataCache(this.size(), this.maxByteSize, this.timeKeeper);

      long toRemoveBytes = 0;
      // Add to new cache, but track references to remove when done
      // to bring the cache at least the overage amount below its max size.
      for (PTableRef tableRef : this.tables.values()) {
        newCache.put(tableRef.getTable().getKey(), new PTableRef(tableRef));
        toRemove.add(tableRef);
        toRemoveBytes += tableRef.getEstSize();
        while (toRemoveBytes - toRemove.peekLast().getEstSize() >= overage) {
          PTableRef removedRef = toRemove.removeLast();
          toRemoveBytes -= removedRef.getEstSize();
        }
      }
      for (PTableRef toRemoveRef : toRemove) {
        newCache.remove(toRemoveRef.getTable().getKey());
      }
      return newCache;
    }
Code example #2
 /** Add a region from the head or tail to the List of regions to return. */
 private void addRegionPlan(
     final MinMaxPriorityQueue<RegionPlan> regionsToMove,
     final boolean fetchFromTail,
     final ServerName sn,
     List<RegionPlan> regionsToReturn) {
   RegionPlan rp = null;
   if (!fetchFromTail) rp = regionsToMove.remove();
   else rp = regionsToMove.removeLast();
   rp.setDestination(sn);
   regionsToReturn.add(rp);
 }
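The remove()/removeLast() pair in example #2 is the reason a MinMaxPriorityQueue is used here: the same queue can be drained from either end. Below is a minimal, self-contained sketch of that double-ended access; the integer elements and natural ordering are chosen purely for illustration and are not part of the original code.

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Comparator;

public class DoubleEndedFetchSketch {
  public static void main(String[] args) {
    // With ascending order, remove() yields the least element and removeLast() the greatest.
    MinMaxPriorityQueue<Integer> queue =
        MinMaxPriorityQueue.orderedBy(Comparator.<Integer>naturalOrder()).create();
    queue.add(30);
    queue.add(10);
    queue.add(20);

    System.out.println(queue.remove());     // 10 (head of the queue)
    System.out.println(queue.removeLast()); // 30 (tail of the queue)
    System.out.println(queue.remove());     // 20
  }
}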
Code example #3
  /**
   * Sorts a list of rows and retains the top n items
   *
   * @param items the collection of items to be sorted
   * @param n the number of items to be retained
   * @return Top n items that are sorted in the order specified when this instance is constructed.
   */
  public Iterable<T> toTopN(Iterable<T> items, int n) {
    if (n <= 0) {
      return ImmutableList.of();
    }

    MinMaxPriorityQueue<T> queue =
        MinMaxPriorityQueue.orderedBy(ordering).maximumSize(n).create(items);

    return new OrderedPriorityQueueItems<T>(queue);
  }
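A short usage sketch of the top-n idiom in example #3: maximumSize(n) makes the queue evict its greatest element (per the ordering) whenever the size limit is exceeded, so only the n least elements survive. The natural ordering and sample data below are illustrative assumptions, not part of the original class.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.collect.Ordering;

public class TopNSketch {
  public static void main(String[] args) {
    Ordering<Integer> ordering = Ordering.natural();
    // Keep only the 3 smallest values; anything larger is evicted as it arrives.
    MinMaxPriorityQueue<Integer> queue =
        MinMaxPriorityQueue.orderedBy(ordering)
            .maximumSize(3)
            .create(ImmutableList.of(5, 1, 4, 2, 8, 3));

    while (!queue.isEmpty()) {
      System.out.println(queue.removeFirst()); // prints 1, then 2, then 3
    }
  }
}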
Code example #4
  public static void assignLabels(
      List<Cluster<DocumentDataElement>> clusterData, DocumentDataSet dataSet) {
    for (Cluster<DocumentDataElement> cluster : clusterData) {
      MinMaxPriorityQueue<TermEntry> queue =
          MinMaxPriorityQueue.orderedBy(
                  new Comparator<TermEntry>() {

                    @Override
                    public int compare(TermEntry o1, TermEntry o2) {
                      return -Double.compare(o1.getScore(), o2.getScore());
                    }
                  })
              .maximumSize(5)
              .create();

      DocumentCollection localCollection = new DocumentCollection();
      for (DocumentDataElement elem : cluster.getDataElements()) {
        localCollection.addDocument(elem.getDocument());
      }

      DocumentVSMGenerator docToVsm = new TFIDF();
      DocumentDataSet clusterDataSet = docToVsm.createVSM(localCollection);
      // TODO remove this
      try {
        CSVDataSetExporter.export(clusterDataSet, new File("tmp/" + cluster.getLabel() + ".csv"));
      } catch (IOException e) {
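        // Debug-only CSV export (see the TODO above); export failures are ignored here.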
      }

      for (DocumentDataElement elem : clusterDataSet.elements()) {
        Document document = elem.getDocument();

        for (String term : document.getAllTerms()) {

          double termWeight = clusterDataSet.getTermWeight(document.getId(), term);
          queue.offer(new TermEntry(term, termWeight * getDocumentCount(term, cluster)));
        }
      }

      String label = "";
      StringBuilder labelBuilder = new StringBuilder();

      TreeSet<String> words = Sets.newTreeSet();

      // TODO this is a debug version of labels
      for (TermEntry termEntry : queue) {
        labelBuilder
            .append(termEntry.getTerm())
            .append(":")
            .append(String.format("%7.5f", termEntry.getScore()))
            .append(";")
            .append(getDocumentCount(termEntry.getTerm(), cluster))
            .append(",");

        words.add(termEntry.getTerm());
      }

      if (labelBuilder.length() > 0) {
        label = labelBuilder.substring(0, labelBuilder.length() - 1);
      }
      cluster.setLabel(words.toString());
    }
  }
Code example #5
 private static MinMaxPriorityQueue<Edge> createQueue(
     int edgeLimit, Ordering<? super Edge> ordering) {
   return MinMaxPriorityQueue.orderedBy(ordering).maximumSize(edgeLimit).create();
 }
Code example #6
 @Override
 public void addUndirectedEdge(int source, int target, Map<String, Object> attributes) {
   this.hasUndirectedEdges = true;
   edges.offer(new Edge(source, target, attributes, false));
 }
Code example #7
File: Guava.java  Project: polinadotio/javalibrary
 public void createQueue() {
   this.minMaxPriorityQueueEx = MinMaxPriorityQueue.maximumSize(200000).create();
 }
Code example #8
 protected Iterable<Integer> indexes() {
   Integer[] array = indexes.toArray(new Integer[indexes.size()]);
   Arrays.sort(array, 0, array.length, C);
   return Arrays.asList(array);
 }
Code example #9
 protected int removeBiggest() {
   return indexes.removeLast();
 }
Code example #10
 // always returns true
 protected boolean store(int index) {
   return indexes.add(index);
 }
Code example #11
 protected int size() {
   return indexes.size();
 }
Code example #12
 HashForRow(int topN, long threshold, BinaryCollector collector) {
   super(topN, threshold, collector);
   this.indexes = MinMaxPriorityQueue.orderedBy(C).create();
 }
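Examples #8 through #12 are fragments of a single class; the constructor in example #12 shows how the queue is created. The shared pattern is a queue ordered by a comparator C, from which removeBiggest() drops the current greatest index via removeLast(). A minimal sketch of that pattern, with natural ordering standing in for the comparator C (which is not shown in these fragments):

import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.collect.Ordering;

public class IndexBufferSketch {
  public static void main(String[] args) {
    // Natural ordering stands in for the comparator C used in the fragments above.
    MinMaxPriorityQueue<Integer> indexes =
        MinMaxPriorityQueue.orderedBy(Ordering.<Integer>natural()).create();

    indexes.add(5); // store(5); add always returns true
    indexes.add(1);
    indexes.add(9);

    System.out.println(indexes.removeLast()); // removeBiggest() -> 9
    System.out.println(indexes.size());       // 2 elements remain
  }
}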
Code example #13
  @Override
  public AggregationInfo chronology(String dir, String targetFile) throws IOException {
    log.info("Try to aggregate {} into file {}", dir, targetFile);
    Collection<Hessian2Input> inputStreams = new ArrayList<Hessian2Input>();
    Set<String> fileNameList = fileStorage.getFileNameList(dir);
    if (fileNameList.isEmpty()) {
      log.info("Nothing to aggregate. Directory {} is empty.", dir);
      new Hessian2Output(fileStorage.create(targetFile)).close();
      return new AggregationInfo(0, 0, 0);
    }
    for (String fileName : fileNameList) {
      try {
        InputStream in = fileStorage.open(fileName);
        inputStreams.add(new Hessian2Input(in));
      } catch (FileNotFoundException e) {
        log.warn(e.getMessage(), e);
      }
    }

    int count = 0;
    long minTime = 0;
    long maxTime = 0;

    Hessian2Output out = null;
    OutputStream os = null;
    try {
      if (fileStorage.delete(targetFile, false)) {
        log.warn("Target file {} did not deleted!", targetFile);
      }
      os = fileStorage.create(targetFile);
      out = new Hessian2Output(os);
      MinMaxPriorityQueue<StreamInfo> queue = MinMaxPriorityQueue.create();
      for (Hessian2Input inputStream : inputStreams) {
        LogEntry logEntry;
        try {
          logEntry = (LogEntry) inputStream.readObject();
        } catch (EOFException e) {
          continue;
        }
        queue.add(new StreamInfo(inputStream, logEntry));
      }

      while (!queue.isEmpty()) {
        StreamInfo<LogEntry> streamInfo = queue.removeFirst();
        out.writeObject(streamInfo.lastLogEntry);

        if (count == 0) {
          minTime = streamInfo.lastLogEntry.getTime();
          maxTime = streamInfo.lastLogEntry.getTime();
        } else {
          maxTime = streamInfo.lastLogEntry.getTime();
        }

        count++;
        LogEntry logEntry;
        try {
          logEntry = (LogEntry) streamInfo.stream.readObject();
        } catch (EOFException e) {
          continue;
        }
        streamInfo.lastLogEntry = logEntry;
        queue.add(streamInfo);
      }
    } finally {
      if (out != null) {
        out.close();
        os.close();
      }
    }

    return new AggregationInfo(minTime, maxTime, count);
  }
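Example #13 is essentially a k-way merge: each input stream contributes its most recently read entry, the queue always yields the stream holding the globally smallest one, and that stream is re-inserted after reading its next entry. A minimal sketch of the same pattern over in-memory lists; the Source holder, the integer elements, and the inputs are invented for illustration.

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;

public class KWayMergeSketch {
  // Hypothetical holder pairing an iterator with its most recently read value.
  static class Source {
    final Iterator<Integer> it;
    int current;

    Source(Iterator<Integer> it) {
      this.it = it;
      this.current = it.next(); // assumes a non-empty input
    }
  }

  public static void main(String[] args) {
    List<List<Integer>> sortedInputs =
        Arrays.asList(Arrays.asList(1, 4, 7), Arrays.asList(2, 5, 8), Arrays.asList(3, 6, 9));

    MinMaxPriorityQueue<Source> queue =
        MinMaxPriorityQueue.orderedBy(Comparator.comparingInt((Source s) -> s.current)).create();
    for (List<Integer> input : sortedInputs) {
      queue.add(new Source(input.iterator()));
    }

    // Repeatedly emit the smallest current value, then advance the source it came from.
    while (!queue.isEmpty()) {
      Source s = queue.removeFirst();
      System.out.print(s.current + " "); // prints 1 2 3 4 5 6 7 8 9
      if (s.it.hasNext()) {
        s.current = s.it.next();
        queue.add(s);
      }
    }
  }
}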
Code example #14
  private static class PMetaDataCache implements Cloneable {
    private static final int MIN_REMOVAL_SIZE = 3;
    private static final Comparator<PTableRef> COMPARATOR =
        new Comparator<PTableRef>() {
          @Override
          public int compare(PTableRef tableRef1, PTableRef tableRef2) {
            return Longs.compare(tableRef1.getLastAccessTime(), tableRef2.getLastAccessTime());
          }
        };
    private static final MinMaxPriorityQueue.Builder<PTableRef> BUILDER =
        MinMaxPriorityQueue.orderedBy(COMPARATOR);

    private long currentByteSize;
    private final long maxByteSize;
    private final int expectedCapacity;
    private final TimeKeeper timeKeeper;

    private final Map<PTableKey, PTableRef> tables;
    private final Map<PTableKey, PFunction> functions;

    private static Map<PTableKey, PTableRef> newMap(int expectedCapacity) {
      // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
      // safely across multiple threads (as the underlying collection is not thread safe).
      // Instead, we track access time and prune it based on the copy we've made.
      return Maps.newHashMapWithExpectedSize(expectedCapacity);
    }

    private static Map<PTableKey, PFunction> newFunctionMap(int expectedCapacity) {
      // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
      // safely across multiple threads (as the underlying collection is not thread safe).
      // Instead, we track access time and prune it based on the copy we've made.
      return Maps.newHashMapWithExpectedSize(expectedCapacity);
    }

    private static Map<PTableKey, PTableRef> cloneMap(
        Map<PTableKey, PTableRef> tables, int expectedCapacity) {
      Map<PTableKey, PTableRef> newTables = newMap(Math.max(tables.size(), expectedCapacity));
      // Copy value so that access time isn't changing anymore
      for (PTableRef tableAccess : tables.values()) {
        newTables.put(tableAccess.getTable().getKey(), new PTableRef(tableAccess));
      }
      return newTables;
    }

    private static Map<PTableKey, PFunction> cloneFunctionsMap(
        Map<PTableKey, PFunction> functions, int expectedCapacity) {
      Map<PTableKey, PFunction> newFunctions =
          newFunctionMap(Math.max(functions.size(), expectedCapacity));
      for (PFunction functionAccess : functions.values()) {
        newFunctions.put(functionAccess.getKey(), new PFunction(functionAccess));
      }
      return newFunctions;
    }

    private PMetaDataCache(PMetaDataCache toClone) {
      this.timeKeeper = toClone.timeKeeper;
      this.maxByteSize = toClone.maxByteSize;
      this.currentByteSize = toClone.currentByteSize;
      this.expectedCapacity = toClone.expectedCapacity;
      this.tables = cloneMap(toClone.tables, expectedCapacity);
      this.functions = cloneFunctionsMap(toClone.functions, expectedCapacity);
    }

    public PMetaDataCache(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
      this.currentByteSize = 0;
      this.maxByteSize = maxByteSize;
      this.expectedCapacity = initialCapacity;
      this.tables = newMap(this.expectedCapacity);
      this.functions = newFunctionMap(this.expectedCapacity);
      this.timeKeeper = timeKeeper;
    }

    public PTableRef get(PTableKey key) {
      PTableRef tableAccess = this.tables.get(key);
      if (tableAccess == null) {
        return null;
      }
      tableAccess.setLastAccessTime(timeKeeper.getCurrentTime());
      return tableAccess;
    }

    @Override
    public PMetaDataCache clone() {
      return new PMetaDataCache(this);
    }

    /**
     * Used when the cache is growing past its max size, to clone it in a single pass. Removes the
     * least recently used tables to bring the cache size below its max size by the overage amount.
     */
    public PMetaDataCache cloneMinusOverage(long overage) {
      assert (overage > 0);
      int nToRemove =
          Math.max(
              MIN_REMOVAL_SIZE,
              (int) Math.ceil((currentByteSize - maxByteSize) / ((double) currentByteSize / size()))
                  + 1);
      MinMaxPriorityQueue<PTableRef> toRemove = BUILDER.expectedSize(nToRemove).create();
      PMetaDataCache newCache = new PMetaDataCache(this.size(), this.maxByteSize, this.timeKeeper);

      long toRemoveBytes = 0;
      // Add to new cache, but track references to remove when done
      // to bring the cache at least the overage amount below its max size.
      for (PTableRef tableRef : this.tables.values()) {
        newCache.put(tableRef.getTable().getKey(), new PTableRef(tableRef));
        toRemove.add(tableRef);
        toRemoveBytes += tableRef.getEstSize();
        while (toRemoveBytes - toRemove.peekLast().getEstSize() >= overage) {
          PTableRef removedRef = toRemove.removeLast();
          toRemoveBytes -= removedRef.getEstSize();
        }
      }
      for (PTableRef toRemoveRef : toRemove) {
        newCache.remove(toRemoveRef.getTable().getKey());
      }
      return newCache;
    }

    private PTable put(PTableKey key, PTableRef ref) {
      currentByteSize += ref.getEstSize();
      PTableRef oldTableAccess = this.tables.put(key, ref);
      PTable oldTable = null;
      if (oldTableAccess != null) {
        currentByteSize -= oldTableAccess.getEstSize();
        oldTable = oldTableAccess.getTable();
      }
      return oldTable;
    }

    public PTable put(PTableKey key, PTable value, long resolvedTime) {
      return put(key, new PTableRef(value, timeKeeper.getCurrentTime(), resolvedTime));
    }

    public PTable putDuplicate(PTableKey key, PTable value, long resolvedTime) {
      return put(key, new PTableRef(value, timeKeeper.getCurrentTime(), 0, resolvedTime));
    }

    public long getAge(PTableRef ref) {
      return timeKeeper.getCurrentTime() - ref.getCreateTime();
    }

    public PTable remove(PTableKey key) {
      PTableRef value = this.tables.remove(key);
      if (value == null) {
        return null;
      }
      currentByteSize -= value.getEstSize();
      return value.getTable();
    }

    public Iterator<PTable> iterator() {
      final Iterator<PTableRef> iterator = this.tables.values().iterator();
      return new Iterator<PTable>() {

        @Override
        public boolean hasNext() {
          return iterator.hasNext();
        }

        @Override
        public PTable next() {
          return iterator.next().getTable();
        }

        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }
      };
    }

    public int size() {
      return this.tables.size();
    }

    public long getCurrentSize() {
      return this.currentByteSize;
    }

    public long getMaxSize() {
      return this.maxByteSize;
    }
  }
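The heart of cloneMinusOverage in example #14 is its candidate-selection loop: eviction candidates are kept in a queue ordered by last access time, and peekLast()/removeLast() discard the most recently used candidate whenever the remaining ones still cover the overage. A stripped-down sketch of just that step; the Entry class and the byte sizes are invented for illustration, and the iteration order of the result is unspecified.

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class OverageSketch {
  // Hypothetical entry: a last access time plus an estimated byte size.
  static class Entry {
    final long lastAccessTime;
    final long estSize;

    Entry(long lastAccessTime, long estSize) {
      this.lastAccessTime = lastAccessTime;
      this.estSize = estSize;
    }
  }

  static List<Entry> selectEvictionCandidates(List<Entry> entries, long overage) {
    MinMaxPriorityQueue<Entry> toRemove =
        MinMaxPriorityQueue.orderedBy(
                new Comparator<Entry>() {
                  @Override
                  public int compare(Entry a, Entry b) {
                    return Long.compare(a.lastAccessTime, b.lastAccessTime);
                  }
                })
            .create();
    long toRemoveBytes = 0;
    for (Entry e : entries) {
      toRemove.add(e);
      toRemoveBytes += e.estSize;
      // While the remaining candidates still cover the overage,
      // drop the most recently used one (the tail of the queue).
      while (toRemoveBytes - toRemove.peekLast().estSize >= overage) {
        toRemoveBytes -= toRemove.removeLast().estSize;
      }
    }
    return new ArrayList<>(toRemove);
  }

  public static void main(String[] args) {
    List<Entry> entries =
        Arrays.asList(new Entry(1, 100), new Entry(2, 200), new Entry(3, 300), new Entry(4, 400));
    // Prints the least recently used entries whose combined size covers a 250-byte overage.
    for (Entry e : selectEvictionCandidates(entries, 250)) {
      System.out.println(e.lastAccessTime + " -> " + e.estSize);
    }
  }
}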
Code example #15
  /**
   * Generate a global load balancing plan according to the specified map of server information to
   * the most loaded regions of each server.
   *
   * <p>The load balancing invariant is that all servers are within 1 region of the average number
   * of regions per server. If the average is an integer number, all servers will be balanced to the
   * average. Otherwise, all servers will have either floor(average) or ceiling(average) regions.
   *
   * <p>HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that we can fetch from
   * both ends of the queue. At the beginning, we check whether there is an empty region server just
   * discovered by the Master. If so, we alternately choose new / old regions from the head / tail of
   * regionsToMove, respectively. This alternation avoids clustering young regions on the newly
   * discovered region server. Otherwise, we choose new regions from head of regionsToMove.
   *
   * <p>Another improvement from HBASE-3609 is that we assign regions from regionsToMove to
   * underloaded servers in round-robin fashion. Previously one underloaded server would be filled
   * before we move onto the next underloaded server, leading to clustering of young regions.
   *
   * <p>Finally, we randomly shuffle underloaded servers so that they receive offloaded regions
   * relatively evenly across calls to balanceCluster().
   *
   * <p>The algorithm is currently implemented as such:
   *
   * <ol>
   *   <li>Determine the two valid numbers of regions each server should have,
   *       <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
   *   <li>Iterate down the most loaded servers, shedding regions from each so each server hosts
   *       exactly <b>MAX</b> regions. Stop once you reach a server that already has &lt;=
   *       <b>MAX</b> regions.
   *       <p>Order the regions to move from most recent to least.
   *   <li>Iterate down the least loaded servers, assigning regions so each server has exactly
   *       <b>MIN</b> regions. Stop once you reach a server that already has &gt;= <b>MIN</b>
   *       regions.
   *       <p>Regions being assigned to underloaded servers are those that were shed in the previous
   *       step. It is possible that there were not enough regions shed to fill each underloaded
   *       server to <b>MIN</b>. If so we end up with a number of regions required to do so,
   *       <b>neededRegions</b>.
   *       <p>It is also possible that we were able to fill each underloaded server but ended up
   *       with regions that were unassigned from overloaded servers and that still do not have an
   *       assignment.
   *       <p>If neither of these conditions hold (no regions needed to fill the underloaded
   *       servers, no regions leftover from overloaded servers), we are done and return. Otherwise
   *       we handle these cases below.
   *   <li>If <b>neededRegions</b> is non-zero (still have underloaded servers), we iterate the most
   *       loaded servers again, shedding a single region from each (this brings them from having
   *       <b>MAX</b> regions to having <b>MIN</b> regions).
   *   <li>We now definitely have more regions that need assignment, either from the previous step
   *       or from the original shedding from overloaded servers. Iterate the least loaded servers
   *       filling each to <b>MIN</b>.
   *   <li>If we still have more regions that need assignment, again iterate the least loaded
   *       servers, this time giving each one (filling them to <b>MAX</b>) until we run out.
   *   <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
   *       <p>In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed to end up with
   *       <b>MAX</b> regions at the end of the balancing. This ensures the minimal number of
   *       regions possible are moved.
   * </ol>
   *
   * TODO: We can at most reassign the number of regions away from a particular server to be how
   * many they report as most loaded. Should we just keep all assignment in memory? Any objections?
   * Does this mean we need HeapSize on HMaster? Or just careful monitor? (current thinking is we
   * will hold all assignments in memory)
   *
   * @param clusterMap Map of regionservers and their load/region information to a list of their
   *     most loaded regions
   * @return a list of regions to be moved, including source and destination, or null if cluster is
   *     already balanced
   */
  @Override
  public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    List<RegionPlan> regionsToReturn = balanceMasterRegions(clusterMap);
    if (regionsToReturn != null) {
      return regionsToReturn;
    }
    filterExcludedServers(clusterMap);
    boolean emptyRegionServerPresent = false;
    long startTime = System.currentTimeMillis();

    Collection<ServerName> backupMasters = getBackupMasters();
    ClusterLoadState cs =
        new ClusterLoadState(masterServerName, backupMasters, backupMasterWeight, clusterMap);
    // construct a Cluster object with clusterMap and rest of the
    // argument as defaults
    Cluster c =
        new Cluster(
            masterServerName,
            clusterMap,
            null,
            this.regionFinder,
            getBackupMasters(),
            tablesOnMaster,
            this.rackManager);
    if (!this.needsBalance(c)) return null;

    int numServers = cs.getNumServers();
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
    int numRegions = cs.getNumRegions();
    float average = cs.getLoadAverage();
    int max = (int) Math.ceil(average);
    int min = (int) average;

    // Used to check the balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam
        .append("Balance parameter: numRegions=")
        .append(numRegions)
        .append(", numServers=")
        .append(numServers)
        .append(", numBackupMasters=")
        .append(cs.getNumBackupMasters())
        .append(", backupMasterWeight=")
        .append(backupMasterWeight)
        .append(", max=")
        .append(max)
        .append(", min=")
        .append(min);
    LOG.debug(strBalanceParam.toString());

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    MinMaxPriorityQueue<RegionPlan> regionsToMove =
        MinMaxPriorityQueue.orderedBy(rpComparator).create();
    regionsToReturn = new ArrayList<RegionPlan>();

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    // flag used to fetch regions from head and tail of list, alternately
    boolean fetchFromTail = false;
    Map<ServerName, BalanceInfo> serverBalanceInfo = new TreeMap<ServerName, BalanceInfo>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server :
        serversByLoad.descendingMap().entrySet()) {
      ServerAndLoad sal = server.getKey();
      int load = sal.getLoad();
      if (load <= max) {
        serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0));
        break;
      }
      serversOverloaded++;
      List<HRegionInfo> regions = server.getValue();
      int w = 1; // Normal region server has weight 1
      if (backupMasters != null && backupMasters.contains(sal.getServerName())) {
        w = backupMasterWeight; // Backup master has heavier weight
      }
      int numToOffload = Math.min((load - max) / w, regions.size());
      // account for the out-of-band regions which were assigned to this server
      // after some other region server crashed
      Collections.sort(regions, riComparator);
      int numTaken = 0;
      for (int i = 0; i <= numToOffload; ) {
        HRegionInfo hri = regions.get(i); // fetch from head
        if (fetchFromTail) {
          hri = regions.get(regions.size() - 1 - i);
        }
        i++;
        // Don't rebalance special regions.
        if (shouldBeOnMaster(hri) && masterServerName.equals(sal.getServerName())) continue;
        regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null));
        numTaken++;
        if (numTaken >= numToOffload) break;
        // fetch in alternate order if there is new region server
        if (emptyRegionServerPresent) {
          fetchFromTail = !fetchFromTail;
        }
      }
      serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(numToOffload, (-1) * numTaken));
    }
    int totalNumMoved = regionsToMove.size();

    // Walk down least loaded, filling each to the min
    int neededRegions = 0; // number of regions needed to bring all up to min
    fetchFromTail = false;

    Map<ServerName, Integer> underloadedServers = new HashMap<ServerName, Integer>();
    int maxToTake = numRegions - min;
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
      if (maxToTake == 0) break; // no more to take
      int load = server.getKey().getLoad();
      if (load >= min && load > 0) {
        continue; // look for other servers which haven't reached min
      }
      int w = 1; // Normal region server has weight 1
      if (backupMasters != null && backupMasters.contains(server.getKey().getServerName())) {
        w = backupMasterWeight; // Backup master has heavier weight
      }
      int regionsToPut = (min - load) / w;
      if (regionsToPut == 0) {
        regionsToPut = 1;
      }
      maxToTake -= regionsToPut;
      underloadedServers.put(server.getKey().getServerName(), regionsToPut);
    }
    // number of servers that get new regions
    int serversUnderloaded = underloadedServers.size();
    int incr = 1;
    List<ServerName> sns =
        Arrays.asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded]));
    Collections.shuffle(sns, RANDOM);
    while (regionsToMove.size() > 0) {
      int cnt = 0;
      int i = incr > 0 ? 0 : underloadedServers.size() - 1;
      for (; i >= 0 && i < underloadedServers.size(); i += incr) {
        if (regionsToMove.isEmpty()) break;
        ServerName si = sns.get(i);
        int numToTake = underloadedServers.get(si);
        if (numToTake == 0) continue;

        addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn);
        if (emptyRegionServerPresent) {
          fetchFromTail = !fetchFromTail;
        }

        underloadedServers.put(si, numToTake - 1);
        cnt++;
        BalanceInfo bi = serverBalanceInfo.get(si);
        if (bi == null) {
          bi = new BalanceInfo(0, 0);
          serverBalanceInfo.put(si, bi);
        }
        bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1);
      }
      if (cnt == 0) break;
      // iterates underloadedServers in the other direction
      incr = -incr;
    }
    for (Integer i : underloadedServers.values()) {
      // If we still want to take some, increment needed
      neededRegions += i;
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionsToMove.isEmpty()) {
      long endTime = System.currentTimeMillis();
      LOG.info(
          "Calculated a load balance in "
              + (endTime - startTime)
              + "ms. "
              + "Moving "
              + totalNumMoved
              + " regions off of "
              + serversOverloaded
              + " overloaded servers onto "
              + serversUnderloaded
              + " less loaded servers");
      return regionsToReturn;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
      // Walk down most loaded, grabbing one from each until we get enough
      for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server :
          serversByLoad.descendingMap().entrySet()) {
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
        if (idx >= server.getValue().size()) break;
        HRegionInfo region = server.getValue().get(idx);
        if (region.isMetaRegion()) continue; // Don't move meta regions.
        regionsToMove.add(new RegionPlan(region, server.getKey().getServerName(), null));
        totalNumMoved++;
        if (--neededRegions == 0) {
          // No more regions needed, done shedding
          break;
        }
      }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
      int regionCount = server.getKey().getLoad();
      if (regionCount >= min) break;
      BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
      if (balanceInfo != null) {
        regionCount += balanceInfo.getNumRegionsAdded();
      }
      if (regionCount >= min) {
        continue;
      }
      int numToTake = min - regionCount;
      int numTaken = 0;
      while (numTaken < numToTake && 0 < regionsToMove.size()) {
        addRegionPlan(
            regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
        numTaken++;
        if (emptyRegionServerPresent) {
          fetchFromTail = !fetchFromTail;
        }
      }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (0 < regionsToMove.size()) {
      for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        if (balanceInfo != null) {
          regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= max) {
          break;
        }
        addRegionPlan(
            regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
        if (emptyRegionServerPresent) {
          fetchFromTail = !fetchFromTail;
        }
        if (regionsToMove.isEmpty()) {
          break;
        }
      }
    }

    long endTime = System.currentTimeMillis();

    if (!regionsToMove.isEmpty() || neededRegions != 0) {
      // Emit data so can diagnose how balancer went astray.
      LOG.warn(
          "regionsToMove="
              + totalNumMoved
              + ", numServers="
              + numServers
              + ", serversOverloaded="
              + serversOverloaded
              + ", serversUnderloaded="
              + serversUnderloaded);
      StringBuilder sb = new StringBuilder();
      for (Map.Entry<ServerName, List<HRegionInfo>> e : clusterMap.entrySet()) {
        if (sb.length() > 0) sb.append(", ");
        sb.append(e.getKey().toString());
        sb.append(" ");
        sb.append(e.getValue().size());
      }
      LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info(
        "Done. Calculated a load balance in "
            + (endTime - startTime)
            + "ms. "
            + "Moving "
            + totalNumMoved
            + " regions off of "
            + serversOverloaded
            + " overloaded servers onto "
            + serversUnderloaded
            + " less loaded servers");

    return regionsToReturn;
  }