  /**
   * Update the assignment plan on all the region servers.
   *
   * @param plan the new favored nodes assignment plan to push out
   * @throws IOException if the current region assignment cannot be read
   */
  private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
    LOG.info("Start to update the region servers with the new assignment plan");
    // Get the region to region server map
    Map<ServerName, List<HRegionInfo>> currentAssignment =
        this.getRegionAssignmentSnapshot().getRegionServerToRegionMap();

    // Keep track of the succeeded and failed updates
    int succeededNum = 0;
    Map<ServerName, Exception> failedUpdateMap = new HashMap<ServerName, Exception>();

    for (Map.Entry<ServerName, List<HRegionInfo>> entry : currentAssignment.entrySet()) {
      List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos =
          new ArrayList<Pair<HRegionInfo, List<ServerName>>>();
      try {
        // Keep track of the favored updates for the current region server
        FavoredNodesPlan singleServerPlan = null;
        // Find out all the updates for the current region server
        for (HRegionInfo region : entry.getValue()) {
          List<ServerName> favoredServerList = plan.getFavoredNodes(region);
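          // Only push complete plans: FAVORED_NODES_NUM is the full favored
          // nodes set (primary, secondary and tertiary).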
          if (favoredServerList != null
              && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
            // Create the single server plan if necessary
            if (singleServerPlan == null) {
              singleServerPlan = new FavoredNodesPlan();
            }
            // Record this region's update in the single server plan
            singleServerPlan.updateAssignmentPlan(region, favoredServerList);
            regionUpdateInfos.add(
                new Pair<HRegionInfo, List<ServerName>>(region, favoredServerList));
          }
        }
        if (singleServerPlan != null) {
          // Update the current region server with its updated favored nodes
          BlockingInterface currentRegionServer =
              ((ClusterConnection) this.connection).getAdmin(entry.getKey());
          UpdateFavoredNodesRequest request =
              RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);

          UpdateFavoredNodesResponse updateFavoredNodesResponse =
              currentRegionServer.updateFavoredNodes(null, request);
          LOG.info(
              "Region server "
                  + ProtobufUtil.getServerInfo(null, currentRegionServer).getServerName()
                  + " has updated "
                  + updateFavoredNodesResponse.getResponse()
                  + " / "
                  + singleServerPlan.getAssignmentMap().size()
                  + " regions with the assignment plan");
          succeededNum++;
        }
      } catch (Exception e) {
        failedUpdateMap.put(entry.getKey(), e);
      }
    }
    // log the succeeded updates
    LOG.info("Updated " + succeededNum + " region servers with " + "the new assignment plan");

    // log the failed updates
    int failedNum = failedUpdateMap.size();
    if (failedNum != 0) {
      LOG.error(
          "Failed to update the following "
              + failedNum
              + " region servers with their corresponding favored nodes");
      for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) {
        LOG.error(
            "Failed to update "
                + entry.getKey().getHostAndPort()
                + " because of "
                + entry.getValue().getMessage());
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Options opt = new Options();
    opt.addOption("w", "write", false, "write the assignments to hbase:meta only");
    opt.addOption(
        "u", "update", false, "update the assignments to hbase:meta and RegionServers together");
    opt.addOption("n", "dry-run", false, "do not write assignments to META");
    opt.addOption("v", "verify", false, "verify current assignments against META");
    opt.addOption("p", "print", false, "print the current assignment plan in META");
    opt.addOption("h", "help", false, "print usage");
    opt.addOption("d", "verification-details", false, "print the details of verification report");

    opt.addOption("zk", true, "to set the zookeeper quorum");
    opt.addOption("fs", true, "to set HDFS");
    opt.addOption("hbase_root", true, "to set hbase_root directory");

    opt.addOption(
        "overwrite",
        false,
        "overwrite the favored nodes for a single region,"
            + "for example: -update -r regionName -f server1:port,server2:port,server3:port");
    opt.addOption("r", true, "The region name that needs to be updated");
    opt.addOption("f", true, "The new favored nodes");

    opt.addOption(
        "tables",
        true,
        "The list of table names splitted by ',' ;" + "For example: -tables: t1,t2,...,tn");
    opt.addOption("l", "locality", true, "enforce the maxium locality");
    opt.addOption("m", "min-move", true, "enforce minium assignment move");
    opt.addOption("diff", false, "calculate difference between assignment plans");
    opt.addOption("munkres", false, "use munkres to place secondaries and tertiaries");
    opt.addOption(
        "ld",
        "locality-dispersion",
        false,
        "print locality and dispersion information for the current plan");
    try {
      // Set the log4j levels
      Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
      Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.ERROR);
      Logger.getLogger("org.apache.hadoop.hbase.master.RegionPlacementMaintainer")
          .setLevel(Level.INFO);

      CommandLine cmd = new GnuParser().parse(opt, args);
      Configuration conf = HBaseConfiguration.create();

      boolean enforceMinAssignmentMove = true;
      boolean enforceLocality = true;
      boolean verificationDetails = false;

      // Read all the options
      if ((cmd.hasOption("l") && cmd.getOptionValue("l").equalsIgnoreCase("false"))
          || (cmd.hasOption("locality")
              && cmd.getOptionValue("locality").equalsIgnoreCase("false"))) {
        enforceLocality = false;
      }

      if ((cmd.hasOption("m") && cmd.getOptionValue("m").equalsIgnoreCase("false"))
          || (cmd.hasOption("min-move")
              && cmd.getOptionValue("min-move").equalsIgnoreCase("false"))) {
        enforceMinAssignmentMove = false;
      }

      if (cmd.hasOption("zk")) {
        conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue("zk"));
        LOG.info("Setting the zk quorum: " + conf.get(HConstants.ZOOKEEPER_QUORUM));
      }

      if (cmd.hasOption("fs")) {
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, cmd.getOptionValue("fs"));
        LOG.info("Setting the HDFS: " + conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
      }

      if (cmd.hasOption("hbase_root")) {
        conf.set(HConstants.HBASE_DIR, cmd.getOptionValue("hbase_root"));
        LOG.info("Setting the hbase root directory: " + conf.get(HConstants.HBASE_DIR));
      }

      // Create the region placement maintainer
      RegionPlacementMaintainer rp =
          new RegionPlacementMaintainer(conf, enforceLocality, enforceMinAssignmentMove);

      if (cmd.hasOption("d") || cmd.hasOption("verification-details")) {
        verificationDetails = true;
      }

      if (cmd.hasOption("tables")) {
        String tableNameListStr = cmd.getOptionValue("tables");
        String[] tableNames = StringUtils.split(tableNameListStr, ",");
        rp.setTargetTableName(tableNames);
      }

      if (cmd.hasOption("munkres")) {
        USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true;
      }

      // Read all the modes
      if (cmd.hasOption("v") || cmd.hasOption("verify")) {
        // Verify the region placement.
        rp.verifyRegionPlacement(verificationDetails);
      } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) {
        // Generate the assignment plan only, without updating hbase:meta or the region servers
        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
        printAssignmentPlan(plan);
      } else if (cmd.hasOption("w") || cmd.hasOption("write")) {
        // Generate the new assignment plan
        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
        // Print the new assignment plan
        printAssignmentPlan(plan);
        // Write the new assignment plan to hbase:meta
        rp.updateAssignmentPlanToMeta(plan);
      } else if (cmd.hasOption("u") || cmd.hasOption("update")) {
        // Generate the new assignment plan
        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
        // Print the new assignment plan
        printAssignmentPlan(plan);
        // Update the assignment plan in hbase:meta and on the region servers
        rp.updateAssignmentPlan(plan);
      } else if (cmd.hasOption("diff")) {
        FavoredNodesPlan newPlan = rp.getNewAssignmentPlan();
        Map<String, Map<String, Float>> locality =
            FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
        Map<TableName, Integer> movesPerTable = rp.getRegionsMovement(newPlan);
        rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan);
        System.out.println("Do you want to update the assignment plan? [y/n]");
        Scanner s = new Scanner(System.in);
        String input = s.nextLine().trim();
        if (input.equals("y")) {
          System.out.println("Updating assignment plan...");
          rp.updateAssignmentPlan(newPlan);
        }
        s.close();
      } else if (cmd.hasOption("ld")) {
        Map<String, Map<String, Float>> locality =
            FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
        rp.printLocalityAndDispersionForCurrentPlan(locality);
      } else if (cmd.hasOption("p") || cmd.hasOption("print")) {
        FavoredNodesPlan plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
        printAssignmentPlan(plan);
      } else if (cmd.hasOption("overwrite")) {
        if (!cmd.hasOption("f") || !cmd.hasOption("r")) {
          throw new IllegalArgumentException(
              "Please specify: -overwrite -r regionName -f server1:port,server2:port,server3:port");
        }

        String regionName = cmd.getOptionValue("r");
        String favoredNodesStr = cmd.getOptionValue("f");
        LOG.info(
            "Going to update the region "
                + regionName
                + " with the new favored nodes "
                + favoredNodesStr);
        List<ServerName> favoredNodes = null;
        HRegionInfo regionInfo =
            rp.getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap().get(regionName);
        if (regionInfo == null) {
          LOG.error("Cannot find the region " + regionName + " in hbase:meta");
        } else {
          try {
            favoredNodes = getFavoredNodeList(favoredNodesStr);
          } catch (IllegalArgumentException e) {
            LOG.error("Cannot parse the favored nodes because " + e);
          }
          // Skip the update if the favored nodes could not be parsed
          if (favoredNodes != null) {
            FavoredNodesPlan newPlan = new FavoredNodesPlan();
            newPlan.updateAssignmentPlan(regionInfo, favoredNodes);
            rp.updateAssignmentPlan(newPlan);
          }
        }
      } else {
        printHelp(opt);
      }
    } catch (ParseException e) {
      printHelp(opt);
    }
  }

  /**
   * Generate the assignment plan for an existing table.
   *
   * @param tableName the table to generate the plan for
   * @param assignmentSnapshot the snapshot of the current region assignment
   * @param regionLocalityMap the locality of each region on each server, keyed by encoded region
   *     name and hostname
   * @param plan the assignment plan to fill in
   * @param munkresForSecondaryAndTertiary if set to true, the assignment plan for the secondary
   *     and tertiary will be generated with the Munkres algorithm; otherwise it will be generated
   *     using placeSecondaryAndTertiaryWithRestrictions
   * @throws IOException if the cluster status cannot be fetched
   */
  private void genAssignmentPlan(
      TableName tableName,
      SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
      Map<String, Map<String, Float>> regionLocalityMap,
      FavoredNodesPlan plan,
      boolean munkresForSecondaryAndTertiary)
      throws IOException {
    // Get all the regions for the current table
    List<HRegionInfo> regions = assignmentSnapshot.getTableToRegionMap().get(tableName);
    int numRegions = regions.size();

    // Get the current assignment map
    Map<HRegionInfo, ServerName> currentAssignmentMap =
        assignmentSnapshot.getRegionToRegionServerMap();

    // Get all the region servers
    List<ServerName> servers = new ArrayList<ServerName>();
    try (Admin admin = this.connection.getAdmin()) {
      servers.addAll(admin.getClusterStatus().getServers());
    }

    LOG.info(
        "Start to generate assignment plan for "
            + numRegions
            + " regions from table "
            + tableName
            + " with "
            + servers.size()
            + " region servers");

    int slotsPerServer = (int) Math.ceil((float) numRegions / servers.size());
    int regionSlots = slotsPerServer * servers.size();
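
    // Worked example with hypothetical numbers: numRegions = 10 with 4 servers
    // gives slotsPerServer = ceil(10 / 4.0) = 3 and regionSlots = 3 * 4 = 12,
    // leaving 12 - 10 = 2 slots of slop unassigned.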

    // Compute the primary, secondary and tertiary costs for each region/server
    // pair. These costs are based only on node locality and rack locality, and
    // will be modified later.
    float[][] primaryCost = new float[numRegions][regionSlots];
    float[][] secondaryCost = new float[numRegions][regionSlots];
    float[][] tertiaryCost = new float[numRegions][regionSlots];

    if (this.enforceLocality && regionLocalityMap != null) {
      // Transform the locality mapping into a 2D array, assuming that any
      // unspecified locality value is 0.
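      // For example (hypothetical values), with slotsPerServer = 2, a region
      // with locality {serverA: 0.9, serverB: 0.4} and a third server with no
      // reported locality gets the row [0.9, 0.9, 0.4, 0.4, 0.0, 0.0].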
      float[][] localityPerServer = new float[numRegions][regionSlots];
      for (int i = 0; i < numRegions; i++) {
        Map<String, Float> serverLocalityMap =
            regionLocalityMap.get(regions.get(i).getEncodedName());
        if (serverLocalityMap == null) {
          continue;
        }
        for (int j = 0; j < servers.size(); j++) {
          String serverName = servers.get(j).getHostname();
          if (serverName == null) {
            continue;
          }
          // Locality is only reported for servers holding some blocks of a
          // region locally; if no locality was reported for this region/server
          // pair, leave it at the default of 0.
          Float locality = serverLocalityMap.get(serverName);
          if (locality == null) {
            continue;
          }
          // Every slot of this server gets the same locality for this region.
          for (int k = 0; k < slotsPerServer; k++) {
            localityPerServer[i][j * slotsPerServer + k] = locality.floatValue();
          }
        }
      }

      // Compute the total rack locality for each region in each rack. The total
      // rack locality is the sum of the localities of a region on all servers in
      // a rack.
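      // For example (hypothetical values), if rack R holds two servers with
      // localities 0.9 and 0.3 for a region, that region's total locality for
      // rack R is 1.2.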
      Map<String, Map<HRegionInfo, Float>> rackRegionLocality =
          new HashMap<String, Map<HRegionInfo, Float>>();
      for (int i = 0; i < numRegions; i++) {
        HRegionInfo region = regions.get(i);
        for (int j = 0; j < regionSlots; j += slotsPerServer) {
          String rack = rackManager.getRack(servers.get(j / slotsPerServer));
          Map<HRegionInfo, Float> rackLocality = rackRegionLocality.get(rack);
          if (rackLocality == null) {
            rackLocality = new HashMap<HRegionInfo, Float>();
            rackRegionLocality.put(rack, rackLocality);
          }
          Float localityObj = rackLocality.get(region);
          float locality = localityObj == null ? 0 : localityObj.floatValue();
          locality += localityPerServer[i][j];
          rackLocality.put(region, locality);
        }
      }
      for (int i = 0; i < numRegions; i++) {
        for (int j = 0; j < regionSlots; j++) {
          String rack = rackManager.getRack(servers.get(j / slotsPerServer));
          Float totalRackLocalityObj = rackRegionLocality.get(rack).get(regions.get(i));
          float totalRackLocality =
              totalRackLocalityObj == null ? 0 : totalRackLocalityObj.floatValue();

          // Primary cost aims to favor servers with high node locality and low
          // rack locality, so that secondaries and tertiaries can be chosen for
          // nodes with high rack locality. This might give primaries with
          // slightly less locality at first compared to a cost which only
          // considers the node locality, but should be better in the long run.
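          // For example (hypothetical values), localityPerServer[i][j] = 0.9
          // and totalRackLocality = 1.2 give 1 - (2 * 0.9 - 1.2) = 0.4.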
          primaryCost[i][j] = 1 - (2 * localityPerServer[i][j] - totalRackLocality);

          // Secondary cost aims to favor servers with high node locality and high
          // rack locality since the tertiary will be chosen from the same rack as
          // the secondary. This could be negative, but that is okay.
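          // With the same hypothetical values, 2 - (0.9 + 1.2) = -0.1.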
          secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality);

          // Tertiary cost is only concerned with the node locality. It will later
          // be restricted to only hosts on the same rack as the secondary.
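          // With the same hypothetical values, 1 - 0.9 = 0.1.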
          tertiaryCost[i][j] = 1 - localityPerServer[i][j];
        }
      }
    }

    if (this.enforceMinAssignmentMove && currentAssignmentMap != null) {
      // We want to minimize the number of regions which move as the result of a
      // new assignment. Therefore, slightly penalize any placement which is for
      // a host that is not currently serving the region.
      for (int i = 0; i < numRegions; i++) {
        for (int j = 0; j < servers.size(); j++) {
          ServerName currentAddress = currentAssignmentMap.get(regions.get(i));
          if (currentAddress != null && !currentAddress.equals(servers.get(j))) {
            for (int k = 0; k < slotsPerServer; k++) {
              primaryCost[i][j * slotsPerServer + k] += NOT_CURRENT_HOST_PENALTY;
            }
          }
        }
      }
    }

    // Artificially increase the cost of one slot on each server to evenly
    // distribute the slop. All of a server's slots otherwise carry the same
    // cost, so this effectively makes the last slot filled on each server more
    // expensive; without it there would be a few servers with too few regions
    // and many servers with the max number of regions.
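    // Continuing the hypothetical example above: 12 slots for 10 regions leave
    // 2 slots empty; penalizing one slot per server nudges the solver to leave
    // those empty slots on different servers.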
    for (int i = 0; i < numRegions; i++) {
      for (int j = 0; j < regionSlots; j += slotsPerServer) {
        primaryCost[i][j] += LAST_SLOT_COST_PENALTY;
        secondaryCost[i][j] += LAST_SLOT_COST_PENALTY;
        tertiaryCost[i][j] += LAST_SLOT_COST_PENALTY;
      }
    }

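    // RandomizedMatrix shuffles the rows and columns of a cost matrix and maps
    // the solution back through invertIndices; this breaks ties between
    // equal-cost slots randomly instead of by index order, so uniform costs do
    // not skew assignments toward the first servers in the list.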
    RandomizedMatrix randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
    primaryCost = randomizedMatrix.transform(primaryCost);
    int[] primaryAssignment = new MunkresAssignment(primaryCost).solve();
    primaryAssignment = randomizedMatrix.invertIndices(primaryAssignment);

    // Modify the secondary and tertiary costs for each region/server pair to
    // prevent a region from being assigned to the same rack for both primary
    // and either one of secondary or tertiary.
    for (int i = 0; i < numRegions; i++) {
      int slot = primaryAssignment[i];
      String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
      for (int k = 0; k < servers.size(); k++) {
        if (!rackManager.getRack(servers.get(k)).equals(rack)) {
          continue;
        }
        if (k == slot / slotsPerServer) {
          // Same node, do not place secondary or tertiary here ever.
          for (int m = 0; m < slotsPerServer; m++) {
            secondaryCost[i][k * slotsPerServer + m] = MAX_COST;
            tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
          }
        } else {
          // Same rack, do not place secondary or tertiary here if possible.
          for (int m = 0; m < slotsPerServer; m++) {
            secondaryCost[i][k * slotsPerServer + m] = AVOID_COST;
            tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
          }
        }
      }
    }
    if (munkresForSecondaryAndTertiary) {
      randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
      secondaryCost = randomizedMatrix.transform(secondaryCost);
      int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve();
      secondaryAssignment = randomizedMatrix.invertIndices(secondaryAssignment);

      // Modify the tertiary costs for each region/server pair to ensure that a
      // region is assigned to a tertiary server on the same rack as its secondary
      // server, but not the same server in that rack.
      for (int i = 0; i < numRegions; i++) {
        int slot = secondaryAssignment[i];
        String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
        for (int k = 0; k < servers.size(); k++) {
          if (k == slot / slotsPerServer) {
            // Same node, do not place tertiary here ever.
            for (int m = 0; m < slotsPerServer; m++) {
              tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
            }
          } else {
            if (rackManager.getRack(servers.get(k)).equals(rack)) {
              continue;
            }
            // Different rack, do not place tertiary here if possible.
            for (int m = 0; m < slotsPerServer; m++) {
              tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
            }
          }
        }
      }

      randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
      tertiaryCost = randomizedMatrix.transform(tertiaryCost);
      int[] tertiaryAssignment = new MunkresAssignment(tertiaryCost).solve();
      tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment);

      for (int i = 0; i < numRegions; i++) {
        List<ServerName> favoredServers =
            new ArrayList<ServerName>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
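        // Favored nodes are recorded as host:port with NON_STARTCODE, since a
        // region server's startcode changes on every restart.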
        ServerName s = servers.get(primaryAssignment[i] / slotsPerServer);
        favoredServers.add(
            ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));

        s = servers.get(secondaryAssignment[i] / slotsPerServer);
        favoredServers.add(
            ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));

        s = servers.get(tertiaryAssignment[i] / slotsPerServer);
        favoredServers.add(
            ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
        // Update the assignment plan
        plan.updateAssignmentPlan(regions.get(i), favoredServers);
      }
      LOG.info(
          "Generated the assignment plan for "
              + numRegions
              + " regions from table "
              + tableName
              + " with "
              + servers.size()
              + " region servers");
      LOG.info("Assignment plan for secondary and tertiary generated " + "using MunkresAssignment");
    } else {
      Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
      for (int i = 0; i < numRegions; i++) {
        primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer));
      }
      FavoredNodeAssignmentHelper favoredNodeHelper =
          new FavoredNodeAssignmentHelper(servers, conf);
      favoredNodeHelper.initialize();
      Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
          favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap);
      for (int i = 0; i < numRegions; i++) {
        List<ServerName> favoredServers =
            new ArrayList<ServerName>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
        HRegionInfo currentRegion = regions.get(i);
        ServerName s = primaryRSMap.get(currentRegion);
        favoredServers.add(
            ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));

        ServerName[] secondaryAndTertiary = secondaryAndTertiaryMap.get(currentRegion);
        s = secondaryAndTertiary[0];
        favoredServers.add(
            ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));

        s = secondaryAndTertiary[1];
        favoredServers.add(
            ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
        // Update the assignment plan
        plan.updateAssignmentPlan(regions.get(i), favoredServers);
      }
      LOG.info(
          "Generated the assignment plan for "
              + numRegions
              + " regions from table "
              + tableName
              + " with "
              + servers.size()
              + " region servers");
      LOG.info(
          "Assignment plan for secondary and tertiary generated "
              + "using placeSecondaryAndTertiaryWithRestrictions method");
    }
  }