Example #1
  /**
   * Get the HRegionInfo from the cache or, failing that, from the hbase:meta table. Be careful:
   * this method does an RPC. Do not hold a lock or synchronize when you call it.
   *
   * @param regionName the name of the region to look up
   * @return the HRegionInfo for the region, or null if it cannot be found
   */
  @SuppressWarnings("deprecation")
  protected HRegionInfo getRegionInfo(final byte[] regionName) {
    String encodedName = HRegionInfo.encodeRegionName(regionName);
    RegionState regionState = getRegionState(encodedName);
    if (regionState != null) {
      return regionState.getRegion();
    }

    try {
      Pair<HRegionInfo, ServerName> p =
          MetaTableAccessor.getRegion(server.getConnection(), regionName);
      HRegionInfo hri = p == null ? null : p.getFirst();
      if (hri != null) {
        createRegionState(hri);
      }
      return hri;
    } catch (IOException e) {
      server.abort(
          "Aborting because error occoured while reading "
              + Bytes.toStringBinary(regionName)
              + " from hbase:meta",
          e);
      return null;
    }
  }
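
The method is a read-through lookup: consult the in-memory region states first, fall back to the blocking hbase:meta RPC only on a miss, and cache the result for later callers. A minimal generic sketch of that shape, with a hypothetical loadRemote() standing in for the MetaTableAccessor call and simplified error handling:

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public abstract class ReadThroughCache<K, V> {
  private final Map<K, V> cache = new ConcurrentHashMap<K, V>();

  /** Fast path first; only falls through to the blocking lookup on a miss. */
  public V get(K key) {
    V cached = cache.get(key);
    if (cached != null) {
      return cached; // no RPC and no lock needed
    }
    try {
      V loaded = loadRemote(key); // may block on a network round trip
      if (loaded != null) {
        cache.put(key, loaded); // remember it for the next caller
      }
      return loaded;
    } catch (IOException e) {
      // The HBase method aborts the server here; a general-purpose cache
      // would more likely surface the failure to the caller instead.
      throw new RuntimeException("Lookup failed for " + key, e);
    }
  }

  /** Hypothetical stand-in for the remote lookup. */
  protected abstract V loadRemote(K key) throws IOException;
}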
Example #2
  /**
   * Wait on region to clear regions-in-transition.
   *
   * <p>If the region isn't in transition, returns immediately. Otherwise, method blocks until the
   * region is out of transition.
   */
  public synchronized void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri)
      throws InterruptedException {
    if (!isRegionInTransition(hri)) return;

    while (!server.isStopped() && isRegionInTransition(hri)) {
      RegionState rs = getRegionState(hri);
      LOG.info("Waiting on " + rs + " to clear regions-in-transition");
      waitForUpdate(100);
    }

    if (server.isStopped()) {
      LOG.info("Giving up wait on region in " + "transition because stoppable.isStopped is set");
    }
  }
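
Because the method is synchronized, waitForUpdate(100) amounts to a timed Object.wait() on the monitor already held, which makes this the classic guarded-wait idiom: loop, re-check the condition after every wakeup, and have writers notify waiters. A self-contained sketch of the idiom (class and field names are illustrative, not HBase API):

public class TransitionWaiter {
  private boolean stopped = false;
  private boolean inTransition = true;

  /** Blocks until the condition clears or the service stops. */
  public synchronized void waitOnCondition() throws InterruptedException {
    // Re-check on every iteration: wait() can wake spuriously, and the
    // state may have changed again between the notify and this wakeup.
    while (!stopped && inTransition) {
      wait(100); // releases the monitor while blocked
    }
  }

  /** Called by whichever thread changes the state. */
  public synchronized void clearTransition() {
    inTransition = false;
    notifyAll(); // wake every waiter so each re-checks its condition
  }

  public synchronized void stop() {
    stopped = true;
    notifyAll();
  }
}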
Example #3
  @Before
  public void setup() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    conf = TEST_UTIL.getConfiguration();
    // Use a different ZK wrapper instance for each test.
    zkw =
        new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
    ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);
    LOG.debug(zkw.baseZNode + " created");
    ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.splitLogZNode) != -1);
    LOG.debug(zkw.splitLogZNode + " created");

    stopped = false;
    resetCounters();

    // By default, we let the test manage the error as before, so the server does not appear
    // dead from the master's point of view, only from the split-log manager's point of view.
    Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
    Mockito.when(master.getServerManager()).thenReturn(sm);

    to = 4000;
    conf.setInt("hbase.splitlog.manager.timeout", to);
    conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
    to = to + 4 * 100;
  }
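
Note the arithmetic in the final assignment: the manager's timeout is 4000 ms and its timeout monitor fires every 100 ms, so the test's own wait bound becomes to = 4000 + 4 * 100 = 4400 ms, presumably to give the monitor a few periods of slack to notice an expiry before the test gives up waiting.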
Example #4
 /**
  * A dead server's WALs have been split, so all the regions that used to be open on it can now
  * be safely assigned. Mark them assignable.
  */
 public synchronized void logSplit(final ServerName serverName) {
   for (Iterator<Map.Entry<String, ServerName>> it = lastAssignments.entrySet().iterator();
       it.hasNext(); ) {
     Map.Entry<String, ServerName> e = it.next();
     if (e.getValue().equals(serverName)) {
       it.remove();
     }
   }
   long now = System.currentTimeMillis();
   if (LOG.isDebugEnabled()) {
     LOG.debug("Adding to log splitting servers " + serverName);
   }
   processedServers.put(serverName, Long.valueOf(now));
   Configuration conf = server.getConfiguration();
   long obsoleteTime = conf.getLong(LOG_SPLIT_TIME, DEFAULT_LOG_SPLIT_TIME);
   // Doesn't have to be very accurate about the clean up time
   if (now > lastProcessedServerCleanTime + obsoleteTime) {
     lastProcessedServerCleanTime = now;
     long cutoff = now - obsoleteTime;
     for (Iterator<Map.Entry<ServerName, Long>> it = processedServers.entrySet().iterator();
         it.hasNext(); ) {
       Map.Entry<ServerName, Long> e = it.next();
       if (e.getValue().longValue() < cutoff) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Removed from log splitting servers " + e.getKey());
         }
         it.remove();
       }
     }
   }
 }
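
The second half of the method is a throttled-cleanup idiom: stamp each entry with its insertion time and, at most once per obsoleteTime window, sweep out entries older than the cutoff using Iterator.remove(), the only safe way to delete from a map while iterating it. A self-contained sketch of the same idiom (class and method names are illustrative):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class ExpiringRegistry<K> {
  private final Map<K, Long> processed = new HashMap<K, Long>();
  private final long obsoleteMillis;
  private long lastCleanTime = 0;

  public ExpiringRegistry(long obsoleteMillis) {
    this.obsoleteMillis = obsoleteMillis;
  }

  public synchronized void record(K key) {
    long now = System.currentTimeMillis();
    processed.put(key, Long.valueOf(now));
    // Throttle: only scan for stale entries once per obsoleteMillis window.
    if (now > lastCleanTime + obsoleteMillis) {
      lastCleanTime = now;
      long cutoff = now - obsoleteMillis;
      for (Iterator<Map.Entry<K, Long>> it = processed.entrySet().iterator(); it.hasNext(); ) {
        if (it.next().getValue().longValue() < cutoff) {
          it.remove(); // safe removal mid-iteration
        }
      }
    }
  }
}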
Example #5
 /**
  * This method does an RPC to hbase:meta. Do not call this method with a lock/synchronize held.
  *
  * @param hris the regions to check; any region without an entry in hbase:meta is cleaned up
  */
 private void cleanIfNoMetaEntry(Set<HRegionInfo> hris) {
   if (hris.isEmpty()) return;
   for (HRegionInfo hri : hris) {
     try {
        // This is an RPC to the meta table, and it is done while we are synchronized on
        // regionstates. No progress will be made if meta is not available at this time.
        // This is a cleanup task, so it is not critical.
       if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes())
           == null) {
         regionOffline(hri);
         FSUtils.deleteRegionDir(server.getConfiguration(), hri);
       }
     } catch (IOException e) {
       LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);
     }
   }
 }
Example #6
 /**
  * Return the region replication count for the given region's table. Defaults to 1 when the
  * table descriptor is unavailable.
  */
 private int getRegionReplication(HRegionInfo r) throws IOException {
   if (tableStateManager != null) {
     HTableDescriptor htd = server.getTableDescriptors().get(r.getTable());
     if (htd != null) {
       return htd.getRegionReplication();
     }
   }
   return 1;
 }
Example #7
  /**
   * This is an EXPENSIVE clone. Cloning, though, is the safest thing to do. We can't hand out
   * the original since it can change, and at least the load balancer wants to iterate this
   * exported list. We need to synchronize on regions since all access to this.servers is under
   * a lock on this.regions.
   *
   * @return A clone of current assignments by table.
   */
  protected Map<TableName, Map<ServerName, List<HRegionInfo>>> getAssignmentsByTable() {
    Map<TableName, Map<ServerName, List<HRegionInfo>>> result =
        new HashMap<TableName, Map<ServerName, List<HRegionInfo>>>();
    synchronized (this) {
      if (!server
          .getConfiguration()
          .getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false)) {
        Map<ServerName, List<HRegionInfo>> svrToRegions =
            new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
        for (Map.Entry<ServerName, Set<HRegionInfo>> e : serverHoldings.entrySet()) {
          svrToRegions.put(e.getKey(), new ArrayList<HRegionInfo>(e.getValue()));
        }
        result.put(TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME), svrToRegions);
      } else {
        for (Map.Entry<ServerName, Set<HRegionInfo>> e : serverHoldings.entrySet()) {
          for (HRegionInfo hri : e.getValue()) {
            if (hri.isMetaRegion()) continue;
            TableName tablename = hri.getTable();
            Map<ServerName, List<HRegionInfo>> svrToRegions = result.get(tablename);
            if (svrToRegions == null) {
              svrToRegions = new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
              result.put(tablename, svrToRegions);
            }
            List<HRegionInfo> regions = svrToRegions.get(e.getKey());
            if (regions == null) {
              regions = new ArrayList<HRegionInfo>();
              svrToRegions.put(e.getKey(), regions);
            }
            regions.add(hri);
          }
        }
      }
    }

    Map<ServerName, ServerLoad> onlineSvrs = serverManager.getOnlineServers();
    // Take care of servers w/o assignments, and remove servers in draining mode
    List<ServerName> drainingServers = this.serverManager.getDrainingServersList();
    for (Map<ServerName, List<HRegionInfo>> map : result.values()) {
      for (ServerName svr : onlineSvrs.keySet()) {
        if (!map.containsKey(svr)) {
          map.put(svr, new ArrayList<HRegionInfo>());
        }
      }
      map.keySet().removeAll(drainingServers);
    }
    return result;
  }
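
The essential move is the deep copy taken while holding the lock: copying only the outer map would still hand out the live region sets, which other threads keep mutating. A minimal sketch of the snapshot-under-lock pattern (generic names, not HBase API):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class Holdings<S, R> {
  private final Map<S, Set<R>> holdings = new HashMap<S, Set<R>>();

  public synchronized void assign(S server, R region) {
    Set<R> set = holdings.get(server);
    if (set == null) {
      set = new HashSet<R>();
      holdings.put(server, set);
    }
    set.add(region);
  }

  /**
   * Deep copy under the lock: both the outer map and every value collection
   * are copied, so callers can iterate the snapshot without holding the lock
   * while the live structure keeps changing.
   */
  public synchronized Map<S, List<R>> snapshot() {
    Map<S, List<R>> copy = new HashMap<S, List<R>>(holdings.size());
    for (Map.Entry<S, Set<R>> e : holdings.entrySet()) {
      copy.put(e.getKey(), new ArrayList<R>(e.getValue()));
    }
    return copy;
  }
}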
Example #8
 /**
  * Compute the average load across all region servers. Currently this uses a very naive
  * computation: it just counts the regions each server holds, ignoring request statistics.
  *
  * @return the average load
  */
 protected synchronized double getAverageLoad() {
   int numServers = 0, totalLoad = 0;
   for (Map.Entry<ServerName, Set<HRegionInfo>> e : serverHoldings.entrySet()) {
     Set<HRegionInfo> regions = e.getValue();
     ServerName serverName = e.getKey();
     int regionCount = regions.size();
     if (serverManager.isServerOnline(serverName)) {
       totalLoad += regionCount;
       numServers++;
     }
   }
   if (numServers > 1) {
     // The master region server holds only a couple regions.
     // Don't consider this server in calculating the average load
     // if there are other region servers to avoid possible confusion.
     Set<HRegionInfo> hris = serverHoldings.get(server.getServerName());
     if (hris != null) {
       totalLoad -= hris.size();
       numServers--;
     }
   }
   return numServers == 0 ? 0.0 : (double) totalLoad / (double) numServers;
 }
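
As a worked illustration of the master exclusion: with three online servers holding 10, 12, and 2 regions, where the 2-region server is the master, the method returns (10 + 12) / 2 = 11.0 rather than the naive 24 / 3 = 8.0.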