@Override
  public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
    HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER);
    assert master != null : "No Master in context!";

    Configuration conf = master.getConfiguration();
    HBaseAdmin admin = new HBaseAdmin(conf);

    Map<String, Integer> frags = getFragmentationInfo(master, conf);

    ServerName rootLocation = getRootLocationOrNull(master);
    ServerName metaLocation = master.getCatalogTracker().getMetaLocation();
    List<ServerName> servers = master.getServerManager().getOnlineServersList();
    Set<ServerName> deadServers = master.getServerManager().getDeadServers();

    response.setContentType("text/html");
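    // Build the status template from the cluster state collected above;
    // "filter" and "format" are optional request parameters.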
    MasterStatusTmpl tmpl =
        new MasterStatusTmpl()
            .setFrags(frags)
            .setShowAppendWarning(shouldShowAppendWarning(conf))
            .setRootLocation(rootLocation)
            .setMetaLocation(metaLocation)
            .setServers(servers)
            .setDeadServers(deadServers);
    if (request.getParameter("filter") != null) tmpl.setFilter(request.getParameter("filter"));
    if (request.getParameter("format") != null) tmpl.setFormat(request.getParameter("format"));
    tmpl.render(response.getWriter(), master, admin);
  }
  @Test
  public void testRPCException() throws Exception {
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.MASTER_PORT, "0");

    HMaster hm = new HMaster(conf);

    HServerAddress hma = hm.getMasterAddress();
    try {
      HMasterInterface inf =
          (HMasterInterface)
              HBaseRPC.getProxy(
                  HMasterInterface.class,
                  HBaseRPCProtocolVersion.versionID,
                  hma.getInetSocketAddress(),
                  conf,
                  100);
      inf.isMasterRunning();
      fail();
    } catch (RemoteException ex) {
      assertTrue(
          ex.getMessage()
              .startsWith(
                  "org.apache.hadoop.hbase.ipc.ServerNotRunningException: Server is not running yet"));
    } catch (Throwable t) {
      fail("Unexpected throwable: " + t);
    }
  }
Example #3
  @Test(timeout = 300000)
  public void testClusterRequests() throws Exception {

    // sending a fake request to the master to see how the metric value changes
    RegionServerStatusProtos.RegionServerReportRequest.Builder request =
        RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
    ServerName serverName = cluster.getMaster(0).getServerName();
    request.setServer(ProtobufUtil.toServerName(serverName));

    MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
    ClusterStatusProtos.ServerLoad sl =
        ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(10000).build();
    masterSource.init();
    request.setLoad(sl);
    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 10000, masterSource);

    sl = ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(15000).build();
    request.setLoad(sl);
    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 15000, masterSource);

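    // Reporting the same total again should leave the counter unchanged.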
    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
    master.stopMaster();
  }
  @Test(timeout = 30000)
  public void testInfo() {
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master);
    assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0);
    assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0);
    assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0);
    assertEquals(master.getClusterId(), info.getClusterId());
    assertEquals(master.getMasterActiveTime(), info.getActiveTime());
    assertEquals(master.getMasterStartTime(), info.getStartTime());
    assertEquals(master.getMasterCoprocessors().length, info.getCoprocessors().length);
    assertEquals(
        master.getServerManager().getOnlineServersList().size(), info.getNumRegionServers());
    assertEquals(5, info.getNumRegionServers());

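    // The wrapper should expose the same ZooKeeper quorum the mini cluster was started with.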
    String zkServers = info.getZookeeperQuorum();
    assertEquals(zkServers.split(",").length, TEST_UTIL.getZkCluster().getZooKeeperServerNum());

    final int index = 3;
    LOG.info("Stopping " + TEST_UTIL.getMiniHBaseCluster().getRegionServer(index));
    TEST_UTIL.getMiniHBaseCluster().stopRegionServer(index, false);
    TEST_UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
    // We stopped the regionserver, but it could take a while for the master to notice, so hang
    // here until it does... then move forward to see if the metrics wrapper notices.
    while (TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size()
        != 4) {
      Threads.sleep(10);
    }
    assertEquals(4, info.getNumRegionServers());
    assertEquals(1, info.getNumDeadRegionServers());
    assertEquals(1, info.getNumWALFiles());
  }
Example #5
 @Test
 public void testFsUriSetProperly() throws Exception {
   HMaster master = UTIL.getMiniHBaseCluster().getMaster();
   MasterFileSystem fs = master.getMasterFileSystem();
   Path masterRoot = FSUtils.getRootDir(fs.conf);
   Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf());
   // make sure the fs and the found root dir have the same scheme
   LOG.debug("from fs uri:" + FileSystem.getDefaultUri(fs.getFileSystem().getConf()));
   LOG.debug("from configuration uri:" + FileSystem.getDefaultUri(fs.conf));
   // make sure the set uri matches by forcing it.
   assertEquals(masterRoot, rootDir);
 }
Example #6
 @Test
 public void testMoveRegionWhenNotInitialized() {
   MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
   HMaster m = cluster.getMaster();
   try {
     m.setInitialized(false); // fake it, set back later
     HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
     m.move(meta.getEncodedNameAsBytes(), null);
     fail("Region should not be moved since master is not initialized");
   } catch (IOException ioe) {
     assertTrue(ioe instanceof PleaseHoldException);
   } finally {
     m.setInitialized(true);
   }
 }
 private ServerName getRootLocationOrNull(HMaster master) {
   try {
     return master.getCatalogTracker().getRootLocation();
   } catch (InterruptedException e) {
     LOG.warn("Unable to get root location", e);
     return null;
   }
 }
 @Override
 public void run() {
   super.run();
   if (this.zkcluster != null) {
     try {
       this.zkcluster.shutdown();
     } catch (IOException e) {
       e.printStackTrace();
     }
   }
 }
Example #9
  @Test
  public void testRemoveStaleRecoveringRegionsDuringMasterInitialization() throws Exception {
    // this test is for when distributed log replay is enabled
    if (!UTIL.getConfiguration().getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)) return;

    LOG.info("Starting testRemoveStaleRecoveringRegionsDuringMasterInitialization");
    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    MasterFileSystem fs = master.getMasterFileSystem();

    String failedRegion = "failedRegion1";
    String staleRegion = "staleRegion";
    ServerName inRecoveryServerName = ServerName.valueOf("mgr,1,1");
    ServerName previouslyFailedServerName = ServerName.valueOf("previous,1,1");
    String walPath =
        "/hbase/data/.logs/" + inRecoveryServerName.getServerName() + "-splitting/test";
    // Create a ZKW to use in the test
    ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
    zkw.getRecoverableZooKeeper()
        .create(
            ZKSplitLog.getEncodedNodeName(zkw, walPath),
            new SplitLogTask.Owned(inRecoveryServerName, fs.getLogRecoveryMode()).toByteArray(),
            Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT);
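    // Create one recovering-region znode with no owning server (stale) and one that is
    // still owned by the server in recovery.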
    String staleRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, staleRegion);
    ZKUtil.createWithParents(zkw, staleRegionPath);
    String inRecoveringRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, failedRegion);
    inRecoveringRegionPath =
        ZKUtil.joinZNode(inRecoveringRegionPath, inRecoveryServerName.getServerName());
    ZKUtil.createWithParents(zkw, inRecoveringRegionPath);
    Set<ServerName> servers = new HashSet<ServerName>();
    servers.add(previouslyFailedServerName);
    fs.removeStaleRecoveringRegionsFromZK(servers);

    // verification
    assertFalse(ZKUtil.checkExists(zkw, staleRegionPath) != -1);
    assertTrue(ZKUtil.checkExists(zkw, inRecoveringRegionPath) != -1);

    ZKUtil.deleteChildrenRecursively(zkw, zkw.recoveringRegionsZNode);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.splitLogZNode);
    zkw.close();
  }
  @Test
  public void testRPCException() throws Exception {
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.MASTER_PORT, "0");

    HMaster hm = new HMaster(conf);

    ServerName sm = hm.getServerName();
    InetSocketAddress isa = new InetSocketAddress(sm.getHostname(), sm.getPort());
    int i = 0;
    // retry the RPC a few times; we have seen SocketTimeoutExceptions if we
    // try to connect too soon. Retry on SocketTimeoutException.
    while (i < 20) {
      try {
        MasterMonitorProtocol inf =
            (MasterMonitorProtocol)
                HBaseClientRPC.getProxy(MasterMonitorProtocol.class, isa, conf, 100 * 10);
        inf.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance());
        fail();
      } catch (ServiceException ex) {
        IOException ie = ProtobufUtil.getRemoteException(ex);
        if (!(ie instanceof SocketTimeoutException)) {
          if (ie.getMessage()
              .startsWith(
                  "org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet")) {
            return;
          }
        } else {
          System.err.println("Got SocketTimeoutException. Will retry. ");
        }
      } catch (Throwable t) {
        fail("Unexpected throwable: " + t);
      }
      Thread.sleep(100);
      i++;
    }
    fail();
  }
Example #11
  @Test
  public void testMoveThrowsPleaseHoldException() throws IOException {
    TableName tableName = TableName.valueOf("testMoveThrowsPleaseHoldException");
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor("value");
    htd.addFamily(hcd);

    admin.createTable(htd, null);
    try {
      List<HRegionInfo> tableRegions = admin.getTableRegions(tableName);

      master.setInitialized(false); // fake it, set back later
      admin.move(tableRegions.get(0).getEncodedNameAsBytes(), null);
      fail("Region should not be moved since master is not initialized");
    } catch (IOException ioe) {
      assertTrue(StringUtils.stringifyException(ioe).contains("PleaseHoldException"));
    } finally {
      master.setInitialized(true);
      TEST_UTIL.deleteTable(tableName);
    }
  }
Example #12
  private int startMaster() {
    Configuration conf = getConf();
    try {
      // If 'local', defer to LocalHBaseCluster instance.  Starts master
      // and regionserver both in the one JVM.
      if (LocalHBaseCluster.isLocal(conf)) {
        final MiniZooKeeperCluster zooKeeperCluster = new MiniZooKeeperCluster();
        File zkDataPath = new File(conf.get(HConstants.ZOOKEEPER_DATA_DIR));
        int zkClientPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 0);
        if (zkClientPort == 0) {
          throw new IOException("No config value for " + HConstants.ZOOKEEPER_CLIENT_PORT);
        }
        zooKeeperCluster.setDefaultClientPort(zkClientPort);

        // login the zookeeper server principal (if using security)
        ZKUtil.loginServer(
            conf,
            "hbase.zookeeper.server.keytab.file",
            "hbase.zookeeper.server.kerberos.principal",
            null);

        int clientPort = zooKeeperCluster.startup(zkDataPath);
        if (clientPort != zkClientPort) {
          String errorMsg =
              "Could not start ZK at requested port of "
                  + zkClientPort
                  + ".  ZK was started at port: "
                  + clientPort
                  + ".  Aborting as clients (e.g. shell) will not be able to find "
                  + "this ZK quorum.";
          System.err.println(errorMsg);
          throw new IOException(errorMsg);
        }
        conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
        // Need to have the zk cluster shutdown when master is shutdown.
        // Run a subclass that does the zk cluster shutdown on its way out.
        LocalHBaseCluster cluster =
            new LocalHBaseCluster(conf, 1, 1, LocalHMaster.class, HRegionServer.class);
        ((LocalHMaster) cluster.getMaster(0)).setZKCluster(zooKeeperCluster);
        cluster.startup();
        waitOnMasterThreads(cluster);
      } else {
        HMaster master = HMaster.constructMaster(masterClass, conf);
        if (master.isStopped()) {
          LOG.info("Won't bring the Master up as a shutdown is requested");
          return -1;
        }
        master.start();
        master.join();
        if (master.isAborted()) throw new RuntimeException("HMaster Aborted");
      }
    } catch (Throwable t) {
      LOG.error("Failed to start master", t);
      return -1;
    }
    return 0;
  }
Example #13
  @Test
  public void testDefaultMasterMetrics() throws Exception {
    MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
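    // The mini cluster is expected to be running two region servers and no dead servers.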
    metricsHelper.assertGauge("numRegionServers", 2, masterSource);
    metricsHelper.assertGauge("averageLoad", 2, masterSource);
    metricsHelper.assertGauge("numDeadRegionServers", 0, masterSource);

    metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource);
    metricsHelper.assertGauge("masterActiveTime", master.getMasterActiveTime(), masterSource);

    metricsHelper.assertTag("isActiveMaster", "true", masterSource);
    metricsHelper.assertTag("serverName", master.getServerName().toString(), masterSource);
    metricsHelper.assertTag("clusterId", master.getClusterId(), masterSource);
    metricsHelper.assertTag("zookeeperQuorum", master.getZooKeeper().getQuorum(), masterSource);
  }
Example #14
  @Test
  @SuppressWarnings("deprecation")
  public void testMasterOpsWhileSplitting() throws Exception {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();

    try (Table ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
      assertTrue(m.getTableStateManager().isTableState(TABLENAME, TableState.State.ENABLED));
      TEST_UTIL.loadTable(ht, FAMILYNAME, false);
    }

    List<Pair<HRegionInfo, ServerName>> tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME);
    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
    assertEquals(1, tableRegions.size());
    assertArrayEquals(HConstants.EMPTY_START_ROW, tableRegions.get(0).getFirst().getStartKey());
    assertArrayEquals(HConstants.EMPTY_END_ROW, tableRegions.get(0).getFirst().getEndKey());

    // Now trigger a split and stop when the split is in progress
    LOG.info("Splitting table");
    TEST_UTIL.getAdmin().split(TABLENAME);
    LOG.info("Waiting for split result to be about to open");
    RegionStates regionStates = m.getAssignmentManager().getRegionStates();
    while (regionStates.getRegionsOfTable(TABLENAME).size() <= 1) {
      Thread.sleep(100);
    }
    LOG.info("Making sure we can call getTableRegions while opening");
    tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME, false);

    LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
    // We have three regions because one is split-in-progress
    assertEquals(3, tableRegions.size());
    LOG.info("Making sure we can call getTableRegionClosest while opening");
    Pair<HRegionInfo, ServerName> pair = m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
    LOG.info("Result is: " + pair);
    Pair<HRegionInfo, ServerName> tableRegionFromName =
        MetaTableAccessor.getRegion(m.getConnection(), pair.getFirst().getRegionName());
    assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
  }
Example #15
  /** This tests retaining assignments on a cluster restart */
  @Test(timeout = 300000)
  public void testRetainAssignmentOnRestart() throws Exception {
    UTIL.startMiniCluster(2);
    while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
      Threads.sleep(1);
    }
    // Turn off balancer
    UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
    LOG.info("\n\nCreating tables");
    for (byte[] TABLE : TABLES) {
      UTIL.createTable(TABLE, FAMILY);
    }
    for (byte[] TABLE : TABLES) {
      UTIL.waitTableEnabled(TABLE);
    }

    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    UTIL.waitUntilNoRegionsInTransition(120000);

    // We don't have to use SnapshotOfRegionAssignmentFromMeta.
    // We use it here because AM used to use it to load all user region placements
    SnapshotOfRegionAssignmentFromMeta snapshot =
        new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection());
    snapshot.initialize();
    Map<HRegionInfo, ServerName> regionToRegionServerMap = snapshot.getRegionToRegionServerMap();

    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> threads = cluster.getLiveRegionServerThreads();
    assertEquals(2, threads.size());
    int[] rsPorts = new int[3];
    for (int i = 0; i < 2; i++) {
      rsPorts[i] = threads.get(i).getRegionServer().getServerName().getPort();
    }
    rsPorts[2] = cluster.getMaster().getServerName().getPort();
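    // Every region from the snapshot should be hosted on one of the known ports
    // (the two region servers or the master).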
    for (ServerName serverName : regionToRegionServerMap.values()) {
      boolean found = false; // Test only, no need to optimize
      for (int k = 0; k < 3 && !found; k++) {
        found = serverName.getPort() == rsPorts[k];
      }
      assertTrue(found);
    }

    LOG.info("\n\nShutting down HBase cluster");
    cluster.shutdown();
    cluster.waitUntilShutDown();

    LOG.info("\n\nSleeping a bit");
    Thread.sleep(2000);

    LOG.info("\n\nStarting cluster the second time with the same ports");
    try {
      cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 4);
      master = cluster.startMaster().getMaster();
      for (int i = 0; i < 3; i++) {
        cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, rsPorts[i]);
        cluster.startRegionServer();
      }
    } finally {
      // Reset region server port so as not to conflict with other tests
      cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, 0);
      cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 2);
    }

    // Make sure live regionservers are on the same host/port
    List<ServerName> localServers = master.getServerManager().getOnlineServersList();
    assertEquals(4, localServers.size());
    for (int i = 0; i < 3; i++) {
      boolean found = false;
      for (ServerName serverName : localServers) {
        if (serverName.getPort() == rsPorts[i]) {
          found = true;
          break;
        }
      }
      assertTrue(found);
    }

    // Wait till master is initialized and all regions are assigned
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    int expectedRegions = regionToRegionServerMap.size() + 1;
    while (!master.isInitialized()
        || regionStates.getRegionAssignments().size() != expectedRegions) {
      Threads.sleep(100);
    }

    snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection());
    snapshot.initialize();
    Map<HRegionInfo, ServerName> newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap();
    assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size());
    for (Map.Entry<HRegionInfo, ServerName> entry : newRegionToRegionServerMap.entrySet()) {
      if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) continue;
      ServerName oldServer = regionToRegionServerMap.get(entry.getKey());
      ServerName currentServer = entry.getValue();
      assertEquals(oldServer.getHostAndPort(), currentServer.getHostAndPort());
      assertNotEquals(oldServer.getStartcode(), currentServer.getStartcode());
    }
  }