@Test(timeout = 60000)
 public void testExceptionDuringInitialization() throws Exception {
   Configuration conf = TEST_UTIL.getConfiguration();
   conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
   conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
   conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
   TEST_UTIL.startMiniCluster(2);
   try {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     // Trigger one regionserver to fail as if it came up with a coprocessor
     // that fails during initialization
     final HRegionServer regionServer = cluster.getRegionServer(0);
     conf.set(
         CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         FailedInitializationObserver.class.getName());
     regionServer
         .getRegionServerCoprocessorHost()
         .loadSystemCoprocessors(conf, CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
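      // ABORT_ON_ERROR_KEY is enabled above, so the failing coprocessor should cause this
      // regionserver to abort; wait for that to happen.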
     TEST_UTIL.waitFor(
         10000,
         1000,
         new Predicate<Exception>() {
           @Override
           public boolean evaluate() throws Exception {
             return regionServer.isAborted();
           }
         });
   } finally {
     TEST_UTIL.shutdownMiniCluster();
   }
 }
Example #2
 private static void waitForQuotaEnabled() throws Exception {
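    // Poll for up to 60 seconds until the master is up and its quota manager reports quotas enabled.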
   UTIL.waitFor(
       60000,
       new Waiter.Predicate<Exception>() {
         @Override
         public boolean evaluate() throws Exception {
           HMaster master = UTIL.getHBaseCluster().getMaster();
           if (master == null) {
             return false;
           }
           MasterQuotaManager quotaManager = master.getMasterQuotaManager();
           return quotaManager != null && quotaManager.isQuotaEnabled();
         }
       });
 }
Example #3
 @Test
 public void testStatePreserve() throws Exception {
   final String nsp1 = prefix + "_testStatePreserve";
   NamespaceDescriptor nspDesc =
       NamespaceDescriptor.create(nsp1)
           .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20")
           .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "10")
           .build();
   ADMIN.createNamespace(nspDesc);
   TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1");
   TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
   TableName tableThree = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table3");
   HTableDescriptor tableDescOne = new HTableDescriptor(tableOne);
   HTableDescriptor tableDescTwo = new HTableDescriptor(tableTwo);
   HTableDescriptor tableDescThree = new HTableDescriptor(tableThree);
   ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3);
   ADMIN.createTable(tableDescTwo, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3);
   ADMIN.createTable(tableDescThree, Bytes.toBytes("1"), Bytes.toBytes("1000"), 4);
   ADMIN.disableTable(tableThree);
   deleteTable(tableThree);
   // wait for chore to complete
   UTIL.waitFor(
       1000,
       new Waiter.Predicate<Exception>() {
         @Override
         public boolean evaluate() throws Exception {
           return (getNamespaceState(nsp1).getTables().size() == 2);
         }
       });
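    // Capture the namespace state, restart the master, and verify the same number of tables is still tracked.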
   NamespaceTableAndRegionInfo before = getNamespaceState(nsp1);
   restartMaster();
   NamespaceTableAndRegionInfo after = getNamespaceState(nsp1);
   assertEquals(
       "Expected: " + before.getTables() + " Found: " + after.getTables(),
       before.getTables().size(),
       after.getTables().size());
 }
Example #4
  @Test
  public void testTraceCreateTable() throws Exception {
    TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
    Table table;
    try {
      table = TEST_UTIL.createTable(TableName.valueOf("table"), FAMILY_BYTES);
    } finally {
    } finally {
      tableCreationSpan.close();
    }

    // Some of the table creation is async. Make sure everything is fully in place
    // before checking to see whether the spans are there.
    TEST_UTIL.waitFor(
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            return rcvr.getSpans().size() >= 5;
          }
        });

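    // Build a trace tree from the received spans; the span opened above should be the only root.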
    Collection<Span> spans = rcvr.getSpans();
    TraceTree traceTree = new TraceTree(spans);
    Collection<Span> roots = traceTree.getSpansByParent().find(Span.ROOT_SPAN_ID);

    assertEquals(1, roots.size());
    Span createTableRoot = roots.iterator().next();

    assertEquals("creating table", createTableRoot.getDescription());

    int createTableCount = 0;

    for (Span s : traceTree.getSpansByParent().find(createTableRoot.getSpanId())) {
      if (s.getDescription().startsWith("MasterService.CreateTable")) {
        createTableCount++;
      }
    }

    assertTrue(createTableCount >= 1);
    assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3);
    assertTrue(spans.size() > 5);

    Put put = new Put("row".getBytes());
    put.add(FAMILY_BYTES, "col".getBytes(), "value".getBytes());

    TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
    try {
      table.put(put);
    } finally {
      putSpan.close();
    }

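    // After the put there should be two root spans: the table creation and the put.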
    spans = rcvr.getSpans();
    traceTree = new TraceTree(spans);
    roots = traceTree.getSpansByParent().find(Span.ROOT_SPAN_ID);

    assertEquals(2, roots.size());
    Span putRoot = null;
    for (Span root : roots) {
      if (root.getDescription().equals("doing put")) {
        putRoot = root;
      }
    }

    assertNotNull(putRoot);
  }
Example #5
  @Test
  public void testLogCleaning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // set TTL
    long ttl = 10000;
    conf.setLong("hbase.master.logcleaner.ttl", ttl);
    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    Replication.decorateMasterConfiguration(conf);
    Server server = new DummyServer();
    ReplicationQueues repQueues =
        ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
    repQueues.init(server.getServerName().toString());
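    // Logs registered in these replication queues are protected from deletion by the ReplicationLogCleaner.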
    final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);
    String fakeMachineName = URLEncoder.encode(server.getServerName().toString(), "UTF8");

    final FileSystem fs = FileSystem.get(conf);

    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
    long now = System.currentTimeMillis();
    fs.delete(oldLogDir, true);
    fs.mkdirs(oldLogDir);
    // Case 1: 2 invalid files, which would be deleted directly
    fs.createNewFile(new Path(oldLogDir, "a"));
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
    // Case 2: 1 "recent" file (created below, after the loop), not even deletable for the
    // first log cleaner (TimeToLiveLogCleaner), so we are not going down the chain
    System.out.println("Now is: " + now);
    for (int i = 1; i < 31; i++) {
      // Case 3: old files which would be deletable for the first log cleaner
      // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
      Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i));
      fs.createNewFile(fileName);
      // Case 4: put 3 old log files in ZK indicating that they are scheduled
      // for replication so these files would pass the first log cleaner
      // (TimeToLiveLogCleaner) but would be rejected by the second
      // (ReplicationLogCleaner)
      if (i % (30 / 3) == 1) {
        repQueues.addLog(fakeMachineName, fileName.getName());
        System.out.println("Replication log file: " + fileName);
      }
    }

    // sleep for some time so the next file gets a newer modification time
    Thread.sleep(ttl);
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

    // Case 2: 1 newer file, not even deletable for the first log cleaner
    // (TimeToLiveLogCleaner), so we are not going down the chain
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000)));

    for (FileStatus stat : fs.listStatus(oldLogDir)) {
      System.out.println(stat.getPath().toString());
    }

    assertEquals(34, fs.listStatus(oldLogDir).length);

    LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, oldLogDir);
    cleaner.chore();

    // We end up with the current log file, a newer one and the 3 old log
    // files which are scheduled for replication
    TEST_UTIL.waitFor(
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            return 5 == fs.listStatus(oldLogDir).length;
          }
        });

    for (FileStatus file : fs.listStatus(oldLogDir)) {
      System.out.println("Kept log files: " + file.getPath().getName());
    }
  }
Example #6
  @Test
  public void testRegionMerge() throws Exception {
    String nsp1 = prefix + "_regiontest";
    NamespaceDescriptor nspDesc =
        NamespaceDescriptor.create(nsp1)
            .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3")
            .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2")
            .build();
    ADMIN.createNamespace(nspDesc);
    final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
    byte[] columnFamily = Bytes.toBytes("info");
    HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo);
    tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
    final int initialRegions = 3;
    ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions);
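    // Load some rows and flush so the regions have data before merging.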
    Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    try (Table table = connection.getTable(tableTwo)) {
      UTIL.loadNumericRows(table, columnFamily, 1000, 1999);
    }
    ADMIN.flush(tableTwo);
    List<HRegionInfo> hris = ADMIN.getTableRegions(tableTwo);
    Collections.sort(hris);
    // merge the two regions
    final Set<String> encodedRegionNamesToMerge =
        Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName());
    ADMIN.mergeRegions(
        hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), false);
    UTIL.waitFor(
        10000,
        100,
        new Waiter.ExplainingPredicate<Exception>() {

          @Override
          public boolean evaluate() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                return false;
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return false;
              }
            }
            return true;
          }

          @Override
          public String explainFailure() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                return hri + " which is expected to be merged is still online";
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return hri + " is not open yet";
              }
            }
            return "Unknown";
          }
        });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions - 1, hris.size());
    Collections.sort(hris);

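    // Split one of the remaining regions at "500"; the table should return to its initial region count.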
    final HRegionInfo hriToSplit = hris.get(1);
    ADMIN.split(tableTwo, Bytes.toBytes("500"));

    UTIL.waitFor(
        10000,
        100,
        new Waiter.ExplainingPredicate<Exception>() {

          @Override
          public boolean evaluate() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                return false;
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return false;
              }
            }
            return true;
          }

          @Override
          public String explainFailure() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                return hriToSplit + " which is expected to be split is still online";
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return hri + " is not open yet";
              }
            }
            return "Unknown";
          }
        });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);

    // fail region merge through Coprocessor hook
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HRegionServer regionServer = cluster.getRegionServer(0);
    RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost();
    Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName());
    CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor;
    regionServerObserver.failMerge(true);
    regionServerObserver.triggered = false;

    ADMIN.mergeRegions(
        hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), false);
    regionServerObserver.waitUtilTriggered();
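    // The merge request reached the observer and was failed there, so the region count must be unchanged.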
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);
    // verify that we cannot split
    HRegionInfo hriToSplit2 = hris.get(1);
    ADMIN.split(
        tableTwo,
        TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true));
    Thread.sleep(2000);
    assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size());
  }