  @Test
  public void testBulkSplitOptimization() throws Exception {
    final Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
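    // Make the table compaction- and split-averse while loading: a huge
    // compaction ratio and file max prevent merging compactions, and the 1G
    // split threshold keeps everything in a single tablet during import.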
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
    FileSystem fs = cluster.getFileSystem();
    Path testDir = new Path(getUsableDir(), "testmf");
    FunctionalTestUtils.createRFiles(c, fs, testDir.toString(), ROWS, SPLITS, 8);
    FileStatus[] stats = fs.listStatus(testDir);

    System.out.println("Number of generated files: " + stats.length);
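    // Bulk import into the single tablet, then confirm no splits occurred
    // and that the one tablet references all 100 imported files.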
    FunctionalTestUtils.bulkImport(c, fs, tableName, testDir.toString());
    FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);

    // initiate splits
    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");

    sleepUninterruptibly(2, TimeUnit.SECONDS);

    // wait until over split threshold -- should be 78 splits
    while (c.tableOperations().listSplits(tableName).size() < 75) {
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }

    FunctionalTestUtils.checkSplits(c, tableName, 50, 100);
    VerifyIngest.Opts opts = new VerifyIngest.Opts();
    opts.timestamp = 1;
    opts.dataSize = 50;
    opts.random = 56;
    opts.rows = 100000;
    opts.startRow = 0;
    opts.cols = 1;
    opts.setTableName(tableName);

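    // Choose verification credentials based on the admin token type:
    // password auth or Kerberos, depending on how the cluster is secured.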
    AuthenticationToken adminToken = getAdminToken();
    if (adminToken instanceof PasswordToken) {
      PasswordToken token = (PasswordToken) adminToken;
      opts.setPassword(new Password(new String(token.getPassword(), UTF_8)));
      opts.setPrincipal(getAdminPrincipal());
    } else if (adminToken instanceof KerberosToken) {
      ClientConfiguration clientConf = cluster.getClientConfig();
      opts.updateKerberosCredentials(clientConf);
    } else {
      Assert.fail("Unknown token type");
    }

    VerifyIngest.verifyIngest(c, opts, new ScannerOpts());

    // ensure each tablet does not have all map files, should be ~2.5 files per tablet
    FunctionalTestUtils.checkRFiles(c, tableName, 50, 100, 1, 4);
  }
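Note: the split-wait loop above polls listSplits() with no upper bound, so a stalled split hangs the test until the harness kills it. A minimal bounded variant might look like the following sketch (the helper name and the two-minute deadline are illustrative assumptions, not part of the original test):

  private static void waitForSplits(Connector c, String table, int min) throws Exception {
    // Poll until the table reports at least 'min' split points, or give up
    // after an assumed two-minute deadline.
    long deadline = System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(2);
    while (c.tableOperations().listSplits(table).size() < min) {
      if (System.currentTimeMillis() > deadline)
        throw new AssertionError("timed out waiting for " + min + " splits");
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
  }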
Example #2
 @Test
 public void interleaveSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   c.tableOperations()
       .setProperty("test_ingest", Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
   ReadWriteIT.interleaveTest(c);
   UtilWaitThread.sleep(5 * 1000);
   assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
 }
Example #3
 @Test(timeout = 4 * 60 * 1000)
 public void binaryStressTest() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("bt");
   c.tableOperations().setProperty("bt", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   BinaryIT.runTest(c);
   String id = c.tableOperations().tableIdMap().get("bt");
   FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
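   // Each tablet gets its own directory under the table's id in HDFS, so
   // the directory count is a rough proxy for the number of tablets.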
   FileStatus[] dir =
       fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id));
   assertTrue(dir.length > 7);
 }
Example #4
 @Test
 public void deleteSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   DeleteIT.deleteTest(c, cluster);
   c.tableOperations().flush("test_ingest", null, null, true);
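   // After the deletes are flushed, poll for up to ~50 seconds for the
   // table to split past 20 tablets before asserting.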
   for (int i = 0; i < 5; i++) {
     UtilWaitThread.sleep(10 * 1000);
     if (c.tableOperations().listSplits("test_ingest").size() > 20) break;
   }
   assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
 }
Example #5
 @Test
 public void tabletShouldSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
   c.tableOperations()
       .setProperty("test_ingest", Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
   TestIngest.Opts opts = new TestIngest.Opts();
   opts.rows = 100000;
   TestIngest.ingest(c, opts, new BatchWriterOpts());
   VerifyIngest.Opts vopts = new VerifyIngest.Opts();
   vopts.rows = opts.rows;
   VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
   UtilWaitThread.sleep(15 * 1000);
   String id = c.tableOperations().tableIdMap().get("test_ingest");
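   // Scan this table's tablet entries in the metadata table. Full ingest
   // rows are 14 characters ("row_" plus a zero-padded number), so an end
   // row shorter than that means the tablet server shortened the split point.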
   Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
   KeyExtent extent = new KeyExtent(new Text(id), null, null);
   s.setRange(extent.toMetadataRange());
   MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
   int count = 0;
   int shortened = 0;
   for (Entry<Key, Value> entry : s) {
     extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
     if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14) shortened++;
     count++;
   }
   assertTrue(shortened > 0);
   assertTrue(count > 10);
   assertEquals(
       0,
       cluster
           .exec(
               CheckForMetadataProblems.class,
               "-i",
               cluster.getInstanceName(),
               "-u",
               "root",
               "-p",
               ROOT_PASSWORD,
               "-z",
               cluster.getZooKeepers())
           .waitFor());
 }