@Test
  public void testBulkSplitOptimization() throws Exception {
    final Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
    FileSystem fs = cluster.getFileSystem();
    Path testDir = new Path(getUsableDir(), "testmf");
    FunctionalTestUtils.createRFiles(c, fs, testDir.toString(), ROWS, SPLITS, 8);
    FileStatus[] stats = fs.listStatus(testDir);

    System.out.println("Number of generated files: " + stats.length);
    FunctionalTestUtils.bulkImport(c, fs, tableName, testDir.toString());
    FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);

    // initiate splits
    getConnector()
        .tableOperations()
        .setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");

    sleepUninterruptibly(2, TimeUnit.SECONDS);

    // wait until over split threshold -- should be 78 splits
    while (getConnector().tableOperations().listSplits(tableName).size() < 75) {
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }

    FunctionalTestUtils.checkSplits(c, tableName, 50, 100);
    VerifyIngest.Opts opts = new VerifyIngest.Opts();
    opts.timestamp = 1;
    opts.dataSize = 50;
    opts.random = 56;
    opts.rows = 100000;
    opts.startRow = 0;
    opts.cols = 1;
    opts.setTableName(tableName);

    AuthenticationToken adminToken = getAdminToken();
    if (adminToken instanceof PasswordToken) {
      PasswordToken token = (PasswordToken) adminToken;
      opts.setPassword(new Password(new String(token.getPassword(), UTF_8)));
      opts.setPrincipal(getAdminPrincipal());
    } else if (adminToken instanceof KerberosToken) {
      ClientConfiguration clientConf = cluster.getClientConfig();
      opts.updateKerberosCredentials(clientConf);
    } else {
      Assert.fail("Unknown token type");
    }

    VerifyIngest.verifyIngest(c, opts, new ScannerOpts());

    // ensure each tablet does not have all map files, should be ~2.5 files per tablet
    FunctionalTestUtils.checkRFiles(c, tableName, 50, 100, 1, 4);
  }
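
  // FunctionalTestUtils.bulkImport is not shown in this excerpt; a minimal sketch of the
  // equivalent call against the public TableOperations API (the helper name and failures
  // directory are illustrative assumptions, not the original utility's implementation):
  private static void bulkImportSketch(Connector c, FileSystem fs, String tableName, String dir)
      throws Exception {
    // importDirectory requires an existing, empty failures directory
    Path failures = new Path(dir + "_failures");
    fs.mkdirs(failures);
    c.tableOperations().importDirectory(tableName, dir, failures.toString(), false);
  }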
Example #2
  private long batchScan(Connector c, List<Range> ranges, int threads) throws Exception {
    BatchScanner bs = c.createBatchScanner("test_ingest", TestIngest.AUTHS, threads);

    bs.setRanges(ranges);

    int count = 0;

    long t1 = System.currentTimeMillis();

    byte[] rval = new byte[50];
    Random random = new Random();

    for (Entry<Key, Value> entry : bs) {
      count++;
      int row = VerifyIngest.getRow(entry.getKey());
      int col = VerifyIngest.getCol(entry.getKey());

      if (row < 0 || row >= NUM_TO_INGEST) {
        throw new Exception("unexcepted row " + row);
      }

      rval = TestIngest.genRandomValue(random, rval, 2, row, col);

      if (entry.getValue().compareTo(rval) != 0) {
        throw new Exception("unexcepted value row=" + row + " col=" + col);
      }
    }

    long t2 = System.currentTimeMillis();

    bs.close();

    if (count != NUM_TO_INGEST) {
      throw new Exception("Batch Scan did not return expected number of values " + count);
    }

    return t2 - t1;
  }
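
  // batchScan is never invoked in this excerpt; a hypothetical driver (not from the
  // original source), assuming TestIngest's default row format "row_%010d" so that one
  // single-row Range per ingested row returns exactly NUM_TO_INGEST entries:
  private long timeFullBatchScan(Connector c, int threads) throws Exception {
    List<Range> ranges = new ArrayList<>(NUM_TO_INGEST);
    for (int i = 0; i < NUM_TO_INGEST; i++) {
      ranges.add(new Range(new Text(String.format("row_%010d", i))));
    }
    return batchScan(c, ranges, threads);
  }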
Example #3
 @Test
 public void tabletShouldSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
   c.tableOperations()
       .setProperty("test_ingest", Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
   TestIngest.Opts opts = new TestIngest.Opts();
   opts.rows = 100000;
   TestIngest.ingest(c, opts, new BatchWriterOpts());
   VerifyIngest.Opts vopts = new VerifyIngest.Opts();
   vopts.rows = opts.rows;
   VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
   UtilWaitThread.sleep(15 * 1000);
   String id = c.tableOperations().tableIdMap().get("test_ingest");
   Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
   KeyExtent extent = new KeyExtent(new Text(id), null, null);
   s.setRange(extent.toMetadataRange());
   MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
   int count = 0;
   int shortened = 0;
   for (Entry<Key, Value> entry : s) {
     extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
      // TestIngest rows are 14 characters ("row_" plus a 10-digit id); a shorter end row
      // means the tablet server shortened the split point
      if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14) {
        shortened++;
      }
     count++;
   }
   assertTrue(shortened > 0);
   assertTrue(count > 10);
   assertEquals(
       0,
       cluster
           .exec(
               CheckForMetadataProblems.class,
               "-i",
               cluster.getInstanceName(),
               "-u",
               "root",
               "-p",
               ROOT_PASSWORD,
               "-z",
               cluster.getZooKeepers())
           .waitFor());
 }
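
  // A simpler cross-check (an assumption, not part of the original test): the split count
  // can be read through the client API instead of scanning the metadata table directly.
  private static void assertHasSplits(Connector c, String table) throws Exception {
    Collection<Text> splits = c.tableOperations().listSplits(table);
    assertTrue("expected " + table + " to have split", splits.size() > 10);
  }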
Example #4
 public static void deleteTest(Connector c, MiniAccumuloClusterImpl cluster) throws Exception {
   VerifyIngest.Opts vopts = new VerifyIngest.Opts();
   TestIngest.Opts opts = new TestIngest.Opts();
   vopts.rows = opts.rows = 1000;
   vopts.cols = opts.cols = 1;
   vopts.random = opts.random = 56;
   TestIngest.ingest(c, opts, BWOPTS);
   assertEquals(
       0,
       cluster
           .exec(
               TestRandomDeletes.class,
               "-u",
               "root",
               "-p",
               ROOT_PASSWORD,
               "-i",
               cluster.getInstanceName(),
               "-z",
               cluster.getZooKeepers())
           .waitFor());
   TestIngest.ingest(c, opts, BWOPTS);
   VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
 }
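
  // Sketch (assumption, for illustration only): deleting an entire row range directly with
  // a BatchDeleter, as an in-process alternative to spawning the TestRandomDeletes process
  // used above. The helper name and thread count are illustrative.
  private static void deleteAll(Connector c, String table) throws Exception {
    BatchDeleter bd = c.createBatchDeleter(table, Authorizations.EMPTY, 4, new BatchWriterConfig());
    try {
      bd.setRanges(Collections.singleton(new Range())); // an infinite range covers the whole table
      bd.delete();
    } finally {
      bd.close();
    }
  }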