Example #1
  @Test
  public void test() throws Exception {
    Connector c = getConnector();
    // make a table
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // write to it
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();

    // create a fake _tmp file in its directory
    String id = c.tableOperations().tableIdMap().get(tableName);
    FileSystem fs = getCluster().getFileSystem();
    Path tmp = new Path("/accumulo/tables/" + id + "/default_tablet/junk.rf_tmp");
    fs.create(tmp).close();
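    // bounce the tablet servers so the tablet has to be reloaded on restart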
    for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
      getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
    }
    getCluster().start();

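    // scanning forces the tablet to load, which should clean up the stray _tmp file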
    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
    FunctionalTestUtils.count(scanner);
    assertFalse(fs.exists(tmp));
  }
Example #2
  // Poll the state store until it can be iterated without throwing, retrying every 250 ms.
  private void waitForCleanStore(MetaDataStateStore store) {
    while (true) {
      try {
        FunctionalTestUtils.count(store);
      } catch (Exception ex) {
        System.out.println(ex);
        UtilWaitThread.sleep(250);
        continue;
      }
      break;
    }
  }

  @Test
  public void testBulkSplitOptimization() throws Exception {
    final Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
    FileSystem fs = cluster.getFileSystem();
    Path testDir = new Path(getUsableDir(), "testmf");
    FunctionalTestUtils.createRFiles(c, fs, testDir.toString(), ROWS, SPLITS, 8);
    FileStatus[] stats = fs.listStatus(testDir);

    System.out.println("Number of generated files: " + stats.length);
    FunctionalTestUtils.bulkImport(c, fs, tableName, testDir.toString());
    FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);

    // initiate splits
    getConnector()
        .tableOperations()
        .setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");

    sleepUninterruptibly(2, TimeUnit.SECONDS);

    // wait until over split threshold -- should be 78 splits
    while (getConnector().tableOperations().listSplits(tableName).size() < 75) {
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }

    FunctionalTestUtils.checkSplits(c, tableName, 50, 100);
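    // verify the bulk imported data is still readable after the splits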
    VerifyIngest.Opts opts = new VerifyIngest.Opts();
    opts.timestamp = 1;
    opts.dataSize = 50;
    opts.random = 56;
    opts.rows = 100000;
    opts.startRow = 0;
    opts.cols = 1;
    opts.setTableName(tableName);

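    // pick credentials for VerifyIngest based on how the cluster authenticates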
    AuthenticationToken adminToken = getAdminToken();
    if (adminToken instanceof PasswordToken) {
      PasswordToken token = (PasswordToken) adminToken;
      opts.setPassword(new Password(new String(token.getPassword(), UTF_8)));
      opts.setPrincipal(getAdminPrincipal());
    } else if (adminToken instanceof KerberosToken) {
      ClientConfiguration clientConf = cluster.getClientConfig();
      opts.updateKerberosCredentials(clientConf);
    } else {
      Assert.fail("Unknown token type");
    }

    VerifyIngest.verifyIngest(c, opts, new ScannerOpts());

    // ensure each tablet does not have all map files, should be ~2.5 files per tablet
    FunctionalTestUtils.checkRFiles(c, tableName, 50, 100, 1, 4);
  }
Example #4
  @Test(timeout = 3 * 60 * 1000)
  public void run() throws Exception {
    Connector c = getConnector();
    c.tableOperations().create("test_ingest");
    c.tableOperations().setProperty("test_ingest", Property.TABLE_MAJC_RATIO.getKey(), "10");
    c.tableOperations()
        .addSplits("test_ingest", TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));

    // ingest and flush three times so each tablet accumulates three files
    for (int i = 0; i < 3; i++) {
      TestIngest.Opts opts = new TestIngest.Opts();
      opts.timestamp = i;
      opts.dataSize = 50;
      opts.rows = NUM_TO_INGEST;
      opts.cols = 1;
      opts.random = i;
      TestIngest.ingest(c, opts, new BatchWriterOpts());

      c.tableOperations().flush("test_ingest", null, null, true);
      FunctionalTestUtils.checkRFiles(c, "test_ingest", NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
    }

    List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);

    for (int i = 0; i < NUM_TO_INGEST; i++) {
      ranges.add(new Range(TestIngest.generateRow(i, 0)));
    }

    long time1 = batchScan(c, ranges, 1);
    // run it again, now that stuff is cached on the client and server
    time1 = batchScan(c, ranges, 1);
    long time2 = batchScan(c, ranges, NUM_TABLETS);

    System.out.printf("Single thread scan time   %6.2f %n", time1 / 1000.0);
    System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
  }
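
The batchScan helper used above is not part of this example. A minimal sketch of what it might look like, assuming it opens a BatchScanner with the given number of query threads against the test_ingest table, reads every range, and returns the elapsed wall-clock time in milliseconds:

  // Hypothetical reconstruction of the batchScan helper; the real method is not
  // shown in this example, so its signature and behavior are assumptions.
  private long batchScan(Connector c, List<Range> ranges, int numThreads) throws Exception {
    BatchScanner bs = c.createBatchScanner("test_ingest", Authorizations.EMPTY, numThreads);
    try {
      bs.setRanges(ranges);
      long start = System.currentTimeMillis();
      long entries = 0;
      for (Map.Entry<Key,Value> entry : bs) {
        entries++; // iterate every entry so the scan actually runs to completion
      }
      System.out.println("Scanned " + entries + " entries with " + numThreads + " threads");
      return System.currentTimeMillis() - start;
    } finally {
      bs.close();
    }
  }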