@Before
  public void createMockKeyValues() throws Exception {
    // Make a MockInstance here; by setting the configured instance name to match this mock
    // instance, we can "trick" the InputFormat into using the MockInstance
    mockInstance = new MockInstance(test.getMethodName());
    inputformat = new HiveAccumuloTableInputFormat();
    conf = new JobConf();
    conf.set(AccumuloSerDeParameters.TABLE_NAME, TEST_TABLE);
    conf.set(AccumuloSerDeParameters.USE_MOCK_INSTANCE, "true");
    conf.set(AccumuloSerDeParameters.INSTANCE_NAME, test.getMethodName());
    conf.set(AccumuloSerDeParameters.USER_NAME, USER);
    conf.set(AccumuloSerDeParameters.USER_PASS, PASS);
    // not used for the mock instance, but required by the input format
    conf.set(AccumuloSerDeParameters.ZOOKEEPERS, "localhost:2181");

    columnNames = Arrays.asList("name", "sid", "dgrs", "mills");
    columnTypes =
        Arrays.<TypeInfo>asList(
            TypeInfoFactory.stringTypeInfo,
            TypeInfoFactory.intTypeInfo,
            TypeInfoFactory.doubleTypeInfo,
            TypeInfoFactory.longTypeInfo);
    conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:name,cf:sid,cf:dgrs,cf:mills");
    conf.set(serdeConstants.LIST_COLUMNS, "name,sid,dgrs,mills");
    conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,double,bigint");

    con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes()));
    con.tableOperations().create(TEST_TABLE);
    con.securityOperations().changeUserAuthorizations(USER, new Authorizations("blah"));
    BatchWriterConfig writerConf = new BatchWriterConfig();
    BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf);

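    // seed the mock table with three rows ("r1", "r2", "r3") that the read tests expect to find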
    Mutation m1 = new Mutation(new Text("r1"));
    m1.put(COLUMN_FAMILY, NAME, new Value("brian".getBytes()));
    m1.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("1")));
    m1.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("44.5")));
    m1.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("555")));

    Mutation m2 = new Mutation(new Text("r2"));
    m2.put(COLUMN_FAMILY, NAME, new Value("mark".getBytes()));
    m2.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("2")));
    m2.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("55.5")));
    m2.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("666")));

    Mutation m3 = new Mutation(new Text("r3"));
    m3.put(COLUMN_FAMILY, NAME, new Value("dennis".getBytes()));
    m3.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("3")));
    m3.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("65.5")));
    m3.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("777")));

    writer.addMutation(m1);
    writer.addMutation(m2);
    writer.addMutation(m3);

    writer.close();
  }
  @Test(timeout = 60 * 1000)
  public void run() throws Exception {
    Connector c = getConnector();
    c.tableOperations().create("rdel1");
    Map<String, Set<Text>> groups = new HashMap<String, Set<Text>>();
    groups.put("lg1", Collections.singleton(new Text("foo")));
    groups.put("dg", Collections.<Text>emptySet());
    c.tableOperations().setLocalityGroups("rdel1", groups);
    IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
    c.tableOperations().attachIterator("rdel1", setting, EnumSet.of(IteratorScope.majc));
    c.tableOperations().setProperty("rdel1", Property.TABLE_MAJC_RATIO.getKey(), "100");
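    // the RowDeletingIterator is attached only at major-compaction scope, and the high majc ratio
    // keeps system-initiated compactions from running before the test forces one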

    BatchWriter bw = c.createBatchWriter("rdel1", new BatchWriterConfig());

    bw.addMutation(nm("r1", "foo", "cf1", "v1"));
    bw.addMutation(nm("r1", "bar", "cf1", "v2"));

    bw.flush();
    c.tableOperations().flush("rdel1", null, null, true);

    checkRFiles(c, "rdel1", 1, 1, 1, 1);

    int count = 0;
    Scanner scanner = c.createScanner("rdel1", Authorizations.EMPTY);
    for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
      count++;
    }
    if (count != 2) throw new Exception("1 count=" + count);

    bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));

    bw.flush();
    c.tableOperations().flush("rdel1", null, null, true);

    checkRFiles(c, "rdel1", 1, 1, 2, 2);

    count = 0;
    scanner = c.createScanner("rdel1", Authorizations.EMPTY);
    for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
      count++;
    }
    if (count != 3) throw new Exception("2 count=" + count);

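    // force a full major compaction so the majc-scoped RowDeletingIterator purges the deleted row
    // and its delete marker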
    c.tableOperations().compact("rdel1", null, null, false, true);

    checkRFiles(c, "rdel1", 1, 1, 0, 0);

    count = 0;
    scanner = c.createScanner("rdel1", Authorizations.EMPTY);
    for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
      count++;
    }
    if (count != 0) throw new Exception("3 count=" + count);

    bw.close();
  }
  private static long write(Connector conn, ArrayList<byte[]> cfset, String table)
      throws TableNotFoundException, MutationsRejectedException {
    Random rand = new Random();

    byte val[] = new byte[50];

    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());

    long t1 = System.currentTimeMillis();

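    // write 2^15 rows with random zero-padded 16-digit hex row ids; each row gets one 50-byte
    // random value per column family in cfset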
    for (int i = 0; i < 1 << 15; i++) {
      byte[] row = FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0]);

      Mutation m = new Mutation(row);
      for (byte[] cf : cfset) {
        byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]);
        rand.nextBytes(val);
        m.put(cf, cq, val);
      }

      bw.addMutation(m);
    }

    bw.close();

    long t2 = System.currentTimeMillis();

    return t2 - t1;
  }
  @Test
  public void test() throws Exception {
    Connector c = getConnector();
    // make a table
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // write to it
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();

    // create a fake _tmp file in its directory
    String id = c.tableOperations().tableIdMap().get(tableName);
    FileSystem fs = getCluster().getFileSystem();
    Path tmp = new Path("/accumulo/tables/" + id + "/default_tablet/junk.rf_tmp");
    fs.create(tmp).close();
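    // kill and restart the tablet servers; recovery should remove the stray _tmp file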
    for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
      getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
    }
    getCluster().start();

    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
    FunctionalTestUtils.count(scanner);
    assertFalse(fs.exists(tmp));
  }
  public static void run(
      String instanceName, String zookeepers, AuthenticationToken rootPassword, String args[])
      throws Exception {
    // edit this method to play with Accumulo

    Instance instance = new ZooKeeperInstance(instanceName, zookeepers);

    Connector conn = instance.getConnector("root", rootPassword);

    conn.tableOperations().create("foo");

    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxLatency(60000l, java.util.concurrent.TimeUnit.MILLISECONDS);
    bwConfig.setMaxWriteThreads(3);
    bwConfig.setMaxMemory(50000000);
    BatchWriter bw = conn.createBatchWriter("foo", bwConfig);
    Mutation m = new Mutation("r1");
    m.put("cf1", "cq1", "v1");
    m.put("cf1", "cq2", "v3");
    bw.addMutation(m);
    bw.close();

    Scanner scanner = conn.createScanner("foo", Constants.NO_AUTHS);
    for (Entry<Key, Value> entry : scanner) {
      System.out.println(entry.getKey() + " " + entry.getValue());
    }
  }
 @Test
 public void test() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
   Mutation m = new Mutation("row1");
   m.put("cf", "col1", "Test");
   bw.addMutation(m);
   bw.close();
   scanCheck(c, tableName, "Test");
   FileSystem fs = getCluster().getFileSystem();
   Path jarPath = new Path(rootPath + "/lib/ext/Test.jar");
   copyStreamToFileSystem(fs, "/TestCombinerX.jar", jarPath);
   sleepUninterruptibly(1, TimeUnit.SECONDS);
   IteratorSetting is =
       new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner");
   Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf")));
   c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.scan));
   sleepUninterruptibly(ZOOKEEPER_PROPAGATION_TIME, TimeUnit.MILLISECONDS);
   scanCheck(c, tableName, "TestX");
   fs.delete(jarPath, true);
   copyStreamToFileSystem(fs, "/TestCombinerY.jar", jarPath);
   sleepUninterruptibly(5, TimeUnit.SECONDS);
   scanCheck(c, tableName, "TestY");
   fs.delete(jarPath, true);
 }
  private void runMergeTest(
      Connector conn,
      String table,
      String[] splits,
      String[] expectedSplits,
      String[] inserts,
      String start,
      String end)
      throws Exception {
    System.out.println(
        "Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);

    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
    TreeSet<Text> splitSet = new TreeSet<Text>();
    for (String split : splits) {
      splitSet.add(new Text(split));
    }
    conn.tableOperations().addSplits(table, splitSet);

    BatchWriter bw = conn.createBatchWriter(table, null);
    HashSet<String> expected = new HashSet<String>();
    for (String row : inserts) {
      Mutation m = new Mutation(row);
      m.put("cf", "cq", row);
      bw.addMutation(m);
      expected.add(row);
    }

    bw.close();

    conn.tableOperations()
        .merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));

    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);

    HashSet<String> observed = new HashSet<String>();
    for (Entry<Key, Value> entry : scanner) {
      String row = entry.getKey().getRowData().toString();
      if (!observed.add(row)) {
        throw new Exception("Saw data twice " + table + " " + row);
      }
    }

    if (!observed.equals(expected)) {
      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
    }

    HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
    HashSet<Text> ess = new HashSet<Text>();
    for (String es : expectedSplits) {
      ess.add(new Text(es));
    }

    if (!currentSplits.equals(ess)) {
      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
    }
  }
  private static AccumuloBackedGraph setupGraph(
      Instance instance, Connector conn, String tableName, int numEntries) {
    long ageOffTimeInMilliseconds = (30 * 24 * 60 * 60 * 1000L); // 30 days in milliseconds

    try {
      // Create the table. This method creates the table, removes the versioning iterator, adds the
      // SetOfStatisticsCombiner iterator, and sets the age-off iterator to age data off once it is
      // more than ageOffTimeInMilliseconds milliseconds old.
      TableUtils.createTable(conn, tableName, ageOffTimeInMilliseconds);

      // Create numEntries edges and add to Accumulo
      BatchWriter writer = conn.createBatchWriter(tableName, 1000000L, 1000L, 1);
      for (int i = 0; i < numEntries; i++) {
        Edge edge =
            new Edge(
                "customer",
                "" + i,
                "product",
                "B",
                "purchase",
                "instore",
                true,
                visibilityString,
                sevenDaysBefore,
                sixDaysBefore);
        SetOfStatistics statistics = new SetOfStatistics();
        statistics.addStatistic("count", new Count(i));
        Key key = ConversionUtils.getKeysFromEdge(edge).getFirst();
        Value value = ConversionUtils.getValueFromSetOfStatistics(statistics);
        Mutation m = new Mutation(key.getRow());
        m.put(
            key.getColumnFamily(),
            key.getColumnQualifier(),
            new ColumnVisibility(key.getColumnVisibility()),
            key.getTimestamp(),
            value);
        writer.addMutation(m);
      }
      writer.close();

      // Create Accumulo backed graph
      AccumuloBackedGraph graph = new AccumuloBackedGraph(conn, tableName);
      return graph;
    } catch (AccumuloException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    } catch (AccumuloSecurityException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    } catch (TableExistsException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    } catch (TableNotFoundException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    }
    return null;
  }
  @Override
  public void visit(State state, Properties props) throws Exception {
    Connector conn = state.getConnector();

    Random rand = (Random) state.get("rand");

    @SuppressWarnings("unchecked")
    List<String> tableNames = (List<String>) state.get("tables");

    String tableName = tableNames.get(rand.nextInt(tableNames.size()));

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs = FileSystem.get(conf);

    String bulkDir = "/tmp/concurrent_bulk/b_" + String.format("%016x", Math.abs(rand.nextLong()));

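    // create the bulk import directory and a sibling "_f" failure directory for files that cannot
    // be imported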
    fs.mkdirs(new Path(bulkDir));
    fs.mkdirs(new Path(bulkDir + "_f"));

    try {
      BatchWriter bw = new RFileBatchWriter(conf, fs, bulkDir + "/file01.rf");
      try {
        TreeSet<Long> rows = new TreeSet<Long>();
        int numRows = rand.nextInt(100000);
        for (int i = 0; i < numRows; i++) {
          rows.add(Math.abs(rand.nextLong()));
        }

        for (Long row : rows) {
          Mutation m = new Mutation(String.format("%016x", row));
          long val = Math.abs(rand.nextLong());
          for (int j = 0; j < 10; j++) {
            m.put("cf", "cq" + j, new Value(String.format("%016x", val).getBytes()));
          }

          bw.addMutation(m);
        }
      } finally {
        bw.close();
      }

      conn.tableOperations()
          .importDirectory(tableName, bulkDir, bulkDir + "_f", rand.nextBoolean());

      log.debug("BulkImported to " + tableName);
    } catch (TableNotFoundException e) {
      log.debug("BulkImport " + tableName + " failed, doesnt exist");
    } catch (TableOfflineException toe) {
      log.debug("BulkImport " + tableName + " failed, offline");
    } finally {
      fs.delete(new Path(bulkDir), true);
      fs.delete(new Path(bulkDir + "_f"), true);
    }
  }
  @Test
  public void testGetProtectedField() throws Exception {
    FileInputFormat.addInputPath(conf, new Path("unused"));

    BatchWriterConfig writerConf = new BatchWriterConfig();
    BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf);

    Authorizations origAuths = con.securityOperations().getUserAuthorizations(USER);
    con.securityOperations()
        .changeUserAuthorizations(USER, new Authorizations(origAuths.toString() + ",foo"));

    Mutation m = new Mutation("r4");
    m.put(COLUMN_FAMILY, NAME, new ColumnVisibility("foo"), new Value("frank".getBytes()));
    m.put(COLUMN_FAMILY, SID, new ColumnVisibility("foo"), new Value(parseIntBytes("4")));
    m.put(COLUMN_FAMILY, DEGREES, new ColumnVisibility("foo"), new Value(parseDoubleBytes("60.6")));
    m.put(COLUMN_FAMILY, MILLIS, new ColumnVisibility("foo"), new Value(parseLongBytes("777")));

    writer.addMutation(m);
    writer.close();

    conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo");

    InputSplit[] splits = inputformat.getSplits(conf, 0);
    assertEquals(splits.length, 1);
    RecordReader<Text, AccumuloHiveRow> reader = inputformat.getRecordReader(splits[0], conf, null);
    Text rowId = new Text("r1");
    AccumuloHiveRow row = new AccumuloHiveRow();
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "brian".getBytes());

    rowId = new Text("r2");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "mark".getBytes());

    rowId = new Text("r3");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "dennis".getBytes());

    rowId = new Text("r4");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "frank".getBytes());

    assertFalse(reader.next(rowId, row));
  }
  /** Write entries to a table. */
  public static void writeEntries(
      Connector connector, Map<Key, Value> map, String table, boolean createIfNotExist) {
    if (createIfNotExist && !connector.tableOperations().exists(table))
      try {
        connector.tableOperations().create(table);
      } catch (AccumuloException | AccumuloSecurityException e) {
        log.error("trouble creating " + table, e);
        throw new RuntimeException(e);
      } catch (TableExistsException e) {
        log.error("crazy", e);
        throw new RuntimeException(e);
      }

    BatchWriterConfig bwc = new BatchWriterConfig();
    BatchWriter bw;
    try {
      bw = connector.createBatchWriter(table, bwc);
    } catch (TableNotFoundException e) {
      log.error("tried to write to a non-existant table " + table, e);
      throw new RuntimeException(e);
    }

    try {
      for (Map.Entry<Key, Value> entry : map.entrySet()) {
        Key k = entry.getKey();
        ByteSequence rowData = k.getRowData(),
            cfData = k.getColumnFamilyData(),
            cqData = k.getColumnQualifierData();
        Mutation m = new Mutation(rowData.getBackingArray(), rowData.offset(), rowData.length());
        m.put(
            cfData.getBackingArray(),
            cqData.getBackingArray(),
            k.getColumnVisibilityParsed(),
            entry.getValue().get());
        bw.addMutation(m);
      }

    } catch (MutationsRejectedException e) {
      log.error("mutations rejected", e);
      throw new RuntimeException(e);
    } finally {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("mutations rejected while trying to close BatchWriter", e);
      }
    }
  }
  static void initializeClone(String srcTableId, String tableId, Connector conn, BatchWriter bw)
      throws TableNotFoundException, MutationsRejectedException {
    TabletIterator ti =
        new TabletIterator(
            createCloneScanner(srcTableId, conn),
            new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(),
            true,
            true);

    if (!ti.hasNext())
      throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId);

    while (ti.hasNext()) bw.addMutation(createCloneMutation(srcTableId, tableId, ti.next()));

    bw.flush();
  }
 @Test
 public void merge() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
   BatchWriter bw = c.createBatchWriter(tableName, null);
   for (String row : "a b c d e f g h i j k".split(" ")) {
     Mutation m = new Mutation(row);
     m.put("cf", "cq", "value");
     bw.addMutation(m);
   }
   bw.close();
   c.tableOperations().flush(tableName, null, null, true);
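   // merging the range (c1, f1) collapses the tablets between them, removing the d, e, and f
   // split points and leaving 8 of the original 11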
   c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
   assertEquals(8, c.tableOperations().listSplits(tableName).size());
 }
  @Test
  public void testMap() throws Exception {
    MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
    c.tableOperations().create(TEST_TABLE_1);
    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
    for (int i = 0; i < 100; i++) {
      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
      bw.addMutation(m);
    }
    bw.close();

    MRTester.main(new String[] {"root", "", TEST_TABLE_1});
    assertNull(e1);
    assertNull(e2);
  }
  private int ingestLine(BatchWriter bw, String line) throws MutationsRejectedException {
    String[] parts = line.split(",");
    String longitude = StringUtils.leftPad(parts[0], PADSIZE_LATLON, '0');
    String latitude = StringUtils.leftPad(parts[1], PADSIZE_LATLON, '0');
    String ndsi = parts[2];
    // String ndsi_count = parts[3];
    String land_sea_mask = parts[4];

    Text longText = new Text(longitude);
    Text latText = new Text(latitude);

    Mutation m = new Mutation(longText);
    m.put(COLF_NDSI, latText, new Value(ndsi.getBytes(StandardCharsets.UTF_8)));
    m.put(COLF_LSM, latText, new Value(land_sea_mask.getBytes(StandardCharsets.UTF_8)));
    bw.addMutation(m);

    return 2;
  }
 public static void removeBulkLoadEntries(Connector conn, String tableId, long tid)
     throws Exception {
   Scanner mscanner =
       new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
   mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
   mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
   BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
   for (Entry<Key, Value> entry : mscanner) {
     log.debug("Looking at entry " + entry + " with tid " + tid);
     if (Long.parseLong(entry.getValue().toString()) == tid) {
       log.debug("deleting entry " + entry);
       Mutation m = new Mutation(entry.getKey().getRow());
       m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
       bw.addMutation(m);
     }
   }
   bw.close();
 }
 @Test
 public void aggregationTest() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
   SummingCombiner.setEncodingType(setting, Type.STRING);
   SummingCombiner.setColumns(
       setting, Collections.singletonList(new IteratorSetting.Column("cf")));
   c.tableOperations().attachIterator(tableName, setting);
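   // write ten values (0 through 9) to the same cell; the SummingCombiner folds them into a sum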
   BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
   for (int i = 0; i < 10; i++) {
     Mutation m = new Mutation("row1");
     m.put("cf".getBytes(), "col1".getBytes(), ("" + i).getBytes());
     bw.addMutation(m);
   }
   bw.close();
   checkSum(tableName, c);
 }
  static void runTest(Connector c, MiniAccumuloClusterImpl cluster)
      throws AccumuloException, AccumuloSecurityException, TableExistsException,
          TableNotFoundException, MutationsRejectedException, IOException, InterruptedException,
          NoSuchAlgorithmException {
    c.tableOperations().create(tablename);
    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
    for (int i = 0; i < 10; i++) {
      Mutation m = new Mutation("" + i);
      m.put(input_cf, input_cq, "row" + i);
      bw.addMutation(m);
    }
    bw.close();
    Process hash =
        cluster.exec(
            RowHash.class,
            Collections.singletonList(hadoopTmpDirArg),
            "-i",
            c.getInstance().getInstanceName(),
            "-z",
            c.getInstance().getZooKeepers(),
            "-u",
            "root",
            "-p",
            ROOT_PASSWORD,
            "-t",
            tablename,
            "--column",
            input_cfcq);
    assertEquals(0, hash.waitFor());

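    // the RowHash job writes a Base64-encoded MD5 of each value into the output column
    // qualifier; verify it row by row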
    Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
    s.fetchColumn(new Text(input_cf), new Text(output_cq));
    int i = 0;
    for (Entry<Key, Value> entry : s) {
      MessageDigest md = MessageDigest.getInstance("MD5");
      byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
      assertEquals(entry.getValue().toString(), new String(check));
      i++;
    }
  }
 @Test
 public void mergeSize() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   c.tableOperations()
       .addSplits(
           tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
   BatchWriter bw = c.createBatchWriter(tableName, null);
   for (String row : "c e f y".split(" ")) {
     Mutation m = new Mutation(row);
     m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
     bw.addMutation(m);
   }
   bw.close();
   c.tableOperations().flush(tableName, null, null, true);
   Merge merge = new Merge();
   merge.mergomatic(c, tableName, null, null, 100, false);
   assertArrayEquals(
       "b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
   merge.mergomatic(c, tableName, null, null, 100, true);
   assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
 }
  private static long scrambleDeleteHalfAndCheck(
      ClientOnDefaultTable opts,
      ScannerOpts scanOpts,
      BatchWriterOpts bwOpts,
      String tableName,
      Set<RowColumn> rows)
      throws Exception {
    int result = 0;
    ArrayList<RowColumn> entries = new ArrayList<RowColumn>(rows);
    java.util.Collections.shuffle(entries);

    Connector connector = opts.getConnector();
    BatchWriter mutations = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());

    for (int i = 0; i < (entries.size() + 1) / 2; i++) {
      RowColumn rc = entries.get(i);
      Mutation m = new Mutation(rc.row);
      m.putDelete(
          new Text(rc.column.columnFamily),
          new Text(rc.column.columnQualifier),
          new ColumnVisibility(rc.column.getColumnVisibility()),
          rc.timestamp + 1);
      mutations.addMutation(m);
      rows.remove(rc);
      result++;
    }

    mutations.close();

    Set<RowColumn> current = scanAll(opts, scanOpts, tableName);
    current.removeAll(rows);
    if (current.size() > 0) {
      throw new RuntimeException(current.size() + " records not deleted");
    }
    return result;
  }
  private void createEntries(Opts opts)
      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {

    // Trace the write operation. Note: unless you flush the BatchWriter, you will not capture
    // the write operation, as it occurs asynchronously. You can optionally create additional
    // Spans within a given Trace, as seen below around the flush.
    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);

    System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
    BatchWriter batchWriter =
        opts.getConnector().createBatchWriter(opts.getTableName(), new BatchWriterConfig());

    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");

    batchWriter.addMutation(m);
    // You can add timeline annotations to Spans which will be able to be viewed in the Monitor
    scope.getSpan().addTimelineAnnotation("Initiating Flush");
    batchWriter.flush();

    batchWriter.close();
    scope.close();
  }
  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
          TableExistsException {
    System.out.println("START");

    String instanceName = "development";
    String zooKeepers = "localhost";
    String user = "******";
    byte[] pass = "******".getBytes();
    String tableName = "users";

    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
    Connector connector = instance.getConnector(user, pass);
    MultiTableBatchWriter writer = connector.createMultiTableBatchWriter(200000l, 300, 4);

    if (!connector.tableOperations().exists(tableName)) {
      connector.tableOperations().create(tableName);
    }

    BatchWriter bw = writer.getBatchWriter(tableName);

    try {
      String userId = "medined";
      int age = 48;
      int height = 70;
      Mutation m = new Mutation(new Text(userId));
      m.put(new Text("age"), new Text(""), new Value(new Integer(age).toString().getBytes()));
      m.put(new Text("height"), new Text(""), new Value(new Integer(height).toString().getBytes()));
      bw.addMutation(m);
    } finally {
      if (writer != null) {
        writer.close();
      }
    }
    System.out.println("END");
  }
  @Test
  public void run() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);

    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());

    for (int i = 0; i < 1000; i++) {
      Mutation m = new Mutation(new Text(String.format("%08d", i)));
      for (int j = 0; j < 3; j++)
        m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));

      bw.addMutation(m);
    }

    bw.close();

    Scanner scanner = c.createScanner(tableName, new Authorizations());
    scanner.setReadaheadThreshold(20000);
    scanner.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000)));

    // test by making a slow iterator and then a couple of fast ones,
    // then check that nothing is left running except the slow iterator
    IteratorSetting setting = new IteratorSetting(21, SlowIterator.class);
    SlowIterator.setSeekSleepTime(setting, Long.MAX_VALUE);
    SlowIterator.setSleepTime(setting, Long.MAX_VALUE);
    scanner.addScanIterator(setting);

    final Iterator<Entry<Key, Value>> slow = scanner.iterator();

    final List<Future<Boolean>> callables = new ArrayList<>();
    final CountDownLatch latch = new CountDownLatch(10);
    for (int i = 0; i < 10; i++) {
      Future<Boolean> callable =
          service.submit(
              new Callable<Boolean>() {
                public Boolean call() {
                  latch.countDown();
                  while (slow.hasNext()) {

                    slow.next();
                  }
                  return slow.hasNext();
                }
              });
      callables.add(callable);
    }

    latch.await();

    log.info("Starting SessionBlockVerifyIT");

    // let's add more for good measure.
    for (int i = 0; i < 2; i++) {
      Scanner scanner2 = c.createScanner(tableName, new Authorizations());

      scanner2.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000)));

      scanner2.setBatchSize(1);
      Iterator<Entry<Key, Value>> iter = scanner2.iterator();
      // call super's verify mechanism
      verify(iter, 0, 1000);
    }

    int sessionsFound = 0;
    // we have configured 1 tserver, so we can grab the one and only
    String tserver = Iterables.getOnlyElement(c.instanceOperations().getTabletServers());

    final List<ActiveScan> scans = c.instanceOperations().getActiveScans(tserver);

    for (ActiveScan scan : scans) {
      // only here to minimize chance of seeing meta extent scans

      if (tableName.equals(scan.getTable()) && scan.getSsiList().size() > 0) {
        assertEquals("Not the expected iterator", 1, scan.getSsiList().size());
        assertTrue(
            "Not the expected iterator",
            scan.getSsiList().iterator().next().contains("SlowIterator"));
        sessionsFound++;
      }
    }

    /**
     * The message below indicates the problem that we experience within ACCUMULO-3509. The issue
     * manifests as a blockage in the Scanner synchronization that prevents us from making the
     * close call against it. Since the close blocks until a read is finished, we ultimately have a
     * block within the sweep of SessionManager. As a result we never reap subsequent idle sessions
     * AND we orphan the sessionsToCleanup in the sweep, leading to an inaccurate count within
     * sessionsFound.
     */
    assertEquals(
        "Must have ten sessions. Failure indicates a synchronization block within the sweep mechanism",
        10,
        sessionsFound);
    for (Future<Boolean> callable : callables) {
      callable.cancel(true);
    }
    service.shutdown();
  }
  @Override
  public void client() {

    Connector conn = getConnector();
    String tableName = getTestProperty("TABLE");

    // get batch writer configuration
    long maxMemory = Long.parseLong(getTestProperty("MAX_MEMORY"));
    long maxLatency = Long.parseLong(getTestProperty("MAX_LATENCY"));
    int maxWriteThreads = Integer.parseInt(getTestProperty("NUM_THREADS"));

    // create batch writer
    BatchWriter bw = null;
    try {
      bw =
          conn.createBatchWriter(
              tableName,
              new BatchWriterConfig()
                  .setMaxMemory(maxMemory)
                  .setMaxLatency(maxLatency, TimeUnit.MILLISECONDS)
                  .setMaxWriteThreads(maxWriteThreads));
    } catch (TableNotFoundException e) {
      log.error("Table '" + tableName + "' not found.", e);
    }

    // configure writing
    Random r = new Random();
    String ingestInstanceId = UUID.randomUUID().toString();
    long numIngestEntries = Long.parseLong(getTestProperty("NUM_ENTRIES"));
    long minRow = 0L;
    long maxRow = Long.MAX_VALUE;
    int maxColF = Short.MAX_VALUE;
    int maxColQ = Short.MAX_VALUE;
    long count = 0;
    long totalBytes = 0;

    ColumnVisibility cv = new ColumnVisibility();

    // start timer
    startTimer();

    // write specified number of entries
    while (count < numIngestEntries) {
      count++;
      long rowId = ContinuousIngest.genLong(minRow, maxRow, r);
      Mutation m =
          ContinuousIngest.genMutation(
              rowId,
              r.nextInt(maxColF),
              r.nextInt(maxColQ),
              cv,
              ingestInstanceId.getBytes(StandardCharsets.UTF_8),
              count,
              null,
              r,
              false);
      totalBytes += m.numBytes();
      try {
        bw.addMutation(m);
      } catch (MutationsRejectedException e) {
        log.error("Mutations rejected.", e);
        System.exit(-1);
      }
    }

    // close writer
    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      log.error("Could not close BatchWriter due to mutations being rejected.", e);
      System.exit(-1);
    }

    // stop timer
    stopTimer(count, totalBytes);
  }
  @Test
  public void waitsUntilEntriesAreReplicated() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");
    Text tableId = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(),
        file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);

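    // record a pending (not yet fully replicated) status for each WAL file in the replication table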
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    m = new Mutation(file2);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));

    bw.addMutation(m);

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();

    Assert.assertFalse(done.get());

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();
    bw.close();

    // Removing metadata entries doesn't change anything
    Assert.assertFalse(done.get());

    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();

    Assert.assertFalse(done.get());

    m = new Mutation(file2);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // After both metadata and replication
    Assert.assertTrue(done.get());
    Assert.assertFalse(exception.get());
  }
  @Test
  public void laterCreatedLogsDontBlockExecution() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");

    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    System.out.println("Reading metadata first time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // We need to wait long enough for the drain thread to read the table once
    Thread.sleep(2000);

    // Write another file, but also delete the old files
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m =
        new Mutation(
            ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId1);
    bw.addMutation(m);
    bw.close();

    System.out.println("Reading metadata second time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId1);
    bw.addMutation(m);
    bw.close();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperatiotns.drain did not complete");
    }

    // We should pass immediately because we aren't waiting on both files to be deleted, only the
    // one that existed when drain() was called
    Assert.assertTrue(done.get());
  }
  @Test
  public void inprogressReplicationRecordsBlockExecution() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");

    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);

    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();

    LogEntry logEntry = new LogEntry();
    logEntry.extent = new KeyExtent(new Text(tableId1), null, null);
    logEntry.server = "tserver";
    logEntry.filename = file1;
    logEntry.tabletId = 1;
    logEntry.logSet = Arrays.asList(file1);
    logEntry.timestamp = System.currentTimeMillis();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

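    // also write the WAL log entry to the metadata table so the file appears to still be in use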
    m = new Mutation(logEntry.getRow());
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    bw.addMutation(m);

    bw.close();

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());

    Status newStatus =
        Status.newBuilder()
            .setBegin(1000)
            .setEnd(2000)
            .setInfiniteEnd(false)
            .setClosed(true)
            .build();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();

    // Updating the metadata entry with a still-incomplete status doesn't change anything
    Assert.assertFalse(done.get());

    // Update the replication entry too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.put(StatusSection.NAME, tableId1, ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // New records, but not fully replicated ones don't cause it to complete
    Assert.assertFalse(done.get());
    Assert.assertFalse(exception.get());
  }
  @Test(timeout = 40 * 1000)
  public void testFilesAreGarbageCollected() throws Exception {
    ZooKeeperInstance inst =
        new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
    Connector c = inst.getConnector("root", new PasswordToken(passwd));

    final String table = "foobar";
    c.tableOperations().create(table);

    final String tableId = c.tableOperations().tableIdMap().get(table);

    BatchWriter bw = null;

    // Add some data
    try {
      bw =
          c.createBatchWriter(
              table,
              new BatchWriterConfig()
                  .setMaxMemory(100000l)
                  .setMaxLatency(100, TimeUnit.MILLISECONDS)
                  .setMaxWriteThreads(1));
      Mutation m = new Mutation("a");
      for (int i = 0; i < 500; i++) {
        m.put("colf", Integer.toString(i), "");
      }

      bw.addMutation(m);
    } finally {
      if (null != bw) {
        bw.close();
      }
    }

    File accumuloDir = new File(testDir, "accumulo");
    File tables = new File(accumuloDir.getAbsolutePath(), "tables");
    File myTable = new File(tables, tableId);

    log.trace(
        "Files before compaction: "
            + FileUtils.listFiles(myTable, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE));

    final boolean flush = true, wait = true;

    // Compact the tables to get some rfiles which we can gc
    c.tableOperations().compact(table, null, null, flush, wait);

    Collection<File> filesAfterCompaction =
        FileUtils.listFiles(myTable, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE);
    int fileCountAfterCompaction = filesAfterCompaction.size();

    log.trace("Files after compaction: " + filesAfterCompaction);

    // Sleep for 10s to let the GC do its thing
    for (int i = 1; i < 10; i++) {
      Thread.sleep(1000);
      filesAfterCompaction =
          FileUtils.listFiles(myTable, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE);

      log.trace("Files in loop: " + filesAfterCompaction);

      int fileCountAfterGCWait = filesAfterCompaction.size();

      if (fileCountAfterGCWait < fileCountAfterCompaction) {
        return;
      }
    }

    Assert.fail("Expected to find less files after compaction and pause for GC");
  }
  @Override
  public void visit(State state, Properties props) throws Exception {
    boolean userExists = SecurityHelper.getTabUserExists(state);
    Connector conn;
    try {
      conn =
          state
              .getInstance()
              .getConnector(
                  SecurityHelper.getTabUserName(state), SecurityHelper.getTabUserPass(state));
    } catch (AccumuloSecurityException ae) {
      if (ae.getErrorCode().equals(SecurityErrorCode.BAD_CREDENTIALS)) {
        if (userExists)
          throw new AccumuloException(
              "User didn't exist when they should (or worse- password mismatch)", ae);
        else return;
      }
      throw new AccumuloException("Unexpected exception!", ae);
    }
    String action = props.getProperty("action", "_random");
    TablePermission tp;
    if ("_random".equalsIgnoreCase(action)) {
      Random r = new Random();
      tp = TablePermission.values()[r.nextInt(TablePermission.values().length)];
    } else {
      tp = TablePermission.valueOf(action);
    }

    boolean tableExists = SecurityHelper.getTableExists(state);
    boolean hasPerm = SecurityHelper.getTabPerm(state, SecurityHelper.getTabUserName(state), tp);

    String tableName = state.getString("secTableName");

    switch (tp) {
      case READ:
        Authorizations auths =
            SecurityHelper.getUserAuths(state, SecurityHelper.getTabUserName(state));
        boolean canRead =
            SecurityHelper.getTabPerm(
                state, SecurityHelper.getTabUserName(state), TablePermission.READ);
        try {
          Scanner scan =
              conn.createScanner(
                  tableName,
                  conn.securityOperations()
                      .getUserAuthorizations(SecurityHelper.getTabUserName(state)));
          int seen = 0;
          Iterator<Entry<Key, Value>> iter = scan.iterator();
          while (iter.hasNext()) {
            Entry<Key, Value> entry = iter.next();
            Key k = entry.getKey();
            seen++;
            if (!auths.contains(k.getColumnVisibilityData()))
              throw new AccumuloException(
                  "Got data I should not be capable of seeing: " + k + " table " + tableName);
          }
          if (!canRead)
            throw new AccumuloException(
                "Was able to read when I shouldn't have had the perm with connection user "
                    + conn.whoami()
                    + " table "
                    + tableName);
          for (Entry<String, Integer> entry : SecurityHelper.getAuthsMap(state).entrySet()) {
            if (auths.contains(entry.getKey().getBytes())) seen = seen - entry.getValue();
          }
          if (seen != 0) throw new AccumuloException("Got mismatched amounts of data");
        } catch (TableNotFoundException tnfe) {
          if (tableExists)
            throw new AccumuloException(
                "Accumulo and test suite out of sync: table " + tableName, tnfe);
          return;
        } catch (AccumuloSecurityException ae) {
          if (ae.getErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) {
            if (canRead)
              throw new AccumuloException(
                  "Table read permission out of sync with Accumulo: table " + tableName, ae);
            else return;
          }
          throw new AccumuloException("Unexpected exception!", ae);
        } catch (RuntimeException re) {
          if (re.getCause() instanceof AccumuloSecurityException
              && ((AccumuloSecurityException) re.getCause())
                  .getErrorCode()
                  .equals(SecurityErrorCode.PERMISSION_DENIED)) {
            if (canRead)
              throw new AccumuloException(
                  "Table read permission out of sync with Accumulo: table " + tableName,
                  re.getCause());
            else return;
          }
          throw new AccumuloException("Unexpected exception!", re);
        }

        break;
      case WRITE:
        String key = SecurityHelper.getLastKey(state) + "1";
        Mutation m = new Mutation(new Text(key));
        for (String s : SecurityHelper.getAuthsArray()) {
          m.put(new Text(), new Text(), new ColumnVisibility(s), new Value("value".getBytes()));
        }
        BatchWriter writer;
        try {
          writer = conn.createBatchWriter(tableName, 9000l, 0l, 1);
        } catch (TableNotFoundException tnfe) {
          if (tableExists)
            throw new AccumuloException("Table didn't exist when it should have: " + tableName);
          return;
        }
        boolean works = true;
        try {
          writer.addMutation(m);
        } catch (MutationsRejectedException mre) {
          throw new AccumuloException("Mutation exception!", mre);
        }
        if (works)
          for (String s : SecurityHelper.getAuthsArray())
            SecurityHelper.increaseAuthMap(state, s, 1);
        break;
      case BULK_IMPORT:
        key = SecurityHelper.getLastKey(state) + "1";
        SortedSet<Key> keys = new TreeSet<Key>();
        for (String s : SecurityHelper.getAuthsArray()) {
          Key k = new Key(key, "", "", s);
          keys.add(k);
        }
        Path dir = new Path("/tmp", "bulk_" + UUID.randomUUID().toString());
        Path fail = new Path(dir.toString() + "_fail");
        FileSystem fs = SecurityHelper.getFs(state);
        FileSKVWriter f =
            FileOperations.getInstance()
                .openWriter(
                    dir + "/securityBulk." + RFile.EXTENSION,
                    fs,
                    fs.getConf(),
                    AccumuloConfiguration.getDefaultConfiguration());
        f.startDefaultLocalityGroup();
        fs.mkdirs(fail);
        for (Key k : keys) f.append(k, new Value("Value".getBytes()));
        f.close();
        try {
          conn.tableOperations().importDirectory(tableName, dir.toString(), fail.toString(), true);
        } catch (TableNotFoundException tnfe) {
          if (tableExists)
            throw new AccumuloException("Table didn't exist when it should have: " + tableName);
          return;
        } catch (AccumuloSecurityException ae) {
          if (ae.getErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) {
            if (hasPerm)
              throw new AccumuloException(
                  "Bulk Import failed when it should have worked: " + tableName);
            return;
          }
          throw new AccumuloException("Unexpected exception!", ae);
        }
        for (String s : SecurityHelper.getAuthsArray()) SecurityHelper.increaseAuthMap(state, s, 1);
        fs.delete(dir, true);
        fs.delete(fail, true);

        if (!hasPerm)
          throw new AccumuloException(
              "Bulk Import succeeded when it should have failed: " + dir + " table " + tableName);
        break;
      case ALTER_TABLE:
        AlterTable.renameTable(conn, state, tableName, tableName + "plus", hasPerm, tableExists);
        break;

      case GRANT:
        props.setProperty("task", "grant");
        props.setProperty("perm", "random");
        props.setProperty("source", "table");
        props.setProperty("target", "system");
        AlterTablePerm.alter(state, props);
        break;

      case DROP_TABLE:
        props.setProperty("source", "table");
        DropTable.dropTable(state, props);
        break;
    }
  }
  /**
   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
   *
   * @throws AccumuloException
   * @throws AccumuloSecurityException
   * @throws TableNotFoundException
   */
  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {

    String seed = null;

    int index = 0;
    String processedArgs[] = new String[13];
    for (int i = 0; i < args.length; i++) {
      if (args[i].equals("-s")) {
        seed = args[++i];
      } else {
        processedArgs[index++] = args[i];
      }
    }

    if (index != 13) {
      System.out.println(
          "Usage : RandomBatchWriter [-s <seed>] <instance name> <zoo keepers> <username> <password> <table> <num> <min> <max> <value size> <max memory> <max latency> <num threads> <visibility>");
      return;
    }

    String instanceName = processedArgs[0];
    String zooKeepers = processedArgs[1];
    String user = processedArgs[2];
    byte[] pass = processedArgs[3].getBytes();
    String table = processedArgs[4];
    int num = Integer.parseInt(processedArgs[5]);
    long min = Long.parseLong(processedArgs[6]);
    long max = Long.parseLong(processedArgs[7]);
    int valueSize = Integer.parseInt(processedArgs[8]);
    long maxMemory = Long.parseLong(processedArgs[9]);
    long maxLatency =
        Long.parseLong(processedArgs[10]) == 0 ? Long.MAX_VALUE : Long.parseLong(processedArgs[10]);
    int numThreads = Integer.parseInt(processedArgs[11]);
    String visibility = processedArgs[12];

    // Uncomment the following lines for detailed debugging info
    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
    // logger.setLevel(Level.TRACE);

    Random r;
    if (seed == null) {
      r = new Random();
    } else {
      r = new Random(Long.parseLong(seed));
    }

    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
    Connector connector = instance.getConnector(user, pass);
    BatchWriter bw = connector.createBatchWriter(table, maxMemory, maxLatency, numThreads);

    // reuse the ColumnVisibility object to improve performance
    ColumnVisibility cv = new ColumnVisibility(visibility);

    for (int i = 0; i < num; i++) {

      long rowid = (Math.abs(r.nextLong()) % (max - min)) + min;

      Mutation m = createMutation(rowid, valueSize, cv);

      bw.addMutation(m);
    }

    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      if (e.getAuthorizationFailures().size() > 0) {
        HashSet<String> tables = new HashSet<String>();
        for (KeyExtent ke : e.getAuthorizationFailures()) {
          tables.add(ke.getTableId().toString());
        }
        System.err.println("ERROR : Not authorized to write to tables : " + tables);
      }

      if (e.getConstraintViolationSummaries().size() > 0) {
        System.err.println(
            "ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
      }
    }
  }