Example #1
  public static void run(
      String instanceName, String zookeepers, AuthenticationToken rootPassword, String args[])
      throws Exception {
    // edit this method to play with Accumulo

    Instance instance = new ZooKeeperInstance(instanceName, zookeepers);

    Connector conn = instance.getConnector("root", rootPassword);

    conn.tableOperations().create("foo");

    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxLatency(60000L, java.util.concurrent.TimeUnit.MILLISECONDS);
    bwConfig.setMaxWriteThreads(3);
    bwConfig.setMaxMemory(50000000);
    BatchWriter bw = conn.createBatchWriter("foo", bwConfig);
    Mutation m = new Mutation("r1");
    m.put("cf1", "cq1", "v1");
    m.put("cf1", "cq2", "v3");
    bw.addMutation(m);
    bw.close();

    Scanner scanner = conn.createScanner("foo", Constants.NO_AUTHS);
    for (Entry<Key, Value> entry : scanner) {
      System.out.println(entry.getKey() + " " + entry.getValue());
    }
  }
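The scanner above uses the deprecated Constants.NO_AUTHS constant. Below is a minimal sketch of the same write-then-scan flow, assuming the table "foo" already exists and a Connector is at hand; it only swaps in Authorizations.EMPTY and spells out what each BatchWriterConfig knob does.

  // Sketch only: assumes `conn` is a live Connector and table "foo" exists (Accumulo 1.x client API).
  public static void writeAndScan(Connector conn) throws Exception {
    BatchWriterConfig cfg = new BatchWriterConfig();
    cfg.setMaxMemory(50 * 1024 * 1024); // buffer up to ~50 MB of mutations client-side
    cfg.setMaxLatency(60, java.util.concurrent.TimeUnit.SECONDS); // flush buffered data at least once a minute
    cfg.setMaxWriteThreads(3); // threads used to send mutations to tablet servers

    BatchWriter bw = conn.createBatchWriter("foo", cfg);
    Mutation m = new Mutation("r2");
    m.put("cf1", "cq1", "v2");
    bw.addMutation(m);
    bw.close();

    Scanner scanner = conn.createScanner("foo", Authorizations.EMPTY);
    for (Entry<Key, Value> entry : scanner) {
      System.out.println(entry.getKey() + " " + entry.getValue());
    }
  }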
Example #2
 @Test
 public void test() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
   Mutation m = new Mutation("row1");
   m.put("cf", "col1", "Test");
   bw.addMutation(m);
   bw.close();
   scanCheck(c, tableName, "Test");
   FileSystem fs = getCluster().getFileSystem();
   Path jarPath = new Path(rootPath + "/lib/ext/Test.jar");
   copyStreamToFileSystem(fs, "/TestCombinerX.jar", jarPath);
   sleepUninterruptibly(1, TimeUnit.SECONDS);
   IteratorSetting is =
       new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner");
   Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf")));
   c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.scan));
   sleepUninterruptibly(ZOOKEEPER_PROPAGATION_TIME, TimeUnit.MILLISECONDS);
   scanCheck(c, tableName, "TestX");
   fs.delete(jarPath, true);
   copyStreamToFileSystem(fs, "/TestCombinerY.jar", jarPath);
   sleepUninterruptibly(5, TimeUnit.SECONDS);
   scanCheck(c, tableName, "TestY");
   fs.delete(jarPath, true);
 }
Example #3
  private static long write(Connector conn, ArrayList<byte[]> cfset, String table)
      throws TableNotFoundException, MutationsRejectedException {
    Random rand = new Random();

    byte val[] = new byte[50];

    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());

    long t1 = System.currentTimeMillis();

    for (int i = 0; i < 1 << 15; i++) {
      byte[] row = FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0]);

      Mutation m = new Mutation(row);
      for (byte[] cf : cfset) {
        byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]);
        rand.nextBytes(val);
        m.put(cf, cq, val);
      }

      bw.addMutation(m);
    }

    bw.close();

    long t2 = System.currentTimeMillis();

    return t2 - t1;
  }
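Since write() returns the elapsed wall-clock time in milliseconds, a usage sketch can turn that into a rough throughput number. The table name and column-family set below are invented for illustration.

  // Usage sketch only; "throughput_test" and the column families are made up.
  private static void reportThroughput(Connector conn) throws Exception {
    ArrayList<byte[]> cfset = new ArrayList<>();
    for (int i = 0; i < 4; i++) {
      cfset.add(("cf" + i).getBytes(StandardCharsets.UTF_8));
    }
    long millis = write(conn, cfset, "throughput_test");
    long mutations = 1 << 15; // one mutation per loop iteration in write()
    long entries = mutations * cfset.size(); // one entry per column family per mutation
    System.out.printf(
        "wrote %,d entries in %,d ms (%.0f entries/sec)%n", entries, millis, entries * 1000.0 / millis);
  }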
Example #4
  @Test
  public void test() throws Exception {
    Connector c = getConnector();
    // make a table
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // write to it
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();

    // create a fake _tmp file in its directory
    String id = c.tableOperations().tableIdMap().get(tableName);
    FileSystem fs = getCluster().getFileSystem();
    Path tmp = new Path("/accumulo/tables/" + id + "/default_tablet/junk.rf_tmp");
    fs.create(tmp).close();
    for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
      getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
    }
    getCluster().start();

    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
    FunctionalTestUtils.count(scanner);
    assertFalse(fs.exists(tmp));
  }
Example #5
  private void runMergeTest(
      Connector conn,
      String table,
      String[] splits,
      String[] expectedSplits,
      String[] inserts,
      String start,
      String end)
      throws Exception {
    System.out.println(
        "Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);

    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
    TreeSet<Text> splitSet = new TreeSet<Text>();
    for (String split : splits) {
      splitSet.add(new Text(split));
    }
    conn.tableOperations().addSplits(table, splitSet);

    BatchWriter bw = conn.createBatchWriter(table, null);
    HashSet<String> expected = new HashSet<String>();
    for (String row : inserts) {
      Mutation m = new Mutation(row);
      m.put("cf", "cq", row);
      bw.addMutation(m);
      expected.add(row);
    }

    bw.close();

    conn.tableOperations()
        .merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));

    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);

    HashSet<String> observed = new HashSet<String>();
    for (Entry<Key, Value> entry : scanner) {
      String row = entry.getKey().getRowData().toString();
      if (!observed.add(row)) {
        throw new Exception("Saw data twice " + table + " " + row);
      }
    }

    if (!observed.equals(expected)) {
      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
    }

    HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
    HashSet<Text> ess = new HashSet<Text>();
    for (String es : expectedSplits) {
      ess.add(new Text(es));
    }

    if (!currentSplits.equals(ess)) {
      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
    }
  }
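A hedged example of driving this helper; the table name, split points, and rows are invented. Merging the whole range (null to null) should remove the single split, so the expected-splits array is empty.

  // Illustrative invocation only; all arguments are made up for this sketch.
  private void mergeWholeTableExample(Connector conn) throws Exception {
    runMergeTest(
        conn,
        "mergeTest1",
        new String[] {"m"}, // initial split point
        new String[] {}, // merging the whole table should leave no splits
        new String[] {"a", "m", "z"}, // rows to insert
        null, // merge start (null means the beginning of the table)
        null); // merge end (null means the end of the table)
  }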
Example #6
  @Before
  public void createMockKeyValues() throws Exception {
    // Make a MockInstance here; by setting the instance name to be the same as this mock
    // instance, we can "trick" the InputFormat into using a MockInstance
    mockInstance = new MockInstance(test.getMethodName());
    inputformat = new HiveAccumuloTableInputFormat();
    conf = new JobConf();
    conf.set(AccumuloSerDeParameters.TABLE_NAME, TEST_TABLE);
    conf.set(AccumuloSerDeParameters.USE_MOCK_INSTANCE, "true");
    conf.set(AccumuloSerDeParameters.INSTANCE_NAME, test.getMethodName());
    conf.set(AccumuloSerDeParameters.USER_NAME, USER);
    conf.set(AccumuloSerDeParameters.USER_PASS, PASS);
    conf.set(AccumuloSerDeParameters.ZOOKEEPERS, "localhost:2181"); // not used for mock, but
    // required by input format.

    columnNames = Arrays.asList("name", "sid", "dgrs", "mills");
    columnTypes =
        Arrays.<TypeInfo>asList(
            TypeInfoFactory.stringTypeInfo,
            TypeInfoFactory.intTypeInfo,
            TypeInfoFactory.doubleTypeInfo,
            TypeInfoFactory.longTypeInfo);
    conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:name,cf:sid,cf:dgrs,cf:mills");
    conf.set(serdeConstants.LIST_COLUMNS, "name,sid,dgrs,mills");
    conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,double,bigint");

    con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes()));
    con.tableOperations().create(TEST_TABLE);
    con.securityOperations().changeUserAuthorizations(USER, new Authorizations("blah"));
    BatchWriterConfig writerConf = new BatchWriterConfig();
    BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf);

    Mutation m1 = new Mutation(new Text("r1"));
    m1.put(COLUMN_FAMILY, NAME, new Value("brian".getBytes()));
    m1.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("1")));
    m1.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("44.5")));
    m1.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("555")));

    Mutation m2 = new Mutation(new Text("r2"));
    m2.put(COLUMN_FAMILY, NAME, new Value("mark".getBytes()));
    m2.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("2")));
    m2.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("55.5")));
    m2.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("666")));

    Mutation m3 = new Mutation(new Text("r3"));
    m3.put(COLUMN_FAMILY, NAME, new Value("dennis".getBytes()));
    m3.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("3")));
    m3.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("65.5")));
    m3.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("777")));

    writer.addMutation(m1);
    writer.addMutation(m2);
    writer.addMutation(m3);

    writer.close();
  }
Example #7
  private static AccumuloBackedGraph setupGraph(
      Instance instance, Connector conn, String tableName, int numEntries) {
    long ageOffTimeInMilliseconds = (30 * 24 * 60 * 60 * 1000L); // 30 days in milliseconds

    try {
      // Create the table. This helper creates the table, removes the versioning iterator, adds the
      // SetOfStatisticsCombiner iterator, and configures the age-off iterator to remove data once
      // it is more than ageOffTimeInMilliseconds milliseconds old.
      TableUtils.createTable(conn, tableName, ageOffTimeInMilliseconds);

      // Create numEntries edges and add to Accumulo
      BatchWriter writer = conn.createBatchWriter(tableName, 1000000L, 1000L, 1);
      for (int i = 0; i < numEntries; i++) {
        Edge edge =
            new Edge(
                "customer",
                "" + i,
                "product",
                "B",
                "purchase",
                "instore",
                true,
                visibilityString,
                sevenDaysBefore,
                sixDaysBefore);
        SetOfStatistics statistics = new SetOfStatistics();
        statistics.addStatistic("count", new Count(i));
        Key key = ConversionUtils.getKeysFromEdge(edge).getFirst();
        Value value = ConversionUtils.getValueFromSetOfStatistics(statistics);
        Mutation m = new Mutation(key.getRow());
        m.put(
            key.getColumnFamily(),
            key.getColumnQualifier(),
            new ColumnVisibility(key.getColumnVisibility()),
            key.getTimestamp(),
            value);
        writer.addMutation(m);
      }
      writer.close();

      // Create Accumulo backed graph
      AccumuloBackedGraph graph = new AccumuloBackedGraph(conn, tableName);
      return graph;
    } catch (AccumuloException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    } catch (AccumuloSecurityException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    } catch (TableExistsException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    } catch (TableNotFoundException e) {
      fail("Failed to set up graph in Accumulo with exception: " + e);
    }
    return null;
  }
Example #8
  @Override
  public void visit(State state, Properties props) throws Exception {
    Connector conn = state.getConnector();

    Random rand = (Random) state.get("rand");

    @SuppressWarnings("unchecked")
    List<String> tableNames = (List<String>) state.get("tables");

    String tableName = tableNames.get(rand.nextInt(tableNames.size()));

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs = FileSystem.get(conf);

    String bulkDir = "/tmp/concurrent_bulk/b_" + String.format("%016x", Math.abs(rand.nextLong()));

    fs.mkdirs(new Path(bulkDir));
    fs.mkdirs(new Path(bulkDir + "_f"));

    try {
      BatchWriter bw = new RFileBatchWriter(conf, fs, bulkDir + "/file01.rf");
      try {
        TreeSet<Long> rows = new TreeSet<Long>();
        int numRows = rand.nextInt(100000);
        for (int i = 0; i < numRows; i++) {
          rows.add(Math.abs(rand.nextLong()));
        }

        for (Long row : rows) {
          Mutation m = new Mutation(String.format("%016x", row));
          long val = Math.abs(rand.nextLong());
          for (int j = 0; j < 10; j++) {
            m.put("cf", "cq" + j, new Value(String.format("%016x", val).getBytes()));
          }

          bw.addMutation(m);
        }
      } finally {
        bw.close();
      }

      conn.tableOperations()
          .importDirectory(tableName, bulkDir, bulkDir + "_f", rand.nextBoolean());

      log.debug("BulkImported to " + tableName);
    } catch (TableNotFoundException e) {
      log.debug("BulkImport " + tableName + " failed, doesnt exist");
    } catch (TableOfflineException toe) {
      log.debug("BulkImport " + tableName + " failed, offline");
    } finally {
      fs.delete(new Path(bulkDir), true);
      fs.delete(new Path(bulkDir + "_f"), true);
    }
  }
Example #9
  @Test
  public void testGetProtectedField() throws Exception {
    FileInputFormat.addInputPath(conf, new Path("unused"));

    BatchWriterConfig writerConf = new BatchWriterConfig();
    BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf);

    Authorizations origAuths = con.securityOperations().getUserAuthorizations(USER);
    con.securityOperations()
        .changeUserAuthorizations(USER, new Authorizations(origAuths.toString() + ",foo"));

    Mutation m = new Mutation("r4");
    m.put(COLUMN_FAMILY, NAME, new ColumnVisibility("foo"), new Value("frank".getBytes()));
    m.put(COLUMN_FAMILY, SID, new ColumnVisibility("foo"), new Value(parseIntBytes("4")));
    m.put(COLUMN_FAMILY, DEGREES, new ColumnVisibility("foo"), new Value(parseDoubleBytes("60.6")));
    m.put(COLUMN_FAMILY, MILLIS, new ColumnVisibility("foo"), new Value(parseLongBytes("777")));

    writer.addMutation(m);
    writer.close();

    conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo");

    InputSplit[] splits = inputformat.getSplits(conf, 0);
    assertEquals(splits.length, 1);
    RecordReader<Text, AccumuloHiveRow> reader = inputformat.getRecordReader(splits[0], conf, null);
    Text rowId = new Text("r1");
    AccumuloHiveRow row = new AccumuloHiveRow();
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "brian".getBytes());

    rowId = new Text("r2");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "mark".getBytes());

    rowId = new Text("r3");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "dennis".getBytes());

    rowId = new Text("r4");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "frank".getBytes());

    assertFalse(reader.next(rowId, row));
  }
Example #10
  /** Write entries to a table. */
  public static void writeEntries(
      Connector connector, Map<Key, Value> map, String table, boolean createIfNotExist) {
    if (createIfNotExist && !connector.tableOperations().exists(table))
      try {
        connector.tableOperations().create(table);
      } catch (AccumuloException | AccumuloSecurityException e) {
        log.error("trouble creating " + table, e);
        throw new RuntimeException(e);
      } catch (TableExistsException e) {
        log.error("crazy", e);
        throw new RuntimeException(e);
      }

    BatchWriterConfig bwc = new BatchWriterConfig();
    BatchWriter bw;
    try {
      bw = connector.createBatchWriter(table, bwc);
    } catch (TableNotFoundException e) {
      log.error("tried to write to a non-existant table " + table, e);
      throw new RuntimeException(e);
    }

    try {
      for (Map.Entry<Key, Value> entry : map.entrySet()) {
        Key k = entry.getKey();
        ByteSequence rowData = k.getRowData(),
            cfData = k.getColumnFamilyData(),
            cqData = k.getColumnQualifierData();
        Mutation m = new Mutation(rowData.getBackingArray(), rowData.offset(), rowData.length());
        m.put(
            cfData.getBackingArray(),
            cqData.getBackingArray(),
            k.getColumnVisibilityParsed(),
            entry.getValue().get());
        bw.addMutation(m);
      }

    } catch (MutationsRejectedException e) {
      log.error("mutations rejected", e);
      throw new RuntimeException(e);
    } finally {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("mutations rejected while trying to close BatchWriter", e);
      }
    }
  }
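A short usage sketch for writeEntries; the table name and entries below are invented, and the Connector is assumed to exist.

  // Usage sketch only; table name and entries are invented for illustration.
  public static void writeEntriesExample(Connector connector) {
    SortedMap<Key, Value> entries = new TreeMap<>();
    entries.put(
        new Key(new Text("r1"), new Text("cf"), new Text("cq1")),
        new Value("v1".getBytes(StandardCharsets.UTF_8)));
    entries.put(
        new Key(new Text("r1"), new Text("cf"), new Text("cq2")),
        new Value("v2".getBytes(StandardCharsets.UTF_8)));
    writeEntries(connector, entries, "scratch_table", true);
  }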
Example #11
  /**
   * Insert the contents of an NDSI data file into an Accumulo table.
   *
   * @param file Input file
   * @param Atable Table to insert into
   * @param deleteIfExists Whether to delete Atable first if it already exists
   * @return Number of entries inserted into Atable; equal to 2x the number of rows.
   * @throws IOException
   */
  public long ingestFile(File file, String Atable, boolean deleteIfExists) throws IOException {
    if (deleteIfExists && connector.tableOperations().exists(Atable))
      try {
        connector.tableOperations().delete(Atable);
      } catch (AccumuloException | AccumuloSecurityException e) {
        log.warn("trouble deleting table " + Atable, e);
        throw new RuntimeException(e);
      } catch (TableNotFoundException e) {
        throw new RuntimeException(e);
      }
    if (!connector.tableOperations().exists(Atable))
      try {
        connector.tableOperations().create(Atable);
      } catch (AccumuloException | AccumuloSecurityException e) {
        log.warn("trouble creating table " + Atable, e);
        throw new RuntimeException(e);
      } catch (TableExistsException e) {
        throw new RuntimeException(e);
      }

    BatchWriter bw = null;
    String line = null;
    long entriesProcessed = 0;

    try (BufferedReader fo = new BufferedReader(new FileReader(file))) {
      BatchWriterConfig config = new BatchWriterConfig();
      bw = connector.createBatchWriter(Atable, config);

      // Skip header line
      fo.readLine();

      while ((line = fo.readLine()) != null)
        if (!line.isEmpty()) entriesProcessed += ingestLine(bw, line);

    } catch (TableNotFoundException e) {
      throw new RuntimeException(e);
    } catch (MutationsRejectedException e) {
      log.warn("Mutation rejected on line " + line, e);
    } finally {
      if (bw != null)
        try {
          bw.close();
        } catch (MutationsRejectedException e) {
          log.warn("Mutation rejected at close() on line " + line, e);
        }
    }
    return entriesProcessed;
  }
Example #12
  static void initializeClone(String srcTableId, String tableId, Connector conn, BatchWriter bw)
      throws TableNotFoundException, MutationsRejectedException {
    TabletIterator ti =
        new TabletIterator(
            createCloneScanner(srcTableId, conn),
            new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(),
            true,
            true);

    if (!ti.hasNext())
      throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId);

    while (ti.hasNext()) bw.addMutation(createCloneMutation(srcTableId, tableId, ti.next()));

    bw.flush();
  }
Example #13
 @Test
 public void merge() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
   BatchWriter bw = c.createBatchWriter(tableName, null);
   for (String row : "a b c d e f g h i j k".split(" ")) {
     Mutation m = new Mutation(row);
     m.put("cf", "cq", "value");
     bw.addMutation(m);
   }
   bw.close();
   c.tableOperations().flush(tableName, null, null, true);
   c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
   assertEquals(8, c.tableOperations().listSplits(tableName).size());
 }
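The splits(...) helper called above is not part of the Accumulo API and is not shown. A plausible sketch, assuming it simply wraps the split points in the SortedSet<Text> that addSplits expects:

  // Hypothetical helper matching the calls above; the real test helper may differ.
  private SortedSet<Text> splits(String[] points) {
    SortedSet<Text> result = new TreeSet<>();
    for (String point : points) {
      result.add(new Text(point));
    }
    return result;
  }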
Example #14
 private void flush() {
   try {
     writer.flush();
   } catch (MutationsRejectedException e) {
     log.error("Error flushing traces", e);
     resetWriter();
   }
 }
Example #15
  @Test
  public void testMap() throws Exception {
    MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
    c.tableOperations().create(TEST_TABLE_1);
    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
    for (int i = 0; i < 100; i++) {
      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
      bw.addMutation(m);
    }
    bw.close();

    MRTester.main(new String[] {"root", "", TEST_TABLE_1});
    assertNull(e1);
    assertNull(e2);
  }
Example #16
 public static void removeBulkLoadEntries(Connector conn, String tableId, long tid)
     throws Exception {
   Scanner mscanner =
       new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
   mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
   mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
   BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
   for (Entry<Key, Value> entry : mscanner) {
     log.debug("Looking at entry " + entry + " with tid " + tid);
     if (Long.parseLong(entry.getValue().toString()) == tid) {
       log.debug("deleting entry " + entry);
       Mutation m = new Mutation(entry.getKey().getRow());
       m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
       bw.addMutation(m);
     }
   }
   bw.close();
 }
Example #17
 @Test
 public void aggregationTest() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
   SummingCombiner.setEncodingType(setting, Type.STRING);
   SummingCombiner.setColumns(
       setting, Collections.singletonList(new IteratorSetting.Column("cf")));
   c.tableOperations().attachIterator(tableName, setting);
   BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
   for (int i = 0; i < 10; i++) {
     Mutation m = new Mutation("row1");
     m.put("cf".getBytes(), "col1".getBytes(), ("" + i).getBytes());
     bw.addMutation(m);
   }
   bw.close();
   checkSum(tableName, c);
 }
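The checkSum(...) helper is not shown. A hedged sketch of what it might verify: the STRING-encoded SummingCombiner attached above should collapse the ten values 0..9 into a single entry whose value is 45.

  // Hypothetical checkSum helper consistent with the test above; the real helper may differ.
  private void checkSum(String tableName, Connector c) throws Exception {
    Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
    Iterator<Entry<Key, Value>> iter = s.iterator();
    assertTrue(iter.hasNext());
    Entry<Key, Value> entry = iter.next();
    assertEquals("45", entry.getValue().toString());
    assertFalse(iter.hasNext());
  }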
Example #18
  static void runTest(Connector c, MiniAccumuloClusterImpl cluster)
      throws AccumuloException, AccumuloSecurityException, TableExistsException,
          TableNotFoundException, MutationsRejectedException, IOException, InterruptedException,
          NoSuchAlgorithmException {
    c.tableOperations().create(tablename);
    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
    for (int i = 0; i < 10; i++) {
      Mutation m = new Mutation("" + i);
      m.put(input_cf, input_cq, "row" + i);
      bw.addMutation(m);
    }
    bw.close();
    Process hash =
        cluster.exec(
            RowHash.class,
            Collections.singletonList(hadoopTmpDirArg),
            "-i",
            c.getInstance().getInstanceName(),
            "-z",
            c.getInstance().getZooKeepers(),
            "-u",
            "root",
            "-p",
            ROOT_PASSWORD,
            "-t",
            tablename,
            "--column",
            input_cfcq);
    assertEquals(0, hash.waitFor());

    Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
    s.fetchColumn(new Text(input_cf), new Text(output_cq));
    int i = 0;
    for (Entry<Key, Value> entry : s) {
      MessageDigest md = MessageDigest.getInstance("MD5");
      byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
      assertEquals(entry.getValue().toString(), new String(check));
      i++;
    }
  }
Example #19
 private synchronized void resetWriter() {
   try {
     if (writer != null) writer.close();
   } catch (Exception ex) {
     log.error("Error closing batch writer", ex);
   } finally {
     writer = null;
     try {
       writer = connector.createBatchWriter(table, new BatchWriterConfig());
     } catch (Exception ex) {
       log.error("Unable to create a batch writer: " + ex);
     }
   }
 }
Example #20
 @Test
 public void mergeSize() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   c.tableOperations()
       .addSplits(
           tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
   BatchWriter bw = c.createBatchWriter(tableName, null);
   for (String row : "c e f y".split(" ")) {
     Mutation m = new Mutation(row);
     m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
     bw.addMutation(m);
   }
   bw.close();
   c.tableOperations().flush(tableName, null, null, true);
   Merge merge = new Merge();
   merge.mergomatic(c, tableName, null, null, 100, false);
   assertArrayEquals(
       "b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
   merge.mergomatic(c, tableName, null, null, 100, true);
   assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
 }
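toStrings(...) is another helper that is not shown; presumably it converts the Collection<Text> returned by listSplits into a String[] so it can be compared with assertArrayEquals. A minimal sketch under that assumption:

  // Hypothetical toStrings helper matching the assertions above; the real helper may differ.
  private String[] toStrings(Collection<Text> splitPoints) {
    String[] result = new String[splitPoints.size()];
    int i = 0;
    for (Text split : splitPoints) {
      result[i++] = split.toString();
    }
    return result;
  }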
Example #21
  private static long scrambleDeleteHalfAndCheck(
      ClientOnDefaultTable opts,
      ScannerOpts scanOpts,
      BatchWriterOpts bwOpts,
      String tableName,
      Set<RowColumn> rows)
      throws Exception {
    int result = 0;
    ArrayList<RowColumn> entries = new ArrayList<RowColumn>(rows);
    java.util.Collections.shuffle(entries);

    Connector connector = opts.getConnector();
    BatchWriter mutations = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());

    for (int i = 0; i < (entries.size() + 1) / 2; i++) {
      RowColumn rc = entries.get(i);
      Mutation m = new Mutation(rc.row);
      m.putDelete(
          new Text(rc.column.columnFamily),
          new Text(rc.column.columnQualifier),
          new ColumnVisibility(rc.column.getColumnVisibility()),
          rc.timestamp + 1);
      mutations.addMutation(m);
      rows.remove(rc);
      result++;
    }

    mutations.close();

    Set<RowColumn> current = scanAll(opts, scanOpts, tableName);
    current.removeAll(rows);
    if (current.size() > 0) {
      throw new RuntimeException(current.size() + " records not deleted");
    }
    return result;
  }
Example #22
  private void createEntries(Opts opts)
      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {

    // Trace the write operation. Note: unless you flush the BatchWriter, you will not capture
    // the write operation, as it occurs asynchronously. You can optionally create additional
    // Spans within a given Trace, as seen below around the flush.
    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);

    System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
    BatchWriter batchWriter =
        opts.getConnector().createBatchWriter(opts.getTableName(), new BatchWriterConfig());

    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");

    batchWriter.addMutation(m);
    // You can add timeline annotations to Spans which will be able to be viewed in the Monitor
    scope.getSpan().addTimelineAnnotation("Initiating Flush");
    batchWriter.flush();

    batchWriter.close();
    scope.close();
  }
Example #23
  @Test(timeout = 60 * 1000)
  public void run() throws Exception {
    Connector c = getConnector();
    c.tableOperations().create("rdel1");
    Map<String, Set<Text>> groups = new HashMap<String, Set<Text>>();
    groups.put("lg1", Collections.singleton(new Text("foo")));
    groups.put("dg", Collections.<Text>emptySet());
    c.tableOperations().setLocalityGroups("rdel1", groups);
    IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
    c.tableOperations().attachIterator("rdel1", setting, EnumSet.of(IteratorScope.majc));
    c.tableOperations().setProperty("rdel1", Property.TABLE_MAJC_RATIO.getKey(), "100");

    BatchWriter bw = c.createBatchWriter("rdel1", new BatchWriterConfig());

    bw.addMutation(nm("r1", "foo", "cf1", "v1"));
    bw.addMutation(nm("r1", "bar", "cf1", "v2"));

    bw.flush();
    c.tableOperations().flush("rdel1", null, null, true);

    checkRFiles(c, "rdel1", 1, 1, 1, 1);

    int count = 0;
    Scanner scanner = c.createScanner("rdel1", Authorizations.EMPTY);
    for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
      count++;
    }
    if (count != 2) throw new Exception("1 count=" + count);

    bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));

    bw.flush();
    c.tableOperations().flush("rdel1", null, null, true);

    checkRFiles(c, "rdel1", 1, 1, 2, 2);

    count = 0;
    scanner = c.createScanner("rdel1", Authorizations.EMPTY);
    for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
      count++;
    }
    if (count != 3) throw new Exception("2 count=" + count);

    c.tableOperations().compact("rdel1", null, null, false, true);

    checkRFiles(c, "rdel1", 1, 1, 0, 0);

    count = 0;
    scanner = c.createScanner("rdel1", Authorizations.EMPTY);
    for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
      count++;
    }
    if (count != 0) throw new Exception("3 count=" + count);

    bw.close();
  }
Example #24
  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
          TableExistsException {
    System.out.println("START");

    String instanceName = "development";
    String zooKeepers = "localhost";
    String user = "******";
    byte[] pass = "******".getBytes();
    String tableName = "users";

    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
    Connector connector = instance.getConnector(user, pass);
    MultiTableBatchWriter writer = connector.createMultiTableBatchWriter(200000L, 300, 4);

    if (!connector.tableOperations().exists(tableName)) {
      connector.tableOperations().create(tableName);
    }

    BatchWriter bw = writer.getBatchWriter(tableName);

    try {
      String userId = "medined";
      int age = 48;
      int height = 70;
      Mutation m = new Mutation(new Text(userId));
      m.put(new Text("age"), new Text(""), new Value(new Integer(age).toString().getBytes()));
      m.put(new Text("height"), new Text(""), new Value(new Integer(height).toString().getBytes()));
      bw.addMutation(m);
    } finally {
      if (writer != null) {
        writer.close();
      }
    }
    System.out.println("END");
  }
Example #25
  private int ingestLine(BatchWriter bw, String line) throws MutationsRejectedException {
    String[] parts = line.split(",");
    String longitude = StringUtils.leftPad(parts[0], PADSIZE_LATLON, '0');
    String latitude = StringUtils.leftPad(parts[1], PADSIZE_LATLON, '0');
    String ndsi = parts[2];
    // String ndsi_count = parts[3];
    String land_sea_mask = parts[4];

    Text longText = new Text(longitude);
    Text latText = new Text(latitude);

    Mutation m = new Mutation(longText);
    m.put(COLF_NDSI, latText, new Value(ndsi.getBytes(StandardCharsets.UTF_8)));
    m.put(COLF_LSM, latText, new Value(land_sea_mask.getBytes(StandardCharsets.UTF_8)));
    bw.addMutation(m);

    return 2;
  }
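A hedged usage sketch for ingestLine: each call should add exactly two entries, one NDSI value and one land/sea-mask value, keyed by the zero-padded longitude with the latitude as the column qualifier. The CSV values below are invented, and bw is assumed to be an open BatchWriter for the target table.

  // Illustrative call only; the CSV line is made up. Fields are:
  // longitude, latitude, ndsi, ndsi_count (ignored), land_sea_mask
  int added = ingestLine(bw, "120.5,45.25,0.72,10,1");
  // added == 2: one entry under COLF_NDSI and one under COLF_LSM for this (longitude, latitude) cell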
Example #26
  @Test
  public void run() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);

    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());

    for (int i = 0; i < 1000; i++) {
      Mutation m = new Mutation(new Text(String.format("%08d", i)));
      for (int j = 0; j < 3; j++)
        m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));

      bw.addMutation(m);
    }

    bw.close();

    Scanner scanner = c.createScanner(tableName, new Authorizations());
    scanner.setReadaheadThreshold(20000);
    scanner.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000)));

    // test by making a slow iterator and then a couple of fast ones.
    // when checking, we shouldn't have any running except the slow iterator
    IteratorSetting setting = new IteratorSetting(21, SlowIterator.class);
    SlowIterator.setSeekSleepTime(setting, Long.MAX_VALUE);
    SlowIterator.setSleepTime(setting, Long.MAX_VALUE);
    scanner.addScanIterator(setting);

    final Iterator<Entry<Key, Value>> slow = scanner.iterator();

    final List<Future<Boolean>> callables = new ArrayList<>();
    final CountDownLatch latch = new CountDownLatch(10);
    for (int i = 0; i < 10; i++) {
      Future<Boolean> callable =
          service.submit(
              new Callable<Boolean>() {
                public Boolean call() {
                  latch.countDown();
                  while (slow.hasNext()) {

                    slow.next();
                  }
                  return slow.hasNext();
                }
              });
      callables.add(callable);
    }

    latch.await();

    log.info("Starting SessionBlockVerifyIT");

    // let's add more for good measure.
    for (int i = 0; i < 2; i++) {
      Scanner scanner2 = c.createScanner(tableName, new Authorizations());

      scanner2.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000)));

      scanner2.setBatchSize(1);
      Iterator<Entry<Key, Value>> iter = scanner2.iterator();
      // call super's verify mechanism
      verify(iter, 0, 1000);
    }

    int sessionsFound = 0;
    // we have configured 1 tserver, so we can grab the one and only
    String tserver = Iterables.getOnlyElement(c.instanceOperations().getTabletServers());

    final List<ActiveScan> scans = c.instanceOperations().getActiveScans(tserver);

    for (ActiveScan scan : scans) {
      // only here to minimize chance of seeing meta extent scans

      if (tableName.equals(scan.getTable()) && scan.getSsiList().size() > 0) {
        assertEquals("Not the expected iterator", 1, scan.getSsiList().size());
        assertTrue(
            "Not the expected iterator",
            scan.getSsiList().iterator().next().contains("SlowIterator"));
        sessionsFound++;
      }
    }

    /**
     * The message below indicates the problem that we experience within ACCUMULO-3509. The issue
     * manifests as a blockage in the Scanner synchronization that prevents us from making the close
     * call against it. Since the close blocks until a read is finished, we ultimately have a block
     * within the sweep of SessionManager. As a result, we never reap subsequent idle sessions AND we
     * will orphan the sessionsToCleanup in the sweep, leading to an inaccurate count within
     * sessionsFound.
     */
    assertEquals(
        "Must have ten sessions. Failure indicates a synchronization block within the sweep mechanism",
        10,
        sessionsFound);
    for (Future<Boolean> callable : callables) {
      callable.cancel(true);
    }
    service.shutdown();
  }
Example #27
  @Test
  public void waitsUntilEntriesAreReplicated() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");
    Text tableId = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(),
        file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);

    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    m = new Mutation(file2);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));

    bw.addMutation(m);

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));

    bw.addMutation(m);
    bw.close();

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();

    Assert.assertFalse(done.get());

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();
    bw.close();

    // Removing metadata entries doesn't change anything
    Assert.assertFalse(done.get());

    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();

    Assert.assertFalse(done.get());

    m = new Mutation(file2);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // After both metadata and replication
    Assert.assertTrue(done.get());
    Assert.assertFalse(exception.get());
  }
Example #28
  @Test
  public void laterCreatedLogsDontBlockExecution() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");

    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    System.out.println("Reading metadata first time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // We need to wait long enough for the table to read once
    Thread.sleep(2000);

    // Write another file, but also delete the old files
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m =
        new Mutation(
            ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId1);
    bw.addMutation(m);
    bw.close();

    System.out.println("Reading metadata second time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId1);
    bw.addMutation(m);
    bw.close();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperatiotns.drain did not complete");
    }

    // We should pass immediately because we aren't waiting on both files to be deleted (just the
    // one that we did)
    Assert.assertTrue(done.get());
  }
Example #29
  @Test
  public void inprogressReplicationRecordsBlockExecution() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");

    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);

    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();

    LogEntry logEntry = new LogEntry();
    logEntry.extent = new KeyExtent(new Text(tableId1), null, null);
    logEntry.server = "tserver";
    logEntry.filename = file1;
    logEntry.tabletId = 1;
    logEntry.logSet = Arrays.asList(file1);
    logEntry.timestamp = System.currentTimeMillis();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    m = new Mutation(logEntry.getRow());
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    bw.addMutation(m);

    bw.close();

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());

    Status newStatus =
        Status.newBuilder()
            .setBegin(1000)
            .setEnd(2000)
            .setInfiniteEnd(false)
            .setClosed(true)
            .build();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();

    // Removing metadata entries doesn't change anything
    Assert.assertFalse(done.get());

    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.put(StatusSection.NAME, tableId1, ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // New records, but not fully replicated ones don't cause it to complete
    Assert.assertFalse(done.get());
    Assert.assertFalse(exception.get());
  }
Example #30
  @Test
  public void testOptimizeQ6() throws Exception {

    RdfEvalStatsDAO<RdfCloudTripleStoreConfiguration> res =
        new ProspectorServiceEvalStatsDAO(conn, arc);
    AccumuloSelectivityEvalDAO accc = new AccumuloSelectivityEvalDAO();
    accc.setConf(arc);
    accc.setConnector(conn);
    accc.setRdfEvalDAO(res);
    accc.init();

    BatchWriter bw1 = conn.createBatchWriter("rya_prospects", config);
    BatchWriter bw2 = conn.createBatchWriter("rya_selectivity", config);

    String s1 =
        "predicateobject"
            + DELIM
            + "http://www.w3.org/2000/01/rdf-schema#label"
            + DELIM
            + "uri:dog";
    String s2 = "predicateobject" + DELIM + "uri:barksAt" + DELIM + "uri:cat";
    String s3 = "predicateobject" + DELIM + "uri:peesOn" + DELIM + "uri:hydrant";
    String s5 = "predicateobject" + DELIM + "uri:watches" + DELIM + "uri:television";
    String s4 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:chickens";
    String s6 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:kibble";
    String s7 = "predicateobject" + DELIM + "uri:rollsIn" + DELIM + "uri:mud";
    String s8 = "predicateobject" + DELIM + "uri:runsIn" + DELIM + "uri:field";
    String s9 = "predicateobject" + DELIM + "uri:smells" + DELIM + "uri:butt";
    String s10 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:sticks";

    List<Mutation> mList = new ArrayList<Mutation>();
    List<Mutation> mList2 = new ArrayList<Mutation>();
    List<String> sList =
        Arrays.asList(
            "subjectobject",
            "subjectpredicate",
            "subjectsubject",
            "predicateobject",
            "predicatepredicate",
            "predicatesubject");
    Mutation m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11;

    m1 = new Mutation(s1 + DELIM + "3");
    m1.put(new Text("count"), new Text(""), new Value("5".getBytes()));
    m2 = new Mutation(s2 + DELIM + "2");
    m2.put(new Text("count"), new Text(""), new Value("3".getBytes()));
    m3 = new Mutation(s3 + DELIM + "1");
    m3.put(new Text("count"), new Text(""), new Value("2".getBytes()));
    m4 = new Mutation(s4 + DELIM + "1");
    m4.put(new Text("count"), new Text(""), new Value("0".getBytes()));
    m5 = new Mutation(s5 + DELIM + "1");
    m5.put(new Text("count"), new Text(""), new Value("1".getBytes()));
    m6 = new Mutation(s6 + DELIM + "1");
    m6.put(new Text("count"), new Text(""), new Value("3".getBytes()));
    m7 = new Mutation(s7 + DELIM + "1");
    m7.put(new Text("count"), new Text(""), new Value("2".getBytes()));
    m8 = new Mutation(s8 + DELIM + "1");
    m8.put(new Text("count"), new Text(""), new Value("3".getBytes()));
    m9 = new Mutation(s9 + DELIM + "1");
    m9.put(new Text("count"), new Text(""), new Value("1".getBytes()));
    m10 = new Mutation(s10 + DELIM + "1");
    m10.put(new Text("count"), new Text(""), new Value("1".getBytes()));

    mList.add(m1);
    mList.add(m2);
    mList.add(m3);
    mList.add(m4);
    mList.add(m5);
    mList.add(m6);
    mList.add(m7);
    mList.add(m8);
    mList.add(m9);
    mList.add(m10);

    bw1.addMutations(mList);
    bw1.close();

    Scanner scan = conn.createScanner("rya_prospects", new Authorizations());
    scan.setRange(new Range());

    for (Map.Entry<Key, Value> entry : scan) {
      System.out.println("Key row string is " + entry.getKey().getRow().toString());
      System.out.println("Key is " + entry.getKey());
      System.out.println("Value is " + (new String(entry.getValue().get())));
    }

    m1 = new Mutation(s1);
    m2 = new Mutation(s2);
    m3 = new Mutation(s3);
    m4 = new Mutation(s4);
    m5 = new Mutation(s5);
    m6 = new Mutation(s6);
    m7 = new Mutation(s7);
    m8 = new Mutation(s8);
    m9 = new Mutation(s9);
    m10 = new Mutation(s10);
    m11 = new Mutation(new Text("subjectpredicateobject" + DELIM + "FullTableCardinality"));
    m11.put(new Text("FullTableCardinality"), new Text("100"), EMPTY_VAL);
    int i = 2;
    int j = 3;
    int k = 4;
    int l = 5;
    Long count1;
    Long count2;
    Long count3;
    Long count4;

    for (String s : sList) {
      count1 = (long) i;
      count2 = (long) j;
      count3 = (long) k;
      count4 = (long) l;
      m1.put(new Text(s), new Text(count4.toString()), EMPTY_VAL);
      m2.put(new Text(s), new Text(count2.toString()), EMPTY_VAL);
      m3.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
      m4.put(new Text(s), new Text(count3.toString()), EMPTY_VAL);
      m5.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
      m6.put(new Text(s), new Text(count2.toString()), EMPTY_VAL);
      m7.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
      m8.put(new Text(s), new Text(count4.toString()), EMPTY_VAL);
      m9.put(new Text(s), new Text(count3.toString()), EMPTY_VAL);
      m10.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);

      i = 2 * i;
      j = 2 * j;
      k = 2 * k;
      l = 2 * l;
    }
    mList2.add(m1);
    mList2.add(m2);
    mList2.add(m3);
    mList2.add(m5);
    mList2.add(m4);
    mList2.add(m6);
    mList2.add(m7);
    mList2.add(m8);
    mList2.add(m9);
    mList2.add(m10);
    mList2.add(m11);
    bw2.addMutations(mList2);
    bw2.close();

    scan = conn.createScanner("rya_selectivity", new Authorizations());
    scan.setRange(new Range());

    for (Map.Entry<Key, Value> entry : scan) {
      System.out.println("Key row string is " + entry.getKey().getRow().toString());
      System.out.println("Key is " + entry.getKey());
      System.out.println("Value is " + entry.getKey().getColumnQualifier().toString());
    }

    TupleExpr te = getTupleExpr(q6);
    TupleExpr te2 = (TupleExpr) te.clone();
    System.out.println("Bindings are " + te.getBindingNames());
    RdfCloudTripleStoreSelectivityEvaluationStatistics ars =
        new RdfCloudTripleStoreSelectivityEvaluationStatistics(arc, res, accc);
    QueryJoinSelectOptimizer qjs = new QueryJoinSelectOptimizer(ars, accc);
    System.out.println("Originial query is " + te);
    qjs.optimize(te, null, null);

    FilterOptimizer fo = new FilterOptimizer();
    fo.optimize(te2, null, null);
    System.out.print("filter optimized query before js opt is " + te2);
    qjs.optimize(te2, null, null);

    System.out.println("join selectivity opt query before filter opt is " + te);
    fo.optimize(te, null, null);

    System.out.println("join selectivity opt query is " + te);
    System.out.print("filter optimized query is " + te2);
  }