@Test public void test() throws Exception { Connector c = getConnector(); String tableName = getUniqueNames(1)[0]; c.tableOperations().create(tableName); BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig()); Mutation m = new Mutation("row1"); m.put("cf", "col1", "Test"); bw.addMutation(m); bw.close(); scanCheck(c, tableName, "Test"); FileSystem fs = getCluster().getFileSystem(); Path jarPath = new Path(rootPath + "/lib/ext/Test.jar"); copyStreamToFileSystem(fs, "/TestCombinerX.jar", jarPath); sleepUninterruptibly(1, TimeUnit.SECONDS); IteratorSetting is = new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner"); Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf"))); c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.scan)); sleepUninterruptibly(ZOOKEEPER_PROPAGATION_TIME, TimeUnit.MILLISECONDS); scanCheck(c, tableName, "TestX"); fs.delete(jarPath, true); copyStreamToFileSystem(fs, "/TestCombinerY.jar", jarPath); sleepUninterruptibly(5, TimeUnit.SECONDS); scanCheck(c, tableName, "TestY"); fs.delete(jarPath, true); }
public static void run( String instanceName, String zookeepers, AuthenticationToken rootPassword, String args[]) throws Exception { // edit this method to play with Accumulo Instance instance = new ZooKeeperInstance(instanceName, zookeepers); Connector conn = instance.getConnector("root", rootPassword); conn.tableOperations().create("foo"); BatchWriterConfig bwConfig = new BatchWriterConfig(); bwConfig.setMaxLatency(60000l, java.util.concurrent.TimeUnit.MILLISECONDS); bwConfig.setMaxWriteThreads(3); bwConfig.setMaxMemory(50000000); BatchWriter bw = conn.createBatchWriter("foo", bwConfig); Mutation m = new Mutation("r1"); m.put("cf1", "cq1", "v1"); m.put("cf1", "cq2", "v3"); bw.addMutation(m); bw.close(); Scanner scanner = conn.createScanner("foo", Constants.NO_AUTHS); for (Entry<Key, Value> entry : scanner) { System.out.println(entry.getKey() + " " + entry.getValue()); } }
private static long write(Connector conn, ArrayList<byte[]> cfset, String table) throws TableNotFoundException, MutationsRejectedException { Random rand = new Random(); byte val[] = new byte[50]; BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig()); long t1 = System.currentTimeMillis(); for (int i = 0; i < 1 << 15; i++) { byte[] row = FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0]); Mutation m = new Mutation(row); for (byte[] cf : cfset) { byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]); rand.nextBytes(val); m.put(cf, cq, val); } bw.addMutation(m); } bw.close(); long t2 = System.currentTimeMillis(); return t2 - t1; }
@Test public void test() throws Exception { Connector c = getConnector(); // make a table String tableName = getUniqueNames(1)[0]; c.tableOperations().create(tableName); // write to it BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig()); Mutation m = new Mutation("row"); m.put("cf", "cq", "value"); bw.addMutation(m); bw.close(); // create a fake _tmp file in its directory String id = c.tableOperations().tableIdMap().get(tableName); FileSystem fs = getCluster().getFileSystem(); Path tmp = new Path("/accumulo/tables/" + id + "/default_tablet/junk.rf_tmp"); fs.create(tmp).close(); for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) { getCluster().killProcess(ServerType.TABLET_SERVER, tserver); } getCluster().start(); Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY); FunctionalTestUtils.count(scanner); assertFalse(fs.exists(tmp)); }
private void runMergeTest( Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end) throws Exception { System.out.println( "Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end); conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL)); TreeSet<Text> splitSet = new TreeSet<Text>(); for (String split : splits) { splitSet.add(new Text(split)); } conn.tableOperations().addSplits(table, splitSet); BatchWriter bw = conn.createBatchWriter(table, null); HashSet<String> expected = new HashSet<String>(); for (String row : inserts) { Mutation m = new Mutation(row); m.put("cf", "cq", row); bw.addMutation(m); expected.add(row); } bw.close(); conn.tableOperations() .merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end)); Scanner scanner = conn.createScanner(table, Authorizations.EMPTY); HashSet<String> observed = new HashSet<String>(); for (Entry<Key, Value> entry : scanner) { String row = entry.getKey().getRowData().toString(); if (!observed.add(row)) { throw new Exception("Saw data twice " + table + " " + row); } } if (!observed.equals(expected)) { throw new Exception("data inconsistency " + table + " " + observed + " != " + expected); } HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table)); HashSet<Text> ess = new HashSet<Text>(); for (String es : expectedSplits) { ess.add(new Text(es)); } if (!currentSplits.equals(ess)) { throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess); } }
private static AccumuloBackedGraph setupGraph( Instance instance, Connector conn, String tableName, int numEntries) { long ageOffTimeInMilliseconds = (30 * 24 * 60 * 60 * 1000L); // 30 days in milliseconds try { // Create table // (this method creates the table, removes the versioning iterator, and adds the // SetOfStatisticsCombiner iterator, // and sets the age off iterator to age data off after it is more than // ageOffTimeInMilliseconds milliseconds old). TableUtils.createTable(conn, tableName, ageOffTimeInMilliseconds); // Create numEntries edges and add to Accumulo BatchWriter writer = conn.createBatchWriter(tableName, 1000000L, 1000L, 1); for (int i = 0; i < numEntries; i++) { Edge edge = new Edge( "customer", "" + i, "product", "B", "purchase", "instore", true, visibilityString, sevenDaysBefore, sixDaysBefore); SetOfStatistics statistics = new SetOfStatistics(); statistics.addStatistic("count", new Count(i)); Key key = ConversionUtils.getKeysFromEdge(edge).getFirst(); Value value = ConversionUtils.getValueFromSetOfStatistics(statistics); Mutation m = new Mutation(key.getRow()); m.put( key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), value); writer.addMutation(m); } writer.close(); // Create Accumulo backed graph AccumuloBackedGraph graph = new AccumuloBackedGraph(conn, tableName); return graph; } catch (AccumuloException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } catch (AccumuloSecurityException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } catch (TableExistsException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } catch (TableNotFoundException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } return null; }
@Test(timeout = 60 * 1000) public void run() throws Exception { Connector c = getConnector(); c.tableOperations().create("rdel1"); Map<String, Set<Text>> groups = new HashMap<String, Set<Text>>(); groups.put("lg1", Collections.singleton(new Text("foo"))); groups.put("dg", Collections.<Text>emptySet()); c.tableOperations().setLocalityGroups("rdel1", groups); IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class); c.tableOperations().attachIterator("rdel1", setting, EnumSet.of(IteratorScope.majc)); c.tableOperations().setProperty("rdel1", Property.TABLE_MAJC_RATIO.getKey(), "100"); BatchWriter bw = c.createBatchWriter("rdel1", new BatchWriterConfig()); bw.addMutation(nm("r1", "foo", "cf1", "v1")); bw.addMutation(nm("r1", "bar", "cf1", "v2")); bw.flush(); c.tableOperations().flush("rdel1", null, null, true); checkRFiles(c, "rdel1", 1, 1, 1, 1); int count = 0; Scanner scanner = c.createScanner("rdel1", Authorizations.EMPTY); for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) { count++; } if (count != 2) throw new Exception("1 count=" + count); bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE)); bw.flush(); c.tableOperations().flush("rdel1", null, null, true); checkRFiles(c, "rdel1", 1, 1, 2, 2); count = 0; scanner = c.createScanner("rdel1", Authorizations.EMPTY); for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) { count++; } if (count != 3) throw new Exception("2 count=" + count); c.tableOperations().compact("rdel1", null, null, false, true); checkRFiles(c, "rdel1", 1, 1, 0, 0); count = 0; scanner = c.createScanner("rdel1", Authorizations.EMPTY); for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) { count++; } if (count != 0) throw new Exception("3 count=" + count); bw.close(); }
@Before public void createMockKeyValues() throws Exception { // Make a MockInstance here, by setting the instance name to be the same as this mock instance // we can "trick" the InputFormat into using a MockInstance mockInstance = new MockInstance(test.getMethodName()); inputformat = new HiveAccumuloTableInputFormat(); conf = new JobConf(); conf.set(AccumuloSerDeParameters.TABLE_NAME, TEST_TABLE); conf.set(AccumuloSerDeParameters.USE_MOCK_INSTANCE, "true"); conf.set(AccumuloSerDeParameters.INSTANCE_NAME, test.getMethodName()); conf.set(AccumuloSerDeParameters.USER_NAME, USER); conf.set(AccumuloSerDeParameters.USER_PASS, PASS); conf.set(AccumuloSerDeParameters.ZOOKEEPERS, "localhost:2181"); // not used for mock, but // required by input format. columnNames = Arrays.asList("name", "sid", "dgrs", "mills"); columnTypes = Arrays.<TypeInfo>asList( TypeInfoFactory.stringTypeInfo, TypeInfoFactory.intTypeInfo, TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.longTypeInfo); conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:name,cf:sid,cf:dgrs,cf:mills"); conf.set(serdeConstants.LIST_COLUMNS, "name,sid,dgrs,mills"); conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,double,bigint"); con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes())); con.tableOperations().create(TEST_TABLE); con.securityOperations().changeUserAuthorizations(USER, new Authorizations("blah")); BatchWriterConfig writerConf = new BatchWriterConfig(); BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf); Mutation m1 = new Mutation(new Text("r1")); m1.put(COLUMN_FAMILY, NAME, new Value("brian".getBytes())); m1.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("1"))); m1.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("44.5"))); m1.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("555"))); Mutation m2 = new Mutation(new Text("r2")); m2.put(COLUMN_FAMILY, NAME, new Value("mark".getBytes())); m2.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("2"))); m2.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("55.5"))); m2.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("666"))); Mutation m3 = new Mutation(new Text("r3")); m3.put(COLUMN_FAMILY, NAME, new Value("dennis".getBytes())); m3.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("3"))); m3.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("65.5"))); m3.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("777"))); writer.addMutation(m1); writer.addMutation(m2); writer.addMutation(m3); writer.close(); }
@Test public void testGetProtectedField() throws Exception { FileInputFormat.addInputPath(conf, new Path("unused")); BatchWriterConfig writerConf = new BatchWriterConfig(); BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf); Authorizations origAuths = con.securityOperations().getUserAuthorizations(USER); con.securityOperations() .changeUserAuthorizations(USER, new Authorizations(origAuths.toString() + ",foo")); Mutation m = new Mutation("r4"); m.put(COLUMN_FAMILY, NAME, new ColumnVisibility("foo"), new Value("frank".getBytes())); m.put(COLUMN_FAMILY, SID, new ColumnVisibility("foo"), new Value(parseIntBytes("4"))); m.put(COLUMN_FAMILY, DEGREES, new ColumnVisibility("foo"), new Value(parseDoubleBytes("60.6"))); m.put(COLUMN_FAMILY, MILLIS, new ColumnVisibility("foo"), new Value(parseLongBytes("777"))); writer.addMutation(m); writer.close(); conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo"); InputSplit[] splits = inputformat.getSplits(conf, 0); assertEquals(splits.length, 1); RecordReader<Text, AccumuloHiveRow> reader = inputformat.getRecordReader(splits[0], conf, null); Text rowId = new Text("r1"); AccumuloHiveRow row = new AccumuloHiveRow(); assertTrue(reader.next(rowId, row)); assertEquals(row.getRowId(), rowId.toString()); assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "brian".getBytes()); rowId = new Text("r2"); assertTrue(reader.next(rowId, row)); assertEquals(row.getRowId(), rowId.toString()); assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "mark".getBytes()); rowId = new Text("r3"); assertTrue(reader.next(rowId, row)); assertEquals(row.getRowId(), rowId.toString()); assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "dennis".getBytes()); rowId = new Text("r4"); assertTrue(reader.next(rowId, row)); assertEquals(row.getRowId(), rowId.toString()); assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "frank".getBytes()); assertFalse(reader.next(rowId, row)); }
private synchronized void resetWriter() { try { if (writer != null) writer.close(); } catch (Exception ex) { log.error("Error closing batch writer", ex); } finally { writer = null; try { writer = connector.createBatchWriter(table, new BatchWriterConfig()); } catch (Exception ex) { log.error("Unable to create a batch writer", ex); } } }
/** * Insert the contents of an NDSI data file into an Accumulo table. * * @param file Input file * @param Atable Table to insert into * @param deleteIfExists Whether to delete and recreate Atable if it already exists * @return Number of entries inserted into Atable; equal to 2x the number of rows. * @throws IOException */ public long ingestFile(File file, String Atable, boolean deleteIfExists) throws IOException { if (deleteIfExists && connector.tableOperations().exists(Atable)) try { connector.tableOperations().delete(Atable); } catch (AccumuloException | AccumuloSecurityException e) { log.warn("trouble deleting table " + Atable, e); throw new RuntimeException(e); } catch (TableNotFoundException e) { throw new RuntimeException(e); } if (!connector.tableOperations().exists(Atable)) try { connector.tableOperations().create(Atable); } catch (AccumuloException | AccumuloSecurityException e) { log.warn("trouble creating table " + Atable, e); throw new RuntimeException(e); } catch (TableExistsException e) { throw new RuntimeException(e); } BatchWriter bw = null; String line = null; long entriesProcessed = 0; try (BufferedReader fo = new BufferedReader(new FileReader(file))) { BatchWriterConfig config = new BatchWriterConfig(); bw = connector.createBatchWriter(Atable, config); // Skip header line fo.readLine(); while ((line = fo.readLine()) != null) if (!line.isEmpty()) entriesProcessed += ingestLine(bw, line); } catch (TableNotFoundException e) { throw new RuntimeException(e); } catch (MutationsRejectedException e) { log.warn("Mutation rejected on line " + line, e); } finally { if (bw != null) try { bw.close(); } catch (MutationsRejectedException e) { log.warn("Mutation rejected at close() on line " + line, e); } } return entriesProcessed; }
/** Write entries to a table. */ public static void writeEntries( Connector connector, Map<Key, Value> map, String table, boolean createIfNotExist) { if (createIfNotExist && !connector.tableOperations().exists(table)) try { connector.tableOperations().create(table); } catch (AccumuloException | AccumuloSecurityException e) { log.error("trouble creating " + table, e); throw new RuntimeException(e); } catch (TableExistsException e) { log.error("crazy", e); throw new RuntimeException(e); } BatchWriterConfig bwc = new BatchWriterConfig(); BatchWriter bw; try { bw = connector.createBatchWriter(table, bwc); } catch (TableNotFoundException e) { log.error("tried to write to a non-existant table " + table, e); throw new RuntimeException(e); } try { for (Map.Entry<Key, Value> entry : map.entrySet()) { Key k = entry.getKey(); ByteSequence rowData = k.getRowData(), cfData = k.getColumnFamilyData(), cqData = k.getColumnQualifierData(); Mutation m = new Mutation(rowData.getBackingArray(), rowData.offset(), rowData.length()); m.put( cfData.getBackingArray(), cqData.getBackingArray(), k.getColumnVisibilityParsed(), entry.getValue().get()); bw.addMutation(m); } } catch (MutationsRejectedException e) { log.error("mutations rejected", e); throw new RuntimeException(e); } finally { try { bw.close(); } catch (MutationsRejectedException e) { log.error("mutations rejected while trying to close BatchWriter", e); } } }
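// A minimal usage sketch (not part of the original class) for the writeEntries helper above.
// The table name and cell contents are placeholders, and a Connector is assumed to be in hand.
static void writeSampleEntries(Connector connector) {
  Map<Key, Value> entries = new TreeMap<Key, Value>();
  entries.put(new Key(new Text("row1"), new Text("cf"), new Text("cq1")), new Value("v1".getBytes()));
  entries.put(new Key(new Text("row1"), new Text("cf"), new Text("cq2")), new Value("v2".getBytes()));
  // Creates the table if it does not exist, then pushes the entries through a BatchWriter.
  writeEntries(connector, entries, "example_table", true);
}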
@Test public void merge() throws Exception { Connector c = getConnector(); String tableName = getUniqueNames(1)[0]; c.tableOperations().create(tableName); c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" "))); BatchWriter bw = c.createBatchWriter(tableName, null); for (String row : "a b c d e f g h i j k".split(" ")) { Mutation m = new Mutation(row); m.put("cf", "cq", "value"); bw.addMutation(m); } bw.close(); c.tableOperations().flush(tableName, null, null, true); c.tableOperations().merge(tableName, new Text("c1"), new Text("f1")); assertEquals(8, c.tableOperations().listSplits(tableName).size()); }
@Test public void testMap() throws Exception { MockInstance mockInstance = new MockInstance(INSTANCE_NAME); Connector c = mockInstance.getConnector("root", new PasswordToken("")); c.tableOperations().create(TEST_TABLE_1); BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig()); for (int i = 0; i < 100; i++) { Mutation m = new Mutation(new Text(String.format("%09x", i + 1))); m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes())); bw.addMutation(m); } bw.close(); MRTester.main(new String[] {"root", "", TEST_TABLE_1}); assertNull(e1); assertNull(e2); }
public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception { Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)); mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange()); mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME); BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); for (Entry<Key, Value> entry : mscanner) { log.debug("Looking at entry " + entry + " with tid " + tid); if (Long.parseLong(entry.getValue().toString()) == tid) { log.debug("deleting entry " + entry); Mutation m = new Mutation(entry.getKey().getRow()); m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier()); bw.addMutation(m); } } bw.close(); }
@Test public void aggregationTest() throws Exception { Connector c = getConnector(); String tableName = getUniqueNames(1)[0]; c.tableOperations().create(tableName); IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class); SummingCombiner.setEncodingType(setting, Type.STRING); SummingCombiner.setColumns( setting, Collections.singletonList(new IteratorSetting.Column("cf"))); c.tableOperations().attachIterator(tableName, setting); BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig()); for (int i = 0; i < 10; i++) { Mutation m = new Mutation("row1"); m.put("cf".getBytes(), "col1".getBytes(), ("" + i).getBytes()); bw.addMutation(m); } bw.close(); checkSum(tableName, c); }
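// The checkSum helper is not shown in this snippet; presumably it scans the table and verifies
// that the SummingCombiner collapsed the ten writes into a single cell valued 0 + 1 + ... + 9 = 45.
// A sketch under that assumption (the shape of the helper is a guess, not the original code):
private void checkSum(String tableName, Connector c) throws Exception {
  Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
  Iterator<Entry<Key, Value>> iter = s.iterator();
  assertTrue(iter.hasNext());
  assertEquals("45", iter.next().getValue().toString());
  assertFalse(iter.hasNext());
}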
static void runTest(Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException { c.tableOperations().create(tablename); BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig()); for (int i = 0; i < 10; i++) { Mutation m = new Mutation("" + i); m.put(input_cf, input_cq, "row" + i); bw.addMutation(m); } bw.close(); Process hash = cluster.exec( RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i", c.getInstance().getInstanceName(), "-z", c.getInstance().getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq); assertEquals(0, hash.waitFor()); Scanner s = c.createScanner(tablename, Authorizations.EMPTY); s.fetchColumn(new Text(input_cf), new Text(output_cq)); int i = 0; for (Entry<Key, Value> entry : s) { MessageDigest md = MessageDigest.getInstance("MD5"); byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes())); assertEquals(entry.getValue().toString(), new String(check)); i++; } }
@Test public void mergeSize() throws Exception { Connector c = getConnector(); String tableName = getUniqueNames(1)[0]; c.tableOperations().create(tableName); c.tableOperations() .addSplits( tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" "))); BatchWriter bw = c.createBatchWriter(tableName, null); for (String row : "c e f y".split(" ")) { Mutation m = new Mutation(row); m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives"); bw.addMutation(m); } bw.close(); c.tableOperations().flush(tableName, null, null, true); Merge merge = new Merge(); merge.mergomatic(c, tableName, null, null, 100, false); assertArrayEquals( "b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName))); merge.mergomatic(c, tableName, null, null, 100, true); assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName))); }
private static long scrambleDeleteHalfAndCheck( ClientOnDefaultTable opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts, String tableName, Set<RowColumn> rows) throws Exception { int result = 0; ArrayList<RowColumn> entries = new ArrayList<RowColumn>(rows); java.util.Collections.shuffle(entries); Connector connector = opts.getConnector(); BatchWriter mutations = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig()); for (int i = 0; i < (entries.size() + 1) / 2; i++) { RowColumn rc = entries.get(i); Mutation m = new Mutation(rc.row); m.putDelete( new Text(rc.column.columnFamily), new Text(rc.column.columnQualifier), new ColumnVisibility(rc.column.getColumnVisibility()), rc.timestamp + 1); mutations.addMutation(m); rows.remove(rc); result++; } mutations.close(); Set<RowColumn> current = scanAll(opts, scanOpts, tableName); current.removeAll(rows); if (current.size() > 0) { throw new RuntimeException(current.size() + " records not deleted"); } return result; }
public TraceServer(ServerConfiguration serverConfiguration, String hostname) throws Exception { this.serverConfiguration = serverConfiguration; AccumuloConfiguration conf = serverConfiguration.getConfiguration(); table = conf.get(Property.TRACE_TABLE); while (true) { try { String principal = conf.get(Property.TRACE_USER); AuthenticationToken at; Map<String, String> loginMap = conf.getAllPropertiesWithPrefix(Property.TRACE_TOKEN_PROPERTY_PREFIX); if (loginMap.isEmpty()) { Property p = Property.TRACE_PASSWORD; at = new PasswordToken(conf.get(p).getBytes()); } else { Properties props = new Properties(); AuthenticationToken token = AccumuloClassLoader.getClassLoader() .loadClass(conf.get(Property.TRACE_TOKEN_TYPE)) .asSubclass(AuthenticationToken.class) .newInstance(); int prefixLength = Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey().length() + 1; for (Entry<String, String> entry : loginMap.entrySet()) { props.put(entry.getKey().substring(prefixLength), entry.getValue()); } token.init(props); at = token; } connector = serverConfiguration.getInstance().getConnector(principal, at); if (!connector.tableOperations().exists(table)) { connector.tableOperations().create(table); IteratorSetting setting = new IteratorSetting(10, "ageoff", AgeOffFilter.class.getName()); AgeOffFilter.setTTL(setting, 7 * 24 * 60 * 60 * 1000l); connector.tableOperations().attachIterator(table, setting); } connector .tableOperations() .setProperty( table, Property.TABLE_FORMATTER_CLASS.getKey(), TraceFormatter.class.getName()); break; } catch (Exception ex) { log.info("Waiting to checking/create the trace table.", ex); UtilWaitThread.sleep(1000); } } int port = conf.getPort(Property.TRACE_PORT); final ServerSocket sock = ServerSocketChannel.open().socket(); sock.setReuseAddress(true); sock.bind(new InetSocketAddress(hostname, port)); final TServerTransport transport = new TServerSocket(sock); TThreadPoolServer.Args options = new TThreadPoolServer.Args(transport); options.processor(new Processor<Iface>(new Receiver())); server = new TThreadPoolServer(options); registerInZooKeeper(sock.getInetAddress().getHostAddress() + ":" + sock.getLocalPort()); writer = connector.createBatchWriter( table, new BatchWriterConfig().setMaxLatency(5, TimeUnit.SECONDS)); }
@Test public void run() throws Exception { Connector c = getConnector(); String tableName = getUniqueNames(1)[0]; c.tableOperations().create(tableName); BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig()); for (int i = 0; i < 1000; i++) { Mutation m = new Mutation(new Text(String.format("%08d", i))); for (int j = 0; j < 3; j++) m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8))); bw.addMutation(m); } bw.close(); Scanner scanner = c.createScanner(tableName, new Authorizations()); scanner.setReadaheadThreshold(20000); scanner.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000))); // test by making a slow iterator and then a couple of fast ones. // when then checking we shouldn't have any running except the slow iterator IteratorSetting setting = new IteratorSetting(21, SlowIterator.class); SlowIterator.setSeekSleepTime(setting, Long.MAX_VALUE); SlowIterator.setSleepTime(setting, Long.MAX_VALUE); scanner.addScanIterator(setting); final Iterator<Entry<Key, Value>> slow = scanner.iterator(); final List<Future<Boolean>> callables = new ArrayList<>(); final CountDownLatch latch = new CountDownLatch(10); for (int i = 0; i < 10; i++) { Future<Boolean> callable = service.submit( new Callable<Boolean>() { public Boolean call() { latch.countDown(); while (slow.hasNext()) { slow.next(); } return slow.hasNext(); } }); callables.add(callable); } latch.await(); log.info("Starting SessionBlockVerifyIT"); // let's add more for good measure. for (int i = 0; i < 2; i++) { Scanner scanner2 = c.createScanner(tableName, new Authorizations()); scanner2.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000))); scanner2.setBatchSize(1); Iterator<Entry<Key, Value>> iter = scanner2.iterator(); // call super's verify mechanism verify(iter, 0, 1000); } int sessionsFound = 0; // we have configured 1 tserver, so we can grab the one and only String tserver = Iterables.getOnlyElement(c.instanceOperations().getTabletServers()); final List<ActiveScan> scans = c.instanceOperations().getActiveScans(tserver); for (ActiveScan scan : scans) { // only here to minimize chance of seeing meta extent scans if (tableName.equals(scan.getTable()) && scan.getSsiList().size() > 0) { assertEquals("Not the expected iterator", 1, scan.getSsiList().size()); assertTrue( "Not the expected iterator", scan.getSsiList().iterator().next().contains("SlowIterator")); sessionsFound++; } } /** * The message below indicates the problem that we experience within ACCUMULO-3509. The issue * manifests as a blockage in the Scanner synchronization that prevent us from making the close * call against it. Since the close blocks until a read is finished, we ultimately have a block * within the sweep of SessionManager. As a result never reap subsequent idle sessions AND we * will orphan the sessionsToCleanup in the sweep, leading to an inaccurate count within * sessionsFound. */ assertEquals( "Must have ten sessions. Failure indicates a synchronization block within the sweep mechanism", 10, sessionsFound); for (Future<Boolean> callable : callables) { callable.cancel(true); } service.shutdown(); }
@Override public void client() { Connector conn = getConnector(); String tableName = getTestProperty("TABLE"); // get batch writer configuration long maxMemory = Long.parseLong(getTestProperty("MAX_MEMORY")); long maxLatency = Long.parseLong(getTestProperty("MAX_LATENCY")); int maxWriteThreads = Integer.parseInt(getTestProperty("NUM_THREADS")); // create batch writer BatchWriter bw = null; try { bw = conn.createBatchWriter( tableName, new BatchWriterConfig() .setMaxMemory(maxMemory) .setMaxLatency(maxLatency, TimeUnit.MILLISECONDS) .setMaxWriteThreads(maxWriteThreads)); } catch (TableNotFoundException e) { log.error("Table '" + tableName + "' not found.", e); } // configure writing Random r = new Random(); String ingestInstanceId = UUID.randomUUID().toString(); long numIngestEntries = Long.parseLong(getTestProperty("NUM_ENTRIES")); long minRow = 0L; long maxRow = 9223372036854775807L; int maxColF = 32767; int maxColQ = 32767; long count = 0; long totalBytes = 0; ColumnVisibility cv = new ColumnVisibility(); // start timer startTimer(); // write specified number of entries while (count < numIngestEntries) { count++; long rowId = ContinuousIngest.genLong(minRow, maxRow, r); Mutation m = ContinuousIngest.genMutation( rowId, r.nextInt(maxColF), r.nextInt(maxColQ), cv, ingestInstanceId.getBytes(StandardCharsets.UTF_8), count, null, r, false); totalBytes += m.numBytes(); try { bw.addMutation(m); } catch (MutationsRejectedException e) { log.error("Mutations rejected.", e); System.exit(-1); } } // close writer try { bw.close(); } catch (MutationsRejectedException e) { log.error("Could not close BatchWriter due to mutations being rejected.", e); System.exit(-1); } // stop timer stopTimer(count, totalBytes); }
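// The client above reads its configuration through getTestProperty(). An illustrative set of
// values for those keys (the key names come from the calls above; the numbers are placeholders):
Properties testProps = new Properties();
testProps.setProperty("TABLE", "perf_table");      // target table
testProps.setProperty("MAX_MEMORY", "10000000");   // bytes buffered by the BatchWriter
testProps.setProperty("MAX_LATENCY", "60000");     // ms before a forced flush
testProps.setProperty("NUM_THREADS", "4");         // BatchWriter write threads
testProps.setProperty("NUM_ENTRIES", "1000000");   // mutations to generate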
@Test public void waitsUntilEntriesAreReplicated() throws Exception { Connector conn = inst.getConnector("root", new PasswordToken("")); conn.tableOperations().create("foo"); Text tableId = new Text(conn.tableOperations().tableIdMap().get("foo")); String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(); Status stat = Status.newBuilder() .setBegin(0) .setEnd(10000) .setInfiniteEnd(false) .setClosed(false) .build(); BatchWriter bw = ReplicationTable.getBatchWriter(conn); Mutation m = new Mutation(file1); StatusSection.add(m, tableId, ProtobufUtil.toValue(stat)); bw.addMutation(m); m = new Mutation(file2); StatusSection.add(m, tableId, ProtobufUtil.toValue(stat)); bw.addMutation(m); bw.close(); bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); m = new Mutation(ReplicationSection.getRowPrefix() + file1); m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat)); bw.addMutation(m); m = new Mutation(ReplicationSection.getRowPrefix() + file2); m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat)); bw.addMutation(m); bw.close(); final AtomicBoolean done = new AtomicBoolean(false); final AtomicBoolean exception = new AtomicBoolean(false); ClientContext context = new ClientContext( inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration()); final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context); Thread t = new Thread( new Runnable() { @Override public void run() { try { roi.drain("foo"); } catch (Exception e) { log.error("Got error", e); exception.set(true); } done.set(true); } }); t.start(); // With the records, we shouldn't be drained Assert.assertFalse(done.get()); bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); m = new Mutation(ReplicationSection.getRowPrefix() + file1); m.putDelete(ReplicationSection.COLF, tableId); bw.addMutation(m); bw.flush(); Assert.assertFalse(done.get()); m = new Mutation(ReplicationSection.getRowPrefix() + file2); m.putDelete(ReplicationSection.COLF, tableId); bw.addMutation(m); bw.flush(); bw.close(); // Removing metadata entries doesn't change anything Assert.assertFalse(done.get()); // Remove the replication entries too bw = ReplicationTable.getBatchWriter(conn); m = new Mutation(file1); m.putDelete(StatusSection.NAME, tableId); bw.addMutation(m); bw.flush(); Assert.assertFalse(done.get()); m = new Mutation(file2); m.putDelete(StatusSection.NAME, tableId); bw.addMutation(m); bw.flush(); try { t.join(5000); } catch (InterruptedException e) { Assert.fail("ReplicationOperations.drain did not complete"); } // After both metadata and replication Assert.assertTrue(done.get()); Assert.assertFalse(exception.get()); }
@Test public void laterCreatedLogsDontBlockExecution() throws Exception { Connector conn = inst.getConnector("root", new PasswordToken("")); conn.tableOperations().create("foo"); Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo")); String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(); Status stat = Status.newBuilder() .setBegin(0) .setEnd(10000) .setInfiniteEnd(false) .setClosed(false) .build(); BatchWriter bw = ReplicationTable.getBatchWriter(conn); Mutation m = new Mutation(file1); StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat)); bw.addMutation(m); bw.close(); bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); m = new Mutation(ReplicationSection.getRowPrefix() + file1); m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat)); bw.addMutation(m); bw.close(); System.out.println("Reading metadata first time"); for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) { System.out.println(e.getKey()); } final AtomicBoolean done = new AtomicBoolean(false); final AtomicBoolean exception = new AtomicBoolean(false); ClientContext context = new ClientContext( inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration()); final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context); Thread t = new Thread( new Runnable() { @Override public void run() { try { roi.drain("foo"); } catch (Exception e) { log.error("Got error", e); exception.set(true); } done.set(true); } }); t.start(); // We need to wait long enough for the table to read once Thread.sleep(2000); // Write another file, but also delete the old files bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); m = new Mutation( ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID()); m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat)); bw.addMutation(m); m = new Mutation(ReplicationSection.getRowPrefix() + file1); m.putDelete(ReplicationSection.COLF, tableId1); bw.addMutation(m); bw.close(); System.out.println("Reading metadata second time"); for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) { System.out.println(e.getKey()); } bw = ReplicationTable.getBatchWriter(conn); m = new Mutation(file1); m.putDelete(StatusSection.NAME, tableId1); bw.addMutation(m); bw.close(); try { t.join(5000); } catch (InterruptedException e) { Assert.fail("ReplicationOperatiotns.drain did not complete"); } // We should pass immediately because we aren't waiting on both files to be deleted (just the // one that we did) Assert.assertTrue(done.get()); }
@Test public void inprogressReplicationRecordsBlockExecution() throws Exception { Connector conn = inst.getConnector("root", new PasswordToken("")); conn.tableOperations().create("foo"); Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo")); String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(); Status stat = Status.newBuilder() .setBegin(0) .setEnd(10000) .setInfiniteEnd(false) .setClosed(false) .build(); BatchWriter bw = ReplicationTable.getBatchWriter(conn); Mutation m = new Mutation(file1); StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat)); bw.addMutation(m); bw.close(); LogEntry logEntry = new LogEntry(); logEntry.extent = new KeyExtent(new Text(tableId1), null, null); logEntry.server = "tserver"; logEntry.filename = file1; logEntry.tabletId = 1; logEntry.logSet = Arrays.asList(file1); logEntry.timestamp = System.currentTimeMillis(); bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); m = new Mutation(ReplicationSection.getRowPrefix() + file1); m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat)); bw.addMutation(m); m = new Mutation(logEntry.getRow()); m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue()); bw.addMutation(m); bw.close(); final AtomicBoolean done = new AtomicBoolean(false); final AtomicBoolean exception = new AtomicBoolean(false); ClientContext context = new ClientContext( inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration()); final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context); Thread t = new Thread( new Runnable() { @Override public void run() { try { roi.drain("foo"); } catch (Exception e) { log.error("Got error", e); exception.set(true); } done.set(true); } }); t.start(); // With the records, we shouldn't be drained Assert.assertFalse(done.get()); Status newStatus = Status.newBuilder() .setBegin(1000) .setEnd(2000) .setInfiniteEnd(false) .setClosed(true) .build(); bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); m = new Mutation(ReplicationSection.getRowPrefix() + file1); m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(newStatus)); bw.addMutation(m); bw.flush(); // Removing metadata entries doesn't change anything Assert.assertFalse(done.get()); // Remove the replication entries too bw = ReplicationTable.getBatchWriter(conn); m = new Mutation(file1); m.put(StatusSection.NAME, tableId1, ProtobufUtil.toValue(newStatus)); bw.addMutation(m); bw.flush(); try { t.join(5000); } catch (InterruptedException e) { Assert.fail("ReplicationOperations.drain did not complete"); } // New records, but not fully replicated ones don't cause it to complete Assert.assertFalse(done.get()); Assert.assertFalse(exception.get()); }
@Test public void testOptimizeQ6() throws Exception { RdfEvalStatsDAO<RdfCloudTripleStoreConfiguration> res = new ProspectorServiceEvalStatsDAO(conn, arc); AccumuloSelectivityEvalDAO accc = new AccumuloSelectivityEvalDAO(); accc.setConf(arc); accc.setConnector(conn); accc.setRdfEvalDAO(res); accc.init(); BatchWriter bw1 = conn.createBatchWriter("rya_prospects", config); BatchWriter bw2 = conn.createBatchWriter("rya_selectivity", config); String s1 = "predicateobject" + DELIM + "http://www.w3.org/2000/01/rdf-schema#label" + DELIM + "uri:dog"; String s2 = "predicateobject" + DELIM + "uri:barksAt" + DELIM + "uri:cat"; String s3 = "predicateobject" + DELIM + "uri:peesOn" + DELIM + "uri:hydrant"; String s5 = "predicateobject" + DELIM + "uri:watches" + DELIM + "uri:television"; String s4 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:chickens"; String s6 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:kibble"; String s7 = "predicateobject" + DELIM + "uri:rollsIn" + DELIM + "uri:mud"; String s8 = "predicateobject" + DELIM + "uri:runsIn" + DELIM + "uri:field"; String s9 = "predicateobject" + DELIM + "uri:smells" + DELIM + "uri:butt"; String s10 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:sticks"; List<Mutation> mList = new ArrayList<Mutation>(); List<Mutation> mList2 = new ArrayList<Mutation>(); List<String> sList = Arrays.asList( "subjectobject", "subjectpredicate", "subjectsubject", "predicateobject", "predicatepredicate", "predicatesubject"); Mutation m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11; m1 = new Mutation(s1 + DELIM + "3"); m1.put(new Text("count"), new Text(""), new Value("5".getBytes())); m2 = new Mutation(s2 + DELIM + "2"); m2.put(new Text("count"), new Text(""), new Value("3".getBytes())); m3 = new Mutation(s3 + DELIM + "1"); m3.put(new Text("count"), new Text(""), new Value("2".getBytes())); m4 = new Mutation(s4 + DELIM + "1"); m4.put(new Text("count"), new Text(""), new Value("0".getBytes())); m5 = new Mutation(s5 + DELIM + "1"); m5.put(new Text("count"), new Text(""), new Value("1".getBytes())); m6 = new Mutation(s6 + DELIM + "1"); m6.put(new Text("count"), new Text(""), new Value("3".getBytes())); m7 = new Mutation(s7 + DELIM + "1"); m7.put(new Text("count"), new Text(""), new Value("2".getBytes())); m8 = new Mutation(s8 + DELIM + "1"); m8.put(new Text("count"), new Text(""), new Value("3".getBytes())); m9 = new Mutation(s9 + DELIM + "1"); m9.put(new Text("count"), new Text(""), new Value("1".getBytes())); m10 = new Mutation(s10 + DELIM + "1"); m10.put(new Text("count"), new Text(""), new Value("1".getBytes())); mList.add(m1); mList.add(m2); mList.add(m3); mList.add(m4); mList.add(m5); mList.add(m6); mList.add(m7); mList.add(m8); mList.add(m9); mList.add(m10); bw1.addMutations(mList); bw1.close(); Scanner scan = conn.createScanner("rya_prospects", new Authorizations()); scan.setRange(new Range()); for (Map.Entry<Key, Value> entry : scan) { System.out.println("Key row string is " + entry.getKey().getRow().toString()); System.out.println("Key is " + entry.getKey()); System.out.println("Value is " + (new String(entry.getValue().get()))); } m1 = new Mutation(s1); m2 = new Mutation(s2); m3 = new Mutation(s3); m4 = new Mutation(s4); m5 = new Mutation(s5); m6 = new Mutation(s6); m7 = new Mutation(s7); m8 = new Mutation(s8); m9 = new Mutation(s9); m10 = new Mutation(s10); m11 = new Mutation(new Text("subjectpredicateobject" + DELIM + "FullTableCardinality")); m11.put(new Text("FullTableCardinality"), new Text("100"), EMPTY_VAL); int i = 2; int j = 3; int k = 4; int l = 5; Long count1; Long count2; Long count3; Long count4; for (String s : sList) { count1 = (long) i; count2 = (long) j; count3 = (long) k; count4 = (long) l; m1.put(new Text(s), new Text(count4.toString()), EMPTY_VAL); m2.put(new Text(s), new Text(count2.toString()), EMPTY_VAL); m3.put(new Text(s), new Text(count1.toString()), EMPTY_VAL); m4.put(new Text(s), new Text(count3.toString()), EMPTY_VAL); m5.put(new Text(s), new Text(count1.toString()), EMPTY_VAL); m6.put(new Text(s), new Text(count2.toString()), EMPTY_VAL); m7.put(new Text(s), new Text(count1.toString()), EMPTY_VAL); m8.put(new Text(s), new Text(count4.toString()), EMPTY_VAL); m9.put(new Text(s), new Text(count3.toString()), EMPTY_VAL); m10.put(new Text(s), new Text(count1.toString()), EMPTY_VAL); i = 2 * i; j = 2 * j; k = 2 * k; l = 2 * l; } mList2.add(m1); mList2.add(m2); mList2.add(m3); mList2.add(m5); mList2.add(m4); mList2.add(m6); mList2.add(m7); mList2.add(m8); mList2.add(m9); mList2.add(m10); mList2.add(m11); bw2.addMutations(mList2); bw2.close(); scan = conn.createScanner("rya_selectivity", new Authorizations()); scan.setRange(new Range()); for (Map.Entry<Key, Value> entry : scan) { System.out.println("Key row string is " + entry.getKey().getRow().toString()); System.out.println("Key is " + entry.getKey()); System.out.println( "Value is " + (new String(entry.getKey().getColumnQualifier().toString()))); } TupleExpr te = getTupleExpr(q6); TupleExpr te2 = (TupleExpr) te.clone(); System.out.println("Bindings are " + te.getBindingNames()); RdfCloudTripleStoreSelectivityEvaluationStatistics ars = new RdfCloudTripleStoreSelectivityEvaluationStatistics(arc, res, accc); QueryJoinSelectOptimizer qjs = new QueryJoinSelectOptimizer(ars, accc); System.out.println("Original query is " + te); qjs.optimize(te, null, null); FilterOptimizer fo = new FilterOptimizer(); fo.optimize(te2, null, null); System.out.print("filter optimized query before js opt is " + te2); qjs.optimize(te2, null, null); System.out.println("join selectivity opt query before filter opt is " + te); fo.optimize(te, null, null); System.out.println("join selectivity opt query is " + te); System.out.print("filter optimized query is " + te2); }
@Test public void testOptimizeQ4() throws Exception { RdfEvalStatsDAO<RdfCloudTripleStoreConfiguration> res = new ProspectorServiceEvalStatsDAO(conn, arc); AccumuloSelectivityEvalDAO accc = new AccumuloSelectivityEvalDAO(); accc.setConf(arc); accc.setConnector(conn); accc.setRdfEvalDAO(res); accc.init(); BatchWriter bw1 = conn.createBatchWriter("rya_prospects", config); BatchWriter bw2 = conn.createBatchWriter("rya_selectivity", config); String s1 = "predicateobject" + DELIM + "http://www.w3.org/2000/01/rdf-schema#label" + DELIM + "uri:dog"; String s2 = "predicateobject" + DELIM + "uri:barksAt" + DELIM + "uri:cat"; String s3 = "predicateobject" + DELIM + "uri:peesOn" + DELIM + "uri:hydrant"; String s5 = "predicateobject" + DELIM + "uri:scratches" + DELIM + "uri:ears"; String s4 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:chickens"; List<Mutation> mList = new ArrayList<Mutation>(); List<Mutation> mList2 = new ArrayList<Mutation>(); List<String> sList = Arrays.asList( "subjectobject", "subjectpredicate", "subjectsubject", "predicateobject", "predicatepredicate", "predicatesubject"); Mutation m1, m2, m3, m4, m5, m6; m1 = new Mutation(s1 + DELIM + "3"); m1.put(new Text("count"), new Text(""), new Value("4".getBytes())); m2 = new Mutation(s2 + DELIM + "2"); m2.put(new Text("count"), new Text(""), new Value("0".getBytes())); m3 = new Mutation(s3 + DELIM + "1"); m3.put(new Text("count"), new Text(""), new Value("8".getBytes())); m4 = new Mutation(s4 + DELIM + "1"); m4.put(new Text("count"), new Text(""), new Value("3".getBytes())); m5 = new Mutation(s5 + DELIM + "1"); m5.put(new Text("count"), new Text(""), new Value("0".getBytes())); mList.add(m1); mList.add(m2); mList.add(m3); mList.add(m4); mList.add(m5); bw1.addMutations(mList); bw1.close(); Scanner scan = conn.createScanner("rya_prospects", new Authorizations()); scan.setRange(new Range()); for (Map.Entry<Key, Value> entry : scan) { System.out.println("Key row string is " + entry.getKey().getRow().toString()); System.out.println("Key is " + entry.getKey()); System.out.println("Value is " + (new String(entry.getValue().get()))); } m1 = new Mutation(s1); m2 = new Mutation(s2); m3 = new Mutation(s3); m4 = new Mutation(s4); m5 = new Mutation(s5); m6 = new Mutation(new Text("subjectpredicateobject" + DELIM + "FullTableCardinality")); m6.put(new Text("FullTableCardinality"), new Text("100"), EMPTY_VAL); int i = 2; int j = 3; int k = 4; Long count1; Long count2; Long count3; for (String s : sList) { count1 = (long) i; count2 = (long) j; count3 = (long) k; m1.put(new Text(s), new Text(count1.toString()), EMPTY_VAL); m2.put(new Text(s), new Text(count2.toString()), EMPTY_VAL); m3.put(new Text(s), new Text(count1.toString()), EMPTY_VAL); m4.put(new Text(s), new Text(count3.toString()), EMPTY_VAL); m5.put(new Text(s), new Text(count1.toString()), EMPTY_VAL); i = 2 * i; j = 2 * j; k = 2 * k; } mList2.add(m1); mList2.add(m2); mList2.add(m3); mList2.add(m5); mList2.add(m4); mList2.add(m6); bw2.addMutations(mList2); bw2.close(); scan = conn.createScanner("rya_selectivity", new Authorizations()); scan.setRange(new Range()); for (Map.Entry<Key, Value> entry : scan) { System.out.println("Key row string is " + entry.getKey().getRow().toString()); System.out.println("Key is " + entry.getKey()); System.out.println( "Value is " + (new String(entry.getKey().getColumnQualifier().toString()))); } TupleExpr te = getTupleExpr(q2); RdfCloudTripleStoreSelectivityEvaluationStatistics ars = new RdfCloudTripleStoreSelectivityEvaluationStatistics(arc, res, accc); QueryJoinSelectOptimizer qjs = new QueryJoinSelectOptimizer(ars, accc); System.out.println("Original query is " + te); qjs.optimize(te, null, null); Assert.assertTrue(te.equals(getTupleExpr(Q4))); System.out.print("Optimized query is " + te); }
@Test(timeout = 40 * 1000) public void testFilesAreGarbageCollected() throws Exception { ZooKeeperInstance inst = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers()); Connector c = inst.getConnector("root", new PasswordToken(passwd)); final String table = "foobar"; c.tableOperations().create(table); final String tableId = c.tableOperations().tableIdMap().get(table); BatchWriter bw = null; // Add some data try { bw = c.createBatchWriter( table, new BatchWriterConfig() .setMaxMemory(100000l) .setMaxLatency(100, TimeUnit.MILLISECONDS) .setMaxWriteThreads(1)); Mutation m = new Mutation("a"); for (int i = 0; i < 500; i++) { m.put("colf", Integer.toString(i), ""); } bw.addMutation(m); } finally { if (null != bw) { bw.close(); } } File accumuloDir = new File(testDir, "accumulo"); File tables = new File(accumuloDir.getAbsolutePath(), "tables"); File myTable = new File(tables, tableId); log.trace( "Files before compaction: " + FileUtils.listFiles(myTable, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE)); final boolean flush = true, wait = true; // Compact the tables to get some rfiles which we can gc c.tableOperations().compact(table, null, null, flush, wait); Collection<File> filesAfterCompaction = FileUtils.listFiles(myTable, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE); int fileCountAfterCompaction = filesAfterCompaction.size(); log.trace("Files after compaction: " + filesAfterCompaction); // Sleep for 10s to let the GC do its thing for (int i = 1; i < 10; i++) { Thread.sleep(1000); filesAfterCompaction = FileUtils.listFiles(myTable, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE); log.trace("Files in loop: " + filesAfterCompaction); int fileCountAfterGCWait = filesAfterCompaction.size(); if (fileCountAfterGCWait < fileCountAfterCompaction) { return; } } Assert.fail("Expected to find less files after compaction and pause for GC"); }
@Override public void visit(State state, Properties props) throws Exception { boolean userExists = SecurityHelper.getTabUserExists(state); Connector conn; try { conn = state .getInstance() .getConnector( SecurityHelper.getTabUserName(state), SecurityHelper.getTabUserPass(state)); } catch (AccumuloSecurityException ae) { if (ae.getErrorCode().equals(SecurityErrorCode.BAD_CREDENTIALS)) { if (userExists) throw new AccumuloException( "User didn't exist when they should (or worse- password mismatch)", ae); else return; } throw new AccumuloException("Unexpected exception!", ae); } String action = props.getProperty("action", "_random"); TablePermission tp; if ("_random".equalsIgnoreCase(action)) { Random r = new Random(); tp = TablePermission.values()[r.nextInt(TablePermission.values().length)]; } else { tp = TablePermission.valueOf(action); } boolean tableExists = SecurityHelper.getTableExists(state); boolean hasPerm = SecurityHelper.getTabPerm(state, SecurityHelper.getTabUserName(state), tp); String tableName = state.getString("secTableName"); switch (tp) { case READ: Authorizations auths = SecurityHelper.getUserAuths(state, SecurityHelper.getTabUserName(state)); boolean canRead = SecurityHelper.getTabPerm( state, SecurityHelper.getTabUserName(state), TablePermission.READ); try { Scanner scan = conn.createScanner( tableName, conn.securityOperations() .getUserAuthorizations(SecurityHelper.getTabUserName(state))); int seen = 0; Iterator<Entry<Key, Value>> iter = scan.iterator(); while (iter.hasNext()) { Entry<Key, Value> entry = iter.next(); Key k = entry.getKey(); seen++; if (!auths.contains(k.getColumnVisibilityData())) throw new AccumuloException( "Got data I should not be capable of seeing: " + k + " table " + tableName); } if (!canRead) throw new AccumuloException( "Was able to read when I shouldn't have had the perm with connection user " + conn.whoami() + " table " + tableName); for (Entry<String, Integer> entry : SecurityHelper.getAuthsMap(state).entrySet()) { if (auths.contains(entry.getKey().getBytes())) seen = seen - entry.getValue(); } if (seen != 0) throw new AccumuloException("Got mismatched amounts of data"); } catch (TableNotFoundException tnfe) { if (tableExists) throw new AccumuloException( "Accumulo and test suite out of sync: table " + tableName, tnfe); return; } catch (AccumuloSecurityException ae) { if (ae.getErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) { if (canRead) throw new AccumuloException( "Table read permission out of sync with Accumulo: table " + tableName, ae); else return; } throw new AccumuloException("Unexpected exception!", ae); } catch (RuntimeException re) { if (re.getCause() instanceof AccumuloSecurityException && ((AccumuloSecurityException) re.getCause()) .getErrorCode() .equals(SecurityErrorCode.PERMISSION_DENIED)) { if (canRead) throw new AccumuloException( "Table read permission out of sync with Accumulo: table " + tableName, re.getCause()); else return; } throw new AccumuloException("Unexpected exception!", re); } break; case WRITE: String key = SecurityHelper.getLastKey(state) + "1"; Mutation m = new Mutation(new Text(key)); for (String s : SecurityHelper.getAuthsArray()) { m.put(new Text(), new Text(), new ColumnVisibility(s), new Value("value".getBytes())); } BatchWriter writer; try { writer = conn.createBatchWriter(tableName, 9000l, 0l, 1); } catch (TableNotFoundException tnfe) { if (tableExists) throw new AccumuloException("Table didn't exist when it should have: " + tableName); return; } boolean works = true; try { writer.addMutation(m); } catch (MutationsRejectedException mre) { throw new AccumuloException("Mutation exception!", mre); } if (works) for (String s : SecurityHelper.getAuthsArray()) SecurityHelper.increaseAuthMap(state, s, 1); break; case BULK_IMPORT: key = SecurityHelper.getLastKey(state) + "1"; SortedSet<Key> keys = new TreeSet<Key>(); for (String s : SecurityHelper.getAuthsArray()) { Key k = new Key(key, "", "", s); keys.add(k); } Path dir = new Path("/tmp", "bulk_" + UUID.randomUUID().toString()); Path fail = new Path(dir.toString() + "_fail"); FileSystem fs = SecurityHelper.getFs(state); FileSKVWriter f = FileOperations.getInstance() .openWriter( dir + "/securityBulk." + RFile.EXTENSION, fs, fs.getConf(), AccumuloConfiguration.getDefaultConfiguration()); f.startDefaultLocalityGroup(); fs.mkdirs(fail); for (Key k : keys) f.append(k, new Value("Value".getBytes())); f.close(); try { conn.tableOperations().importDirectory(tableName, dir.toString(), fail.toString(), true); } catch (TableNotFoundException tnfe) { if (tableExists) throw new AccumuloException("Table didn't exist when it should have: " + tableName); return; } catch (AccumuloSecurityException ae) { if (ae.getErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) { if (hasPerm) throw new AccumuloException( "Bulk Import failed when it should have worked: " + tableName); return; } throw new AccumuloException("Unexpected exception!", ae); } for (String s : SecurityHelper.getAuthsArray()) SecurityHelper.increaseAuthMap(state, s, 1); fs.delete(dir, true); fs.delete(fail, true); if (!hasPerm) throw new AccumuloException( "Bulk Import succeeded when it should have failed: " + dir + " table " + tableName); break; case ALTER_TABLE: AlterTable.renameTable(conn, state, tableName, tableName + "plus", hasPerm, tableExists); break; case GRANT: props.setProperty("task", "grant"); props.setProperty("perm", "random"); props.setProperty("source", "table"); props.setProperty("target", "system"); AlterTablePerm.alter(state, props); break; case DROP_TABLE: props.setProperty("source", "table"); DropTable.dropTable(state, props); break; } }
/** * Writes a specified number of entries to Accumulo using a {@link BatchWriter}. * * @throws AccumuloException * @throws AccumuloSecurityException * @throws TableNotFoundException */ public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException { String seed = null; int index = 0; String processedArgs[] = new String[13]; for (int i = 0; i < args.length; i++) { if (args[i].equals("-s")) { seed = args[++i]; } else { processedArgs[index++] = args[i]; } } if (index != 13) { System.out.println( "Usage : RandomBatchWriter [-s <seed>] <instance name> <zoo keepers> <username> <password> <table> <num> <min> <max> <value size> <max memory> <max latency> <num threads> <visibility>"); return; } String instanceName = processedArgs[0]; String zooKeepers = processedArgs[1]; String user = processedArgs[2]; byte[] pass = processedArgs[3].getBytes(); String table = processedArgs[4]; int num = Integer.parseInt(processedArgs[5]); long min = Long.parseLong(processedArgs[6]); long max = Long.parseLong(processedArgs[7]); int valueSize = Integer.parseInt(processedArgs[8]); long maxMemory = Long.parseLong(processedArgs[9]); long maxLatency = Long.parseLong(processedArgs[10]) == 0 ? Long.MAX_VALUE : Long.parseLong(processedArgs[10]); int numThreads = Integer.parseInt(processedArgs[11]); String visiblity = processedArgs[12]; // Uncomment the following lines for detailed debugging info // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME); // logger.setLevel(Level.TRACE); Random r; if (seed == null) r = new Random(); else { r = new Random(Long.parseLong(seed)); } ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers); Connector connector = instance.getConnector(user, pass); BatchWriter bw = connector.createBatchWriter(table, maxMemory, maxLatency, numThreads); // reuse the ColumnVisibility object to improve performance ColumnVisibility cv = new ColumnVisibility(visiblity); for (int i = 0; i < num; i++) { long rowid = (Math.abs(r.nextLong()) % (max - min)) + min; Mutation m = createMutation(rowid, valueSize, cv); bw.addMutation(m); } try { bw.close(); } catch (MutationsRejectedException e) { if (e.getAuthorizationFailures().size() > 0) { HashSet<String> tables = new HashSet<String>(); for (KeyExtent ke : e.getAuthorizationFailures()) { tables.add(ke.getTableId().toString()); } System.err.println("ERROR : Not authorized to write to tables : " + tables); } if (e.getConstraintViolationSummaries().size() > 0) { System.err.println( "ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries()); } } }
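// A hypothetical way to drive the main method above with the thirteen positional arguments
// from the usage string; the instance, zookeepers, credentials, and sizes are placeholders.
public static void runExample() throws Exception {
  String[] args = {
    "myInstance", "zkhost:2181", "root", "secret",  // instance name, zoo keepers, username, password
    "batchtest", "1000", "0", "1000000000", "50",   // table, num, min, max, value size
    "10000000", "0", "4", ""                        // max memory, max latency (0 = none), num threads, visibility
  };
  RandomBatchWriter.main(args); // class name taken from the usage message above
}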