@Test(timeout = 60 * 1000)
public void run() throws Exception {
  Connector c = getConnector();
  c.tableOperations().create("rdel1");
  Map<String, Set<Text>> groups = new HashMap<String, Set<Text>>();
  groups.put("lg1", Collections.singleton(new Text("foo")));
  groups.put("dg", Collections.<Text>emptySet());
  c.tableOperations().setLocalityGroups("rdel1", groups);
  IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
  c.tableOperations().attachIterator("rdel1", setting, EnumSet.of(IteratorScope.majc));
  c.tableOperations().setProperty("rdel1", Property.TABLE_MAJC_RATIO.getKey(), "100");

  BatchWriter bw = c.createBatchWriter("rdel1", new BatchWriterConfig());
  bw.addMutation(nm("r1", "foo", "cf1", "v1"));
  bw.addMutation(nm("r1", "bar", "cf1", "v2"));
  bw.flush();
  c.tableOperations().flush("rdel1", null, null, true);

  checkRFiles(c, "rdel1", 1, 1, 1, 1);

  int count = 0;
  Scanner scanner = c.createScanner("rdel1", Authorizations.EMPTY);
  for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
    count++;
  }
  if (count != 2) throw new Exception("1 count=" + count);

  bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));
  bw.flush();
  c.tableOperations().flush("rdel1", null, null, true);

  checkRFiles(c, "rdel1", 1, 1, 2, 2);

  count = 0;
  scanner = c.createScanner("rdel1", Authorizations.EMPTY);
  for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
    count++;
  }
  // the delete-row marker is still visible at scan time because the iterator
  // is only attached at the majc scope
  if (count != 3) throw new Exception("2 count=" + count);

  c.tableOperations().compact("rdel1", null, null, false, true);

  checkRFiles(c, "rdel1", 1, 1, 0, 0);

  count = 0;
  scanner = c.createScanner("rdel1", Authorizations.EMPTY);
  for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
    count++;
  }
  if (count != 0) throw new Exception("3 count=" + count);

  bw.close();
}
@Override
public void visit(State state, Properties props) throws Exception {
  Connector conn = state.getConnector();
  Random rand = (Random) state.get("rand");
  @SuppressWarnings("unchecked")
  List<String> tableNames = (List<String>) state.get("tables");
  String tableName = tableNames.get(rand.nextInt(tableNames.size()));
  try {
    Scanner scanner = conn.createScanner(tableName, Constants.NO_AUTHS);
    Iterator<Entry<Key, Value>> iter = scanner.iterator();
    while (iter.hasNext()) {
      iter.next();
    }
    log.debug("Scanned " + tableName);
  } catch (TableDeletedException e) {
    log.debug("Scan " + tableName + " failed, table deleted");
  } catch (TableNotFoundException e) {
    log.debug("Scan " + tableName + " failed, doesn't exist");
  } catch (TableOfflineException e) {
    log.debug("Scan " + tableName + " failed, offline");
  } catch (RuntimeException e) {
    if (e.getCause() instanceof AccumuloSecurityException) {
      log.debug("Scan " + tableName + " failed, permission error");
    } else {
      throw e;
    }
  }
}
public static void run(
    String instanceName, String zookeepers, AuthenticationToken rootPassword, String[] args)
    throws Exception {
  // edit this method to play with Accumulo
  Instance instance = new ZooKeeperInstance(instanceName, zookeepers);
  Connector conn = instance.getConnector("root", rootPassword);

  conn.tableOperations().create("foo");

  BatchWriterConfig bwConfig = new BatchWriterConfig();
  bwConfig.setMaxLatency(60000L, java.util.concurrent.TimeUnit.MILLISECONDS);
  bwConfig.setMaxWriteThreads(3);
  bwConfig.setMaxMemory(50000000);
  BatchWriter bw = conn.createBatchWriter("foo", bwConfig);

  Mutation m = new Mutation("r1");
  m.put("cf1", "cq1", "v1");
  m.put("cf1", "cq2", "v3");
  bw.addMutation(m);
  bw.close();

  Scanner scanner = conn.createScanner("foo", Constants.NO_AUTHS);
  for (Entry<Key, Value> entry : scanner) {
    System.out.println(entry.getKey() + " " + entry.getValue());
  }
}
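// A minimal follow-on sketch to the snippet above: instead of reading the whole "foo" table,
// restrict the scan to one row and one column family. The row and family chosen here are purely
// illustrative; only setRange/fetchColumnFamily calls already used elsewhere in these snippets
// are assumed.
Scanner restricted = conn.createScanner("foo", Constants.NO_AUTHS);
restricted.setRange(new Range("r1"));          // only row "r1"
restricted.fetchColumnFamily(new Text("cf1")); // only family "cf1"
for (Entry<Key, Value> entry : restricted) {
  System.out.println(entry.getKey() + " " + entry.getValue());
}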
@Test
public void test() throws Exception {
  Connector c = getConnector();
  // make a table
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // write to it
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.close();
  // create a fake _tmp file in its directory
  String id = c.tableOperations().tableIdMap().get(tableName);
  FileSystem fs = getCluster().getFileSystem();
  Path tmp = new Path("/accumulo/tables/" + id + "/default_tablet/junk.rf_tmp");
  fs.create(tmp).close();
  for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
    getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
  }
  getCluster().start();

  Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
  FunctionalTestUtils.count(scanner);

  assertFalse(fs.exists(tmp));
}
private void scanCheck(Connector c, String tableName, String expected) throws Exception {
  Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
  Iterator<Entry<Key, Value>> iterator = scanner.iterator();
  assertTrue(iterator.hasNext());
  Entry<Key, Value> next = iterator.next();
  assertFalse(iterator.hasNext());
  assertEquals(expected, next.getValue().toString());
}
private void checkSum(String tableName, Connector c) throws Exception {
  Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
  Iterator<Entry<Key, Value>> i = s.iterator();
  assertTrue(i.hasNext());
  Entry<Key, Value> entry = i.next();
  assertEquals("45", entry.getValue().toString());
  assertFalse(i.hasNext());
}
private void runMergeTest(
    Connector conn,
    String table,
    String[] splits,
    String[] expectedSplits,
    String[] inserts,
    String start,
    String end)
    throws Exception {
  System.out.println(
      "Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);

  conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
  TreeSet<Text> splitSet = new TreeSet<Text>();
  for (String split : splits) {
    splitSet.add(new Text(split));
  }
  conn.tableOperations().addSplits(table, splitSet);

  BatchWriter bw = conn.createBatchWriter(table, null);
  HashSet<String> expected = new HashSet<String>();
  for (String row : inserts) {
    Mutation m = new Mutation(row);
    m.put("cf", "cq", row);
    bw.addMutation(m);
    expected.add(row);
  }
  bw.close();

  conn.tableOperations()
      .merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));

  Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);

  HashSet<String> observed = new HashSet<String>();
  for (Entry<Key, Value> entry : scanner) {
    String row = entry.getKey().getRowData().toString();
    if (!observed.add(row)) {
      throw new Exception("Saw data twice " + table + " " + row);
    }
  }

  if (!observed.equals(expected)) {
    throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
  }

  HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
  HashSet<Text> ess = new HashSet<Text>();
  for (String es : expectedSplits) {
    ess.add(new Text(es));
  }

  if (!currentSplits.equals(ess)) {
    throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
  }
}
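// A hypothetical invocation of runMergeTest above; the table name, split points, and rows are
// made up for illustration. Merging the whole table (null start and end) should remove every
// split, so the expected-splits array is empty while both inserted rows survive the merge.
runMergeTest(
    conn,
    "merge_all",
    new String[] {"m"},      // initial split point
    new String[] {},         // expected splits after a full-table merge
    new String[] {"a", "z"}, // rows written before the merge
    null,                    // merge start (null == beginning of table)
    null);                   // merge end (null == end of table)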
@Test
public void run() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  for (int i = 0; i < 100000; i++) {
    c.createScanner(tableName, Authorizations.EMPTY);
  }
}
private static Scanner createCloneScanner(String tableId, Connector conn)
    throws TableNotFoundException {
  Scanner mscanner =
      new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
  mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
  mscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  mscanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
  mscanner.fetchColumnFamily(TabletsSection.LastLocationColumnFamily.NAME);
  mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);
  TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(mscanner);
  TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(mscanner);
  return mscanner;
}
private static TreeSet<RowColumn> scanAll(
    ClientOnDefaultTable opts, ScannerOpts scanOpts, String tableName) throws Exception {
  TreeSet<RowColumn> result = new TreeSet<RowColumn>();
  Connector conn = opts.getConnector();
  Scanner scanner = conn.createScanner(tableName, auths);
  scanner.setBatchSize(scanOpts.scanBatchSize);
  for (Entry<Key, Value> entry : scanner) {
    Key key = entry.getKey();
    Column column =
        new Column(
            TextUtil.getBytes(key.getColumnFamily()),
            TextUtil.getBytes(key.getColumnQualifier()),
            TextUtil.getBytes(key.getColumnVisibility()));
    result.add(new RowColumn(key.getRow(), column, key.getTimestamp()));
  }
  return result;
}
@Test
public void testGreaterThan1Sid() throws Exception {
  Connector con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes()));
  Scanner scan = con.createScanner(TEST_TABLE, new Authorizations("blah"));
  IteratorSetting is =
      new IteratorSetting(
          1, PrimitiveComparisonFilter.FILTER_PREFIX + 1, PrimitiveComparisonFilter.class);

  is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName());
  is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, GreaterThan.class.getName());
  is.addOption(
      PrimitiveComparisonFilter.CONST_VAL, new String(Base64.encodeBase64(parseIntBytes("1"))));
  is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:sid");
  scan.addScanIterator(is);

  boolean foundMark = false;
  boolean foundDennis = false;
  int totalCount = 0;
  for (Map.Entry<Key, Value> kv : scan) {
    boolean foundName = false;
    boolean foundSid = false;
    boolean foundDegrees = false;
    boolean foundMillis = false;
    SortedMap<Key, Value> items = PrimitiveComparisonFilter.decodeRow(kv.getKey(), kv.getValue());
    for (Map.Entry<Key, Value> item : items.entrySet()) {
      if (item.getKey().getRow().toString().equals("r2")) {
        foundMark = true;
      } else if (item.getKey().getRow().toString().equals("r3")) {
        foundDennis = true;
      }
      if (item.getKey().getColumnQualifier().equals(NAME)) {
        foundName = true;
      } else if (item.getKey().getColumnQualifier().equals(SID)) {
        foundSid = true;
      } else if (item.getKey().getColumnQualifier().equals(DEGREES)) {
        foundDegrees = true;
      } else if (item.getKey().getColumnQualifier().equals(MILLIS)) {
        foundMillis = true;
      }
    }
    totalCount++;
    assertTrue(foundDegrees & foundMillis & foundName & foundSid);
  }
  assertTrue(foundDennis & foundMark);
  assertEquals(2, totalCount);
}
public static void removeBulkLoadEntries(Connector conn, String tableId, long tid)
    throws Exception {
  Scanner mscanner =
      new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
  mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
  mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
  BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
  for (Entry<Key, Value> entry : mscanner) {
    log.debug("Looking at entry " + entry + " with tid " + tid);
    if (Long.parseLong(entry.getValue().toString()) == tid) {
      log.debug("deleting entry " + entry);
      Mutation m = new Mutation(entry.getKey().getRow());
      m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
      bw.addMutation(m);
    }
  }
  bw.close();
}
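// A hypothetical call to removeBulkLoadEntries above; the table id and bulk-load transaction id
// (tid) here are made-up values. In practice the table id comes from tableOperations().tableIdMap()
// and the tid from the transaction that performed the bulk import.
removeBulkLoadEntries(conn, "2b", 123456789L);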
private static long scan(Connector conn, ArrayList<byte[]> cfset, String table, boolean cq)
    throws TableNotFoundException {
  Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);

  if (!cq) scanner.fetchColumnFamily(new Text(cfset.get(15)));
  else scanner.fetchColumn(new Text(cfset.get(15)), new Text(cfset.get(15)));

  long t1 = System.currentTimeMillis();

  @SuppressWarnings("unused")
  int count = 0;
  for (@SuppressWarnings("unused") Entry<Key, Value> entry : scanner) {
    count++;
  }

  long t2 = System.currentTimeMillis();

  return t2 - t1;
}
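// A hypothetical driver for the timing helper above, comparing a column-family-only fetch against
// a family-plus-qualifier fetch over the same table. It assumes cfset holds at least 16 entries,
// since scan() reads cfset.get(15).
long cfTime = scan(conn, cfset, table, false); // fetchColumnFamily only
long cqTime = scan(conn, cfset, table, true);  // fetchColumn (family + qualifier)
System.out.println("cf scan took " + cfTime + " ms, cf+cq scan took " + cqTime + " ms");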
@Test
public void tabletShouldSplit() throws Exception {
  Connector c = getConnector();
  c.tableOperations().create("test_ingest");
  c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
  c.tableOperations()
      .setProperty("test_ingest", Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.rows = 100000;
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  vopts.rows = opts.rows;
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
  UtilWaitThread.sleep(15 * 1000);
  String id = c.tableOperations().tableIdMap().get("test_ingest");
  Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  KeyExtent extent = new KeyExtent(new Text(id), null, null);
  s.setRange(extent.toMetadataRange());
  MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
  int count = 0;
  int shortened = 0;
  for (Entry<Key, Value> entry : s) {
    extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
    if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14) shortened++;
    count++;
  }
  assertTrue(shortened > 0);
  assertTrue(count > 10);
  assertEquals(
      0,
      cluster
          .exec(
              CheckForMetadataProblems.class,
              "-i",
              cluster.getInstanceName(),
              "-u",
              "root",
              "-p",
              ROOT_PASSWORD,
              "-z",
              cluster.getZooKeepers())
          .waitFor());
}
protected Scanner getScanner(StringBuilder sb)
    throws AccumuloException, AccumuloSecurityException {
  AccumuloConfiguration conf = Monitor.getSystemConfiguration();
  String principal = conf.get(Property.TRACE_USER);
  AuthenticationToken at;
  Map<String, String> loginMap =
      conf.getAllPropertiesWithPrefix(Property.TRACE_TOKEN_PROPERTY_PREFIX);
  if (loginMap.isEmpty()) {
    Property p = Property.TRACE_PASSWORD;
    at = new PasswordToken(conf.get(p).getBytes(StandardCharsets.UTF_8));
  } else {
    Properties props = new Properties();
    int prefixLength = Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey().length();
    for (Entry<String, String> entry : loginMap.entrySet()) {
      props.put(entry.getKey().substring(prefixLength), entry.getValue());
    }

    AuthenticationToken token =
        Property.createInstanceFromPropertyName(
            conf, Property.TRACE_TOKEN_TYPE, AuthenticationToken.class, new PasswordToken());
    token.init(props);
    at = token;
  }

  String table = conf.get(Property.TRACE_TABLE);
  try {
    Connector conn = HdfsZooInstance.getInstance().getConnector(principal, at);
    if (!conn.tableOperations().exists(table)) {
      return new NullScanner();
    }
    Scanner scanner =
        conn.createScanner(table, conn.securityOperations().getUserAuthorizations(principal));
    return scanner;
  } catch (AccumuloSecurityException ex) {
    sb.append(
        "<h2>Unable to read trace table: check trace username and password configuration.</h2>\n");
    return null;
  } catch (TableNotFoundException ex) {
    return new NullScanner();
  }
}
static void runTest(Connector c, MiniAccumuloClusterImpl cluster)
    throws AccumuloException, AccumuloSecurityException, TableExistsException,
        TableNotFoundException, MutationsRejectedException, IOException, InterruptedException,
        NoSuchAlgorithmException {
  c.tableOperations().create(tablename);
  BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
  for (int i = 0; i < 10; i++) {
    Mutation m = new Mutation("" + i);
    m.put(input_cf, input_cq, "row" + i);
    bw.addMutation(m);
  }
  bw.close();
  Process hash =
      cluster.exec(
          RowHash.class,
          Collections.singletonList(hadoopTmpDirArg),
          "-i",
          c.getInstance().getInstanceName(),
          "-z",
          c.getInstance().getZooKeepers(),
          "-u",
          "root",
          "-p",
          ROOT_PASSWORD,
          "-t",
          tablename,
          "--column",
          input_cfcq);
  assertEquals(0, hash.waitFor());

  Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
  s.fetchColumn(new Text(input_cf), new Text(output_cq));
  int i = 0;
  for (Entry<Key, Value> entry : s) {
    MessageDigest md = MessageDigest.getInstance("MD5");
    byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
    assertEquals(new String(check), entry.getValue().toString());
    i++;
  }
}
@Test
public void testNameEqualBrian() throws Exception {
  Connector con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes()));
  Scanner scan = con.createScanner(TEST_TABLE, new Authorizations("blah"));
  IteratorSetting is =
      new IteratorSetting(
          1, PrimitiveComparisonFilter.FILTER_PREFIX + 1, PrimitiveComparisonFilter.class);

  is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName());
  is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
  is.addOption(
      PrimitiveComparisonFilter.CONST_VAL, new String(Base64.encodeBase64("brian".getBytes())));
  is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:name");
  scan.addScanIterator(is);

  boolean foundName = false;
  boolean foundSid = false;
  boolean foundDegrees = false;
  boolean foundMillis = false;
  for (Map.Entry<Key, Value> kv : scan) {
    SortedMap<Key, Value> items = PrimitiveComparisonFilter.decodeRow(kv.getKey(), kv.getValue());
    for (Map.Entry<Key, Value> item : items.entrySet()) {
      assertEquals("r1", item.getKey().getRow().toString());
      if (item.getKey().getColumnQualifier().equals(NAME)) {
        foundName = true;
        assertArrayEquals("brian".getBytes(), item.getValue().get());
      } else if (item.getKey().getColumnQualifier().equals(SID)) {
        foundSid = true;
        assertArrayEquals(parseIntBytes("1"), item.getValue().get());
      } else if (item.getKey().getColumnQualifier().equals(DEGREES)) {
        foundDegrees = true;
        assertArrayEquals(parseDoubleBytes("44.5"), item.getValue().get());
      } else if (item.getKey().getColumnQualifier().equals(MILLIS)) {
        foundMillis = true;
        assertArrayEquals(parseLongBytes("555"), item.getValue().get());
      }
    }
  }
  assertTrue(foundDegrees & foundMillis & foundName & foundSid);
}
public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid)
    throws IOException {
  List<FileRef> result = new ArrayList<FileRef>();
  try {
    VolumeManager fs = VolumeManagerImpl.get();
    Scanner mscanner =
        new IsolatedScanner(
            conn.createScanner(
                extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY));
    mscanner.setRange(extent.toMetadataRange());
    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key, Value> entry : mscanner) {
      if (Long.parseLong(entry.getValue().toString()) == tid) {
        result.add(new FileRef(fs, entry.getKey()));
      }
    }
    return result;
  } catch (TableNotFoundException ex) {
    // unlikely
    throw new RuntimeException("Oh no! The metadata table has vanished!!");
  }
}
static void checkRFiles(
    Connector c, String tableName, int minTablets, int maxTablets, int minRFiles, int maxRFiles)
    throws Exception {
  Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  String tableId = c.tableOperations().tableIdMap().get(tableName);
  scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
  scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
  MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);

  HashMap<Text, Integer> tabletFileCounts = new HashMap<Text, Integer>();

  for (Entry<Key, Value> entry : scanner) {
    Text row = entry.getKey().getRow();

    Integer count = tabletFileCounts.get(row);
    if (count == null) count = 0;
    if (entry
        .getKey()
        .getColumnFamily()
        .equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
      count = count + 1;
    }

    tabletFileCounts.put(row, count);
  }

  if (tabletFileCounts.size() < minTablets || tabletFileCounts.size() > maxTablets) {
    throw new Exception("Did not find expected number of tablets " + tabletFileCounts.size());
  }

  Set<Entry<Text, Integer>> es = tabletFileCounts.entrySet();
  for (Entry<Text, Integer> entry : es) {
    if (entry.getValue() > maxRFiles || entry.getValue() < minRFiles) {
      throw new Exception("tablet " + entry.getKey() + " has " + entry.getValue() + " map files");
    }
  }
}
public int run(String[] args) throws Exception {
  ClientOpts opts = new ClientOpts();
  opts.parseArgs(LocalityCheck.class.getName(), args);

  VolumeManager fs = VolumeManagerImpl.get();
  Connector connector = opts.getConnector();
  Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
  scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  scanner.setRange(MetadataSchema.TabletsSection.getRange());

  Map<String, Long> totalBlocks = new HashMap<String, Long>();
  Map<String, Long> localBlocks = new HashMap<String, Long>();
  ArrayList<String> files = new ArrayList<String>();

  for (Entry<Key, Value> entry : scanner) {
    Key key = entry.getKey();
    if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
      String location = entry.getValue().toString();
      String[] parts = location.split(":");
      String host = parts[0];
      addBlocks(fs, host, files, totalBlocks, localBlocks);
      files.clear();
    } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
      files.add(fs.getFullPath(key).toString());
    }
  }
  System.out.println(" Server %local total blocks");
  for (String host : totalBlocks.keySet()) {
    System.out.println(
        String.format(
            "%15s %5.1f %8d",
            host, (localBlocks.get(host) * 100.) / totalBlocks.get(host), totalBlocks.get(host)));
  }
  return 0;
}
@Test
public void run() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);

  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  for (int i = 0; i < 1000; i++) {
    Mutation m = new Mutation(new Text(String.format("%08d", i)));
    for (int j = 0; j < 3; j++)
      m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));
    bw.addMutation(m);
  }
  bw.close();

  Scanner scanner = c.createScanner(tableName, new Authorizations());
  scanner.setReadaheadThreshold(20000);
  scanner.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000)));

  // test by making a slow iterator and then a couple of fast ones;
  // when checking later, nothing should be running except the slow iterator
  IteratorSetting setting = new IteratorSetting(21, SlowIterator.class);
  SlowIterator.setSeekSleepTime(setting, Long.MAX_VALUE);
  SlowIterator.setSleepTime(setting, Long.MAX_VALUE);
  scanner.addScanIterator(setting);

  final Iterator<Entry<Key, Value>> slow = scanner.iterator();

  final List<Future<Boolean>> callables = new ArrayList<>();
  final CountDownLatch latch = new CountDownLatch(10);
  for (int i = 0; i < 10; i++) {
    Future<Boolean> callable =
        service.submit(
            new Callable<Boolean>() {
              public Boolean call() {
                latch.countDown();
                while (slow.hasNext()) {
                  slow.next();
                }
                return slow.hasNext();
              }
            });
    callables.add(callable);
  }

  latch.await();

  log.info("Starting SessionBlockVerifyIT");

  // let's add more for good measure.
  for (int i = 0; i < 2; i++) {
    Scanner scanner2 = c.createScanner(tableName, new Authorizations());
    scanner2.setRange(new Range(String.format("%08d", 0), String.format("%08d", 1000)));
    scanner2.setBatchSize(1);
    Iterator<Entry<Key, Value>> iter = scanner2.iterator();
    // call super's verify mechanism
    verify(iter, 0, 1000);
  }

  int sessionsFound = 0;
  // we have configured 1 tserver, so we can grab the one and only
  String tserver = Iterables.getOnlyElement(c.instanceOperations().getTabletServers());

  final List<ActiveScan> scans = c.instanceOperations().getActiveScans(tserver);

  for (ActiveScan scan : scans) {
    // only here to minimize chance of seeing meta extent scans
    if (tableName.equals(scan.getTable()) && scan.getSsiList().size() > 0) {
      assertEquals("Not the expected iterator", 1, scan.getSsiList().size());
      assertTrue(
          "Not the expected iterator",
          scan.getSsiList().iterator().next().contains("SlowIterator"));
      sessionsFound++;
    }
  }

  /**
   * The message below indicates the problem that we experience within ACCUMULO-3509. The issue
   * manifests as a blockage in the Scanner synchronization that prevents us from making the close
   * call against it. Since the close blocks until a read is finished, we ultimately have a block
   * within the sweep of SessionManager. As a result we never reap subsequent idle sessions AND we
   * will orphan the sessionsToCleanup in the sweep, leading to an inaccurate count within
   * sessionsFound.
   */
  assertEquals(
      "Must have ten sessions. Failure indicates a synchronization block within the sweep mechanism",
      10,
      sessionsFound);

  for (Future<Boolean> callable : callables) {
    callable.cancel(true);
  }
  service.shutdown();
}
public static void main(String[] args) throws Exception {
  Preconditions.checkArgument(
      args.length == 6,
      "java "
          + ExternalIndexMain.class.getCanonicalName()
          + " sparqlFile cbinstance cbzk cbuser cbpassword rdfTablePrefix.");

  final String sparqlFile = args[0];

  instStr = args[1];
  zooStr = args[2];
  userStr = args[3];
  passStr = args[4];
  tablePrefix = args[5];

  String queryString = FileUtils.readFileToString(new File(sparqlFile));

  // Look for Extra Indexes
  Instance inst = new ZooKeeperInstance(instStr, zooStr);
  Connector c = inst.getConnector(userStr, passStr.getBytes());

  System.out.println("Searching for Indexes");
  Map<String, String> indexTables = Maps.newLinkedHashMap();
  for (String table : c.tableOperations().list()) {
    if (table.startsWith(tablePrefix + "INDEX_")) {
      Scanner s = c.createScanner(table, new Authorizations());
      s.setRange(Range.exact(new Text("~SPARQL")));
      for (Entry<Key, Value> e : s) {
        indexTables.put(table, e.getValue().toString());
      }
    }
  }

  List<ExternalTupleSet> index = Lists.newArrayList();

  if (indexTables.isEmpty()) {
    System.out.println("No Index found");
  } else {
    for (String table : indexTables.keySet()) {
      String indexSparqlString = indexTables.get(table);
      System.out.println("====================== INDEX FOUND ======================");
      System.out.println(" table : " + table);
      System.out.println(" sparql : ");
      System.out.println(indexSparqlString);
      index.add(new AccumuloIndexSet(indexSparqlString, c, table));
    }
  }

  // Connect to Rya
  Sail s = getRyaSail();
  SailRepository repo = new SailRepository(s);
  repo.initialize();

  // Perform Query
  CountingTupleQueryResultHandler count = new CountingTupleQueryResultHandler();

  SailRepositoryConnection conn;
  if (index.isEmpty()) {
    conn = repo.getConnection();
  } else {
    ExternalProcessor processor = new ExternalProcessor(index);
    Sail processingSail = new ExternalSail(s, processor);
    SailRepository smartSailRepo = new SailRepository(processingSail);
    smartSailRepo.initialize();
    conn = smartSailRepo.getConnection();
  }

  startTime = System.currentTimeMillis();
  lastTime = startTime;
  System.out.println("Query Started");
  conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(count);

  System.out.println("Count of Results found : " + count.i);
  System.out.println(
      "Total query time (s) : " + (System.currentTimeMillis() - startTime) / 1000.);
}
@Test
public void laterCreatedLogsDontBlockExecution() throws Exception {
  Connector conn = inst.getConnector("root", new PasswordToken(""));
  conn.tableOperations().create("foo");

  Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

  String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  Status stat =
      Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();

  BatchWriter bw = ReplicationTable.getBatchWriter(conn);

  Mutation m = new Mutation(file1);
  StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
  bw.addMutation(m);
  bw.close();

  bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
  m = new Mutation(ReplicationSection.getRowPrefix() + file1);
  m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
  bw.addMutation(m);
  bw.close();

  System.out.println("Reading metadata first time");
  for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    System.out.println(e.getKey());
  }

  final AtomicBoolean done = new AtomicBoolean(false);
  final AtomicBoolean exception = new AtomicBoolean(false);
  ClientContext context =
      new ClientContext(
          inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
  final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
  Thread t =
      new Thread(
          new Runnable() {
            @Override
            public void run() {
              try {
                roi.drain("foo");
              } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
              }
              done.set(true);
            }
          });
  t.start();

  // We need to wait long enough for the table to read once
  Thread.sleep(2000);

  // Write another file, but also delete the old files
  bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
  m =
      new Mutation(
          ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
  m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
  bw.addMutation(m);
  m = new Mutation(ReplicationSection.getRowPrefix() + file1);
  m.putDelete(ReplicationSection.COLF, tableId1);
  bw.addMutation(m);
  bw.close();

  System.out.println("Reading metadata second time");
  for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    System.out.println(e.getKey());
  }

  bw = ReplicationTable.getBatchWriter(conn);
  m = new Mutation(file1);
  m.putDelete(StatusSection.NAME, tableId1);
  bw.addMutation(m);
  bw.close();

  try {
    t.join(5000);
  } catch (InterruptedException e) {
    Assert.fail("ReplicationOperations.drain did not complete");
  }

  // We should pass immediately because we aren't waiting on both files to be deleted (just the
  // one that we did)
  Assert.assertTrue(done.get());
}
@Test
public void testOptimizeQ6() throws Exception {

  RdfEvalStatsDAO<RdfCloudTripleStoreConfiguration> res =
      new ProspectorServiceEvalStatsDAO(conn, arc);
  AccumuloSelectivityEvalDAO accc = new AccumuloSelectivityEvalDAO();
  accc.setConf(arc);
  accc.setConnector(conn);
  accc.setRdfEvalDAO(res);
  accc.init();

  BatchWriter bw1 = conn.createBatchWriter("rya_prospects", config);
  BatchWriter bw2 = conn.createBatchWriter("rya_selectivity", config);

  String s1 =
      "predicateobject"
          + DELIM
          + "http://www.w3.org/2000/01/rdf-schema#label"
          + DELIM
          + "uri:dog";
  String s2 = "predicateobject" + DELIM + "uri:barksAt" + DELIM + "uri:cat";
  String s3 = "predicateobject" + DELIM + "uri:peesOn" + DELIM + "uri:hydrant";
  String s5 = "predicateobject" + DELIM + "uri:watches" + DELIM + "uri:television";
  String s4 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:chickens";
  String s6 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:kibble";
  String s7 = "predicateobject" + DELIM + "uri:rollsIn" + DELIM + "uri:mud";
  String s8 = "predicateobject" + DELIM + "uri:runsIn" + DELIM + "uri:field";
  String s9 = "predicateobject" + DELIM + "uri:smells" + DELIM + "uri:butt";
  String s10 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:sticks";

  List<Mutation> mList = new ArrayList<Mutation>();
  List<Mutation> mList2 = new ArrayList<Mutation>();
  List<String> sList =
      Arrays.asList(
          "subjectobject",
          "subjectpredicate",
          "subjectsubject",
          "predicateobject",
          "predicatepredicate",
          "predicatesubject");
  Mutation m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11;

  m1 = new Mutation(s1 + DELIM + "3");
  m1.put(new Text("count"), new Text(""), new Value("5".getBytes()));
  m2 = new Mutation(s2 + DELIM + "2");
  m2.put(new Text("count"), new Text(""), new Value("3".getBytes()));
  m3 = new Mutation(s3 + DELIM + "1");
  m3.put(new Text("count"), new Text(""), new Value("2".getBytes()));
  m4 = new Mutation(s4 + DELIM + "1");
  m4.put(new Text("count"), new Text(""), new Value("0".getBytes()));
  m5 = new Mutation(s5 + DELIM + "1");
  m5.put(new Text("count"), new Text(""), new Value("1".getBytes()));
  m6 = new Mutation(s6 + DELIM + "1");
  m6.put(new Text("count"), new Text(""), new Value("3".getBytes()));
  m7 = new Mutation(s7 + DELIM + "1");
  m7.put(new Text("count"), new Text(""), new Value("2".getBytes()));
  m8 = new Mutation(s8 + DELIM + "1");
  m8.put(new Text("count"), new Text(""), new Value("3".getBytes()));
  m9 = new Mutation(s9 + DELIM + "1");
  m9.put(new Text("count"), new Text(""), new Value("1".getBytes()));
  m10 = new Mutation(s10 + DELIM + "1");
  m10.put(new Text("count"), new Text(""), new Value("1".getBytes()));
  mList.add(m1);
  mList.add(m2);
  mList.add(m3);
  mList.add(m4);
  mList.add(m5);
  mList.add(m6);
  mList.add(m7);
  mList.add(m8);
  mList.add(m9);
  mList.add(m10);
  bw1.addMutations(mList);
  bw1.close();

  Scanner scan = conn.createScanner("rya_prospects", new Authorizations());
  scan.setRange(new Range());

  for (Map.Entry<Key, Value> entry : scan) {
    System.out.println("Key row string is " + entry.getKey().getRow().toString());
    System.out.println("Key is " + entry.getKey());
    System.out.println("Value is " + (new String(entry.getValue().get())));
  }

  m1 = new Mutation(s1);
  m2 = new Mutation(s2);
  m3 = new Mutation(s3);
  m4 = new Mutation(s4);
  m5 = new Mutation(s5);
  m6 = new Mutation(s6);
  m7 = new Mutation(s7);
  m8 = new Mutation(s8);
  m9 = new Mutation(s9);
  m10 = new Mutation(s10);
  m11 = new Mutation(new Text("subjectpredicateobject" + DELIM + "FullTableCardinality"));
  m11.put(new Text("FullTableCardinality"), new Text("100"), EMPTY_VAL);
  int i = 2;
  int j = 3;
  int k = 4;
  int l = 5;
  Long count1;
  Long count2;
  Long count3;
  Long count4;

  for (String s : sList) {
    count1 = (long) i;
    count2 = (long) j;
    count3 = (long) k;
    count4 = (long) l;
    m1.put(new Text(s), new Text(count4.toString()), EMPTY_VAL);
    m2.put(new Text(s), new Text(count2.toString()), EMPTY_VAL);
    m3.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
    m4.put(new Text(s), new Text(count3.toString()), EMPTY_VAL);
    m5.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
    m6.put(new Text(s), new Text(count2.toString()), EMPTY_VAL);
    m7.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
    m8.put(new Text(s), new Text(count4.toString()), EMPTY_VAL);
    m9.put(new Text(s), new Text(count3.toString()), EMPTY_VAL);
    m10.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
    i = 2 * i;
    j = 2 * j;
    k = 2 * k;
    l = 2 * l;
  }
  mList2.add(m1);
  mList2.add(m2);
  mList2.add(m3);
  mList2.add(m5);
  mList2.add(m4);
  mList2.add(m6);
  mList2.add(m7);
  mList2.add(m8);
  mList2.add(m9);
  mList2.add(m10);
  mList2.add(m11);
  bw2.addMutations(mList2);
  bw2.close();

  scan = conn.createScanner("rya_selectivity", new Authorizations());
  scan.setRange(new Range());

  for (Map.Entry<Key, Value> entry : scan) {
    System.out.println("Key row string is " + entry.getKey().getRow().toString());
    System.out.println("Key is " + entry.getKey());
    System.out.println(
        "Value is " + (new String(entry.getKey().getColumnQualifier().toString())));
  }

  TupleExpr te = getTupleExpr(q6);
  TupleExpr te2 = (TupleExpr) te.clone();
  System.out.println("Bindings are " + te.getBindingNames());
  RdfCloudTripleStoreSelectivityEvaluationStatistics ars =
      new RdfCloudTripleStoreSelectivityEvaluationStatistics(arc, res, accc);
  QueryJoinSelectOptimizer qjs = new QueryJoinSelectOptimizer(ars, accc);
  System.out.println("Original query is " + te);
  qjs.optimize(te, null, null);

  FilterOptimizer fo = new FilterOptimizer();
  fo.optimize(te2, null, null);
  System.out.print("filter optimized query before js opt is " + te2);
  qjs.optimize(te2, null, null);

  System.out.println("join selectivity opt query before filter opt is " + te);
  fo.optimize(te, null, null);

  System.out.println("join selectivity opt query is " + te);
  System.out.print("filter optimized query is " + te2);
}
@Test
public void testOptimizeQ4() throws Exception {

  RdfEvalStatsDAO<RdfCloudTripleStoreConfiguration> res =
      new ProspectorServiceEvalStatsDAO(conn, arc);
  AccumuloSelectivityEvalDAO accc = new AccumuloSelectivityEvalDAO();
  accc.setConf(arc);
  accc.setConnector(conn);
  accc.setRdfEvalDAO(res);
  accc.init();

  BatchWriter bw1 = conn.createBatchWriter("rya_prospects", config);
  BatchWriter bw2 = conn.createBatchWriter("rya_selectivity", config);

  String s1 =
      "predicateobject"
          + DELIM
          + "http://www.w3.org/2000/01/rdf-schema#label"
          + DELIM
          + "uri:dog";
  String s2 = "predicateobject" + DELIM + "uri:barksAt" + DELIM + "uri:cat";
  String s3 = "predicateobject" + DELIM + "uri:peesOn" + DELIM + "uri:hydrant";
  String s5 = "predicateobject" + DELIM + "uri:scratches" + DELIM + "uri:ears";
  String s4 = "predicateobject" + DELIM + "uri:eats" + DELIM + "uri:chickens";
  List<Mutation> mList = new ArrayList<Mutation>();
  List<Mutation> mList2 = new ArrayList<Mutation>();
  List<String> sList =
      Arrays.asList(
          "subjectobject",
          "subjectpredicate",
          "subjectsubject",
          "predicateobject",
          "predicatepredicate",
          "predicatesubject");
  Mutation m1, m2, m3, m4, m5, m6;

  m1 = new Mutation(s1 + DELIM + "3");
  m1.put(new Text("count"), new Text(""), new Value("4".getBytes()));
  m2 = new Mutation(s2 + DELIM + "2");
  m2.put(new Text("count"), new Text(""), new Value("0".getBytes()));
  m3 = new Mutation(s3 + DELIM + "1");
  m3.put(new Text("count"), new Text(""), new Value("8".getBytes()));
  m4 = new Mutation(s4 + DELIM + "1");
  m4.put(new Text("count"), new Text(""), new Value("3".getBytes()));
  m5 = new Mutation(s5 + DELIM + "1");
  m5.put(new Text("count"), new Text(""), new Value("0".getBytes()));
  mList.add(m1);
  mList.add(m2);
  mList.add(m3);
  mList.add(m4);
  mList.add(m5);
  bw1.addMutations(mList);
  bw1.close();

  Scanner scan = conn.createScanner("rya_prospects", new Authorizations());
  scan.setRange(new Range());

  for (Map.Entry<Key, Value> entry : scan) {
    System.out.println("Key row string is " + entry.getKey().getRow().toString());
    System.out.println("Key is " + entry.getKey());
    System.out.println("Value is " + (new String(entry.getValue().get())));
  }

  m1 = new Mutation(s1);
  m2 = new Mutation(s2);
  m3 = new Mutation(s3);
  m4 = new Mutation(s4);
  m5 = new Mutation(s5);
  m6 = new Mutation(new Text("subjectpredicateobject" + DELIM + "FullTableCardinality"));
  m6.put(new Text("FullTableCardinality"), new Text("100"), EMPTY_VAL);
  int i = 2;
  int j = 3;
  int k = 4;
  Long count1;
  Long count2;
  Long count3;

  for (String s : sList) {
    count1 = (long) i;
    count2 = (long) j;
    count3 = (long) k;
    m1.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
    m2.put(new Text(s), new Text(count2.toString()), EMPTY_VAL);
    m3.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
    m4.put(new Text(s), new Text(count3.toString()), EMPTY_VAL);
    m5.put(new Text(s), new Text(count1.toString()), EMPTY_VAL);
    i = 2 * i;
    j = 2 * j;
    k = 2 * k;
  }
  mList2.add(m1);
  mList2.add(m2);
  mList2.add(m3);
  mList2.add(m5);
  mList2.add(m4);
  mList2.add(m6);
  bw2.addMutations(mList2);
  bw2.close();

  scan = conn.createScanner("rya_selectivity", new Authorizations());
  scan.setRange(new Range());

  for (Map.Entry<Key, Value> entry : scan) {
    System.out.println("Key row string is " + entry.getKey().getRow().toString());
    System.out.println("Key is " + entry.getKey());
    System.out.println(
        "Value is " + (new String(entry.getKey().getColumnQualifier().toString())));
  }

  TupleExpr te = getTupleExpr(q2);
  RdfCloudTripleStoreSelectivityEvaluationStatistics ars =
      new RdfCloudTripleStoreSelectivityEvaluationStatistics(arc, res, accc);
  QueryJoinSelectOptimizer qjs = new QueryJoinSelectOptimizer(ars, accc);
  System.out.println("Original query is " + te);
  qjs.optimize(te, null, null);

  Assert.assertTrue(te.equals(getTupleExpr(Q4)));
  System.out.print("Optimized query is " + te);
}
@Override
public void visit(State state, Properties props) throws Exception {
  boolean userExists = SecurityHelper.getTabUserExists(state);
  Connector conn;
  try {
    conn =
        state
            .getInstance()
            .getConnector(
                SecurityHelper.getTabUserName(state), SecurityHelper.getTabUserPass(state));
  } catch (AccumuloSecurityException ae) {
    if (ae.getErrorCode().equals(SecurityErrorCode.BAD_CREDENTIALS)) {
      if (userExists)
        throw new AccumuloException(
            "User didn't exist when they should (or worse- password mismatch)", ae);
      else return;
    }
    throw new AccumuloException("Unexpected exception!", ae);
  }
  String action = props.getProperty("action", "_random");
  TablePermission tp;
  if ("_random".equalsIgnoreCase(action)) {
    Random r = new Random();
    tp = TablePermission.values()[r.nextInt(TablePermission.values().length)];
  } else {
    tp = TablePermission.valueOf(action);
  }

  boolean tableExists = SecurityHelper.getTableExists(state);
  boolean hasPerm = SecurityHelper.getTabPerm(state, SecurityHelper.getTabUserName(state), tp);

  String tableName = state.getString("secTableName");

  switch (tp) {
    case READ:
      Authorizations auths =
          SecurityHelper.getUserAuths(state, SecurityHelper.getTabUserName(state));
      boolean canRead =
          SecurityHelper.getTabPerm(
              state, SecurityHelper.getTabUserName(state), TablePermission.READ);
      try {
        Scanner scan =
            conn.createScanner(
                tableName,
                conn.securityOperations()
                    .getUserAuthorizations(SecurityHelper.getTabUserName(state)));
        int seen = 0;
        Iterator<Entry<Key, Value>> iter = scan.iterator();
        while (iter.hasNext()) {
          Entry<Key, Value> entry = iter.next();
          Key k = entry.getKey();
          seen++;
          if (!auths.contains(k.getColumnVisibilityData()))
            throw new AccumuloException(
                "Got data I should not be capable of seeing: " + k + " table " + tableName);
        }
        if (!canRead)
          throw new AccumuloException(
              "Was able to read when I shouldn't have had the perm with connection user "
                  + conn.whoami()
                  + " table "
                  + tableName);
        for (Entry<String, Integer> entry : SecurityHelper.getAuthsMap(state).entrySet()) {
          if (auths.contains(entry.getKey().getBytes())) seen = seen - entry.getValue();
        }
        if (seen != 0) throw new AccumuloException("Got mismatched amounts of data");
      } catch (TableNotFoundException tnfe) {
        if (tableExists)
          throw new AccumuloException(
              "Accumulo and test suite out of sync: table " + tableName, tnfe);
        return;
      } catch (AccumuloSecurityException ae) {
        if (ae.getErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) {
          if (canRead)
            throw new AccumuloException(
                "Table read permission out of sync with Accumulo: table " + tableName, ae);
          else return;
        }
        throw new AccumuloException("Unexpected exception!", ae);
      } catch (RuntimeException re) {
        if (re.getCause() instanceof AccumuloSecurityException
            && ((AccumuloSecurityException) re.getCause())
                .getErrorCode()
                .equals(SecurityErrorCode.PERMISSION_DENIED)) {
          if (canRead)
            throw new AccumuloException(
                "Table read permission out of sync with Accumulo: table " + tableName,
                re.getCause());
          else return;
        }
        throw new AccumuloException("Unexpected exception!", re);
      }
      break;
    case WRITE:
      String key = SecurityHelper.getLastKey(state) + "1";
      Mutation m = new Mutation(new Text(key));
      for (String s : SecurityHelper.getAuthsArray()) {
        m.put(new Text(), new Text(), new ColumnVisibility(s), new Value("value".getBytes()));
      }
      BatchWriter writer;
      try {
        writer = conn.createBatchWriter(tableName, 9000L, 0L, 1);
      } catch (TableNotFoundException tnfe) {
        if (tableExists)
          throw new AccumuloException("Table didn't exist when it should have: " + tableName);
        return;
      }
      boolean works = true;
      try {
        writer.addMutation(m);
      } catch (MutationsRejectedException mre) {
        throw new AccumuloException("Mutation exception!", mre);
      }
      if (works)
        for (String s : SecurityHelper.getAuthsArray())
          SecurityHelper.increaseAuthMap(state, s, 1);
      break;
    case BULK_IMPORT:
      key = SecurityHelper.getLastKey(state) + "1";
      SortedSet<Key> keys = new TreeSet<Key>();
      for (String s : SecurityHelper.getAuthsArray()) {
        Key k = new Key(key, "", "", s);
        keys.add(k);
      }
      Path dir = new Path("/tmp", "bulk_" + UUID.randomUUID().toString());
      Path fail = new Path(dir.toString() + "_fail");
      FileSystem fs = SecurityHelper.getFs(state);
      FileSKVWriter f =
          FileOperations.getInstance()
              .openWriter(
                  dir + "/securityBulk." + RFile.EXTENSION,
                  fs,
                  fs.getConf(),
                  AccumuloConfiguration.getDefaultConfiguration());
      f.startDefaultLocalityGroup();
      fs.mkdirs(fail);
      for (Key k : keys) f.append(k, new Value("Value".getBytes()));
      f.close();
      try {
        conn.tableOperations().importDirectory(tableName, dir.toString(), fail.toString(), true);
      } catch (TableNotFoundException tnfe) {
        if (tableExists)
          throw new AccumuloException("Table didn't exist when it should have: " + tableName);
        return;
      } catch (AccumuloSecurityException ae) {
        if (ae.getErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) {
          if (hasPerm)
            throw new AccumuloException(
                "Bulk Import failed when it should have worked: " + tableName);
          return;
        }
        throw new AccumuloException("Unexpected exception!", ae);
      }
      for (String s : SecurityHelper.getAuthsArray()) SecurityHelper.increaseAuthMap(state, s, 1);
      fs.delete(dir, true);
      fs.delete(fail, true);
      if (!hasPerm)
        throw new AccumuloException(
            "Bulk Import succeeded when it should have failed: " + dir + " table " + tableName);
      break;
    case ALTER_TABLE:
      AlterTable.renameTable(conn, state, tableName, tableName + "plus", hasPerm, tableExists);
      break;
    case GRANT:
      props.setProperty("task", "grant");
      props.setProperty("perm", "random");
      props.setProperty("source", "table");
      props.setProperty("target", "system");
      AlterTablePerm.alter(state, props);
      break;
    case DROP_TABLE:
      props.setProperty("source", "table");
      DropTable.dropTable(state, props);
      break;
  }
}
@Test
public void dataReplicatedToCorrectTable() throws Exception {
  MiniAccumuloConfigImpl peerCfg =
      new MiniAccumuloConfigImpl(
          createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
          ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");

  MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
  peer1Cluster.start();

  try {
    Connector connMaster = getConnector();
    Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

    String peerClusterName = "peer";
    String peerUserName = "******", peerPassword = "******";

    // Create local user
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster
        .instanceOperations()
        .setProperty(
            Property.REPLICATION_PEERS.getKey() + peerClusterName,
            ReplicaSystemFactory.getPeerConfigurationValue(
                AccumuloReplicaSystem.class,
                AccumuloReplicaSystem.buildConfiguration(
                    peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));

    String masterTable1 = "master1",
        peerTable1 = "peer1",
        masterTable2 = "master2",
        peerTable2 = "peer2";

    // Create tables
    connMaster.tableOperations().create(masterTable1);
    String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
    Assert.assertNotNull(masterTableId1);

    connMaster.tableOperations().create(masterTable2);
    String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
    Assert.assertNotNull(masterTableId2);

    connPeer.tableOperations().create(peerTable1);
    String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
    Assert.assertNotNull(peerTableId1);

    connPeer.tableOperations().create(peerTable2);
    String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
    Assert.assertNotNull(peerTableId2);

    // Grant write permission
    connPeer
        .securityOperations()
        .grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
    connPeer
        .securityOperations()
        .grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);

    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster
        .tableOperations()
        .setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster
        .tableOperations()
        .setProperty(
            masterTable1,
            Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
            peerTableId1);

    connMaster
        .tableOperations()
        .setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster
        .tableOperations()
        .setProperty(
            masterTable2,
            Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
            peerTableId2);

    // Wait for zookeeper updates (configuration) to propagate
    sleepUninterruptibly(3, TimeUnit.SECONDS);

    // Write some data to table1
    BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
    long masterTable1Records = 0L;
    for (int rows = 0; rows < 2500; rows++) {
      Mutation m = new Mutation(masterTable1 + rows);
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
        masterTable1Records++;
      }
      bw.addMutation(m);
    }
    bw.close();

    // Write some data to table2
    bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
    long masterTable2Records = 0L;
    for (int rows = 0; rows < 2500; rows++) {
      Mutation m = new Mutation(masterTable2 + rows);
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
        masterTable2Records++;
      }
      bw.addMutation(m);
    }
    bw.close();

    log.info("Wrote all data to master cluster");

    Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1),
        filesFor2 = connMaster.replicationOperations().referencedFiles(masterTable2);

    while (!ReplicationTable.isOnline(connMaster)) {
      Thread.sleep(500);
    }

    // Restart the tserver to force a close on the WAL
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    cluster.exec(TabletServer.class);

    log.info("Restarted the tserver");

    // Read the data -- the tserver is back up and running
    Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());

    // Wait for both tables to be replicated
    log.info("Waiting for {} for {}", filesFor1, masterTable1);
    connMaster.replicationOperations().drain(masterTable1, filesFor1);

    log.info("Waiting for {} for {}", filesFor2, masterTable2);
    connMaster.replicationOperations().drain(masterTable2, filesFor2);

    long countTable = 0L;
    for (int i = 0; i < 5; i++) {
      countTable = 0L;
      for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
        countTable++;
        Assert.assertTrue(
            "Found unexpected key-value "
                + entry.getKey().toStringNoTruncate()
                + " "
                + entry.getValue(),
            entry.getKey().getRow().toString().startsWith(masterTable1));
      }
      log.info("Found {} records in {}", countTable, peerTable1);
      if (masterTable1Records != countTable) {
        log.warn(
            "Did not find {} expected records in {}, only found {}",
            masterTable1Records,
            peerTable1,
            countTable);
      }
    }
    Assert.assertEquals(masterTable1Records, countTable);

    for (int i = 0; i < 5; i++) {
      countTable = 0L;
      for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
        countTable++;
        Assert.assertTrue(
            "Found unexpected key-value "
                + entry.getKey().toStringNoTruncate()
                + " "
                + entry.getValue(),
            entry.getKey().getRow().toString().startsWith(masterTable2));
      }
      log.info("Found {} records in {}", countTable, peerTable2);
      if (masterTable2Records != countTable) {
        log.warn(
            "Did not find {} expected records in {}, only found {}",
            masterTable2Records,
            peerTable2,
            countTable);
      }
    }
    Assert.assertEquals(masterTable2Records, countTable);
  } finally {
    peer1Cluster.stop();
  }
}
@Test
public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
  MiniAccumuloConfigImpl peerCfg =
      new MiniAccumuloConfigImpl(
          createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
          ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");

  MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
  peer1Cluster.start();

  try {
    Connector connMaster = getConnector();
    Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

    String peerClusterName = "peer";
    String peerUserName = "******";
    String peerPassword = "******";

    // Create a user on the peer for replication to use
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

    // Configure the credentials we should use to authenticate ourselves to the peer for
    // replication
    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster
        .instanceOperations()
        .setProperty(
            Property.REPLICATION_PEERS.getKey() + peerClusterName,
            ReplicaSystemFactory.getPeerConfigurationValue(
                AccumuloReplicaSystem.class,
                AccumuloReplicaSystem.buildConfiguration(
                    peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));

    String masterTable1 = "master1",
        peerTable1 = "peer1",
        masterTable2 = "master2",
        peerTable2 = "peer2";

    connMaster.tableOperations().create(masterTable1);
    String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
    Assert.assertNotNull(masterTableId1);

    connMaster.tableOperations().create(masterTable2);
    String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
    Assert.assertNotNull(masterTableId2);

    connPeer.tableOperations().create(peerTable1);
    String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
    Assert.assertNotNull(peerTableId1);

    connPeer.tableOperations().create(peerTable2);
    String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
    Assert.assertNotNull(peerTableId2);

    // Give our replication user the ability to write to the tables
    connPeer
        .securityOperations()
        .grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
    connPeer
        .securityOperations()
        .grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);

    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster
        .tableOperations()
        .setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster
        .tableOperations()
        .setProperty(
            masterTable1,
            Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
            peerTableId1);

    connMaster
        .tableOperations()
        .setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster
        .tableOperations()
        .setProperty(
            masterTable2,
            Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
            peerTableId2);

    // Wait for zookeeper updates (configuration) to propagate
    sleepUninterruptibly(3, TimeUnit.SECONDS);

    // Write some data to table1
    BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
    for (int rows = 0; rows < 2500; rows++) {
      Mutation m = new Mutation(masterTable1 + rows);
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }
    bw.close();

    // Write some data to table2
    bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
    for (int rows = 0; rows < 2500; rows++) {
      Mutation m = new Mutation(masterTable2 + rows);
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }
    bw.close();

    log.info("Wrote all data to master cluster");

    while (!ReplicationTable.isOnline(connMaster)) {
      Thread.sleep(500);
    }

    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    cluster.exec(TabletServer.class);

    // Wait until we fully replicated something
    boolean fullyReplicated = false;
    for (int i = 0; i < 10 && !fullyReplicated; i++) {
      sleepUninterruptibly(timeoutFactor * 2, TimeUnit.SECONDS);

      Scanner s = ReplicationTable.getScanner(connMaster);
      WorkSection.limit(s);
      for (Entry<Key, Value> entry : s) {
        Status status = Status.parseFrom(entry.getValue().get());
        if (StatusUtil.isFullyReplicated(status)) {
          fullyReplicated |= true;
        }
      }
    }
    Assert.assertTrue("Did not fully replicate anything", fullyReplicated);

    long countTable = 0L;

    // Check a few times
    for (int i = 0; i < 10; i++) {
      countTable = 0L;
      for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
        countTable++;
        Assert.assertTrue(
            "Found unexpected key-value "
                + entry.getKey().toStringNoTruncate()
                + " "
                + entry.getValue(),
            entry.getKey().getRow().toString().startsWith(masterTable1));
      }
      log.info("Found {} records in {}", countTable, peerTable1);
      if (0 < countTable) {
        break;
      }
      Thread.sleep(2000);
    }
    Assert.assertTrue("Did not find any records in " + peerTable1 + " on peer", countTable > 0);

    for (int i = 0; i < 10; i++) {
      countTable = 0L;
      for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
        countTable++;
        Assert.assertTrue(
            "Found unexpected key-value "
                + entry.getKey().toStringNoTruncate()
                + " "
                + entry.getValue(),
            entry.getKey().getRow().toString().startsWith(masterTable2));
      }
      log.info("Found {} records in {}", countTable, peerTable2);
      if (0 < countTable) {
        break;
      }
      Thread.sleep(2000);
    }
    Assert.assertTrue("Did not find any records in " + peerTable2 + " on peer", countTable > 0);
  } finally {
    peer1Cluster.stop();
  }
}
@Test
public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
  MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(
      createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
      ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");

  MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
  peerCluster.start();

  Connector connMaster = getConnector();
  Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

  String peerUserName = "******";
  String peerPassword = "******";

  // Create a user on the peer for replication to use
  connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

  String peerClusterName = "peer";

  // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
  connMaster.instanceOperations().setProperty(
      Property.REPLICATION_PEERS.getKey() + peerClusterName,
      ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
          AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(),
              peerCluster.getZooKeepers())));

  // Configure the credentials we should use to authenticate ourselves to the peer for replication
  connMaster.instanceOperations()
      .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
  connMaster.instanceOperations()
      .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

  String masterTable = "master", peerTable = "peer";

  connMaster.tableOperations().create(masterTable);
  String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
  Assert.assertNotNull(masterTableId);

  connPeer.tableOperations().create(peerTable);
  String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
  Assert.assertNotNull(peerTableId);

  // Give our replication user the ability to write to the table
  connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);

  // Replicate this table to the peerClusterName in a table with the peerTableId table id
  connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
  connMaster.tableOperations().setProperty(masterTable,
      Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);

  // Write some data to the master table
  BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
  for (int rows = 0; rows < 5000; rows++) {
    Mutation m = new Mutation(Integer.toString(rows));
    for (int cols = 0; cols < 100; cols++) {
      String value = Integer.toString(cols);
      m.put(value, "", value);
    }
    bw.addMutation(m);
  }
  bw.close();

  log.info("Wrote all data to master cluster");

  Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
  for (String s : files) {
    log.info("Found referenced file for " + masterTable + ": " + s);
  }

  // Restart the tablet servers so the closed WALs get picked up for replication
  for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
    cluster.killProcess(ServerType.TABLET_SERVER, proc);
  }
  cluster.exec(TabletServer.class);

  // Scan the entire master table (the count is unused; this just forces a full read)
  Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());

  for (Entry<Key,Value> kv : connMaster.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) {
    log.debug(kv.getKey().toStringNoTruncate() + " "
        + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
  }

  connMaster.replicationOperations().drain(masterTable, files);

  Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY),
      peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
  Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
  Assert.assertTrue("No data in master table", masterIter.hasNext());
  Assert.assertTrue("No data in peer table", peerIter.hasNext());
  while (masterIter.hasNext() && peerIter.hasNext()) {
    Entry<Key,Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
    Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
        masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
    Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
  }

  Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
  Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());

  peerCluster.stop();
}
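The entry-by-entry comparison at the end of the test could likewise be pulled out as a helper. A minimal sketch, assuming the same Assert, Key, Value, and PartialKey classes used above; the name assertSameEntries is hypothetical. Like the test, it compares keys only through the column visibility (PartialKey.ROW_COLFAM_COLQUAL_COLVIS), deliberately excluding the timestamp:

// Asserts two scanners yield the same entries, ignoring key timestamps.
private static void assertSameEntries(Scanner expected, Scanner actual) {
  Iterator<Entry<Key,Value>> expIter = expected.iterator(), actIter = actual.iterator();
  while (expIter.hasNext() && actIter.hasNext()) {
    Entry<Key,Value> exp = expIter.next(), act = actIter.next();
    Assert.assertEquals(exp.getKey() + " was not equal to " + act.getKey(), 0,
        exp.getKey().compareTo(act.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
    Assert.assertEquals(exp.getValue(), act.getValue());
  }
  // Both iterators must be exhausted at the same time
  Assert.assertFalse("Had more expected entries", expIter.hasNext());
  Assert.assertFalse("Had more actual entries", actIter.hasNext());
}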
public static Map<TreeSet<String>,Long> getDiskUsage(AccumuloConfiguration acuConf,
    Set<String> tableIds, VolumeManager fs, Connector conn) throws IOException {
  TableDiskUsage tdu = new TableDiskUsage();

  // Add each table ID
  for (String tableId : tableIds)
    tdu.addTable(tableId);

  HashSet<String> tablesReferenced = new HashSet<String>(tableIds);
  HashSet<String> emptyTableIds = new HashSet<String>();
  HashSet<String> nameSpacesReferenced = new HashSet<String>();

  // For each table ID
  for (String tableId : tableIds) {
    Scanner mdScanner = null;
    try {
      mdScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    } catch (TableNotFoundException e) {
      throw new RuntimeException(e);
    }
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    mdScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());

    if (!mdScanner.iterator().hasNext()) {
      emptyTableIds.add(tableId);
    }

    // Read each file referenced by that table
    for (Entry<Key,Value> entry : mdScanner) {
      String file = entry.getKey().getColumnQualifier().toString();
      String[] parts = file.split("/");
      // The filename
      String uniqueName = parts[parts.length - 1];
      if (file.contains(":") || file.startsWith("../")) {
        String ref = parts[parts.length - 3];
        // Track any tables which are referenced externally by the current table
        if (!ref.equals(tableId)) {
          tablesReferenced.add(ref);
        }
        if (file.contains(":") && parts.length > 3) {
          List<String> base = Arrays.asList(Arrays.copyOf(parts, parts.length - 3));
          nameSpacesReferenced.add(Joiner.on("/").join(base));
        }
      }

      // Add this file to this table
      tdu.linkFileAndTable(tableId, uniqueName);
    }
  }

  // Each table seen (provided by the user, or referenced by a table the user provided)
  for (String tableId : tablesReferenced) {
    for (String tableDir : nameSpacesReferenced) {
      // Find each file and add its size
      FileStatus[] files = fs.globStatus(new Path(tableDir + "/" + tableId + "/*/*"));
      if (files != null) {
        for (FileStatus fileStatus : files) {
          // Assumes that all filenames are unique
          String name = fileStatus.getPath().getName();
          tdu.addFileSize(name, fileStatus.getLen());
        }
      }
    }
  }

  // Invert tableId->tableName
  HashMap<String,String> reverseTableIdMap = new HashMap<String,String>();
  for (Entry<String,String> entry : conn.tableOperations().tableIdMap().entrySet())
    reverseTableIdMap.put(entry.getValue(), entry.getKey());

  // Order the table-name sets lexicographically, with shorter sets first on ties
  TreeMap<TreeSet<String>,Long> usage =
      new TreeMap<TreeSet<String>,Long>(new Comparator<TreeSet<String>>() {
        @Override
        public int compare(TreeSet<String> o1, TreeSet<String> o2) {
          int len1 = o1.size();
          int len2 = o2.size();
          int min = Math.min(len1, len2);
          Iterator<String> iter1 = o1.iterator();
          Iterator<String> iter2 = o2.iterator();

          int count = 0;
          while (count < min) {
            String s1 = iter1.next();
            String s2 = iter2.next();
            int cmp = s1.compareTo(s2);
            if (cmp != 0)
              return cmp;
            count++;
          }

          return len1 - len2;
        }
      });

  for (Entry<List<String>,Long> entry : tdu.calculateUsage().entrySet()) {
    TreeSet<String> tableNames = new TreeSet<String>();
    // Convert the size shared by each table id into the size shared by each table name
    for (String tableId : entry.getKey())
      tableNames.add(reverseTableIdMap.get(tableId));

    // Map the set of table names to the shared file size
    usage.put(tableNames, entry.getValue());
  }

  if (!emptyTableIds.isEmpty()) {
    TreeSet<String> emptyTables = new TreeSet<String>();
    for (String tableId : emptyTableIds) {
      emptyTables.add(reverseTableIdMap.get(tableId));
    }
    usage.put(emptyTables, 0L);
  }

  return usage;
}
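A minimal usage sketch for the map returned above: each key is the set of table names that share a group of files, and each value is the total size in bytes of those shared files (an empty table maps to 0L). The helper name printDiskUsage is hypothetical:

// Prints each group of tables alongside the bytes they share,
// e.g. "[table1, table2] : 1048576" for files referenced by both tables.
private static void printDiskUsage(Map<TreeSet<String>,Long> usage) {
  for (Entry<TreeSet<String>,Long> entry : usage.entrySet()) {
    System.out.println(entry.getKey() + " : " + entry.getValue());
  }
}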