@Override
public void update(Key k, Value v) {
  // SIGNAL from SingleTransposeIterator
  if (k.getTimestamp() % 2 != 0)
    return;
  String rStr;
  {
    ByteSequence rowData = k.getRowData();
    // toArray() already honors the sequence's offset and length; re-applying offset() to the
    // compacted copy was a bug
    rStr = new String(rowData.toArray());
  }
  int pos = rStr.indexOf(edgeSep);
  if (pos == -1)
    return; // this is a degree row, not an edge row
  // log.debug("edge row " + rStr + " : now " + setNodesReached.toString());
  String toNode = rStr.substring(pos + 1);
  setNodesReached.add(toNode);
  // if (copyDeg) {
  //   Integer cnt = setNodesReachedCount.get(toNode);
  //   cnt = cnt == null ? new Integer(1) : new Integer(cnt + 1);
  //   setNodesReachedCount.put(toNode, cnt);
  // }
}
private static String normalizeEndRow(Range range) {
  Key endKey = range.getEndKey();
  if (endKey == null)
    return null;
  // toArray() yields exactly the row bytes; getBackingArray() may include trailing bytes
  String endRow = new String(endKey.getRowData().toArray());
  if (!range.isEndKeyInclusive())
    return prevRow(endRow);
  else
    return endRow;
}
private static String normalizeStartRow(Range range) {
  Key startKey = range.getStartKey();
  if (startKey == null)
    return null;
  // toArray() yields exactly the row bytes; getBackingArray() may include trailing bytes
  String startRow = new String(startKey.getRowData().toArray());
  if (!range.isStartKeyInclusive())
    return startRow + '\0';
  else
    return startRow;
}
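// Usage sketch (hypothetical): the two helpers above turn Range endpoints into inclusive row
// strings. Assumes prevRow() returns the lexicographically previous row, as referenced above.
private static void normalizeExample() {
  Range r = new Range(new Key("a"), false, new Key("m"), false);
  String start = normalizeStartRow(r); // "a\0" -- exclusive start becomes the next possible row
  String end = normalizeEndRow(r);     // prevRow("m") -- exclusive end becomes the previous row
}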
private static float getProgressForRange(final Range range, final Key currentKey) {
  if (currentKey == null) {
    return 0f;
  }
  if ((range.getStartKey() != null) && (range.getEndKey() != null)) {
    if (!range.getStartKey().equals(range.getEndKey(), PartialKey.ROW)) {
      // just look at the row progress
      return getProgressForRange(range.getStartKey().getRowData(),
          range.getEndKey().getRowData(), currentKey.getRowData());
    } else if (!range.getStartKey().equals(range.getEndKey(), PartialKey.ROW_COLFAM)) {
      // just look at the column family progress
      return getProgressForRange(range.getStartKey().getColumnFamilyData(),
          range.getEndKey().getColumnFamilyData(), currentKey.getColumnFamilyData());
    } else if (!range.getStartKey().equals(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL)) {
      // just look at the column qualifier progress
      return getProgressForRange(range.getStartKey().getColumnQualifierData(),
          range.getEndKey().getColumnQualifierData(), currentKey.getColumnQualifierData());
    }
  }
  // if we can't figure it out, then claim no progress
  return 0f;
}
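// Sketch: for a range spanning multiple rows, only the row bytes drive the estimate; the
// ByteSequence overload of getProgressForRange (not shown here) is assumed to interpolate
// byte-wise between the two bounds, returning a float in [0, 1].
private static void progressExample() {
  Range r = new Range(new Key("a"), new Key("z"));
  float progress = getProgressForRange(r, new Key("m")); // roughly mid-range
}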
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  @SuppressWarnings("deprecation")
  FileSystem hadoopFs = FileUtil.getFileSystem(conf, AccumuloConfiguration.getSiteConfiguration());
  FileSystem localFs = FileSystem.getLocal(conf);
  Opts opts = new Opts();
  opts.parseArgs(PrintInfo.class.getName(), args);
  if (opts.files.isEmpty()) {
    System.err.println("No files were given");
    System.exit(-1);
  }

  long[] countBuckets = new long[11];
  long[] sizeBuckets = new long[countBuckets.length];
  long totalSize = 0;

  for (String arg : opts.files) {
    Path path = new Path(arg);
    FileSystem fs = hadoopFs.exists(path) ? hadoopFs : localFs; // fall back to local
    CachableBlockFile.Reader _rdr = new CachableBlockFile.Reader(fs, path, conf, null, null);
    Reader iter = new RFile.Reader(_rdr);

    iter.printInfo();
    System.out.println();
    org.apache.accumulo.core.file.rfile.bcfile.PrintInfo.main(new String[] {arg});

    if (opts.histogram || opts.dump) {
      iter.seek(new Range((Key) null, (Key) null), new ArrayList<ByteSequence>(), false);
      while (iter.hasTop()) {
        Key key = iter.getTopKey();
        Value value = iter.getTopValue();
        if (opts.dump)
          System.out.println(key + " -> " + value);
        if (opts.histogram) {
          long size = key.getSize() + value.getSize();
          int bucket = (int) Math.log10(size);
          countBuckets[bucket]++;
          sizeBuckets[bucket] += size;
          totalSize += size;
        }
        iter.next();
      }
    }
    iter.close();

    if (opts.histogram) {
      System.out.println("Up to size count %-age");
      for (int i = 1; i < countBuckets.length; i++) {
        System.out.println(String.format("%11.0f : %10d %6.2f%%", Math.pow(10, i),
            countBuckets[i], sizeBuckets[i] * 100. / totalSize));
      }
    }
  }
}
public static Range minimizeEndKeyTimeStamp(Range range) {
  Range seekRange = range;
  if (range.getEndKey() != null && range.getEndKey().getTimestamp() != Long.MIN_VALUE) {
    Key seekKey = new Key(seekRange.getEndKey());
    seekKey.setTimestamp(Long.MIN_VALUE);
    seekRange = new Range(range.getStartKey(), range.isStartKeyInclusive(), seekKey, true);
  }
  return seekRange;
}
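// Sketch: timestamps sort descending within otherwise-equal keys, so Long.MIN_VALUE sorts
// last. Lowering the end key's timestamp keeps every version of the end column in range.
private static void minimizeExample() {
  Range r = new Range(new Key("row"), true, new Key("row", "cf", "cq", 42L), true);
  Range widened = minimizeEndKeyTimeStamp(r);
  // widened ends at ("row", "cf", "cq", Long.MIN_VALUE), still inclusive
}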
@Override
protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
  try {
    if (key != null)
      assertEquals(key.getRow().toString(), new String(v.get()));
    assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
    assertEquals(new String(v.get()), String.format("%09x", count));
  } catch (AssertionError e) {
    e1 = e;
  }
  key = new Key(k);
  count++;
}
private static Scanner getTabletLogScanner(Credentials credentials, KeyExtent extent) {
  String tableId = MetadataTable.ID;
  if (extent.isMeta())
    tableId = RootTable.ID;
  Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, tableId,
      Authorizations.EMPTY);
  scanner.fetchColumnFamily(LogColumnFamily.NAME);
  Text start = extent.getMetadataEntry();
  Key endKey = new Key(start, LogColumnFamily.NAME);
  endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
  scanner.setRange(new Range(new Key(start), endKey));
  return scanner;
}
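// Note (a sketch, not a claimed drop-in equivalence): because only LogColumnFamily is fetched,
// the hand-built range above behaves like Accumulo's slightly tighter built-in helper:
//   scanner.setRange(Range.exact(start, LogColumnFamily.NAME));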
/** Write entries to a table. */
public static void writeEntries(Connector connector, Map<Key, Value> map, String table,
    boolean createIfNotExist) {
  if (createIfNotExist && !connector.tableOperations().exists(table))
    try {
      connector.tableOperations().create(table);
    } catch (AccumuloException | AccumuloSecurityException e) {
      log.error("trouble creating " + table, e);
      throw new RuntimeException(e);
    } catch (TableExistsException e) {
      log.error("crazy", e);
      throw new RuntimeException(e);
    }

  BatchWriterConfig bwc = new BatchWriterConfig();
  BatchWriter bw;
  try {
    bw = connector.createBatchWriter(table, bwc);
  } catch (TableNotFoundException e) {
    log.error("tried to write to a non-existent table " + table, e);
    throw new RuntimeException(e);
  }

  try {
    for (Map.Entry<Key, Value> entry : map.entrySet()) {
      Key k = entry.getKey();
      ByteSequence rowData = k.getRowData(), cfData = k.getColumnFamilyData(),
          cqData = k.getColumnQualifierData();
      Mutation m = new Mutation(rowData.getBackingArray(), rowData.offset(), rowData.length());
      // use toArray() for cf/cq: their backing arrays may be longer than the sequences
      m.put(cfData.toArray(), cqData.toArray(), k.getColumnVisibilityParsed(),
          entry.getValue().get());
      bw.addMutation(m);
    }
  } catch (MutationsRejectedException e) {
    log.error("mutations rejected", e);
    throw new RuntimeException(e);
  } finally {
    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      log.error("mutations rejected while trying to close BatchWriter", e);
    }
  }
}
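// Usage sketch (hypothetical table name): write a single entry, creating the table if needed.
private static void writeEntriesExample(Connector connector) {
  Map<Key, Value> entries = new TreeMap<>();
  entries.put(new Key("row1", "cf", "cq"), new Value("v".getBytes()));
  writeEntries(connector, entries, "exampleTable", true);
}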
private Value reduceMultiValue(final Key key, final Iterator<Value> iter, final Value firstValue) {
  final String group;
  try {
    group = new String(key.getColumnFamilyData().getBackingArray(), CommonConstants.UTF_8);
  } catch (final UnsupportedEncodingException e) {
    throw new RuntimeException(e.getMessage(), e);
  }
  ElementAggregator aggregator;
  Properties firstPropertySet;
  try {
    firstPropertySet = elementConverter.getPropertiesFromValue(group, firstValue);
    aggregator = schema.getElement(group).getAggregator();
    aggregator.aggregate(firstPropertySet);
    while (iter.hasNext()) {
      aggregator.aggregate(elementConverter.getPropertiesFromValue(group, iter.next()));
    }
  } catch (final AccumuloElementConversionException e) {
    throw new IllegalArgumentException("Failed to get Properties from an accumulo value", e);
  }
  final Properties properties = new Properties();
  aggregator.state(properties);
  try {
    return elementConverter.getValueFromProperties(group, properties);
  } catch (final AccumuloElementConversionException e) {
    // this branch converts the other way, so the message must not repeat the one above
    throw new IllegalArgumentException("Failed to get an accumulo value from Properties", e);
  }
}
private boolean inBounds(final Key k) {
  k.getRow(row);
  final MultiDimensionalCoordinates coordinates = indexStrategy.getCoordinatesPerDimension(
      new ByteArrayId(new GeowaveRowId(row.getBytes(), row.getLength()).getInsertionId()));
  return rangeCache.inBounds(coordinates);
}
public ScanState(ClientContext context, String tableId, Authorizations authorizations,
    Range range, SortedSet<Column> fetchedColumns, int size,
    List<IterInfo> serverSideIteratorList,
    Map<String, Map<String, String>> serverSideIteratorOptions, boolean isolated,
    long readaheadThreshold, SamplerConfiguration samplerConfig, long batchTimeOut,
    String classLoaderContext) {
  this.context = context;
  this.authorizations = authorizations;
  this.classLoaderContext = classLoaderContext;

  columns = new ArrayList<Column>(fetchedColumns.size());
  for (Column column : fetchedColumns) {
    columns.add(column);
  }

  this.tableId = tableId;
  this.range = range;

  Key startKey = range.getStartKey();
  if (startKey == null) {
    startKey = new Key();
  }
  this.startRow = startKey.getRow();
  this.skipStartRow = false;

  this.size = size;
  this.serverSideIteratorList = serverSideIteratorList;
  this.serverSideIteratorOptions = serverSideIteratorOptions;
  this.isolated = isolated;
  this.readaheadThreshold = readaheadThreshold;
  this.samplerConfig = samplerConfig;
  this.batchTimeOut = batchTimeOut;
}
@Override
public boolean accept(Key k, Value v) {
  long ts = k.getTimestamp();
  if ((hasStart && (ts < start)) || (hasEnd && (ts > end)))
    return false;
  if (hasStart && !startInclusive && ts == start)
    return false;
  if (hasEnd && !endInclusive && ts == end)
    return false;
  return true;
}
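// Sketch: this accept() has the same semantics as Accumulo's stock TimestampFilter, which is
// normally attached to a scanner like this (class name assumed to match the enclosing filter):
private static void timestampFilterExample(Scanner scanner) {
  IteratorSetting is = new IteratorSetting(30, "tsFilter", TimestampFilter.class);
  TimestampFilter.setRange(is, 1000L, 2000L); // both ends inclusive
  scanner.addScanIterator(is);
}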
@Override
public void visit(State state, Environment env, Properties props) throws Exception {
  String[] args = new String[8];
  args[0] = "-libjars";
  args[1] = getMapReduceJars();
  args[2] = env.getUserName();
  args[3] = env.getPassword();
  args[4] = state.getString("seqTableName");
  args[5] = env.getInstance().getInstanceName();
  args[6] = env.getConfigProperty("ZOOKEEPERS");
  args[7] = args[4] + "_MR";

  if (ToolRunner.run(CachedConfiguration.getInstance(), new MapRedVerifyTool(), args) != 0) {
    log.error("Failed to run map/red verify");
    return;
  }

  Scanner outputScanner = env.getConnector().createScanner(args[7], Authorizations.EMPTY);
  outputScanner.setRange(new Range());

  int count = 0;
  Key lastKey = null;
  for (Entry<Key, Value> entry : outputScanner) {
    Key current = entry.getKey();
    if (lastKey != null && lastKey.getColumnFamily().equals(current.getRow())) {
      log.info(entry.getKey());
      count++;
    }
    lastKey = current;
  }

  if (count > 1) {
    log.error("Gaps in output");
  }

  log.debug("Dropping table: " + args[7]);
  Connector conn = env.getConnector();
  conn.tableOperations().delete(args[7]);
}
@Override
protected HashCode hash(HashFunction hashFunction, Key k) {
  Hasher hasher = hashFunction.newHasher();
  if (row) {
    putByteSquence(k.getRowData(), hasher);
  }
  if (family) {
    putByteSquence(k.getColumnFamilyData(), hasher);
  }
  if (qualifier) {
    putByteSquence(k.getColumnQualifierData(), hasher);
  }
  if (visibility) {
    putByteSquence(k.getColumnVisibilityData(), hasher);
  }
  return hasher.hash();
}
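// Sketch: any Guava HashFunction plugs in, e.g. murmur3_128; assumes this runs inside the
// enclosing sampler subclass with row hashing enabled.
private void hashExample() {
  HashCode hc = hash(Hashing.murmur3_128(), new Key("row", "cf", "cq"));
}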
public static TabletLocationState createTabletLocationState(Key k, Value v)
    throws IOException, BadLocationStateException {
  final SortedMap<Key, Value> decodedRow = WholeRowIterator.decodeRow(k, v);
  KeyExtent extent = null;
  TServerInstance future = null;
  TServerInstance current = null;
  TServerInstance last = null;
  long lastTimestamp = 0;
  List<Collection<String>> walogs = new ArrayList<Collection<String>>();
  boolean chopped = false;

  for (Entry<Key, Value> entry : decodedRow.entrySet()) {
    Key key = entry.getKey();
    Text row = key.getRow();
    Text cf = key.getColumnFamily();
    Text cq = key.getColumnQualifier();

    if (cf.compareTo(TabletsSection.FutureLocationColumnFamily.NAME) == 0) {
      TServerInstance location = new TServerInstance(entry.getValue(), cq);
      if (future != null) {
        throw new BadLocationStateException("found two assignments for the same extent "
            + key.getRow() + ": " + future + " and " + location);
      }
      future = location;
    } else if (cf.compareTo(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
      TServerInstance location = new TServerInstance(entry.getValue(), cq);
      if (current != null) {
        throw new BadLocationStateException("found two locations for the same extent "
            + key.getRow() + ": " + current + " and " + location);
      }
      current = location;
    } else if (cf.compareTo(LogColumnFamily.NAME) == 0) {
      String[] split = entry.getValue().toString().split("\\|")[0].split(";");
      walogs.add(Arrays.asList(split));
    } else if (cf.compareTo(TabletsSection.LastLocationColumnFamily.NAME) == 0) {
      if (lastTimestamp < entry.getKey().getTimestamp())
        last = new TServerInstance(entry.getValue(), cq);
    } else if (cf.compareTo(ChoppedColumnFamily.NAME) == 0) {
      chopped = true;
    } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(cf, cq)) {
      extent = new KeyExtent(row, entry.getValue());
    }
  }
  if (extent == null) {
    log.warn("No prev-row for key extent: " + decodedRow);
    return null;
  }
  return new TabletLocationState(extent, future, current, last, walogs, chopped);
}
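// Usage sketch: the (k, v) pair is expected to come from a metadata scan wrapped in Accumulo's
// stock WholeRowIterator, so each entry encodes an entire tablet row.
private static void locationStateExample(Scanner metaScanner) throws Exception {
  metaScanner.addScanIterator(new IteratorSetting(100, "wholeRow", WholeRowIterator.class));
  for (Entry<Key, Value> e : metaScanner) {
    TabletLocationState tls = createTabletLocationState(e.getKey(), e.getValue());
    if (tls != null)
      System.out.println(tls);
  }
}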
public void map(Key keyIn, Value valueIn, Context context)
    throws IOException, InterruptedException {
  // Only process records containing tweet text
  if (keyIn.getColumnFamily().equals(new Text("text"))) {
    int docPartition = r.nextInt(MAX_PARTITION);

    // Split the text into tokens
    String[] tokens = valueIn.toString().split(splitRegex);

    // Process each word and add it as a key with its tweet ID as a value
    for (String token : tokens) {
      // Omit zero length tokens and tokens only containing special characters
      if (token.length() != 0 && !token.matches("[\\W]*")) {
        // Filter some of the garbage
        if (!token.matches(urlRegex)) {
          if (token.matches(tweetRegex)) {
            token = token.replaceAll(tweetRegex, "$1");
          } else {
            token = token.replaceAll(uglyTextRegex, "$1");
          }
        }
        // Set the outgoing key and value
        keyOut.set(Integer.toString(docPartition));
        String colFam = token.toLowerCase();
        String colQual = keyIn.getRow().toString();
        valueOut.set(colFam + ":" + colQual);
        // Create an output Key Value pair
        context.write(keyOut, valueOut);
      }
    }
  }
}
public static Entry<Key, Value> checkColumn(Environment env, IteratorSetting iterConf, Bytes row,
    Column col) {
  Span span = Span.exact(row, col);

  Scanner scanner;
  try {
    // TODO reuse or share scanner
    scanner = env.getConnector().createScanner(env.getTable(), env.getAuthorizations());
  } catch (TableNotFoundException e) {
    // TODO proper exception handling
    throw new RuntimeException(e);
  }
  scanner.setRange(SpanUtil.toRange(span));
  scanner.addScanIterator(iterConf);

  Iterator<Entry<Key, Value>> iter = scanner.iterator();
  if (iter.hasNext()) {
    Entry<Key, Value> entry = iter.next();

    Key k = entry.getKey();
    Bytes r = Bytes.of(k.getRowData().toArray());
    Bytes cf = Bytes.of(k.getColumnFamilyData().toArray());
    Bytes cq = Bytes.of(k.getColumnQualifierData().toArray());
    Bytes cv = Bytes.of(k.getColumnVisibilityData().toArray());

    if (r.equals(row) && cf.equals(col.getFamily()) && cq.equals(col.getQualifier())
        && cv.equals(col.getVisibility())) {
      return entry;
    } else {
      throw new RuntimeException("unexpected key " + k + " " + row + " " + col);
    }
  }
  return null;
}
public int run(String[] args) throws Exception {
  ClientOpts opts = new ClientOpts();
  opts.parseArgs(LocalityCheck.class.getName(), args);

  VolumeManager fs = VolumeManagerImpl.get();
  Connector connector = opts.getConnector();
  Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
  scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  scanner.setRange(MetadataSchema.TabletsSection.getRange());

  Map<String, Long> totalBlocks = new HashMap<String, Long>();
  Map<String, Long> localBlocks = new HashMap<String, Long>();
  ArrayList<String> files = new ArrayList<String>();

  for (Entry<Key, Value> entry : scanner) {
    Key key = entry.getKey();
    if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
      String location = entry.getValue().toString();
      String[] parts = location.split(":");
      String host = parts[0];
      addBlocks(fs, host, files, totalBlocks, localBlocks);
      files.clear();
    } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
      files.add(fs.getFullPath(key).toString());
    }
  }
  System.out.println(" Server %local total blocks");
  for (String host : totalBlocks.keySet()) {
    System.out.println(String.format("%15s %5.1f %8d", host,
        (localBlocks.get(host) * 100.) / totalBlocks.get(host), totalBlocks.get(host)));
  }
  return 0;
}
public static SortedMap<FileRef, DataFileValue> getDataFileSizes(KeyExtent extent,
    Credentials credentials) throws IOException {
  TreeMap<FileRef, DataFileValue> sizes = new TreeMap<FileRef, DataFileValue>();

  Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials,
      MetadataTable.ID, Authorizations.EMPTY);
  mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  Text row = extent.getMetadataEntry();
  VolumeManager fs = VolumeManagerImpl.get();

  Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
  endKey = endKey.followingKey(PartialKey.ROW_COLFAM);

  mdScanner.setRange(new Range(new Key(row), endKey));
  for (Entry<Key, Value> entry : mdScanner) {
    if (!entry.getKey().getRow().equals(row))
      break;
    DataFileValue dfv = new DataFileValue(entry.getValue().get());
    sizes.put(new FileRef(fs, entry.getKey()), dfv);
  }
  return sizes;
}
private static AccumuloBackedGraph setupGraph(Instance instance, Connector conn, String tableName,
    int numEntries) {
  long ageOffTimeInMilliseconds = (30 * 24 * 60 * 60 * 1000L); // 30 days in milliseconds
  try {
    // Create table
    // (this method creates the table, removes the versioning iterator, adds the
    // SetOfStatisticsCombiner iterator, and sets the age-off iterator to age data off after
    // it is more than ageOffTimeInMilliseconds milliseconds old).
    TableUtils.createTable(conn, tableName, ageOffTimeInMilliseconds);

    // Create numEntries edges and add to Accumulo
    BatchWriter writer = conn.createBatchWriter(tableName, 1000000L, 1000L, 1);
    for (int i = 0; i < numEntries; i++) {
      Edge edge = new Edge("customer", "" + i, "product", "B", "purchase", "instore", true,
          visibilityString, sevenDaysBefore, sixDaysBefore);
      SetOfStatistics statistics = new SetOfStatistics();
      statistics.addStatistic("count", new Count(i));
      Key key = ConversionUtils.getKeysFromEdge(edge).getFirst();
      Value value = ConversionUtils.getValueFromSetOfStatistics(statistics);
      Mutation m = new Mutation(key.getRow());
      m.put(key.getColumnFamily(), key.getColumnQualifier(),
          new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), value);
      writer.addMutation(m);
    }
    writer.close();

    // Create Accumulo backed graph
    return new AccumuloBackedGraph(conn, tableName);
  } catch (AccumuloException | AccumuloSecurityException | TableExistsException
      | TableNotFoundException e) {
    fail("Failed to set up graph in Accumulo with exception: " + e);
  }
  return null;
}
private static <M extends Map<Key, V>, V> M transposeMapHelp(Map<Key, V> orig, M neww) {
  for (Map.Entry<Key, V> entry : orig.entrySet()) {
    Key k0 = entry.getKey();
    Key k = new Key(k0.getColumnQualifier(), k0.getColumnFamily(), k0.getRow(),
        k0.getColumnVisibilityParsed(), k0.getTimestamp());
    neww.put(k, entry.getValue());
  }
  return neww;
}
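// Usage sketch: transposing swaps each key's row with its column qualifier, turning an edge
// entry (rowA, cf, rowB) into (rowB, cf, rowA); values are carried over unchanged.
private static Map<Key, Value> transposeExample(Map<Key, Value> original) {
  return transposeMapHelp(original, new TreeMap<Key, Value>());
}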
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
    throws IOException {
  super.seek(range, columnFamilies, inclusive);
  if (!super.hasTop()) {
    return;
  }
  do {
    // copy the key: the source iterator may reuse its top key object after next()
    topKey = new Key(super.getTopKey());
    if (!topKey.isDeleted()) {
      sum += encoder.decode(super.getTopValue().get());
    }
    super.next();
  } while (super.hasTop());
  // emit the running sum under the last key seen in the range
  topValue = new Value(encoder.encode(sum));
  hasTop = true;
}
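// Sketch: `encoder` can be any TypedValueCombiner.Encoder<Long>; Accumulo ships several, e.g.
// the fixed-length long encoder (an assumption about the enclosing iterator's field):
private static void encoderExample() {
  TypedValueCombiner.Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
  byte[] bytes = encoder.encode(5L);
  long n = encoder.decode(bytes); // 5
}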
private static TreeSet<RowColumn> scanAll(ClientOnDefaultTable opts, ScannerOpts scanOpts,
    String tableName) throws Exception {
  TreeSet<RowColumn> result = new TreeSet<RowColumn>();
  Connector conn = opts.getConnector();
  Scanner scanner = conn.createScanner(tableName, auths);
  scanner.setBatchSize(scanOpts.scanBatchSize);
  for (Entry<Key, Value> entry : scanner) {
    Key key = entry.getKey();
    Column column = new Column(TextUtil.getBytes(key.getColumnFamily()),
        TextUtil.getBytes(key.getColumnQualifier()), TextUtil.getBytes(key.getColumnVisibility()));
    result.add(new RowColumn(key.getRow(), column, key.getTimestamp()));
  }
  return result;
}
@Override
public void map(Key ik, Value iv, Context context) {
  NNMetadata metadata = NNMetadata.inflate(iv.toString(), ik.getRow().toString());
  int inputNeuronCount = BinaryUtils.toBinary(metadata.getInputMax(),
      new double[] {metadata.getInputMax()}, true).length;
  int num = BinaryUtils.toBinary(metadata.getOutputMax(),
      new double[] {metadata.getOutputMax()}, false).length;
  int categories = metadata.getOutputNameFields().size();

  ClassificationNetworkConf conf = new ClassificationNetworkConf();
  conf.setInputMax(metadata.getInputMax());
  conf.setOutputMax(metadata.getOutputMax());
  conf.setInputActivation(null);
  conf.setInputBias(true);
  conf.setInputNeuronCount(inputNeuronCount);
  conf.setHiddenActiviation(new ActivationSigmoid());
  conf.setHiddenBias(true);
  // '^' is XOR in Java, not exponentiation; 1 << categories gives the intended 2^categories
  conf.setHiddenNeuronCount(1 << categories);
  conf.setOutputActivation(new ActivationSigmoid());
  conf.setOutputNeuronCount(num);
  conf.setNumberOfCategories(categories); // FIXME: this is bogus now
  conf.setBasicMLInput(this.getRandomArray(inputNeuronCount)); // FIXME: this is bogus now
  conf.setBasicIdealMLOutput(this.getRandomArray(num)); // FIXME: this is bogus now

  try {
    NNProcessor processor = NNProcessorFactory.getProcessorBean(conf);
    processor.constructNetworks(metadata.getArtifactId());
  } catch (RepositoryException e) {
    String gripe = "Access to the Repository Services died";
    log.log(Level.SEVERE, gripe, e);
    throw new StopMapperException(gripe, e);
  }
}
protected ByteArrayId getPeristenceTypeName(final Key key) {
  if (key.getColumnFamily() != null) {
    // copy only getLength() bytes: Text.getBytes() may return a longer backing array
    return new ByteArrayId(TextUtil.getBytes(key.getColumnFamily()));
  }
  return null;
}

protected ByteArrayId getPrimaryId(final Key key) {
  return new ByteArrayId(TextUtil.getBytes(key.getRow()));
}

protected ByteArrayId getSecondaryId(final Key key) {
  if (key.getColumnQualifier() != null) {
    return new ByteArrayId(TextUtil.getBytes(key.getColumnQualifier()));
  }
  return null;
}
public static Column convert(Key k) {
  Bytes f = ByteUtil.toBytes(k.getColumnFamilyData());
  Bytes q = ByteUtil.toBytes(k.getColumnQualifierData());
  Bytes v = ByteUtil.toBytes(k.getColumnVisibilityData());
  return new Column(f, q, v);
}
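// Usage sketch: maps an Accumulo key onto a Fluo-style Column; the row and timestamp are
// intentionally dropped, keeping only family, qualifier, and visibility.
private static void convertExample() {
  Column col = convert(new Key("row", "fam", "qual"));
}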
@Test
public void visibilitySimplified() throws Exception {
  // Create a PCJ index within Rya.
  final String sparql = "SELECT ?customer ?worker ?city " + "{ " + "?customer <" + TALKS_TO
      + "> ?worker. " + "?worker <" + LIVES_IN + "> ?city. " + "?worker <" + WORKS_AT + "> <"
      + BURGER_JOINT + ">. " + "}";

  final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(
      ACCUMULO_USER, ACCUMULO_PASSWORD.toCharArray(), instanceName, zookeepers), accumuloConn);

  final String pcjId = ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);

  // Grant the root user the "u" authorization.
  accumuloConn.securityOperations().changeUserAuthorizations(ACCUMULO_USER,
      new Authorizations("u"));

  // Setup a connection to the Rya instance that uses the "u" authorizations. This ensures
  // any statements that are inserted will have the "u" authorization on them and that the
  // PCJ updating application will have to maintain visibilities.
  final AccumuloRdfConfiguration ryaConf = super.makeConfig(instanceName, zookeepers);
  ryaConf.set(ConfigUtils.CLOUDBASE_AUTHS, "u");
  ryaConf.set(RdfCloudTripleStoreConfiguration.CONF_CV, "u");

  Sail sail = null;
  RyaSailRepository ryaRepo = null;
  RepositoryConnection ryaConn = null;

  try {
    sail = RyaSailFactory.getInstance(ryaConf);
    ryaRepo = new RyaSailRepository(sail);
    ryaConn = ryaRepo.getConnection();

    // Load a few Statements into Rya.
    ryaConn.add(VF.createStatement(ALICE, TALKS_TO, BOB));
    ryaConn.add(VF.createStatement(BOB, LIVES_IN, HAPPYVILLE));
    ryaConn.add(VF.createStatement(BOB, WORKS_AT, BURGER_JOINT));

    // Wait for Fluo to finish processing.
    fluo.waitForObservers();

    // Fetch the exported result and show that its column visibility has been simplified.
    final String pcjTableName = new PcjTableNameFactory().makeTableName(RYA_INSTANCE_NAME, pcjId);
    final Scanner scan = accumuloConn.createScanner(pcjTableName, new Authorizations("u"));
    scan.fetchColumnFamily(new Text("customer;worker;city"));

    final Entry<Key, Value> result = scan.iterator().next();
    final Key key = result.getKey();
    assertEquals(new Text("u"), key.getColumnVisibility());
  } finally {
    if (ryaConn != null) {
      ryaConn.close();
    }
    if (ryaRepo != null) {
      ryaRepo.shutDown();
    }
    if (sail != null) {
      sail.shutDown();
    }
  }
}