public static void moveMetaDeleteMarkers(Instance instance, Credentials creds) {
  // move old delete markers to new location, to standardize table schema between all metadata
  // tables
  byte[] EMPTY_BYTES = new byte[0];
  Scanner scanner = new ScannerImpl(instance, creds, RootTable.ID, Authorizations.EMPTY);
  String oldDeletesPrefix = "!!~del";
  Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);
  scanner.setRange(oldDeletesRange);
  for (Entry<Key, Value> entry : scanner) {
    String row = entry.getKey().getRow().toString();
    if (row.startsWith(oldDeletesPrefix)) {
      String filename = row.substring(oldDeletesPrefix.length());
      // add the new entry first
      log.info("Moving " + filename + " marker in " + RootTable.NAME);
      Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename);
      m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES);
      update(creds, m, RootTable.EXTENT);
      // remove the old entry
      m = new Mutation(entry.getKey().getRow());
      m.putDelete(EMPTY_BYTES, EMPTY_BYTES);
      update(creds, m, RootTable.OLD_EXTENT);
    } else {
      break;
    }
  }
}
private static long write(Connector conn, ArrayList<byte[]> cfset, String table)
    throws TableNotFoundException, MutationsRejectedException {
  Random rand = new Random();
  byte[] val = new byte[50];
  BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
  long t1 = System.currentTimeMillis();
  for (int i = 0; i < 1 << 15; i++) {
    byte[] row = FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0]);
    Mutation m = new Mutation(row);
    for (byte[] cf : cfset) {
      byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]);
      rand.nextBytes(val);
      m.put(cf, cq, val);
    }
    bw.addMutation(m);
  }
  bw.close();
  long t2 = System.currentTimeMillis();
  return t2 - t1;
}
@Test
public void test() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row1");
  m.put("cf", "col1", "Test");
  bw.addMutation(m);
  bw.close();
  scanCheck(c, tableName, "Test");
  FileSystem fs = getCluster().getFileSystem();
  Path jarPath = new Path(rootPath + "/lib/ext/Test.jar");
  copyStreamToFileSystem(fs, "/TestCombinerX.jar", jarPath);
  sleepUninterruptibly(1, TimeUnit.SECONDS);
  IteratorSetting is = new IteratorSetting(10, "TestCombiner",
      "org.apache.accumulo.test.functional.TestCombiner");
  Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf")));
  c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.scan));
  sleepUninterruptibly(ZOOKEEPER_PROPAGATION_TIME, TimeUnit.MILLISECONDS);
  scanCheck(c, tableName, "TestX");
  fs.delete(jarPath, true);
  copyStreamToFileSystem(fs, "/TestCombinerY.jar", jarPath);
  sleepUninterruptibly(5, TimeUnit.SECONDS);
  scanCheck(c, tableName, "TestY");
  fs.delete(jarPath, true);
}
@Override
public void map(LongWritable row, NullWritable ignored, Context context)
    throws IOException, InterruptedException {
  context.setStatus("Entering");
  long rowId = row.get();
  if (rand == null) {
    // we use 3 random numbers per row
    rand = new RandomGenerator(rowId * 3);
  }
  addKey();
  value.clear();
  // addRowId(rowId);
  addFiller(rowId);
  // New
  Mutation m = new Mutation(key);
  m.put(new Text("c"), // column family
      getRowIdString(rowId), // column qualifier
      new Value(value.toString().getBytes())); // data
  context.setStatus("About to add to accumulo");
  context.write(tableName, m);
  context.setStatus("Added to accumulo " + key.toString());
}
public static void removeUnusedWALEntries(KeyExtent extent, List<LogEntry> logEntries,
    ZooLock zooLock) {
  if (extent.isRootTablet()) {
    for (LogEntry entry : logEntries) {
      String root = getZookeeperLogLocation();
      while (true) {
        try {
          IZooReaderWriter zoo = ZooReaderWriter.getInstance();
          if (zoo.isLockHeld(zooLock.getLockID()))
            zoo.recursiveDelete(root + "/" + entry.filename, NodeMissingPolicy.SKIP);
          break;
        } catch (Exception e) {
          log.error(e, e);
        }
        UtilWaitThread.sleep(1000);
      }
    }
  } else {
    Mutation m = new Mutation(extent.getMetadataEntry());
    for (LogEntry entry : logEntries) {
      m.putDelete(LogColumnFamily.NAME, new Text(entry.toString()));
    }
    update(SystemCredentials.get(), zooLock, m, extent);
  }
}
private static Mutation createCloneMutation(String srcTableId, String tableId,
    Map<Key, Value> tablet) {
  KeyExtent ke = new KeyExtent(tablet.keySet().iterator().next().getRow(), (Text) null);
  Mutation m = new Mutation(KeyExtent.getMetadataEntry(new Text(tableId), ke.getEndRow()));
  for (Entry<Key, Value> entry : tablet.entrySet()) {
    if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
      String cf = entry.getKey().getColumnQualifier().toString();
      if (!cf.startsWith("../") && !cf.contains(":"))
        cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
      m.put(entry.getKey().getColumnFamily(), new Text(cf), entry.getValue());
    } else if (entry.getKey().getColumnFamily()
        .equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
      m.put(TabletsSection.LastLocationColumnFamily.NAME, entry.getKey().getColumnQualifier(),
          entry.getValue());
    } else if (entry.getKey().getColumnFamily()
        .equals(TabletsSection.LastLocationColumnFamily.NAME)) {
      // skip
    } else {
      m.put(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(),
          entry.getValue());
    }
  }
  return m;
}
public static void addLogEntry(Credentials credentials, LogEntry entry, ZooLock zooLock) {
  if (entry.extent.isRootTablet()) {
    String root = getZookeeperLogLocation();
    while (true) {
      try {
        IZooReaderWriter zoo = ZooReaderWriter.getInstance();
        if (zoo.isLockHeld(zooLock.getLockID())) {
          String[] parts = entry.filename.split("/");
          String uniqueId = parts[parts.length - 1];
          zoo.putPersistentData(root + "/" + uniqueId, entry.toBytes(),
              NodeExistsPolicy.OVERWRITE);
        }
        break;
      } catch (KeeperException e) {
        log.error(e, e);
      } catch (InterruptedException e) {
        log.error(e, e);
      } catch (IOException e) {
        log.error(e, e);
      }
      UtilWaitThread.sleep(1000);
    }
  } else {
    Mutation m = new Mutation(entry.getRow());
    m.put(entry.getColumnFamily(), entry.getColumnQualifier(), entry.getValue());
    update(credentials, zooLock, m, entry.extent);
  }
}
public static void run(String instanceName, String zookeepers, AuthenticationToken rootPassword,
    String[] args) throws Exception {
  // edit this method to play with Accumulo
  Instance instance = new ZooKeeperInstance(instanceName, zookeepers);
  Connector conn = instance.getConnector("root", rootPassword);
  conn.tableOperations().create("foo");
  BatchWriterConfig bwConfig = new BatchWriterConfig();
  bwConfig.setMaxLatency(60000L, java.util.concurrent.TimeUnit.MILLISECONDS);
  bwConfig.setMaxWriteThreads(3);
  bwConfig.setMaxMemory(50000000);
  BatchWriter bw = conn.createBatchWriter("foo", bwConfig);
  Mutation m = new Mutation("r1");
  m.put("cf1", "cq1", "v1");
  m.put("cf1", "cq2", "v3");
  bw.addMutation(m);
  bw.close();
  Scanner scanner = conn.createScanner("foo", Constants.NO_AUTHS);
  for (Entry<Key, Value> entry : scanner) {
    System.out.println(entry.getKey() + " " + entry.getValue());
  }
}
public int logManyTablets(Map<CommitSession, List<Mutation>> mutations) throws IOException {
  final Map<CommitSession, List<Mutation>> loggables =
      new HashMap<CommitSession, List<Mutation>>(mutations);
  for (CommitSession t : mutations.keySet()) {
    if (!enabled(t))
      loggables.remove(t);
  }
  if (loggables.size() == 0)
    return -1;
  int seq = write(loggables.keySet(), false, new Writer() {
    @Override
    public LoggerOperation write(DfsLogger logger, int ignored) throws Exception {
      List<TabletMutations> copy = new ArrayList<TabletMutations>(loggables.size());
      for (Entry<CommitSession, List<Mutation>> entry : loggables.entrySet()) {
        CommitSession cs = entry.getKey();
        copy.add(new TabletMutations(cs.getLogId(), cs.getWALogSeq(), entry.getValue()));
      }
      return logger.logManyTablets(copy);
    }
  });
  for (List<Mutation> entry : loggables.values()) {
    if (entry.size() < 1)
      throw new IllegalArgumentException("logManyTablets: logging empty mutation list");
    for (Mutation m : entry) {
      logSizeEstimate.addAndGet(m.numBytes());
    }
  }
  return seq;
}
@Test
public void test() throws Exception {
  Connector c = getConnector();
  // make a table
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // write to it
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.close();
  // create a fake _tmp file in its directory
  String id = c.tableOperations().tableIdMap().get(tableName);
  FileSystem fs = getCluster().getFileSystem();
  Path tmp = new Path("/accumulo/tables/" + id + "/default_tablet/junk.rf_tmp");
  fs.create(tmp).close();
  for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
    getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
  }
  getCluster().start();
  Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
  FunctionalTestUtils.count(scanner);
  assertFalse(fs.exists(tmp));
}
private StreamingPropertyValueRef saveStreamingPropertyValueSmall(String rowKey,
    Property property, byte[] data, StreamingPropertyValue propertyValue) {
  String dataTableRowKey = new DataTableRowKey(rowKey, property).getRowKey();
  Mutation dataMutation = new Mutation(dataTableRowKey);
  dataMutation.put(EMPTY_TEXT, EMPTY_TEXT, new Value(data));
  saveDataMutation(dataMutation);
  return new StreamingPropertyValueTableRef(dataTableRowKey, propertyValue, data);
}
private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits,
    String[] inserts, String start, String end) throws Exception {
  System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start
      + " " + end);
  conn.tableOperations().create(table,
      new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
  TreeSet<Text> splitSet = new TreeSet<Text>();
  for (String split : splits) {
    splitSet.add(new Text(split));
  }
  conn.tableOperations().addSplits(table, splitSet);
  BatchWriter bw = conn.createBatchWriter(table, null);
  HashSet<String> expected = new HashSet<String>();
  for (String row : inserts) {
    Mutation m = new Mutation(row);
    m.put("cf", "cq", row);
    bw.addMutation(m);
    expected.add(row);
  }
  bw.close();
  conn.tableOperations().merge(table, start == null ? null : new Text(start),
      end == null ? null : new Text(end));
  Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
  HashSet<String> observed = new HashSet<String>();
  for (Entry<Key, Value> entry : scanner) {
    String row = entry.getKey().getRowData().toString();
    if (!observed.add(row)) {
      throw new Exception("Saw data twice " + table + " " + row);
    }
  }
  if (!observed.equals(expected)) {
    throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
  }
  HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
  HashSet<Text> ess = new HashSet<Text>();
  for (String es : expectedSplits) {
    ess.add(new Text(es));
  }
  if (!currentSplits.equals(ess)) {
    throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
  }
}
public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles,
    Credentials credentials, ZooLock zooLock) {
  Mutation m = new Mutation(extent.getMetadataEntry());
  for (FileRef pathToRemove : scanFiles)
    m.putDelete(ScanFileColumnFamily.NAME, pathToRemove.meta());
  update(credentials, zooLock, m, extent);
}
private static AccumuloBackedGraph setupGraph(Instance instance, Connector conn,
    String tableName, int numEntries) {
  long ageOffTimeInMilliseconds = 30 * 24 * 60 * 60 * 1000L; // 30 days in milliseconds
  try {
    // Create table
    // (this method creates the table, removes the versioning iterator, adds the
    // SetOfStatisticsCombiner iterator, and sets the age-off iterator to age data off once it
    // is more than ageOffTimeInMilliseconds milliseconds old).
    TableUtils.createTable(conn, tableName, ageOffTimeInMilliseconds);
    // Create numEntries edges and add to Accumulo
    BatchWriter writer = conn.createBatchWriter(tableName, 1000000L, 1000L, 1);
    for (int i = 0; i < numEntries; i++) {
      Edge edge = new Edge("customer", "" + i, "product", "B", "purchase", "instore", true,
          visibilityString, sevenDaysBefore, sixDaysBefore);
      SetOfStatistics statistics = new SetOfStatistics();
      statistics.addStatistic("count", new Count(i));
      Key key = ConversionUtils.getKeysFromEdge(edge).getFirst();
      Value value = ConversionUtils.getValueFromSetOfStatistics(statistics);
      Mutation m = new Mutation(key.getRow());
      m.put(key.getColumnFamily(), key.getColumnQualifier(),
          new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), value);
      writer.addMutation(m);
    }
    writer.close();
    // Create Accumulo backed graph
    AccumuloBackedGraph graph = new AccumuloBackedGraph(conn, tableName);
    return graph;
  } catch (AccumuloException | AccumuloSecurityException | TableExistsException
      | TableNotFoundException e) {
    fail("Failed to set up graph in Accumulo with exception: " + e);
  }
  return null;
}
public static void removeBulkLoadInProgressFlag(String path) {
  Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
  m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
  // the new KeyExtent is only added to force the update to go to the metadata table, not the
  // root table, because bulk loads aren't supported on the metadata table
  update(SystemCredentials.get(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
}
@Override
public void visit(State state, Properties props) throws Exception {
  Connector conn = state.getConnector();
  Random rand = (Random) state.get("rand");
  @SuppressWarnings("unchecked")
  List<String> tableNames = (List<String>) state.get("tables");
  String tableName = tableNames.get(rand.nextInt(tableNames.size()));
  Configuration conf = CachedConfiguration.getInstance();
  FileSystem fs = FileSystem.get(conf);
  String bulkDir = "/tmp/concurrent_bulk/b_" + String.format("%016x", Math.abs(rand.nextLong()));
  fs.mkdirs(new Path(bulkDir));
  fs.mkdirs(new Path(bulkDir + "_f"));
  try {
    BatchWriter bw = new RFileBatchWriter(conf, fs, bulkDir + "/file01.rf");
    try {
      TreeSet<Long> rows = new TreeSet<Long>();
      int numRows = rand.nextInt(100000);
      for (int i = 0; i < numRows; i++) {
        rows.add(Math.abs(rand.nextLong()));
      }
      for (Long row : rows) {
        Mutation m = new Mutation(String.format("%016x", row));
        long val = Math.abs(rand.nextLong());
        for (int j = 0; j < 10; j++) {
          m.put("cf", "cq" + j, new Value(String.format("%016x", val).getBytes()));
        }
        bw.addMutation(m);
      }
    } finally {
      bw.close();
    }
    conn.tableOperations().importDirectory(tableName, bulkDir, bulkDir + "_f",
        rand.nextBoolean());
    log.debug("BulkImported to " + tableName);
  } catch (TableNotFoundException e) {
    log.debug("BulkImport " + tableName + " failed, doesn't exist");
  } catch (TableOfflineException toe) {
    log.debug("BulkImport " + tableName + " failed, offline");
  } finally {
    fs.delete(new Path(bulkDir), true);
    fs.delete(new Path(bulkDir + "_f"), true);
  }
}
@Test
public void testGetProtectedField() throws Exception {
  FileInputFormat.addInputPath(conf, new Path("unused"));
  BatchWriterConfig writerConf = new BatchWriterConfig();
  BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf);
  Authorizations origAuths = con.securityOperations().getUserAuthorizations(USER);
  con.securityOperations().changeUserAuthorizations(USER,
      new Authorizations(origAuths.toString() + ",foo"));
  Mutation m = new Mutation("r4");
  m.put(COLUMN_FAMILY, NAME, new ColumnVisibility("foo"), new Value("frank".getBytes()));
  m.put(COLUMN_FAMILY, SID, new ColumnVisibility("foo"), new Value(parseIntBytes("4")));
  m.put(COLUMN_FAMILY, DEGREES, new ColumnVisibility("foo"),
      new Value(parseDoubleBytes("60.6")));
  m.put(COLUMN_FAMILY, MILLIS, new ColumnVisibility("foo"), new Value(parseLongBytes("777")));
  writer.addMutation(m);
  writer.close();
  conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo");
  InputSplit[] splits = inputformat.getSplits(conf, 0);
  assertEquals(splits.length, 1);
  RecordReader<Text, AccumuloHiveRow> reader =
      inputformat.getRecordReader(splits[0], conf, null);
  Text rowId = new Text("r1");
  AccumuloHiveRow row = new AccumuloHiveRow();
  assertTrue(reader.next(rowId, row));
  assertEquals(row.getRowId(), rowId.toString());
  assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
  assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "brian".getBytes());
  rowId = new Text("r2");
  assertTrue(reader.next(rowId, row));
  assertEquals(row.getRowId(), rowId.toString());
  assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
  assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "mark".getBytes());
  rowId = new Text("r3");
  assertTrue(reader.next(rowId, row));
  assertEquals(row.getRowId(), rowId.toString());
  assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
  assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "dennis".getBytes());
  rowId = new Text("r4");
  assertTrue(reader.next(rowId, row));
  assertEquals(row.getRowId(), rowId.toString());
  assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
  assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "frank".getBytes());
  assertFalse(reader.next(rowId, row));
}
public static Mutation createDeleteMutation(String tableId, String pathToRemove)
    throws IOException {
  if (!pathToRemove.contains(":")) {
    if (pathToRemove.startsWith("../"))
      pathToRemove = pathToRemove.substring(2);
    else
      pathToRemove = "/" + tableId + pathToRemove;
  }
  Path path = VolumeManagerImpl.get().getFullPath(FileType.TABLE, pathToRemove);
  Mutation delFlag =
      new Mutation(new Text(MetadataSchema.DeletesSection.getRowPrefix() + path.toString()));
  delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
  return delFlag;
}
@Override
protected void map(LongWritable location, Text value, Context context)
    throws IOException, InterruptedException {
  String[] parts = value.toString().split("\\t");
  if (parts.length >= 4) {
    Mutation m = new Mutation(parts[0]);
    m.put(parts[1], String.format("%010d", Long.parseLong(parts[2])),
        new Value(parts[3].trim().getBytes()));
    context.write(null, m);
  }
}
/**
 * Creates a mutation on a specified row with column family "foo", column qualifier "1",
 * specified visibility, and a random value of specified size.
 *
 * @param rowid the row of the mutation
 * @param dataSize the size of the random value
 * @param visibility the visibility of the entry to insert
 * @return a mutation
 */
public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
  Text row = new Text(String.format("row_%010d", rowid));
  Mutation m = new Mutation(row);
  // create a random value that is a function of the
  // row id for verification purposes
  byte[] value = createValue(rowid, dataSize);
  m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
  return m;
}
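// A minimal usage sketch for createMutation above, not part of the original source: it assumes
// an existing Connector `conn` and an already-created table named "test_ingest" (both
// hypothetical), and is called from a method that propagates the checked Accumulo exceptions.
// It writes one generated entry with an empty visibility through the standard BatchWriter API.
BatchWriter bw = conn.createBatchWriter("test_ingest", new BatchWriterConfig());
bw.addMutation(createMutation(42L, 50, new ColumnVisibility()));
bw.close();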
public boolean alterElementVisibility(Mutation m, AccumuloElement element,
    Visibility newVisibility) {
  ColumnVisibility currentColumnVisibility =
      visibilityToAccumuloVisibility(element.getVisibility());
  ColumnVisibility newColumnVisibility = visibilityToAccumuloVisibility(newVisibility);
  if (currentColumnVisibility.equals(newColumnVisibility)) {
    return false;
  }
  if (element instanceof AccumuloEdge) {
    AccumuloEdge edge = (AccumuloEdge) element;
    m.putDelete(AccumuloEdge.CF_SIGNAL, new Text(edge.getLabel()), currentColumnVisibility,
        currentTimeMillis());
    m.put(AccumuloEdge.CF_SIGNAL, new Text(edge.getLabel()), newColumnVisibility,
        currentTimeMillis(), ElementMutationBuilder.EMPTY_VALUE);
    m.putDelete(AccumuloEdge.CF_OUT_VERTEX, new Text(edge.getVertexId(Direction.OUT)),
        currentColumnVisibility, currentTimeMillis());
    m.put(AccumuloEdge.CF_OUT_VERTEX, new Text(edge.getVertexId(Direction.OUT)),
        newColumnVisibility, currentTimeMillis(), ElementMutationBuilder.EMPTY_VALUE);
    m.putDelete(AccumuloEdge.CF_IN_VERTEX, new Text(edge.getVertexId(Direction.IN)),
        currentColumnVisibility, currentTimeMillis());
    m.put(AccumuloEdge.CF_IN_VERTEX, new Text(edge.getVertexId(Direction.IN)),
        newColumnVisibility, currentTimeMillis(), ElementMutationBuilder.EMPTY_VALUE);
  } else if (element instanceof AccumuloVertex) {
    m.putDelete(AccumuloVertex.CF_SIGNAL, EMPTY_TEXT, currentColumnVisibility,
        currentTimeMillis());
    m.put(AccumuloVertex.CF_SIGNAL, EMPTY_TEXT, newColumnVisibility, currentTimeMillis(),
        ElementMutationBuilder.EMPTY_VALUE);
  } else {
    throw new IllegalArgumentException("Invalid element type: " + element);
  }
  return true;
}
/** Write entries to a table. */
public static void writeEntries(Connector connector, Map<Key, Value> map, String table,
    boolean createIfNotExist) {
  if (createIfNotExist && !connector.tableOperations().exists(table)) {
    try {
      connector.tableOperations().create(table);
    } catch (AccumuloException | AccumuloSecurityException e) {
      log.error("trouble creating " + table, e);
      throw new RuntimeException(e);
    } catch (TableExistsException e) {
      log.error("crazy", e);
      throw new RuntimeException(e);
    }
  }
  BatchWriterConfig bwc = new BatchWriterConfig();
  BatchWriter bw;
  try {
    bw = connector.createBatchWriter(table, bwc);
  } catch (TableNotFoundException e) {
    log.error("tried to write to a non-existent table " + table, e);
    throw new RuntimeException(e);
  }
  try {
    for (Map.Entry<Key, Value> entry : map.entrySet()) {
      Key k = entry.getKey();
      ByteSequence rowData = k.getRowData(), cfData = k.getColumnFamilyData(),
          cqData = k.getColumnQualifierData();
      Mutation m = new Mutation(rowData.getBackingArray(), rowData.offset(), rowData.length());
      m.put(cfData.getBackingArray(), cqData.getBackingArray(), k.getColumnVisibilityParsed(),
          entry.getValue().get());
      bw.addMutation(m);
    }
  } catch (MutationsRejectedException e) {
    log.error("mutations rejected", e);
    throw new RuntimeException(e);
  } finally {
    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      log.error("mutations rejected while trying to close BatchWriter", e);
    }
  }
}
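// A hedged usage sketch for writeEntries above, not part of the original source: it builds a
// one-entry Key/Value map and writes it, creating the table if missing. The Connector `conn`
// and the table name "scratch" are assumptions made for illustration.
Map<Key, Value> entries = new TreeMap<Key, Value>();
entries.put(new Key(new Text("row1"), new Text("cf"), new Text("cq")),
    new Value("v1".getBytes(StandardCharsets.UTF_8)));
writeEntries(conn, entries, "scratch", true);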
public static void printLogEvent(LogFileKey key, LogFileValue value, Text row,
    Matcher rowMatcher, KeyExtent ke, Set<Integer> tabletIds, int maxMutations) {
  if (ke != null) {
    if (key.event == LogEvents.DEFINE_TABLET) {
      if (key.tablet.equals(ke)) {
        tabletIds.add(key.tid);
      } else {
        return;
      }
    } else if (!tabletIds.contains(key.tid)) {
      return;
    }
  }
  if (row != null || rowMatcher != null) {
    if (key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) {
      boolean found = false;
      for (Mutation m : value.mutations) {
        if (row != null && new Text(m.getRow()).equals(row)) {
          found = true;
          break;
        }
        if (rowMatcher != null) {
          rowMatcher.reset(new String(m.getRow(), StandardCharsets.UTF_8));
          if (rowMatcher.matches()) {
            found = true;
            break;
          }
        }
      }
      if (!found)
        return;
    } else {
      return;
    }
  }
  System.out.println(key);
  System.out.println(LogFileValue.format(value, maxMutations));
}
@Test
public void test() {
  Mutation goodMutation = new Mutation(new Text("Row1"));
  goodMutation.put(new Text("Colf2"), new Text("ColQ3"), new Value("value".getBytes()));
  assertNull(ankc.check(null, goodMutation));
  // Check that violations are in row, cf, cq order
  Mutation badMutation = new Mutation(new Text("Row#1"));
  badMutation.put(new Text("Colf$2"), new Text("Colq%3"), new Value("value".getBytes()));
  assertEquals(
      ImmutableList.of(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW,
          AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ),
      ankc.check(null, badMutation));
}
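// For context, a hedged sketch of how a constraint such as AlphaNumKeyConstraint is typically
// enabled on a live table (not shown in the test above). It assumes an existing Connector
// `conn`, a hypothetical table name "mytable", and a caller that propagates the checked
// Accumulo exceptions; once added, offending mutations are rejected server side with the
// violation codes checked above.
conn.tableOperations().addConstraint("mytable", AlphaNumKeyConstraint.class.getName());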
@Override
public void map(LongWritable key, Text value, Context output) throws IOException {
  String[] words = value.toString().split("\\s+");
  for (String word : words) {
    Mutation mutation = new Mutation(new Text(word));
    mutation.put(new Text("count"), new Text("20080906"), new Value("1".getBytes()));
    try {
      output.write(null, mutation);
    } catch (InterruptedException e) {
      log.error("Could not write to Context.", e);
    }
  }
}
/**
 * Push a mutation into a table. If table is null, the defaultTable will be used. If {@link
 * AccumuloOutputFormat#canCreateTables(JobContext)} is set, the table will be created if it
 * does not exist. The table name must only contain alphanumerics and underscore.
 */
@Override
public void write(Text table, Mutation mutation) throws IOException {
  if (table == null || table.toString().isEmpty())
    table = this.defaultTableName;
  if (!simulate && table == null)
    throw new IOException("No table or default table specified. Try simulation mode next time");
  ++mutCount;
  valCount += mutation.size();
  printMutation(table, mutation);
  if (simulate)
    return;
  if (!bws.containsKey(table)) {
    try {
      addTable(table);
    } catch (Exception e) {
      log.error("Could not add table '" + table + "'", e);
      throw new IOException(e);
    }
  }
  try {
    bws.get(table).addMutation(mutation);
  } catch (MutationsRejectedException e) {
    throw new IOException(e);
  }
}
public void addPropertyToMutation(AccumuloGraph graph, Mutation m, String rowKey,
    Property property) {
  Text columnQualifier = KeyHelper.getColumnQualifierFromPropertyColumnQualifier(property,
      getNameSubstitutionStrategy());
  ColumnVisibility columnVisibility = visibilityToAccumuloVisibility(property.getVisibility());
  Object propertyValue = property.getValue();
  if (propertyValue instanceof StreamingPropertyValue) {
    propertyValue =
        saveStreamingPropertyValue(rowKey, property, (StreamingPropertyValue) propertyValue);
  }
  if (propertyValue instanceof DateOnly) {
    propertyValue = ((DateOnly) propertyValue).getDate();
  }
  // graph can be null if this is running in Map Reduce. We can just assume the property is
  // already defined.
  if (graph != null) {
    graph.ensurePropertyDefined(property.getName(), propertyValue);
  }
  Value value = new Value(vertexiumSerializer.objectToBytes(propertyValue));
  m.put(AccumuloElement.CF_PROPERTY, columnQualifier, columnVisibility,
      property.getTimestamp(), value);
  addPropertyMetadataToMutation(m, property);
}
@Override
public void readFields(DataInput in) throws IOException {
  super.readFields(in);
  // new format writes system time with the mutation
  if (getSerializedFormat() == SERIALIZED_FORMAT.VERSION2)
    systemTime = WritableUtils.readVLong(in);
}
@Override
public List<Short> check(Environment env, Mutation mutation) {
  List<ColumnUpdate> updates = mutation.getUpdates();
  HashSet<String> ok = null;
  if (updates.size() > 1)
    ok = new HashSet<String>();
  VisibilityEvaluator ve = null;
  for (ColumnUpdate update : updates) {
    byte[] cv = update.getColumnVisibility();
    if (cv.length > 0) {
      String key = null;
      if (ok != null && ok.contains(key = new String(cv, UTF_8)))
        continue;
      try {
        if (ve == null)
          ve = new VisibilityEvaluator(env);
        if (!ve.evaluate(new ColumnVisibility(cv)))
          return Collections.singletonList(Short.valueOf((short) 2));
      } catch (BadArgumentException bae) {
        return Collections.singletonList(Short.valueOf((short) 1));
      } catch (VisibilityParseException e) {
        return Collections.singletonList(Short.valueOf((short) 1));
      }
      if (ok != null)
        ok.add(key);
    }
  }
  return null;
}
public synchronized void addMutation(String table, Mutation m) throws MutationsRejectedException {
  if (closed)
    throw new IllegalStateException("Closed");
  if (m.size() == 0)
    throw new IllegalArgumentException("Can not add empty mutations");
  checkForFailures();
  while ((totalMemUsed >= maxMem || flushing) && !somethingFailed) {
    waitRTE();
  }
  // do checks again since things could have changed while waiting and not holding lock
  if (closed)
    throw new IllegalStateException("Closed");
  checkForFailures();
  if (startTime == 0) {
    startTime = System.currentTimeMillis();
    List<GarbageCollectorMXBean> gcmBeans = ManagementFactory.getGarbageCollectorMXBeans();
    for (GarbageCollectorMXBean garbageCollectorMXBean : gcmBeans) {
      initialGCTimes += garbageCollectorMXBean.getCollectionTime();
    }
    CompilationMXBean compMxBean = ManagementFactory.getCompilationMXBean();
    if (compMxBean.isCompilationTimeMonitoringSupported()) {
      initialCompileTimes = compMxBean.getTotalCompilationTime();
    }
    initialSystemLoad = ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage();
  }
  // create a copy of mutation so that after this method returns the user
  // is free to reuse the mutation object, like calling readFields... this
  // is important for the case where a mutation is passed from map to reduce
  // to batch writer... the map reduce code will keep passing the same mutation
  // object into the reduce method
  m = new Mutation(m);
  totalMemUsed += m.estimatedMemoryUsed();
  mutations.addMutation(table, m);
  totalAdded++;
  if (mutations.getMemoryUsed() >= maxMem / 2) {
    startProcessing();
    checkForFailures();
  }
}