/**
 * Decodes a whole-row-encoded metadata entry into a {@link TabletLocationState}.
 *
 * @param k whole-row key produced by {@link WholeRowIterator}
 * @param v whole-row value produced by {@link WholeRowIterator}
 * @return the decoded tablet state, or {@code null} when the row has no prev-row column
 *     (an incomplete tablet entry); the caller is expected to handle null
 * @throws BadLocationStateException if the row contains two future assignments or two
 *     current locations for the same extent
 */
public static TabletLocationState createTabletLocationState(Key k, Value v)
    throws IOException, BadLocationStateException {
  final SortedMap<Key, Value> decodedRow = WholeRowIterator.decodeRow(k, v);
  KeyExtent extent = null;
  TServerInstance future = null;
  TServerInstance current = null;
  TServerInstance last = null;
  long lastTimestamp = 0;
  List<Collection<String>> walogs = new ArrayList<Collection<String>>();
  boolean chopped = false;
  for (Entry<Key, Value> entry : decodedRow.entrySet()) {
    Key key = entry.getKey();
    Text row = key.getRow();
    Text cf = key.getColumnFamily();
    Text cq = key.getColumnQualifier();
    if (cf.compareTo(TabletsSection.FutureLocationColumnFamily.NAME) == 0) {
      // A tablet may have at most one pending (future) assignment.
      TServerInstance location = new TServerInstance(entry.getValue(), cq);
      if (future != null) {
        throw new BadLocationStateException(
            "found two assignments for the same extent " + key.getRow() + ": " + future
                + " and " + location);
      }
      future = location;
    } else if (cf.compareTo(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
      // A tablet may be hosted by at most one tablet server at a time.
      TServerInstance location = new TServerInstance(entry.getValue(), cq);
      if (current != null) {
        throw new BadLocationStateException(
            "found two locations for the same extent " + key.getRow() + ": " + current
                + " and " + location);
      }
      current = location;
    } else if (cf.compareTo(LogColumnFamily.NAME) == 0) {
      // Log entry value: filenames separated by ';', with '|'-delimited trailing metadata.
      String[] split = entry.getValue().toString().split("\\|")[0].split(";");
      walogs.add(Arrays.asList(split));
    } else if (cf.compareTo(TabletsSection.LastLocationColumnFamily.NAME) == 0) {
      // BUG FIX: lastTimestamp was never updated, so "last" ended up being whichever
      // qualifying entry happened to iterate last rather than the most recent one.
      // Track the max timestamp so "last" is truly the newest last-location entry.
      if (lastTimestamp < entry.getKey().getTimestamp()) {
        last = new TServerInstance(entry.getValue(), cq);
        lastTimestamp = entry.getKey().getTimestamp();
      }
    } else if (cf.compareTo(ChoppedColumnFamily.NAME) == 0) {
      chopped = true;
    } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(cf, cq)) {
      extent = new KeyExtent(row, entry.getValue());
    }
  }
  if (extent == null) {
    // Without a prev-row column we cannot construct the extent; skip this row.
    log.warn("No prev-row for key extent: " + decodedRow);
    return null;
  }
  return new TabletLocationState(extent, future, current, last, walogs, chopped);
}
private static AccumuloBackedGraph setupGraph( Instance instance, Connector conn, String tableName, int numEntries) { long ageOffTimeInMilliseconds = (30 * 24 * 60 * 60 * 1000L); // 30 days in milliseconds try { // Create table // (this method creates the table, removes the versioning iterator, and adds the // SetOfStatisticsCombiner iterator, // and sets the age off iterator to age data off after it is more than // ageOffTimeInMilliseconds milliseconds old). TableUtils.createTable(conn, tableName, ageOffTimeInMilliseconds); // Create numEntries edges and add to Accumulo BatchWriter writer = conn.createBatchWriter(tableName, 1000000L, 1000L, 1); for (int i = 0; i < numEntries; i++) { Edge edge = new Edge( "customer", "" + i, "product", "B", "purchase", "instore", true, visibilityString, sevenDaysBefore, sixDaysBefore); SetOfStatistics statistics = new SetOfStatistics(); statistics.addStatistic("count", new Count(i)); Key key = ConversionUtils.getKeysFromEdge(edge).getFirst(); Value value = ConversionUtils.getValueFromSetOfStatistics(statistics); Mutation m = new Mutation(key.getRow()); m.put( key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), value); writer.addMutation(m); } writer.close(); // Create Accumulo backed graph AccumuloBackedGraph graph = new AccumuloBackedGraph(conn, tableName); return graph; } catch (AccumuloException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } catch (AccumuloSecurityException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } catch (TableExistsException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } catch (TableNotFoundException e) { fail("Failed to set up graph in Accumulo with exception: " + e); } return null; }
/**
 * Copies every entry of {@code orig} into {@code neww}, rewriting each key so that its row and
 * column qualifier swap places (the column family stays in the middle position); visibility and
 * timestamp are carried over unchanged.
 *
 * @param orig source map; not modified
 * @param neww destination map, returned for chaining
 * @return {@code neww} with the transposed entries added
 */
private static <M extends Map<Key, V>, V> M transposeMapHelp(Map<Key, V> orig, M neww) {
  for (Map.Entry<Key, V> sourceEntry : orig.entrySet()) {
    final Key sourceKey = sourceEntry.getKey();
    // Swap row <-> column qualifier; keep family, visibility, and timestamp.
    final Key transposedKey =
        new Key(
            sourceKey.getColumnQualifier(),
            sourceKey.getColumnFamily(),
            sourceKey.getRow(),
            sourceKey.getColumnVisibilityParsed(),
            sourceKey.getTimestamp());
    neww.put(transposedKey, sourceEntry.getValue());
  }
  return neww;
}
/**
 * Scans an entire table and collects every cell as a {@link RowColumn} (row, column, timestamp),
 * sorted by the natural ordering of {@code RowColumn}.
 *
 * @param opts client options supplying the connector
 * @param scanOpts scanner options supplying the batch size
 * @param tableName table to scan
 * @return sorted set of all (row, column, timestamp) triples seen by the scan
 */
private static TreeSet<RowColumn> scanAll(
    ClientOnDefaultTable opts, ScannerOpts scanOpts, String tableName) throws Exception {
  TreeSet<RowColumn> rowColumns = new TreeSet<RowColumn>();
  Connector connector = opts.getConnector();
  Scanner scanner = connector.createScanner(tableName, auths);
  scanner.setBatchSize(scanOpts.scanBatchSize);
  for (Entry<Key, Value> cell : scanner) {
    Key cellKey = cell.getKey();
    byte[] family = TextUtil.getBytes(cellKey.getColumnFamily());
    byte[] qualifier = TextUtil.getBytes(cellKey.getColumnQualifier());
    byte[] visibility = TextUtil.getBytes(cellKey.getColumnVisibility());
    Column column = new Column(family, qualifier, visibility);
    rowColumns.add(new RowColumn(cellKey.getRow(), column, cellKey.getTimestamp()));
  }
  return rowColumns;
}
/**
 * Runs the map/reduce verify tool against the sequential table, then sanity-checks the MR output
 * table for gaps and drops it.
 *
 * <p>The output table name is the sequential table name with an "_MR" suffix.
 */
@Override
public void visit(State state, Environment env, Properties props) throws Exception {
  final String seqTableName = state.getString("seqTableName");
  final String outputTableName = seqTableName + "_MR";
  final String[] args = {
    "-libjars",
    getMapReduceJars(),
    env.getUserName(),
    env.getPassword(),
    seqTableName,
    env.getInstance().getInstanceName(),
    env.getConfigProperty("ZOOKEEPERS"),
    outputTableName
  };
  if (ToolRunner.run(CachedConfiguration.getInstance(), new MapRedVerifyTool(), args) != 0) {
    log.error("Failed to run map/red verify");
    return;
  }

  // Walk the MR output; count entries whose predecessor's column family chains to their row.
  Scanner outputScanner = env.getConnector().createScanner(outputTableName, Authorizations.EMPTY);
  outputScanner.setRange(new Range());
  int count = 0;
  Key previousKey = null;
  for (Entry<Key, Value> outputEntry : outputScanner) {
    Key currentKey = outputEntry.getKey();
    if (previousKey != null && previousKey.getColumnFamily().equals(currentKey.getRow())) {
      log.info(currentKey);
      count++;
    }
    previousKey = currentKey;
  }
  if (count > 1) {
    log.error("Gaps in output");
  }

  // Clean up the MR output table.
  log.debug("Dropping table: " + outputTableName);
  env.getConnector().tableOperations().delete(outputTableName);
}
public void map(Key keyIn, Value valueIn, Context context) throws IOException, InterruptedException { // Only process records containing tweet text if (keyIn.getColumnFamily().equals(new Text("text"))) { int docPartition = r.nextInt(MAX_PARTITION); // Split the text into tokens String[] tokens = valueIn.toString().split(splitRegex); // Process each word and add it as a key with its tweet ID as a value for (String token : tokens) { // Omit zero length tokens and tokens only containing special characters if (token.length() != 0 && !token.matches("[\\W]*")) { // Filter some of the garbage if (!token.matches(urlRegex)) { if (token.matches(tweetRegex)) { token = token.replaceAll(tweetRegex, "$1"); } else { token = token.replaceAll(uglyTextRegex, "$1"); } } // Set the outgoing key and value keyOut.set(Integer.toString(docPartition)); String colFam = token.toLowerCase(); String colQual = keyIn.getRow().toString(); valueOut.set(colFam + ":" + colQual); // Create an output Key Value pair context.write(keyOut, valueOut); } } } }
/**
 * Returns the persistence type name encoded in the key's column family, or {@code null} when the
 * key has no column family.
 *
 * <p>(Method name keeps the historical "Peristence" spelling because callers depend on it.)
 *
 * @param key the Accumulo key whose column family holds the type name
 * @return the column family bytes wrapped in a {@link ByteArrayId}, or {@code null}
 */
protected ByteArrayId getPeristenceTypeName(final Key key) {
  final Text columnFamily = key.getColumnFamily();
  if (columnFamily != null) {
    // BUG FIX: Text.getBytes() exposes the internal backing array, which may be longer
    // than getLength() and contain stale bytes from earlier, larger contents. Copy only
    // the valid prefix so the ByteArrayId never carries trailing garbage.
    final byte[] bytes = new byte[columnFamily.getLength()];
    System.arraycopy(columnFamily.getBytes(), 0, bytes, 0, columnFamily.getLength());
    return new ByteArrayId(bytes);
  }
  return null;
}
/**
 * Identity-style mapper: re-emits each scanned cell as a mutation on the same row, preserving
 * the column family, column qualifier, and value.
 */
@Override
public void map(Key key, Value val, Context output) throws IOException, InterruptedException {
  final Mutation mutation = new Mutation(key.getRow());
  final Text family = key.getColumnFamily();
  final Text qualifier = key.getColumnQualifier();
  mutation.put(family, qualifier, val);
  output.write(null, mutation);
}
/**
 * Exercises the row-ID-range aggregating iterator: writes several edges (two groups, some
 * sharing keys) and verifies that scanning two ranges with the RowIDAggregator iterator for
 * group "BasicEdge2" returns exactly one summarised element per range with the expected
 * aggregated COLUMN_QUALIFIER and COUNT properties.
 *
 * @param store the store under test (supplies connection, table name, key package)
 * @param elementConverter converts edges to/from Accumulo keys and values
 */
private void testAggregatingMultiplePropertySetsAcrossRowIDRange(
    final AccumuloStore store, final AccumuloElementConverter elementConverter)
    throws StoreException, AccumuloElementConversionException, RangeFactoryException {
  String visibilityString = "public";
  try {
    // Create table
    // (this method creates the table, removes the versioning iterator, and adds the
    // SetOfStatisticsCombiner iterator).
    TableUtils.createTable(store);

    // Three property payloads: COUNT=1, COUNT=1, COUNT=2.
    final Properties properties1 = new Properties();
    properties1.put(AccumuloPropertyNames.COUNT, 1);
    final Properties properties2 = new Properties();
    properties2.put(AccumuloPropertyNames.COUNT, 1);
    final Properties properties3 = new Properties();
    properties3.put(AccumuloPropertyNames.COUNT, 2);

    // Create edge — edges edge/edge2/edge3 belong to the default EDGE group (not aggregated
    // by the "BasicEdge2" iterator below); edge6..edge9 belong to "BasicEdge2".
    final Edge edge = new Edge(TestGroups.EDGE);
    edge.setSource("2");
    edge.setDestination("1");
    edge.setDirected(true);
    edge.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 1);
    edge.putProperty(AccumuloPropertyNames.PROP_1, 0);
    edge.putProperty(AccumuloPropertyNames.PROP_2, 0);
    edge.putProperty(AccumuloPropertyNames.PROP_3, 0);
    edge.putProperty(AccumuloPropertyNames.PROP_4, 0);

    final Edge edge2 = new Edge(TestGroups.EDGE);
    edge2.setSource("B");
    edge2.setDestination("Z");
    edge2.setDirected(true);
    edge2.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 1);
    edge2.putProperty(AccumuloPropertyNames.PROP_1, 1);
    edge2.putProperty(AccumuloPropertyNames.PROP_2, 1);
    edge2.putProperty(AccumuloPropertyNames.PROP_3, 1);
    edge2.putProperty(AccumuloPropertyNames.PROP_4, 1);

    final Edge edge3 = new Edge(TestGroups.EDGE);
    edge3.setSource("3");
    edge3.setDestination("8");
    edge3.setDirected(true);
    edge3.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 1);
    edge3.putProperty(AccumuloPropertyNames.PROP_1, 0);
    edge3.putProperty(AccumuloPropertyNames.PROP_2, 0);
    edge3.putProperty(AccumuloPropertyNames.PROP_3, 0);
    edge3.putProperty(AccumuloPropertyNames.PROP_4, 0);

    final Edge edge6 = new Edge("BasicEdge2");
    edge6.setSource("1");
    edge6.setDestination("5");
    edge6.setDirected(true);
    edge6.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 2);
    edge6.putProperty(AccumuloPropertyNames.PROP_1, 0);
    edge6.putProperty(AccumuloPropertyNames.PROP_2, 0);
    edge6.putProperty(AccumuloPropertyNames.PROP_3, 0);
    edge6.putProperty(AccumuloPropertyNames.PROP_4, 0);

    final Edge edge7 = new Edge("BasicEdge2");
    edge7.setSource("2");
    edge7.setDestination("6");
    edge7.setDirected(true);
    edge7.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 1);
    edge7.putProperty(AccumuloPropertyNames.PROP_1, 0);
    edge7.putProperty(AccumuloPropertyNames.PROP_2, 0);
    edge7.putProperty(AccumuloPropertyNames.PROP_3, 0);
    edge7.putProperty(AccumuloPropertyNames.PROP_4, 0);

    final Edge edge8 = new Edge("BasicEdge2");
    edge8.setSource("4");
    edge8.setDestination("8");
    edge8.setDirected(true);
    edge8.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 2);
    edge8.putProperty(AccumuloPropertyNames.PROP_1, 0);
    edge8.putProperty(AccumuloPropertyNames.PROP_2, 0);
    edge8.putProperty(AccumuloPropertyNames.PROP_3, 0);
    edge8.putProperty(AccumuloPropertyNames.PROP_4, 0);

    final Edge edge9 = new Edge("BasicEdge2");
    edge9.setSource("5");
    edge9.setDestination("9");
    edge9.setDirected(true);
    edge9.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 2);
    edge9.putProperty(AccumuloPropertyNames.PROP_1, 0);
    edge9.putProperty(AccumuloPropertyNames.PROP_2, 0);
    edge9.putProperty(AccumuloPropertyNames.PROP_3, 0);
    edge9.putProperty(AccumuloPropertyNames.PROP_4, 0);

    // Accumulo key
    final Key key = elementConverter.getKeysFromEdge(edge).getFirst();
    final Key key2 = elementConverter.getKeysFromEdge(edge2).getFirst();
    final Key key3 = elementConverter.getKeysFromEdge(edge3).getFirst();
    final Key key4 = elementConverter.getKeysFromEdge(edge6).getFirst();
    final Key key5 = elementConverter.getKeysFromEdge(edge7).getFirst();
    final Key key6 = elementConverter.getKeysFromEdge(edge8).getFirst();
    final Key key7 = elementConverter.getKeysFromEdge(edge9).getFirst();

    // Accumulo values
    final Value value1 = elementConverter.getValueFromProperties(TestGroups.EDGE, properties1);
    final Value value2 = elementConverter.getValueFromProperties(TestGroups.EDGE, properties2);
    final Value value3 = elementConverter.getValueFromProperties(TestGroups.EDGE, properties3);
    final Value value4 = elementConverter.getValueFromProperties(TestGroups.EDGE_2, properties1);
    final Value value5 = elementConverter.getValueFromProperties(TestGroups.EDGE_2, properties2);

    // Create mutation — m1..m3 all target key's cell so their values combine on read.
    final Mutation m1 = new Mutation(key.getRow());
    m1.put(
        key.getColumnFamily(),
        key.getColumnQualifier(),
        new ColumnVisibility(key.getColumnVisibility()),
        key.getTimestamp(),
        value1);
    final Mutation m2 = new Mutation(key.getRow());
    m2.put(
        key.getColumnFamily(),
        key.getColumnQualifier(),
        new ColumnVisibility(key.getColumnVisibility()),
        key.getTimestamp(),
        value2);
    final Mutation m3 = new Mutation(key.getRow());
    m3.put(
        key.getColumnFamily(),
        key.getColumnQualifier(),
        new ColumnVisibility(key.getColumnVisibility()),
        key.getTimestamp(),
        value3);
    final Mutation m4 = new Mutation(key2.getRow());
    m4.put(
        key2.getColumnFamily(),
        key2.getColumnQualifier(),
        new ColumnVisibility(key2.getColumnVisibility()),
        key2.getTimestamp(),
        value1);
    // NOTE(review): m5 is keyed on key.getRow() but writes key3's columns — this looks like
    // it may have been intended to be new Mutation(key3.getRow()); verify against the
    // iterator's expected input before changing.
    final Mutation m5 = new Mutation(key.getRow());
    m5.put(
        key3.getColumnFamily(),
        key3.getColumnQualifier(),
        new ColumnVisibility(key3.getColumnVisibility()),
        key3.getTimestamp(),
        value1);
    final Mutation m6 = new Mutation(key4.getRow());
    m6.put(
        key4.getColumnFamily(),
        key4.getColumnQualifier(),
        new ColumnVisibility(key4.getColumnVisibility()),
        key4.getTimestamp(),
        value4);
    final Mutation m7 = new Mutation(key5.getRow());
    m7.put(
        key5.getColumnFamily(),
        key5.getColumnQualifier(),
        new ColumnVisibility(key5.getColumnVisibility()),
        key5.getTimestamp(),
        value5);
    final Mutation m8 = new Mutation(key6.getRow());
    m8.put(
        key6.getColumnFamily(),
        key6.getColumnQualifier(),
        new ColumnVisibility(key6.getColumnVisibility()),
        key6.getTimestamp(),
        value5);
    final Mutation m9 = new Mutation(key7.getRow());
    m9.put(
        key7.getColumnFamily(),
        key7.getColumnQualifier(),
        new ColumnVisibility(key7.getColumnVisibility()),
        key7.getTimestamp(),
        value5);

    // Write mutation
    final BatchWriterConfig writerConfig = new BatchWriterConfig();
    writerConfig.setMaxMemory(1000000L);
    writerConfig.setMaxLatency(1000L, TimeUnit.MILLISECONDS);
    writerConfig.setMaxWriteThreads(1);
    final BatchWriter writer =
        store.getConnection().createBatchWriter(store.getProperties().getTable(), writerConfig);
    writer.addMutation(m1);
    writer.addMutation(m2);
    writer.addMutation(m3);
    writer.addMutation(m4);
    writer.addMutation(m5);
    writer.addMutation(m6);
    writer.addMutation(m7);
    writer.addMutation(m8);
    writer.addMutation(m9);
    writer.close();

    // Read data back and check we get one merged element
    // NOTE(review): the BatchScanner is never closed; consider try/finally with
    // scanner.close() to release its threads.
    final Authorizations authorizations = new Authorizations(visibilityString);
    final BatchScanner scanner =
        store
            .getConnection()
            .createBatchScanner(store.getProperties().getTable(), authorizations, 1000);
    try {
      scanner.addScanIterator(
          store
              .getKeyPackage()
              .getIteratorFactory()
              .getRowIDAggregatorIteratorSetting(store, "BasicEdge2"));
    } catch (IteratorSettingException e) {
      fail(e.getMessage());
    }
    // Two ranges: seeds "1".."4" (covers edge6/edge7/edge8) and "5".."5" (covers edge9).
    final RangeFactory rangeF = store.getKeyPackage().getRangeFactory();
    final Range r =
        rangeF.getRangeFromPair(
            new Pair<ElementSeed>((new EntitySeed("1")), new EntitySeed("4")),
            new SummariseGroupOverRanges());
    final Range r2 =
        rangeF.getRangeFromPair(
            new Pair<ElementSeed>((new EntitySeed("5")), new EntitySeed("5")),
            new SummariseGroupOverRanges());
    scanner.setRanges(Arrays.asList(r, r2));
    final Iterator<Map.Entry<Key, Value>> it = scanner.iterator();
    Map.Entry<Key, Value> entry = it.next();
    Element readEdge = elementConverter.getFullElement(entry.getKey(), entry.getValue());

    // First range: expect one summarised edge with CQ 2+2+1=5 and COUNT 1+1+1=3.
    Edge expectedEdge = new Edge("BasicEdge2");
    expectedEdge.setSource("4");
    expectedEdge.setDestination("8");
    expectedEdge.setDirected(true);
    expectedEdge.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 5);
    expectedEdge.putProperty(AccumuloPropertyNames.COUNT, 3);
    assertEquals(expectedEdge, readEdge);
    assertEquals(5, readEdge.getProperty(AccumuloPropertyNames.COLUMN_QUALIFIER));
    assertEquals(3, readEdge.getProperty(AccumuloPropertyNames.COUNT));

    // Check we get the Result of the second provided range
    assertTrue(it.hasNext());
    entry = it.next();
    readEdge = elementConverter.getFullElement(entry.getKey(), entry.getValue());
    expectedEdge = new Edge("BasicEdge2");
    expectedEdge.setSource("5");
    expectedEdge.setDestination("9");
    expectedEdge.setDirected(true);
    expectedEdge.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 2);
    expectedEdge.putProperty(AccumuloPropertyNames.COUNT, 1);
    assertEquals(expectedEdge, readEdge);

    // Check no additional rows are found. (For a table of this size we shouldn't see this)
    if (it.hasNext()) {
      fail("Additional row found.");
    }
  } catch (AccumuloException | TableExistsException | TableNotFoundException e) {
    fail(this.getClass().getSimpleName() + " failed with exception: " + e);
  }
}
/**
 * Clones the metadata entries of {@code srcTableId} into {@code tableId}: copies the source
 * tablet entries, re-checks them until the file references are consistent (retrying from
 * scratch if tablets are deleted/merged mid-copy), then removes the clone markers and assigns
 * each cloned tablet a fresh directory entry.
 *
 * @param instance Accumulo instance used to obtain a system connector
 * @param srcTableId id of the table being cloned
 * @param tableId id of the new clone table
 * @param volumeManager used to choose a volume for each new tablet directory
 */
public static void cloneTable(
    Instance instance, String srcTableId, String tableId, VolumeManager volumeManager)
    throws Exception {

  Connector conn =
      instance.getConnector(
          SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
  BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());

  while (true) {
    try {
      initializeClone(srcTableId, tableId, conn, bw);

      // the following loop looks for changes in the files that occurred during the copy..
      // if files were dereferenced then they could have been GCed
      while (true) {
        int rewrites = checkClone(srcTableId, tableId, conn, bw);
        if (rewrites == 0) break;
      }

      bw.flush();
      break;
    } catch (TabletIterator.TabletDeletedException tde) {
      // tablets were merged in the src table
      bw.flush();

      // delete what we have cloned and try again
      deleteTable(tableId, false, SystemCredentials.get(), null);

      log.debug(
          "Tablets merged in table " + srcTableId + " while attempting to clone, trying again");

      UtilWaitThread.sleep(100);
    }
  }

  // delete the clone markers and create directory entries
  Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
  mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);

  int dirCount = 0;

  for (Entry<Key, Value> entry : mscanner) {
    Key k = entry.getKey();
    Mutation m = new Mutation(k.getRow());
    // Remove the "cloned OK" marker written by checkClone.
    m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
    // Assign a new zero-padded hex directory (e.g. /c-00000000) on a chosen volume.
    String dir =
        volumeManager.choose(ServerConstants.getTablesDirs())
            + "/"
            + tableId
            + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes()));
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes()));
    bw.addMutation(m);
  }

  bw.close();
}
/**
 * Walks the source and clone tablet metadata in lock-step (aligned by end row) and verifies that
 * every file referenced by a cloned tablet still exists in the corresponding source tablet(s).
 * Tablets whose files all match get a "cloned OK" marker; tablets with missing files are deleted
 * and rewritten from the current source entries.
 *
 * @return the number of cloned tablets that had to be rewritten (0 means the clone is consistent)
 * @throws TabletIterator.TabletDeletedException if source tablets disappeared (e.g. merge)
 *     mid-walk, signalling the caller to restart the clone
 */
static int checkClone(String srcTableId, String tableId, Connector conn, BatchWriter bw)
    throws TableNotFoundException, MutationsRejectedException {
  TabletIterator srcIter =
      new TabletIterator(
          createCloneScanner(srcTableId, conn),
          new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(),
          true,
          true);
  TabletIterator cloneIter =
      new TabletIterator(
          createCloneScanner(tableId, conn),
          new KeyExtent(new Text(tableId), null, null).toMetadataRange(),
          true,
          true);

  if (!cloneIter.hasNext() || !srcIter.hasNext())
    throw new RuntimeException(
        " table deleted during clone?  srcTableId = " + srcTableId + " tableId=" + tableId);

  int rewrites = 0;

  while (cloneIter.hasNext()) {
    Map<Key, Value> cloneTablet = cloneIter.next();
    Text cloneEndRow =
        new KeyExtent(cloneTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
    HashSet<String> cloneFiles = new HashSet<String>();

    // A ClonedColumnFamily entry means this tablet was already verified on a previous pass.
    boolean cloneSuccessful = false;
    for (Entry<Key, Value> entry : cloneTablet.entrySet()) {
      if (entry.getKey().getColumnFamily().equals(ClonedColumnFamily.NAME)) {
        cloneSuccessful = true;
        break;
      }
    }

    if (!cloneSuccessful) getFiles(cloneFiles, cloneTablet, null);

    List<Map<Key, Value>> srcTablets = new ArrayList<Map<Key, Value>>();
    Map<Key, Value> srcTablet = srcIter.next();
    srcTablets.add(srcTablet);

    Text srcEndRow =
        new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();

    // cmp < 0 would mean the clone tablet ends before the source tablet — only possible if
    // source tablets were deleted (merged) underneath us.
    int cmp = compareEndRows(cloneEndRow, srcEndRow);
    if (cmp < 0)
      throw new TabletIterator.TabletDeletedException(
          "Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);

    HashSet<String> srcFiles = new HashSet<String>();
    if (!cloneSuccessful) getFiles(srcFiles, srcTablet, srcTableId);

    // One clone tablet may span several source tablets (source may have split); accumulate
    // all source tablets up to the clone tablet's end row.
    while (cmp > 0) {
      srcTablet = srcIter.next();
      srcTablets.add(srcTablet);
      srcEndRow =
          new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
      cmp = compareEndRows(cloneEndRow, srcEndRow);
      if (cmp < 0)
        throw new TabletIterator.TabletDeletedException(
            "Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);

      if (!cloneSuccessful) getFiles(srcFiles, srcTablet, srcTableId);
    }

    if (cloneSuccessful) continue;

    if (!srcFiles.containsAll(cloneFiles)) {
      // delete existing cloned tablet entry
      Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());

      for (Entry<Key, Value> entry : cloneTablet.entrySet()) {
        Key k = entry.getKey();
        m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
      }

      bw.addMutation(m);

      for (Map<Key, Value> st : srcTablets)
        bw.addMutation(createCloneMutation(srcTableId, tableId, st));

      rewrites++;
    } else {
      // write out marker that this tablet was successfully cloned
      Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
      m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK".getBytes()));
      bw.addMutation(m);
    }
  }

  bw.flush();
  return rewrites;
}
/**
 * Deletes all metadata entries for {@code tableId}. When {@code insertDeletes} is set, file and
 * directory delete markers are written first so the garbage collector can reclaim the data even
 * if this process dies partway through removing the metadata rows.
 *
 * @param tableId table whose metadata is removed
 * @param insertDeletes whether to insert GC delete entries for files/directories first
 * @param credentials credentials used for the metadata scanner and writer
 * @param lock optional ZooLock; when non-null its id is stamped on every mutation
 */
public static void deleteTable(
    String tableId, boolean insertDeletes, Credentials credentials, ZooLock lock)
    throws AccumuloException, IOException {
  Scanner ms =
      new ScannerImpl(
          HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
  Text tableIdText = new Text(tableId);
  BatchWriter bw =
      new BatchWriterImpl(
          HdfsZooInstance.getInstance(),
          credentials,
          MetadataTable.ID,
          new BatchWriterConfig()
              .setMaxMemory(1000000)
              .setMaxLatency(120000l, TimeUnit.MILLISECONDS)
              .setMaxWriteThreads(2));

  // scan metadata for our table and delete everything we find
  Mutation m = null;
  ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());

  // insert deletes before deleting data from metadata... this makes the code fault tolerant
  if (insertDeletes) {

    ms.fetchColumnFamily(DataFileColumnFamily.NAME);
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);

    for (Entry<Key, Value> cell : ms) {
      Key key = cell.getKey();

      if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
        // Delete marker for each data file referenced by the table.
        FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
        bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
      }

      if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
        // Delete marker for each tablet directory.
        bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
      }
    }

    bw.flush();

    // Reset to a full (unfiltered) scan for the row-removal pass below.
    ms.clearColumns();
  }

  // Batch column deletes per row: start a new mutation whenever the row changes.
  for (Entry<Key, Value> cell : ms) {
    Key key = cell.getKey();

    if (m == null) {
      m = new Mutation(key.getRow());
      if (lock != null) putLockID(lock, m);
    }

    if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
      bw.addMutation(m);
      m = new Mutation(key.getRow());
      if (lock != null) putLockID(lock, m);
    }
    m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
  }

  // Flush the final row's mutation (the loop only emits on row change).
  if (m != null) bw.addMutation(m);

  bw.close();
}