private static Put clonePut(Put put, long startId, boolean locking, byte[] columnsWritten) { Put ret = new Put(put.getRow()); Map<byte[], List<KeyValue>> families = put.getFamilyMap(); Columns cols = new Columns(columnsWritten); for (byte[] family : families.keySet()) { List<KeyValue> columns = families.get(family); Iterator<KeyValue> it = columns.iterator(); while (it.hasNext()) { KeyValue kv = it.next(); // byte[] column = DominoConst.getColumnKey(kv.getQualifier(), startId); byte[] qualifier = kv.getQualifier(); ret.add(family, qualifier, startId, kv.getValue()); cols.add(family, qualifier); } } Map<String, byte[]> attributes = put.getAttributesMap(); for (String key : attributes.keySet()) { ret.setAttribute(key, attributes.get(key)); } byte[] state = new byte[1 + Bytes.SIZEOF_LONG]; state[0] = locking ? DominoConst.S_STATEFUL_BYTE : DominoConst.S_STATELESS_BYTE; Bytes.putLong(state, 1, startId); ret.add(DominoConst.INNER_FAMILY, DominoConst.COLUMNS_COL, startId, cols.toByteArray()); ret.add(DominoConst.INNER_FAMILY, DominoConst.STATUS_COL, startId, state); return ret; }
/**
 * Parses one line of a StackExchange-style XML row export into an HBase Put.
 * After normalization the tokens alternate name/value delimited by single
 * quotes; user-related columns go to the "User" family, the rest to "Post".
 *
 * @param key input offset (unused)
 * @param value one raw input line
 * @param context emits (row key, Put) pairs
 */
public void map(LongWritable key, Text value, Context context)
    throws IOException, InterruptedException {
  // Normalize: collapse non-alphanumerics (keeping = and "), drop the "row"
  // tag, then rewrite quotes so StringTokenizer can split on single quotes.
  // Note: the original called toString() twice on an already-String value.
  String s = value.toString()
      .replaceAll("[^a-zA-Z0-9=\"]+", " ")
      .replace("row", "")
      .replaceAll("\"", "\'")
      .replaceAll("'", "\\\\'")
      .trim();
  StringTokenizer st = new StringTokenizer(s, "\'");
  // First token is the id attribute name; it was bound to an unused local
  // before — the next token is the actual row key.
  st.nextToken();
  String rowKey = st.nextToken().replace("\\", "").trim();
  Put put = new Put(Bytes.toBytes(rowKey));
  ImmutableBytesWritable outKey = new ImmutableBytesWritable(Bytes.toBytes(rowKey));
  while (st.hasMoreTokens()) {
    String colname = st.nextToken().replaceAll("\\\\", "").replaceAll("=", "").trim();
    String valname = st.nextToken().replace("\\", "").trim();
    // Route user-centric attributes to the "User" family, everything else to "Post".
    if (colname.equals("CreationDate")
        || colname.equals("LastEditDate")
        || colname.equals("LastActivityDate")
        || colname.equals("OwnerUserId")
        || colname.equals("LastEditorUserId")) {
      put.add(Bytes.toBytes("User"), Bytes.toBytes(colname), Bytes.toBytes(valname));
    } else {
      put.add(Bytes.toBytes("Post"), Bytes.toBytes(colname), Bytes.toBytes(valname));
    }
  }
  context.write(outKey, put);
}
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException { for (Text val : values) { treeset.add(val.toString()); } int treesize = treeset.size(); int limit = treesize - 10; Iterator itr = treeset.iterator(); while (itr.hasNext()) { String[] va; c++; if (c < 11) { va = itr.next().toString().split("!"); Put pa = new Put(va[1].getBytes()); // System.out.println(va[0]+" and "+va[1]); pa.add(Bytes.toBytes("stock"), Bytes.toBytes("volatility"), Bytes.toBytes(va[0])); context.write(new ImmutableBytesWritable(va[1].getBytes()), pa); } else if (c > limit) { va = itr.next().toString().split("!"); // System.out.println(va[0]+" and "+va[1]); Put pa = new Put(va[1].getBytes()); pa.add(Bytes.toBytes("stock"), Bytes.toBytes("volatility"), Bytes.toBytes(va[0])); context.write(new ImmutableBytesWritable(va[1].getBytes()), pa); } else { itr.next(); } } }
/**
 * Creates a HBase {@link Put} from a Storm {@link Tuple}.
 *
 * <p>The row key is read from {@code tupleRowKeyField}; if
 * {@code tupleTimestampField} is configured (non-null, non-empty) its value is
 * used as the cell timestamp, otherwise HBase assigns one server-side.
 *
 * @param tuple The {@link Tuple}
 * @return {@link Put}
 */
public Put getPutFromTuple(final Tuple tuple) {
  byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField));
  // 0 means "no explicit timestamp". Guard against a null field name too —
  // the original would NPE on tupleTimestampField.equals("").
  long ts = 0;
  if (tupleTimestampField != null && !tupleTimestampField.isEmpty()) {
    ts = tuple.getLongByField(tupleTimestampField);
  }
  Put p = new Put(rowKey);
  p.setWriteToWAL(writeToWAL);
  // The explicit size() > 0 guard was redundant: iterating an empty map's
  // key set simply does nothing.
  for (String cf : columnFamilies.keySet()) {
    byte[] cfBytes = Bytes.toBytes(cf);
    for (String cq : columnFamilies.get(cf)) {
      byte[] cqBytes = Bytes.toBytes(cq);
      byte[] val = Bytes.toBytes(tuple.getStringByField(cq));
      if (ts > 0) {
        p.add(cfBytes, cqBytes, ts, val);
      } else {
        p.add(cfBytes, cqBytes, val);
      }
    }
  }
  return p;
}
public static void writeTest(String tableStr) { try { Configuration conf = HBaseConfiguration.create(); byte[] tableName = Bytes.toBytes(tableStr); HConnection hConnection = HConnectionManager.createConnection(conf); HTableInterface table = hConnection.getTable(tableName); byte[] family = f0; List<Put> puts = new ArrayList<Put>(); for (int k = 0; k < 10; k++) // 写10行数据 { byte[] rowkey = Bytes.toBytes("rowKey_" + k); Put p = new Put(rowkey); byte[] value_id = Bytes.toBytes("123456"); byte[] value_user = Bytes.toBytes("mengqinghao" + k); p.add(family, qualifier_id, value_id); p.add(family, qualifier_user, value_user); puts.add(p); } table.put(puts); System.out.println("Puts done: " + puts.size()); table.close(); // very important } catch (IOException e) { e.printStackTrace(); } }
/*
 * (non-Javadoc)
 *
 * @see com.hazelcast.core.MapStore#storeAll(java.util.Map)
 */
@Override
public void storeAll(Map<String, String> pairs) {
  HTable table = null;
  try {
    List<Put> batch = new ArrayList<Put>(pairs.size());
    for (Map.Entry<String, String> entry : pairs.entrySet()) {
      try {
        // Row key is either date-bucketized or the raw key bytes.
        byte[] rowId =
            prefixDate ? IdUtil.bucketizeId(entry.getKey()) : Bytes.toBytes(entry.getKey());
        // Encode the value per the configured output format.
        byte[] payload;
        if (outputFormatType == StoreFormatType.SMILE) {
          payload = jsonSmileConverter.convertToSmile(entry.getValue());
        } else {
          payload = Bytes.toBytes(entry.getValue());
        }
        Put put = new Put(rowId);
        put.add(family, qualifier, payload);
        batch.add(put);
      } catch (NumberFormatException nfe) {
        // A malformed key only skips that one entry.
        LOG.error("Encountered bad key: " + entry.getKey(), nfe);
      }
    }
    table = (HTable) pool.getTable(tableName);
    table.setAutoFlush(false);
    table.put(batch);
    table.flushCommits();
  } catch (IOException e) {
    LOG.error("Error during puts", e);
  } finally {
    if (table != null) {
      pool.putTable(table);
    }
  }
}
/**
 * Seeds one row ("entry1") into the dynamic-columns test table using the raw
 * HBase client API (bypassing Phoenix): two columns in the default family and
 * two each in FAMILY_NAME and FAMILY_NAME2, then creates the Phoenix table on
 * top of the pre-existing data.
 *
 * @throws Exception if the HBase batch or Phoenix table creation fails
 */
private static void initTableValues() throws Exception {
  ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
  HTableInterface hTable =
      services.getTable(
          SchemaUtil.getTableNameAsBytes(
              HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME, HBASE_DYNAMIC_COLUMNS));
  try {
    // Insert rows using standard HBase mechanism with standard HBase "types"
    List<Row> mutations = new ArrayList<Row>();
    byte[] dv = Bytes.toBytes("DV");
    byte[] first = Bytes.toBytes("F");
    byte[] f1v1 = Bytes.toBytes("F1V1");
    byte[] f1v2 = Bytes.toBytes("F1V2");
    byte[] f2v1 = Bytes.toBytes("F2V1");
    byte[] f2v2 = Bytes.toBytes("F2V2");
    byte[] key = Bytes.toBytes("entry1");
    Put put = new Put(key);
    put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
    put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
    put.add(FAMILY_NAME, f1v1, Bytes.toBytes("f1value1"));
    put.add(FAMILY_NAME, f1v2, Bytes.toBytes("f1value2"));
    put.add(FAMILY_NAME2, f2v1, Bytes.toBytes("f2value1"));
    put.add(FAMILY_NAME2, f2v2, Bytes.toBytes("f2value2"));
    mutations.add(put);
    hTable.batch(mutations);
  } finally {
    hTable.close();
  }
  // Create Phoenix table after HBase table was created through the native APIs
  // The timestamp of the table creation must be later than the timestamp of the data
  ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
}
/**
 * Loads NUM_ROWS x NUM_COLS_PER_ROW cells of random-sized generated data into
 * the region, flushing the memstore every NUM_ROWS_PER_FLUSH rows. When
 * {@code includeTags} is set, each cell is written through a hand-built
 * KeyValue carrying one tag.
 *
 * @param region target region, written to directly (no client RPC)
 * @throws IOException if a put or flush fails
 */
private void doPuts(HRegion region) throws IOException {
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int i = 0; i < NUM_ROWS; ++i) {
    // Row keys are md5-prefixed to spread them across the keyspace.
    byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Put put = new Put(key);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      if (includeTags) {
        // Tagged path: Put.add(byte[],byte[],byte[]) cannot carry tags, so the
        // KeyValue is constructed explicitly with the tag attached.
        Tag[] tag = new Tag[1];
        tag[0] = new Tag((byte) 1, "Visibility");
        KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
        put.add(kv);
      } else {
        put.add(CF_BYTES, col, value);
      }
      if (VERBOSE) {
        KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
        System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
      }
      region.put(put);
    }
    // Periodic flush so the data reaches HFiles instead of living only in memstore.
    if (i % NUM_ROWS_PER_FLUSH == 0) {
      region.flushcache();
    }
  }
}
/*
 * (non-Javadoc)
 *
 * @see com.hazelcast.core.MapStore#store(java.lang.Object,
 * java.lang.Object)
 */
@Override
public void store(String key, String value) {
  HTableInterface table = null;
  try {
    table = pool.getTable(tableName);
    try {
      // Row key is either date-bucketized or the raw key bytes.
      byte[] rowId = prefixDate ? IdUtil.bucketizeId(key) : Bytes.toBytes(key);
      // Encode the value per the configured output format before building the Put.
      byte[] payload;
      if (outputFormatType == StoreFormatType.SMILE) {
        payload = jsonSmileConverter.convertToSmile(value);
      } else {
        payload = Bytes.toBytes(value);
      }
      Put put = new Put(rowId);
      put.add(family, qualifier, payload);
      table.put(put);
    } catch (NumberFormatException nfe) {
      LOG.error("Encountered bad key: " + key, nfe);
    }
  } catch (IOException e) {
    LOG.error("Error during put", e);
  } finally {
    // Always return the table handle to the pool.
    if (table != null) {
      pool.putTable(table);
    }
  }
}
/**
 * Builds a Put from one parsed record: {@code oneLine[0]} is the row key and
 * each subsequent element is a "name:value" pair. Only city/state/country
 * columns are stored; other names are ignored.
 *
 * @param oneLine the split input record; must have at least two elements
 * @return a Put for the record's row key with the recognized columns added
 */
private Put constructRow(String[] oneLine) {
  String rowKey = oneLine[0].trim();
  LOG.info("About to add row: " + rowKey);
  LOG.info("Number of lines : " + oneLine.length);
  LOG.info("First column value: " + oneLine[1]);
  Put put = new Put(Bytes.toBytes(rowKey));
  for (int i = 1; i < oneLine.length; i++) {
    String[] subLine = oneLine[i].split(":");
    // BUGFIX: a field without ':' previously threw ArrayIndexOutOfBoundsException
    // at subLine[1]; skip malformed fields instead.
    if (subLine.length < 2) {
      LOG.info("Skipping malformed field: " + oneLine[i]);
      continue;
    }
    LOG.info("column name: " + subLine[0]);
    LOG.info("column value: " + subLine[1]);
    String name = subLine[0].trim();
    byte[] value = Bytes.toBytes(subLine[1].trim());
    // else-if: a field matches at most one column name.
    if (name.equals("city")) {
      LOG.info("About to add: " + subLine[1]);
      put.add(HBASE_CF, COL_CITY, value);
    } else if (name.equals("state")) {
      LOG.info("About to add: " + subLine[1]);
      put.add(HBASE_CF, COL_STATE, value);
    } else if (name.equals("country")) {
      LOG.info("About to add: " + subLine[1]);
      put.add(HBASE_CF, COL_COUNTRY, value);
    }
  }
  return put;
}
/**
 * Verifies that the pre/post checkAndPut coprocessor hooks fire exactly once:
 * they must all be false before the checkAndPut call and all true after it.
 * A plain put() beforehand seeds the row so the check can match.
 */
@Test
public void testCheckAndPutHooks() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    // Seed the row with A:A=A so the subsequent checkAndPut's check succeeds.
    Put p = new Put(Bytes.toBytes(0));
    p.add(A, A, A);
    table.put(p);
    table.flushCommits();
    p = new Put(Bytes.toBytes(0));
    p.add(A, A, A);
    // None of the checkAndPut hooks should have fired yet.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
        tableName,
        new Boolean[] {false, false, false});
    table.checkAndPut(Bytes.toBytes(0), A, A, A, p);
    // All three hooks (pre, pre-after-row-lock, post) must have fired.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
        tableName,
        new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
/**
 * A set up method to start the test cluster. AggregateProtocolImpl is registered and will be
 * loaded during region startup.
 *
 * @throws Exception on cluster startup or table creation failure
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {
  conf.set(
      CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
  util.startMiniCluster(2);
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
  util.createMultiRegions(
      util.getConfiguration(),
      table,
      TEST_FAMILY,
      new byte[][] {HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1], ROWS[rowSeperator2]});
  /**
   * The testtable has one CQ which is always populated and one variable CQ for each row rowkey1:
   * CF:CQ CF:CQ1 rowKey2: CF:CQ CF:CQ2
   */
  for (int i = 0; i < ROWSIZE; i++) {
    long l = i; // primitive long instead of deprecated new Long(i)
    Put put = new Put(ROWS[i]);
    put.setWriteToWAL(false);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(l));
    table.put(put);
    Put p2 = new Put(ROWS[i]);
    // BUGFIX: the original called put.setWriteToWAL(false) a second time here;
    // the flag belongs on p2, the second mutation.
    p2.setWriteToWAL(false);
    p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(l)), Bytes.toBytes(l * 10));
    table.put(p2);
  }
  table.close();
}
/**
 * Fills {@code table} with a deterministic (or random-valued) grid of cells:
 * rows startRow..endRow, numCols columns per row, one cell per column family.
 * When {@code setTimestamp} is true the column index doubles as the cell
 * timestamp; when {@code random} is true values are random column indices.
 */
public void fillTable(
    String table,
    int startRow,
    int endRow,
    int numCols,
    int pad,
    boolean setTimestamp,
    boolean random,
    String... colfams)
    throws IOException {
  HTable tbl = new HTable(conf, table);
  Random rnd = new Random();
  for (int row = startRow; row <= endRow; row++) {
    for (int col = 0; col < numCols; col++) {
      Put put = new Put(Bytes.toBytes("row-" + padNum(row, pad)));
      for (String cf : colfams) {
        String colName = "col-" + padNum(col, pad);
        // Value is either a random column index or "row.col", zero-padded.
        String val;
        if (random) {
          val = "val-" + Integer.toString(rnd.nextInt(numCols));
        } else {
          val = "val-" + padNum(row, pad) + "." + padNum(col, pad);
        }
        if (setTimestamp) {
          put.add(Bytes.toBytes(cf), Bytes.toBytes(colName), col, Bytes.toBytes(val));
        } else {
          put.add(Bytes.toBytes(cf), Bytes.toBytes(colName), Bytes.toBytes(val));
        }
      }
      tbl.put(put);
    }
  }
  tbl.close();
}
/* * 为表添加数据(适合知道有多少列族的固定表) * * @rowKey rowKey * * @tableName 表名 * * @column1 第一个列族列表 * * @value1 第一个列的值的列表 * * @column2 第二个列族列表 * * @value2 第二个列的值的列表 */ public static void addData( String rowKey, String tableName, String[] column1, String[] value1, String[] column2, String[] value2) throws IOException { Put put = new Put(Bytes.toBytes(rowKey)); // 设置rowkey HTableInterface htable = conn.getTable(tableName); HColumnDescriptor[] columnFamilies = htable .getTableDescriptor() // 获取所有的列族 .getColumnFamilies(); for (int i = 0; i < columnFamilies.length; i++) { String familyName = columnFamilies[i].getNameAsString(); // 获取列族名 if (familyName.equals("article")) { // article列族put数据 for (int j = 0; j < column1.length; j++) { put.add(Bytes.toBytes(familyName), Bytes.toBytes(column1[j]), Bytes.toBytes(value1[j])); } } if (familyName.equals("author")) { // author列族put数据 for (int j = 0; j < column2.length; j++) { put.add(Bytes.toBytes(familyName), Bytes.toBytes(column2[j]), Bytes.toBytes(value2[j])); } } } htable.put(put); // System.out.println("add data Success!"); }
/**
 * Verifies that an atomic RowMutations (a Put plus a Delete on the same row)
 * fires the put and delete coprocessor hooks but not the get hooks: all flags
 * start false, and after mutateRow only prePut/postPut/delete are true.
 */
@Test
public void testRowMutation() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    // Baseline: no hooks have fired yet on the fresh table.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
        tableName,
        new Boolean[] {false, false, false, false, false});
    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);
    // Bundle the put and delete into a single atomic row mutation.
    RowMutations arm = new RowMutations(ROW);
    arm.add(put);
    arm.add(delete);
    table.mutateRow(arm);
    // Put and delete hooks fired; get hooks must remain untouched.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
        tableName,
        new Boolean[] {false, false, true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
private void putRecord(MetaRecord rec, long ts) throws Exception { Put put = new Put(Bytes.toBytes(rec.getId())); put.add(FAMILY, OBJECT_TYPE, ts, Bytes.toBytes(rec.getObjectType())); put.add(FAMILY, SQL, ts, Bytes.toBytes(rec.getSQL())); // System.out.println("addRecord id: " + rec.getId() + ", sql=" + rec.getSQL()); table.put(put); }
/**
 * Verifies that pre/post WAL-restore coprocessor hooks are skipped for a table
 * the observer is configured to ignore (TABLE_SKIPPED): after moving the
 * region to a fresh region server, writing, killing that server, and letting
 * the region recover from the WAL, both hook counters must remain 0.
 */
@Test
public void testPreWALRestoreSkip() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
  TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});

  // Move the table's region onto a newly started region server.
  JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
  ServerName sn2 = rs1.getRegionServer().getServerName();
  String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();
  util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
  // Wait until the move is reflected in the region locations.
  while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
    Thread.sleep(100);
  }

  Put put = new Put(ROW);
  put.add(A, A, A);
  put.add(B, B, B);
  put.add(C, C, C);
  table.put(put);
  table.flushCommits();

  // Kill the server holding the region to force WAL replay on reassignment.
  cluster.killRegionServer(rs1.getRegionServer().getServerName());
  Threads.sleep(20000); // just to be sure that the kill has fully started.
  util.waitUntilAllRegionsAssigned(tableName);

  // The skipped table must see zero pre/post WAL restore invocations.
  verifyMethodResult(
      SimpleRegionObserver.class,
      new String[] {"getCtPreWALRestore", "getCtPostWALRestore"},
      tableName,
      new Integer[] {0, 0});

  util.deleteTable(tableName);
  table.close();
}
/** * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have two columns, Few have * one. * * @param table * @throws IOException */ private static void writeRows(Table table) throws IOException { final byte[] family = Bytes.toBytes(COL_FAM); final byte[] value = Bytes.toBytes("abcd"); final byte[] col1 = Bytes.toBytes(COL1); final byte[] col2 = Bytes.toBytes(COL2); final byte[] col3 = Bytes.toBytes(COMPOSITE_COLUMN); ArrayList<Put> rowsUpdate = new ArrayList<Put>(); // write few rows with two columns int i = 0; for (; i < TOTAL_ROWS - ROWS_WITH_ONE_COL; i++) { byte[] row = Bytes.toBytes("row" + i); Put put = new Put(row); put.add(family, col1, value); put.add(family, col2, value); put.add(family, col3, value); rowsUpdate.add(put); } // write few rows with only one column for (; i < TOTAL_ROWS; i++) { byte[] row = Bytes.toBytes("row" + i); Put put = new Put(row); put.add(family, col2, value); rowsUpdate.add(put); } table.put(rowsUpdate); }
/**
 * Records one run entry under row {@code run_id}: the file's type plus three
 * companion columns ({@code _db_timestamp}, {@code _filename}, {@code _regex}),
 * all versioned at the parsed {@code timestamp}.
 *
 * @return always true (kept for interface compatibility)
 * @throws Exception if the put fails or {@code timestamp} is not a valid long
 */
private static boolean putRunEntry(
    dbutil db_util,
    String db_name,
    String run_id,
    String file_id,
    String type,
    String timestamp,
    String timestamp_stop,
    String path,
    String regex)
    throws Exception {
  // Parse once instead of allocating a deprecated new Long(timestamp) per cell.
  long version = Long.parseLong(timestamp);
  Put run_id_put = new Put(run_id.getBytes());
  run_id_put.add("d".getBytes(), file_id.getBytes(), version, type.getBytes());
  run_id_put.add(
      "d".getBytes(), (file_id + "_db_timestamp").getBytes(), version, timestamp_stop.getBytes());
  run_id_put.add(
      "d".getBytes(), (file_id + "_filename").getBytes(), version, path.getBytes());
  run_id_put.add(
      "d".getBytes(), (file_id + "_regex").getBytes(), version, regex.getBytes());
  db_util.doPut(db_name, run_id_put);
  return true;
}
/**
 * Registers a file under {@code file_id} unless that exact path is already
 * recorded. New paths are appended (newline-separated) to the existing
 * "filenames" cell; "fullfile" sources skip the filenames column entirely.
 *
 * @return false if the file/path pair already existed, true after a write
 */
private static boolean putFileEntry(
    dbutil db_util, FileSystem fs, String db_name, String file_id, String file_path, String source)
    throws Exception {
  // Early exit on duplicates.
  if (hasFile(db_util, fs, db_name, file_id, file_path)) {
    logger.debug("File already found, putFileEntry aborting");
    return false;
  }
  // Append to any previously stored path list for this file id.
  String all_paths = file_path;
  Get lookup = new Get(file_id.getBytes());
  Result existing = db_util.doGet(db_name, lookup);
  KeyValue storedNames = existing.getColumnLatest("d".getBytes(), "filenames".getBytes());
  if (storedNames != null) {
    all_paths = new String(storedNames.getValue()) + "\n" + file_path;
  }
  Put entry = new Put(file_id.getBytes());
  entry.add("d".getBytes(), "source".getBytes(), source.getBytes());
  if (!source.equals("fullfile")) {
    entry.add("d".getBytes(), "filenames".getBytes(), all_paths.getBytes());
  }
  db_util.doPut(db_name, entry);
  return true;
}
/**
 * Verifies region replicas survive a schema change: basic write/read works,
 * a column family can be added via disable/modify/enable, writes/reads still
 * work afterwards, and a TIMELINE-consistency read served while the primary
 * is blocked (via SlowMeCopro's latch) comes back stale.
 */
@Test(timeout = 120000)
public void testChangeTable() throws Exception {
  HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

  // basic test: it should work.
  Put p = new Put(row);
  p.add(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  // Add a CF, it should work.
  HTableDescriptor bHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  HColumnDescriptor hcd = new HColumnDescriptor(row);
  hdt.addFamily(hcd);
  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.getHBaseAdmin().modifyTable(hdt.getTableName(), hdt);
  HTU.getHBaseAdmin().enableTable(hdt.getTableName());
  HTableDescriptor nHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  Assert.assertEquals(
      "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1,
      nHdt.getColumnFamilies().length);

  // Write/read through the newly added family (its name is the `row` bytes).
  p = new Put(row);
  p.add(row, row, row);
  table.put(p);

  g = new Get(row);
  r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    // Block the primary via the coprocessor latch so a TIMELINE read must be
    // served by a replica and therefore reports stale.
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
  } finally {
    // Unblock the primary and reset the injected delay.
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }
  Admin admin = HTU.getHBaseAdmin();
  nHdt = admin.getTableDescriptor(hdt.getTableName());
  Assert.assertEquals(
      "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1,
      nHdt.getColumnFamilies().length);
  admin.disableTable(hdt.getTableName());
  admin.deleteTable(hdt.getTableName());
  admin.close();
}
/**
 * Tests that each transaction can see its own persisted writes, while not seeing writes from
 * other in-progress transactions.
 */
@Test
public void testReadYourWrites() throws Exception {
  // In-progress tx1: started before our main transaction
  HTable hTable1 = new HTable(testUtil.getConfiguration(), TestBytes.table);
  TransactionAwareHTable txHTable1 = new TransactionAwareHTable(hTable1);
  TransactionContext inprogressTxContext1 =
      new TransactionContext(new InMemoryTxSystemClient(txManager), txHTable1);

  // In-progress tx2: started while our main transaction is running
  HTable hTable2 = new HTable(testUtil.getConfiguration(), TestBytes.table);
  TransactionAwareHTable txHTable2 = new TransactionAwareHTable(hTable2);
  TransactionContext inprogressTxContext2 =
      new TransactionContext(new InMemoryTxSystemClient(txManager), txHTable2);

  // create an in-progress write that should be ignored
  byte[] col2 = Bytes.toBytes("col2");
  inprogressTxContext1.start();
  Put putCol2 = new Put(TestBytes.row);
  byte[] valueCol2 = Bytes.toBytes("writing in progress");
  putCol2.add(TestBytes.family, col2, valueCol2);
  txHTable1.put(putCol2);

  // start a tx and write a value to test reading in same tx
  transactionContext.start();
  Put put = new Put(TestBytes.row);
  byte[] value = Bytes.toBytes("writing");
  put.add(TestBytes.family, TestBytes.qualifier, value);
  transactionAwareHTable.put(put);

  // test that a write from a tx started after the first is not visible
  inprogressTxContext2.start();
  Put put2 = new Put(TestBytes.row);
  byte[] value2 = Bytes.toBytes("writing2");
  put2.add(TestBytes.family, TestBytes.qualifier, value2);
  txHTable2.put(put2);

  // The main transaction must see its own uncommitted write...
  Get get = new Get(TestBytes.row);
  Result row = transactionAwareHTable.get(get);
  assertFalse(row.isEmpty());
  byte[] col1Value = row.getValue(TestBytes.family, TestBytes.qualifier);
  Assert.assertNotNull(col1Value);
  Assert.assertArrayEquals(value, col1Value);
  // write from in-progress transaction should not be visible
  byte[] col2Value = row.getValue(TestBytes.family, col2);
  assertNull(col2Value);

  // commit in-progress transaction, should still not be visible
  // (tx1 started before our snapshot, so its commit stays excluded)
  inprogressTxContext1.finish();

  get = new Get(TestBytes.row);
  row = transactionAwareHTable.get(get);
  assertFalse(row.isEmpty());
  col2Value = row.getValue(TestBytes.family, col2);
  assertNull(col2Value);

  transactionContext.finish();
  inprogressTxContext2.abort();
}
public static void createTable(String tableName) { long start = System.currentTimeMillis(); try { HTableInterface htable = conn.getTable(tableName); List<Put> putList = new ArrayList<Put>(); HColumnDescriptor[] columnFamilies = htable.getTableDescriptor().getColumnFamilies(); String[] family = {"article", "author"}; creatTable(tableName, family); String[] column1 = {"title", "content", "tag"}; String[] column2 = {"name", "nickname"}; for (int i = 1; i <= 1000000; i++) { DecimalFormat format = new DecimalFormat("00000000"); String rowKey = format.format(i); // System.out.println("==| insert"+rowKey); String[] valueOfArticle = {"title" + i, "content" + i, "tag" + i}; String[] valueOfAuthor = {"name" + i, "nickname" + i}; // addData(rowKey, tableName, column1, valueOfArticle, column2, // valueOfAuthor); Put put = new Put(Bytes.toBytes(rowKey)); // 设置rowkey for (int colIndex = 0; colIndex < columnFamilies.length; colIndex++) { String familyName = columnFamilies[colIndex].getNameAsString(); // 获取列族名 if (familyName.equals("article")) { // article列族put数据 for (int k = 0; k < column1.length; k++) { put.add( Bytes.toBytes(familyName), Bytes.toBytes(column1[k]), Bytes.toBytes(valueOfArticle[k])); } } if (familyName.equals("author")) { // author列族put数据 for (int k = 0; k < column2.length; k++) { put.add( Bytes.toBytes(familyName), Bytes.toBytes(column2[k]), Bytes.toBytes(valueOfAuthor[k])); } } } putList.add(put); if (i % 10000 == 0) { htable.put(putList); putList = new ArrayList<Put>(); } } long end = System.currentTimeMillis(); long used = end - start; System.out.println("data insert finished used " + used + " ms"); } catch (Exception e) { e.printStackTrace(); } }
/**
 * Verifies per-row TTL override behavior: cells written 2s in the past with a
 * family TTL of 1s would normally expire, but a 3s override keeps them visible
 * until the (manually advanced) clock passes the override window. Uses a
 * ManualEnvironmentEdge so time is controlled, not real.
 */
@Test
public void testTTL() throws Exception {
  TableName tableName = TableName.valueOf("testTTL");
  if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
    TEST_UTIL.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  // Family TTL of 1s — short enough that the test cells are past-expiry.
  HColumnDescriptor hcd = new HColumnDescriptor(F).setMaxVersions(10).setTimeToLive(1);
  desc.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  Table t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
  long now = EnvironmentEdgeManager.currentTime();
  // Pin the clock so expiry checks use our controlled time.
  ManualEnvironmentEdge me = new ManualEnvironmentEdge();
  me.setValue(now);
  EnvironmentEdgeManagerTestHelper.injectEdge(me);
  // 2s in the past
  long ts = now - 2000;
  // Set the TTL override to 3s
  // NOTE(review): the "ttl" attribute value is empty and the 3000L rides in a
  // cell keyed by the table name — presumably the coprocessor under test reads
  // the override from that cell; confirm against its implementation.
  Put p = new Put(R);
  p.setAttribute("ttl", new byte[] {});
  p.add(F, tableName.getName(), Bytes.toBytes(3000L));
  t.put(p);
  p = new Put(R);
  p.add(F, Q, ts, Q);
  t.put(p);
  p = new Put(R);
  p.add(F, Q, ts + 1, Q);
  t.put(p);
  // these two should be expired but for the override
  // (their ts was 2s in the past)
  Get g = new Get(R);
  g.setMaxVersions(10);
  Result r = t.get(g);
  // still there?
  assertEquals(2, r.size());
  // Survive a flush + major compaction as well.
  TEST_UTIL.flush(tableName);
  TEST_UTIL.compact(tableName, true);
  g = new Get(R);
  g.setMaxVersions(10);
  r = t.get(g);
  // still there?
  assertEquals(2, r.size());
  // roll time forward 2s.
  me.setValue(now + 2000);
  // now verify that data eventually does expire
  g = new Get(R);
  g.setMaxVersions(10);
  r = t.get(g);
  // should be gone now
  assertEquals(0, r.size());
  t.close();
}
/**
 * Builds a sample Put touching three families (fam1..fam3) and sends it
 * through the Camel "direct:start-put" route.
 */
private static void put(CamelContext camelContext) {
  Put sample = new Put(Bytes.toBytes("row1"));
  // family / qualifier / value triples for the demo row
  String[][] cells = {
    {"fam1", "qual1", "val1"},
    {"fam2", "qual2", "val2"},
    {"fam3", "qual3", "val3"},
  };
  for (String[] cell : cells) {
    sample.add(Bytes.toBytes(cell[0]), Bytes.toBytes(cell[1]), Bytes.toBytes(cell[2]));
  }
  ProducerTemplate producer = camelContext.createProducerTemplate();
  producer.requestBody("direct:start-put", sample);
}
/**
 * Stamps the catalog-family server location (host:port) and start code of
 * {@code sn} onto {@code p}, returning the same Put for chaining.
 */
private static Put addLocation(final Put p, final ServerName sn) {
  byte[] server = Bytes.toBytes(sn.getHostAndPort());
  byte[] startCode = Bytes.toBytes(sn.getStartcode());
  p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, server);
  p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, startCode);
  return p;
}
/**
 * Verifies coprocessor behavior across region-server failure: after moving the
 * region to a fresh server and writing, the put/batch hooks have fired once;
 * after killing that server and letting the region recover via WAL replay, the
 * WAL-restore counters are checked and the put counters reset to 0 on the new
 * server.
 */
@Test
public void testRecovery() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + ".testRecovery");
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    // Move the table's region onto a newly started region server.
    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();
    util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
    // Wait until the move is reflected in the region locations.
    while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
      Thread.sleep(100);
    }

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);

    // Put/batch-mutate hooks fired; get and delete hooks did not.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {
          "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
          "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"
        },
        tableName,
        new Boolean[] {false, false, true, true, true, true, false});

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut"},
        tableName,
        new Integer[] {0, 0, 1, 1});

    // Kill the hosting server to force recovery (WAL replay) elsewhere.
    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(1000); // Let the kill soak in.
    util.waitUntilAllRegionsAssigned(tableName);
    LOG.info("All regions assigned");

    // On the new server no client puts have happened, so counters are 0.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"getCtPrePut", "getCtPostPut"},
        tableName,
        new Integer[] {0, 0});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
/**
 * Stamps the catalog-family server location (host:port), start code, and open
 * sequence number of {@code sn} onto {@code p}, returning the same Put for
 * chaining.
 */
public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
  byte[] server = Bytes.toBytes(sn.getHostAndPort());
  byte[] startCode = Bytes.toBytes(sn.getStartcode());
  p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, server);
  p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, startCode);
  p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
  return p;
}
/**
 * Builds the tombstone Put marking a row deletion at transaction
 * {@code startId}: a columns bookkeeping cell plus a status cell whose layout
 * is [1 byte delete flag][8 byte startId].
 */
private static Put deletePut(byte[] row, long startId, byte[] columnsWritten) throws IOException {
  Put tombstone = new Put(row);
  Columns written = new Columns(columnsWritten);
  byte[] status = new byte[1 + Bytes.SIZEOF_LONG];
  status[0] = DominoConst.S_DELETE_BYTE;
  Bytes.putLong(status, 1, startId);
  tombstone.add(DominoConst.INNER_FAMILY, DominoConst.COLUMNS_COL, startId, written.toByteArray());
  tombstone.add(DominoConst.INNER_FAMILY, DominoConst.STATUS_COL, startId, status);
  return tombstone;
}
/**
 * Persists a blog entry. The row key is "username&lt;KEY_SPLIT_CHAR&gt;createdId";
 * the user, entry text, and creation time land in separate ENTRY_FAMILY columns.
 */
@Override
public void save(Blog blog) throws IOException {
  String rowId = blog.getUsername() + KEY_SPLIT_CHAR + convertForId(blog.getCreated());
  Put entry = new Put(toBytes(rowId));
  entry.add(ENTRY_FAMILY, USER_COLUMN, toBytes(blog.getUsername()));
  entry.add(ENTRY_FAMILY, BLOG_COLUMN, toBytes(blog.getBlogEntry()));
  entry.add(ENTRY_FAMILY, CREATED_COLUMN, toBytes(blog.getCreated().getTime()));
  table.put(entry);
}