@Test(timeout = 300000)
public void testDisableAndEnableTables() throws IOException {
  final byte[] row = Bytes.toBytes("row");
  final byte[] qualifier = Bytes.toBytes("qualifier");
  final byte[] value = Bytes.toBytes("value");
  final TableName table1 = TableName.valueOf("testDisableAndEnableTable1");
  final TableName table2 = TableName.valueOf("testDisableAndEnableTable2");
  Table ht1 = TEST_UTIL.createTable(table1, HConstants.CATALOG_FAMILY);
  Table ht2 = TEST_UTIL.createTable(table2, HConstants.CATALOG_FAMILY);
  Put put = new Put(row);
  put.add(HConstants.CATALOG_FAMILY, qualifier, value);
  ht1.put(put);
  ht2.put(put);
  Get get = new Get(row);
  get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
  ht1.get(get);
  ht2.get(get);

  this.admin.disableTables("testDisableAndEnableTable.*");

  // Test that tables are disabled
  get = new Get(row);
  get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
  boolean ok = false;
  try {
    ht1.get(get);
    ht2.get(get);
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
    ok = true;
  }
  assertTrue(ok);

  this.admin.enableTables("testDisableAndEnableTable.*");

  // Test that tables are enabled
  try {
    ht1.get(get);
  } catch (IOException e) {
    ok = false;
  }
  try {
    ht2.get(get);
  } catch (IOException e) {
    ok = false;
  }
  assertTrue(ok);
  ht1.close();
  ht2.close();
}
public void putBatch(Optional<List<Request>> putRequests, boolean optimize) {
  if (!valid) {
    Logger.error("CANNOT PUT! NO VALID CONNECTION");
    return;
  }
  List<Put> puts = new ArrayList<>();
  if (putRequests.isPresent() && !putRequests.get().isEmpty()) {
    String tableName = putRequests.get().get(0).table;
    // Collect all Puts from the requests; optionally skip the WAL for speed.
    putRequests
        .get()
        .forEach(
            pr ->
                pr.getPut()
                    .ifPresent(
                        p -> {
                          if (optimize) {
                            p.setDurability(Durability.SKIP_WAL);
                          }
                          puts.add(p);
                        }));
    // try-with-resources so the Table is released even if the put fails
    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
      if (optimize && table instanceof HTable) {
        ((HTable) table).setAutoFlush(false, true);
      }
      table.put(puts);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
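// A minimal sketch (not part of the original code) of the same batched write using
// BufferedMutator, the non-deprecated replacement for HTable.setAutoFlush(false, true);
// the "connection" field and the caller-supplied table name and Put list are assumed
// to match putBatch above.
public void putBatchBuffered(String tableName, List<Put> puts) {
  try (BufferedMutator mutator =
      connection.getBufferedMutator(TableName.valueOf(tableName))) {
    mutator.mutate(puts); // writes are buffered client-side
    mutator.flush();      // push any remaining buffered mutations
  } catch (IOException e) {
    e.printStackTrace();
  }
}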
public void put(String tablename, Put p) {
  // try-with-resources so the Table is released even if the put fails
  try (Table table = connection.getTable(TableName.valueOf(tablename))) {
    table.put(p);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
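// Hypothetical call-site sketch for the put(String, Put) helper above; the table,
// family, qualifier and row values are placeholders, not taken from the original code.
public void putExample() {
  Put examplePut = new Put(Bytes.toBytes("row-1"));
  examplePut.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  put("example_table", examplePut);
}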
public static void verifyMobRowCount(
    final HBaseTestingUtility util, final TableName tableName, long expectedRows)
    throws IOException {
  // Close the Connection as well as the Table; the original closed only the Table.
  try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration());
      Table table = connection.getTable(tableName)) {
    assertEquals(expectedRows, countMobRows(table));
  }
}
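// countMobRows is an external test helper defined elsewhere; a generic row-counting
// sketch (assumption: simply enumerating all rows of the table is sufficient) might
// look like the following. It is not the actual countMobRows implementation.
public static long countRows(Table table) throws IOException {
  long count = 0;
  try (ResultScanner scanner = table.getScanner(new Scan())) {
    for (Result r : scanner) {
      if (!r.isEmpty()) {
        count++;
      }
    }
  }
  return count;
}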
public void doScan() throws IOException {
  Table tableRef = connection.getTable(TableName.valueOf(table));
  Scan scan = new Scan();
  ResultScanner scanner = tableRef.getScanner(scan);
  long now = System.currentTimeMillis();
  if (verbose) System.out.println("Starting scan");
  for (Result res : scanner) {
    if (verbose) System.out.println(res);
  }
  if (verbose)
    System.out.printf("Scan finished: %d ms\n\n", System.currentTimeMillis() - now);
  scanner.close(); // release the scanner before closing the table
  tableRef.close();
}
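// A variation of doScan, offered only as a sketch: restrict the scan to a row range and
// a single column and raise the scanner caching. The "connection", "table" and "verbose"
// fields are assumed from the class above; the column family/qualifier are placeholders.
public void doBoundedScan(byte[] startRow, byte[] stopRow) throws IOException {
  try (Table tableRef = connection.getTable(TableName.valueOf(table))) {
    Scan scan = new Scan();
    scan.setStartRow(startRow); // inclusive start row
    scan.setStopRow(stopRow);   // exclusive stop row
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    scan.setCaching(500);       // rows fetched per RPC
    try (ResultScanner scanner = tableRef.getScanner(scan)) {
      for (Result res : scanner) {
        if (verbose) System.out.println(res);
      }
    }
  }
}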
@Test(timeout = 300000)
public void testGetTableDescriptor() throws IOException {
  HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
  HColumnDescriptor fam2 = new HColumnDescriptor("fam2");
  HColumnDescriptor fam3 = new HColumnDescriptor("fam3");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("myTestTable"));
  htd.addFamily(fam1);
  htd.addFamily(fam2);
  htd.addFamily(fam3);
  this.admin.createTable(htd);
  Table table = new HTable(TEST_UTIL.getConfiguration(), htd.getTableName());
  HTableDescriptor confirmedHtd = table.getTableDescriptor();
  assertEquals(htd.compareTo(confirmedHtd), 0);
  table.close();
}
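// For reference only (a sketch, assuming an HBase 2.x client): the builder-based
// equivalent of the HTableDescriptor/HColumnDescriptor constructors used above,
// which are deprecated in later client versions.
TableDescriptor builderHtd =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("myTestTable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam2"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam3"))
        .build();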
public void put(Optional<Request> putRequest) {
  if (!valid) {
    Logger.error("CANNOT PUT! NO VALID CONNECTION");
    return;
  }
  putRequest.ifPresent(
      pr ->
          pr.getPut()
              .ifPresent(
                  p -> {
                    // try-with-resources so the Table is released even if the put fails
                    try (Table table = connection.getTable(TableName.valueOf(pr.table))) {
                      table.put(p);
                    } catch (IOException e) {
                      e.printStackTrace();
                    }
                  }));
}
@SuppressWarnings("deprecation") @Test(timeout = 300000) public void testReplicaAndReplication() throws Exception { HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaAndReplication"); hdt.setRegionReplication(NB_SERVERS); HColumnDescriptor fam = new HColumnDescriptor(row); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); hdt.addFamily(fam); hdt.addCoprocessor(SlowMeCopro.class.getName()); HTU.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration()); conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1)); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); MiniZooKeeperCluster miniZK = HTU.getZkCluster(); HTU2 = new HBaseTestingUtility(conf2); HTU2.setZkCluster(miniZK); HTU2.startMiniCluster(NB_SERVERS); LOG.info("Setup second Zk"); HTU2.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration()); admin.addPeer("2", HTU2.getClusterKey()); admin.close(); Put p = new Put(row); p.add(row, row, row); final Table table = HTU.getConnection().getTable(hdt.getTableName()); table.put(p); HTU.getHBaseAdmin().flush(table.getName()); LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster."); Waiter.waitFor( HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() { @Override public boolean evaluate() throws Exception { try { SlowMeCopro.cdl.set(new CountDownLatch(1)); Get g = new Get(row); g.setConsistency(Consistency.TIMELINE); Result r = table.get(g); Assert.assertTrue(r.isStale()); return !r.isEmpty(); } finally { SlowMeCopro.cdl.get().countDown(); SlowMeCopro.sleepTime.set(0); } } }); table.close(); LOG.info("stale get on the first cluster done. Now for the second."); final Table table2 = HTU.getConnection().getTable(hdt.getTableName()); Waiter.waitFor( HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() { @Override public boolean evaluate() throws Exception { try { SlowMeCopro.cdl.set(new CountDownLatch(1)); Get g = new Get(row); g.setConsistency(Consistency.TIMELINE); Result r = table2.get(g); Assert.assertTrue(r.isStale()); return !r.isEmpty(); } finally { SlowMeCopro.cdl.get().countDown(); SlowMeCopro.sleepTime.set(0); } } }); table2.close(); HTU.getHBaseAdmin().disableTable(hdt.getTableName()); HTU.deleteTable(hdt.getTableName()); HTU2.getHBaseAdmin().disableTable(hdt.getTableName()); HTU2.deleteTable(hdt.getTableName()); // We shutdown HTU2 minicluster later, in afterClass(), as shutting down // the minicluster has negative impact of deleting all HConnections in JVM. }
@Test(timeout = 300000)
public void testDisableAndEnableTable() throws IOException {
  final byte[] row = Bytes.toBytes("row");
  final byte[] qualifier = Bytes.toBytes("qualifier");
  final byte[] value = Bytes.toBytes("value");
  final TableName table = TableName.valueOf("testDisableAndEnableTable");
  Table ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
  Put put = new Put(row);
  put.add(HConstants.CATALOG_FAMILY, qualifier, value);
  ht.put(put);
  Get get = new Get(row);
  get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
  ht.get(get);

  this.admin.disableTable(ht.getName());
  assertTrue(
      "Table must be disabled.",
      TEST_UTIL
          .getHBaseCluster()
          .getMaster()
          .getAssignmentManager()
          .getTableStateManager()
          .isTableState(ht.getName(), ZooKeeperProtos.Table.State.DISABLED));

  // Test that table is disabled
  get = new Get(row);
  get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
  boolean ok = false;
  try {
    ht.get(get);
  } catch (TableNotEnabledException e) {
    ok = true;
  }
  ok = false;

  // verify that scan encounters correct exception
  Scan scan = new Scan();
  try {
    ResultScanner scanner = ht.getScanner(scan);
    Result res = null;
    do {
      res = scanner.next();
    } while (res != null);
  } catch (TableNotEnabledException e) {
    ok = true;
  }
  assertTrue(ok);

  this.admin.enableTable(table);
  assertTrue(
      "Table must be enabled.",
      TEST_UTIL
          .getHBaseCluster()
          .getMaster()
          .getAssignmentManager()
          .getTableStateManager()
          .isTableState(ht.getName(), ZooKeeperProtos.Table.State.ENABLED));

  // Test that table is enabled
  try {
    ht.get(get);
  } catch (RetriesExhaustedException e) {
    ok = false;
  }
  assertTrue(ok);
  ht.close();
}
public static List<KeyValue> getAllTrainInfo(Configuration config, String date) {
  List<KeyValue> result = new ArrayList<>();
  String strJson = null;
  BufferedWriter writer = null;
  Table table = null;
  try (Connection connect = ConnectionFactory.createConnection(config);
      Admin admin = connect.getAdmin()) {
    TableName tablename = TableName.valueOf(TABLE_NAME);
    if (!admin.tableExists(tablename)) {
      System.out.println("Table does not exist.");
      return null;
    }
    table = connect.getTable(tablename);
    Put put = null;
    String start = null;
    String end = null;
    // Append progress to the config file so interrupted runs can be resumed.
    writer = new BufferedWriter(new FileWriter(new File(strConfig), true));
    for (KeyValue item : lstAllProcessStation) {
      start = (String) item.getKey();
      end = (String) item.getValue();
      try {
        try {
          Thread.sleep(200);
        } catch (InterruptedException e1) {
          e1.printStackTrace();
        }
        System.out.println("process : " + start + ":" + end);
        strJson = getFromAPIX(mapStationCode.get(start), mapStationCode.get(end), date);
        writer.write(start + ":" + end);
        writer.newLine();
      } catch (Exception e) {
        System.out.println(start + ":" + end + " error");
        e.printStackTrace();
        break;
      }
      JSONObject jo = new JSONObject(strJson);
      if (jo.has("httpstatus") && (jo.getInt("httpstatus") == 200)) {
        JSONObject joData = jo.getJSONObject("data");
        if (joData.has("flag") && joData.getBoolean("flag")) {
          result.add(new DefaultKeyValue(start, end));
          // insert into HBase
          String rowkey = start + ":" + end;
          put = new Put(rowkey.getBytes());
          put.addColumn(
              CF_JSON.getBytes(), "json".getBytes(), joData.toString().getBytes("utf-8"));
          table.put(put);
          System.out.println("start " + start + "\t end " + end + "\t has ticket");
        }
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  } finally {
    if (writer != null) {
      try {
        writer.flush();
        writer.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
    if (table != null) {
      try {
        table.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
  return result;
}