Example #1
  @Test
  public void testStartStopRow() throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testStartStopRow1");
    final TableName TABLENAME2 = TableName.valueOf("testStartStopRow2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");
    final byte[] ROW0 = Bytes.toBytesBinary("\\x01row0");
    final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1");
    final byte[] ROW2 = Bytes.toBytesBinary("\\x01row2");

    Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
    Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

    // put rows into the first table
    Put p = new Put(ROW0);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW1);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW2);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);

    CopyTable copy = new CopyTable();
    assertEquals(
        0,
        ToolRunner.run(
            new Configuration(TEST_UTIL.getConfiguration()),
            copy,
            new String[] {
              "--new.name=" + TABLENAME2,
              "--startrow=\\x01row1",
              "--stoprow=\\x01row2",
              TABLENAME1.getNameAsString()
            }));

    // verify the data was copied into table 2
    // row1 exists; row0 and row2 do not
    Get g = new Get(ROW1);
    Result r = t2.get(g);
    assertEquals(1, r.size());
    assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));

    g = new Get(ROW0);
    r = t2.get(g);
    assertEquals(0, r.size());

    g = new Get(ROW2);
    r = t2.get(g);
    assertEquals(0, r.size());

    t1.close();
    t2.close();
    TEST_UTIL.deleteTable(TABLENAME1);
    TEST_UTIL.deleteTable(TABLENAME2);
  }
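  // The "\x01" escaping used for the --startrow/--stoprow arguments above is resolved by
  // Bytes.toBytesBinary, which is also how ROW0..ROW2 are built. A minimal illustration
  // (not part of the original test) of how the escape round-trips:
  private static void binaryEscapingSketch() {
    byte[] raw = Bytes.toBytesBinary("\\x01row1");
    System.out.println(raw.length);                // 5: the 0x01 byte plus 'r','o','w','1'
    System.out.println(Bytes.toStringBinary(raw)); // prints \x01row1
  }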
  @Test
  public void testTTL() throws Exception {
    TableName tableName = TableName.valueOf("testTTL");
    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
      TEST_UTIL.deleteTable(tableName);
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(F).setMaxVersions(10).setTimeToLive(1);
    desc.addFamily(hcd);
    TEST_UTIL.getHBaseAdmin().createTable(desc);
    Table t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
    long now = EnvironmentEdgeManager.currentTime();
    ManualEnvironmentEdge me = new ManualEnvironmentEdge();
    me.setValue(now);
    EnvironmentEdgeManagerTestHelper.injectEdge(me);
    // 2s in the past
    long ts = now - 2000;
    // Set the TTL override to 3s
    Put p = new Put(R);
    p.setAttribute("ttl", new byte[] {});
    p.add(F, tableName.getName(), Bytes.toBytes(3000L));
    t.put(p);

    p = new Put(R);
    p.add(F, Q, ts, Q);
    t.put(p);
    p = new Put(R);
    p.add(F, Q, ts + 1, Q);
    t.put(p);

    // these two should be expired but for the override
    // (their ts was 2s in the past)
    Get g = new Get(R);
    g.setMaxVersions(10);
    Result r = t.get(g);
    // still there?
    assertEquals(2, r.size());

    TEST_UTIL.flush(tableName);
    TEST_UTIL.compact(tableName, true);

    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // still there?
    assertEquals(2, r.size());

    // roll time forward 2s.
    me.setValue(now + 2000);
    // now verify that data eventually does expire
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // should be gone now
    assertEquals(0, r.size());
    t.close();
  }
Example #3
  /**
   * Import all of the ways from the parsed XML into the HBase table as a segment. Ways are OSM
   * values which consist of a list of nodes. Segments are custom values we use which represent a
   * single node and its neighbors.
   */
  private static void import_ways() {
    System.out.println("Importing ways (segments)...");
    Table segmentTable = Util.get_table("segment");
    if (segmentTable == null) {
      System.err.println("Segment table failed to load.");
      return;
    }

    int counter = 0;
    int batch = 100;
    List<Put> puts = new ArrayList<>();
    for (Way way : ways) {
      Node previousNode = null;
      for (Node node : way.getNodes()) {
        if (previousNode == null) {
          previousNode = node;
          continue;
        }
        Put p = new Put(Bytes.toBytes(previousNode.getGeohash()));
        p.addColumn(
            NODE,
            Bytes.toBytes(node.getGeohash()),
            Bytes.toBytes(String.valueOf(way.getTagsAsSerializedJSON())));
        puts.add(p);
        p = new Put(Bytes.toBytes(node.getGeohash()));
        p.addColumn(
            NODE,
            Bytes.toBytes(previousNode.getGeohash()),
            Bytes.toBytes(String.valueOf(way.getTagsAsSerializedJSON())));
        puts.add(p);
      }
      counter += 1;
      if (counter % batch == 0) {
        try {
          System.out.print("\rBatch " + counter + " / " + ways.size());
          segmentTable.put(puts);
          puts.clear();
        } catch (IOException e) {
          System.out.println("Segment put failed");
          e.printStackTrace();
        }
      }
    }
    try {
      segmentTable.put(puts);
    } catch (IOException e) {
      System.out.println("Segment put failed");
      e.printStackTrace();
    }
    System.out.println("Added all segments!");
  }
  @Test
  public void testBaseCases() throws Exception {
    TableName tableName = TableName.valueOf("baseCases");
    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
      TEST_UTIL.deleteTable(tableName);
    }
    Table t = TEST_UTIL.createTable(tableName, F, 1);
    // set the version override to 2
    Put p = new Put(R);
    p.setAttribute("versions", new byte[] {});
    p.add(F, tableName.getName(), Bytes.toBytes(2));
    t.put(p);

    long now = EnvironmentEdgeManager.currentTime();

    // insert 2 versions
    p = new Put(R);
    p.add(F, Q, now, Q);
    t.put(p);
    p = new Put(R);
    p.add(F, Q, now + 1, Q);
    t.put(p);
    Get g = new Get(R);
    g.setMaxVersions(10);
    Result r = t.get(g);
    assertEquals(2, r.size());

    TEST_UTIL.flush(tableName);
    TEST_UTIL.compact(tableName, true);

    // both versions are still visible even after a flush/compaction
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    assertEquals(2, r.size());

    // insert a 3rd version
    p = new Put(R);
    p.add(F, Q, now + 2, Q);
    t.put(p);
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // still only two versions visible
    assertEquals(2, r.size());

    t.close();
  }
  public void addData() throws IOException {
    List<Put> list = new ArrayList<Put>();
    Put put1 = new Put(("row1").getBytes());
    put1.addColumn(columnFamily1.getBytes(), "title".getBytes(), "title".getBytes());
    put1.addColumn(columnFamily1.getBytes(), "content".getBytes(), "content".getBytes());
    put1.addColumn(columnFamily2.getBytes(), "user".getBytes(), "user".getBytes());
    put1.addColumn(columnFamily2.getBytes(), "time".getBytes(), "time".getBytes());
    list.add(put1);

    Put put2 = new Put(("row2").getBytes());
    put2.addColumn(columnFamily1.getBytes(), "thumbUrl".getBytes(), "title".getBytes());
    put2.addColumn(columnFamily1.getBytes(), "author".getBytes(), "content".getBytes());
    put2.addColumn(columnFamily2.getBytes(), "age".getBytes(), "user".getBytes());
    list.add(put2);

    Put put3 = new Put(("row3").getBytes());
    put3.addColumn(columnFamily1.getBytes(), "title".getBytes(), "title".getBytes());
    put3.addColumn(columnFamily1.getBytes(), "author".getBytes(), "content".getBytes());
    put3.addColumn(columnFamily2.getBytes(), "age".getBytes(), "user".getBytes());
    put3.addColumn(columnFamily2.getBytes(), "time".getBytes(), "time".getBytes());
    list.add(put3);

    table.put(list);
    System.out.println("data add success!");
  }
Example #6
  /**
   * Writes TOTAL_ROWS distinct rows into the table. Some rows have multiple columns; some have
   * only one.
   *
   * @param table
   * @throws IOException
   */
  private static void writeRows(Table table) throws IOException {
    final byte[] family = Bytes.toBytes(COL_FAM);
    final byte[] value = Bytes.toBytes("abcd");
    final byte[] col1 = Bytes.toBytes(COL1);
    final byte[] col2 = Bytes.toBytes(COL2);
    final byte[] col3 = Bytes.toBytes(COMPOSITE_COLUMN);
    ArrayList<Put> rowsUpdate = new ArrayList<Put>();
    // write rows with more than one column
    int i = 0;
    for (; i < TOTAL_ROWS - ROWS_WITH_ONE_COL; i++) {
      byte[] row = Bytes.toBytes("row" + i);
      Put put = new Put(row);
      put.add(family, col1, value);
      put.add(family, col2, value);
      put.add(family, col3, value);
      rowsUpdate.add(put);
    }

    // write the remaining rows with only one column
    for (; i < TOTAL_ROWS; i++) {
      byte[] row = Bytes.toBytes("row" + i);
      Put put = new Put(row);
      put.add(family, col2, value);
      rowsUpdate.add(put);
    }
    table.put(rowsUpdate);
  }
 private void loadData(Table table) throws IOException {
   for (int i = 0; i < ROWSIZE; i++) {
     Put put = new Put(ROWS[i]);
     put.addColumn(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
     table.put(put);
   }
 }
  @Before
  public void setUp() throws Exception {
    TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(namespace).build());
    try (Table table =
        TEST_UTIL.createTable(
            tableName, new String[] {Bytes.toString(TEST_FAMILY), Bytes.toString(TEST_FAMILY_2)})) {
      TEST_UTIL.waitTableEnabled(tableName);

      List<Put> puts = new ArrayList<Put>(5);
      Put put_1 = new Put(TEST_ROW);
      put_1.addColumn(TEST_FAMILY, Q1, value1);

      Put put_2 = new Put(TEST_ROW_2);
      put_2.addColumn(TEST_FAMILY, Q2, value2);

      Put put_3 = new Put(TEST_ROW_3);
      put_3.addColumn(TEST_FAMILY_2, Q1, value1);

      puts.add(put_1);
      puts.add(put_2);
      puts.add(put_3);

      table.put(puts);
    }

    assertEquals(1, AccessControlLists.getTablePermissions(conf, tableName).size());
    try {
      assertEquals(
          1, AccessControlClient.getUserPermissions(connection, tableName.toString()).size());
    } catch (Throwable e) {
      LOG.error("Error during call of AccessControlClient.getUserPermissions. ", e);
    }
    // setupOperations();
  }
Example #9
  @Test(timeout = 300000)
  public void testReplicationWithCellTags() throws Exception {
    LOG.info("testSimplePutDelete");
    Put put = new Put(ROW);
    put.setAttribute("visibility", Bytes.toBytes("myTag3"));
    put.add(FAMILY, ROW, ROW);

    htable1 = utility1.getConnection().getTable(TABLE_NAME);
    htable1.put(put);

    Get get = new Get(ROW);
    try {
      for (int i = 0; i < NB_RETRIES; i++) {
        if (i == NB_RETRIES - 1) {
          fail("Waited too much time for put replication");
        }
        Result res = htable2.get(get);
        if (res.size() == 0) {
          LOG.info("Row not available");
          Thread.sleep(SLEEP_TIME);
        } else {
          assertArrayEquals(res.value(), ROW);
          assertEquals(1, TestCoprocessorForTagsAtSink.tags.size());
          Tag tag = TestCoprocessorForTagsAtSink.tags.get(0);
          assertEquals(TAG_TYPE, tag.getType());
          break;
        }
      }
    } finally {
      TestCoprocessorForTagsAtSink.tags = null;
    }
  }
Example #10
 public void putBatch(Optional<List<Request>> putRequests, boolean optimize) {
   if (!valid) {
     Logger.error("CANNOT PUT! NO VALID CONNECTION");
     return;
   }
   List<Put> puts = new ArrayList<>();
   if (putRequests.isPresent() && !putRequests.get().isEmpty()) {
     String tableName = putRequests.get().get(0).table;
     putRequests
         .get()
         .forEach(
             pr ->
                 pr.getPut()
                     .ifPresent(
                         p -> {
                           if (optimize) {
                             p.setDurability(Durability.SKIP_WAL);
                           }
                           puts.add(p);
                         }));
     try (Table table = connection.getTable(TableName.valueOf(tableName))) {
       if (optimize && table instanceof HTable) {
         // setAutoFlush only exists on the old HTable client; BufferedMutator is the newer way to batch
         ((HTable) table).setAutoFlush(false, true);
       }
       table.put(puts);
     } catch (IOException e) {
       e.printStackTrace();
     }
   }
 }
Example #11
  public static void main(String[] args) throws Exception {
    conf.set("hbase.zookeeper.quorum", "hadoop271.itversity.com");
    conf.set("hbase.zookeeper.property.clientPort", "2181");

    Connection connection = ConnectionFactory.createConnection(conf);
    Table table = connection.getTable(TableName.valueOf("demo"));

    Scan scan1 = new Scan();
    ResultScanner scanner1 = table.getScanner(scan1);

    for (Result res : scanner1) {
      System.out.println(Bytes.toString(res.getRow()));
      System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
      System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
    }

    scanner1.close();

    Put put = new Put("3".getBytes());

    put.addColumn("cf1".getBytes(), "column1".getBytes(), "value1".getBytes());
    put.addColumn("cf1".getBytes(), "column2".getBytes(), "value2".getBytes());

    table.put(put);

    Get get = new Get("3".getBytes());
    Result getResult = table.get(get);
    System.out.println("Printing colunns for rowkey 3");
    System.out.println(Bytes.toString(getResult.getValue("cf1".getBytes(), "column1".getBytes())));
    System.out.println(Bytes.toString(getResult.getValue("cf1".getBytes(), "column2".getBytes())));

    scanner1 = table.getScanner(scan1);
    System.out.println("Before Delete");
    for (Result res : scanner1) {
      System.out.println(Bytes.toString(res.getRow()));
      System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
      System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
    }

    scanner1.close();

    Delete del = new Delete("3".getBytes());
    table.delete(del);

    System.out.println("After Delete");

    scanner1 = table.getScanner(scan1);

    for (Result res : scanner1) {
      System.out.println(Bytes.toString(res.getRow()));
      System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
      System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
    }

    scanner1.close();
    table.close();
    connection.close();
  }
Example #12
  /** Test copying all rows in family a from sourceTable to targetTable, renaming the family to b. */
  @Test
  public void testRenameFamily() throws Exception {
    TableName sourceTable = TableName.valueOf("sourceTable");
    TableName targetTable = TableName.valueOf("targetTable");

    byte[][] families = {FAMILY_A, FAMILY_B};

    Table t = TEST_UTIL.createTable(sourceTable, families);
    Table t2 = TEST_UTIL.createTable(targetTable, families);
    Put p = new Put(ROW1);
    p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data11"));
    p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data12"));
    p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data13"));
    t.put(p);
    p = new Put(ROW2);
    p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Dat21"));
    p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data22"));
    p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23"));
    t.put(p);

    long currentTime = System.currentTimeMillis();
    String[] args =
        new String[] {
          "--new.name=" + targetTable,
          "--families=a:b",
          "--all.cells",
          "--starttime=" + (currentTime - 100000),
          "--endtime=" + (currentTime + 100000),
          "--versions=1",
          sourceTable.getNameAsString()
        };
    assertNull(t2.get(new Get(ROW1)).getRow());

    assertTrue(runCopy(args));

    assertNotNull(t2.get(new Get(ROW1)).getRow());
    Result res = t2.get(new Get(ROW1));
    byte[] b1 = res.getValue(FAMILY_B, QUALIFIER);
    assertEquals("Data13", new String(b1));
    assertNotNull(t2.get(new Get(ROW2)).getRow());
    res = t2.get(new Get(ROW2));
    b1 = res.getValue(FAMILY_A, QUALIFIER);
    // Data from family b is not copied
    assertNull(b1);
  }
Example #13
 public void put(String tablename, Put p) {
   try (Table table = connection.getTable(TableName.valueOf(tablename))) {
     table.put(p);
   } catch (IOException e) {
     e.printStackTrace();
   }
 }
 private void populateTable(Table tbl) throws IOException {
   byte[] values = {'A', 'B', 'C', 'D'};
   List<Put> puts = new ArrayList<>();
   for (int i = 0; i < values.length; i++) {
     for (int j = 0; j < values.length; j++) {
       Put put = new Put(new byte[] {values[i], values[j]});
       put.addColumn(Bytes.toBytes("fam"), new byte[] {}, new byte[] {values[i], values[j]});
       puts.add(put);
     }
   }
   tbl.put(puts);
 }
 public void prepareTestData() throws Exception {
   try {
     util.getHBaseAdmin().disableTable(TABLE);
     util.getHBaseAdmin().deleteTable(TABLE);
   } catch (Exception e) {
     // ignore table not found
   }
   table = util.createTable(TABLE, FAM);
   {
     Put put = new Put(ROW);
     put.addColumn(FAM, A, Bytes.add(B, C)); // B, C are friends of A
     put.addColumn(FAM, B, Bytes.add(D, E, F)); // D, E, F are friends of B
     put.addColumn(FAM, C, G); // G is a friend of C
     table.put(put);
     rowSize = put.size();
   }
   Put put = new Put(ROW2);
   put.addColumn(FAM, D, E);
   put.addColumn(FAM, F, G);
   table.put(put);
   row2Size = put.size();
 }
 static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
     throws Exception {
   List<Put> puts = new ArrayList<Put>();
   for (int i = 0; i < labelExps.length; i++) {
     Put put = new Put(Bytes.toBytes("row" + (i + 1)));
     put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO);
     put.setCellVisibility(new CellVisibility(labelExps[i]));
     puts.add(put);
   }
   Table table = TEST_UTIL.createTable(tableName, TEST_FAMILY);
   table.put(puts);
   return table;
 }
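 // The CellVisibility expressions above rely on the labels having been defined beforehand
 // (typically once, by an admin user). A minimal sketch, not from the original test, of
 // defining them with the VisibilityClient admin API; the label names are illustrative and
 // the exact addLabels overload varies slightly across HBase versions.
 static void defineVisibilityLabels() throws Throwable {
   VisibilityClient.addLabels(
       TEST_UTIL.getConnection(), new String[] {"secret", "confidential"});
 }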
Example #17
  /**
   * Update a record in the database. Any field/value pairs in the specified values HashMap will be
   * written into the record with the specified record key, overwriting any existing values with the
   * same field name.
   *
   * @param table The name of the table
   * @param key The record key of the record to write
   * @param values A HashMap of field/value pairs to update in the record
   * @return Zero on success, a non-zero error code on error
   */
  @Override
  public Status update(String table, String key, HashMap<String, ByteIterator> values) {
    // if this is a "new" table, init HTable object. Else, use existing one
    if (!tableName.equals(table)) {
      currentTable = null;
      try {
        getHTable(table);
        tableName = table;
      } catch (IOException e) {
        System.err.println("Error accessing HBase table: " + e);
        return Status.ERROR;
      }
    }

    if (debug) {
      System.out.println("Setting up put for key: " + key);
    }
    Put p = new Put(Bytes.toBytes(key));
    p.setDurability(durability);
    for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
      byte[] value = entry.getValue().toArray();
      if (debug) {
        System.out.println(
            "Adding field/value "
                + entry.getKey()
                + "/"
                + Bytes.toStringBinary(value)
                + " to put request");
      }
      p.addColumn(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value);
    }

    try {
      if (clientSideBuffering) {
        Preconditions.checkNotNull(bufferedMutator);
        bufferedMutator.mutate(p);
      } else {
        currentTable.put(p);
      }
    } catch (IOException e) {
      if (debug) {
        System.err.println("Error doing put: " + e);
      }
      return Status.ERROR;
    } catch (ConcurrentModificationException e) {
      // do nothing for now...hope this is rare
      return Status.ERROR;
    }

    return Status.OK;
  }
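  // The getHTable(...) helper and the bufferedMutator field referenced above are not shown in
  // this excerpt. A minimal sketch of how they might be wired up with the standard client API,
  // assuming a Connection field named "connection"; this is illustrative, not the original code.
  private void getHTable(String table) throws IOException {
    TableName tn = TableName.valueOf(table);
    currentTable = connection.getTable(tn);
    if (clientSideBuffering) {
      // BufferedMutator queues mutations client-side and flushes them in batches.
      BufferedMutatorParams params =
          new BufferedMutatorParams(tn).writeBufferSize(4 * 1024 * 1024);
      bufferedMutator = connection.getBufferedMutator(params);
    }
  }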
Example #18
  private void doCopyTableTest(boolean bulkload) throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testCopyTable1");
    final TableName TABLENAME2 = TableName.valueOf("testCopyTable2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");

    try (Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
        Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY)) {
      // put rows into the first table
      for (int i = 0; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(FAMILY, COLUMN1, COLUMN1);
        t1.put(p);
      }

      CopyTable copy = new CopyTable();

      int code;
      if (bulkload) {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(),
                  "--bulkload",
                  TABLENAME1.getNameAsString()
                });
      } else {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(), TABLENAME1.getNameAsString()
                });
      }
      assertEquals("copy job failed", 0, code);

      // verify the data was copied into table 2
      for (int i = 0; i < 10; i++) {
        Get g = new Get(Bytes.toBytes("row" + i));
        Result r = t2.get(g);
        assertEquals(1, r.size());
        assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));
      }
    } finally {
      TEST_UTIL.deleteTable(TABLENAME1);
      TEST_UTIL.deleteTable(TABLENAME2);
    }
  }
  @Test(timeout = 60000)
  public void testExceptionFromCoprocessorDuringPut() throws Exception {
    // set configure to indicate which cp should be loaded
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BuggyRegionObserver.class.getName());
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
    TEST_UTIL.startMiniCluster(2);
    try {
      // When we try to write to TEST_TABLE, the buggy coprocessor will
      // cause a NullPointerException, which will cause the regionserver (which
      // hosts the region we attempted to write to) to abort.
      final byte[] TEST_FAMILY = Bytes.toBytes("aaa");

      Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, TEST_FAMILY);
      TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);

      // Note which regionServer will abort (after put is attempted).
      final HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);

      try {
        final byte[] ROW = Bytes.toBytes("aaa");
        Put put = new Put(ROW);
        put.add(TEST_FAMILY, ROW, ROW);
        table.put(put);
      } catch (IOException e) {
        // The region server is going to be aborted.
        // We may get an exception if we retry,
        // which is not guaranteed.
      }

      // Wait 10 seconds for the regionserver to abort: expected result is that
      // it will abort.
      boolean aborted = false;
      for (int i = 0; i < 10; i++) {
        aborted = regionServer.isAborted();
        if (aborted) {
          break;
        }
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          fail("InterruptedException while waiting for regionserver " + "zk node to be deleted.");
        }
      }
      Assert.assertTrue("The region server should have aborted", aborted);
      table.close();
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
Example #20
  @Test
  public void createTableTest() throws IOException, InterruptedException {
    String testName = "createTableTest";
    String nsName = prefix + "_" + testName;
    LOG.info(testName);

    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(nsName + ":my_table"));
    HColumnDescriptor colDesc = new HColumnDescriptor("my_cf");
    desc.addFamily(colDesc);
    try {
      admin.createTable(desc);
      fail("Expected no namespace exists exception");
    } catch (NamespaceNotFoundException ex) {
    }
    // create the namespace, then create the table in it
    admin.createNamespace(NamespaceDescriptor.create(nsName).build());
    admin.createTable(desc);
    TEST_UTIL.waitTableAvailable(desc.getTableName().getName(), 10000);
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    assertTrue(
        fs.exists(
            new Path(
                master.getMasterFileSystem().getRootDir(),
                new Path(
                    HConstants.BASE_NAMESPACE_DIR,
                    new Path(nsName, desc.getTableName().getQualifierAsString())))));
    assertEquals(1, admin.listTables().length);

    // verify non-empty namespace can't be removed
    try {
      admin.deleteNamespace(nsName);
      fail("Expected non-empty namespace constraint exception");
    } catch (Exception ex) {
      LOG.info("Caught expected exception: " + ex);
    }

    // sanity check try to write and read from table
    Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("my_cf"), Bytes.toBytes("my_col"), Bytes.toBytes("value1"));
    table.put(p);
    // flush and read from disk to make sure directory changes are working
    admin.flush(desc.getTableName());
    Get g = new Get(Bytes.toBytes("row1"));
    assertTrue(table.exists(g));

    // normal case of removing namespace
    TEST_UTIL.deleteTable(desc.getTableName());
    admin.deleteNamespace(nsName);
  }
Example #21
  /** Import all of the nodes from the parsed XML into the HBase table as a node. */
  private static void import_nodes() {
    System.out.println("Importing nodes...");
    Table nodeTable = Util.get_table("node");
    if (nodeTable == null) {
      System.err.println("Node table failed to load.");
      return;
    }

    int counter = 0;
    int batch = 500;
    List<Put> puts = new ArrayList<>();
    for (Map.Entry<Long, Node> pair : nodes.entrySet()) {
      Node node = pair.getValue();
      Put p = new Put(Bytes.toBytes(node.getGeohash()));
      p.addColumn(DATA, TAGS, Bytes.toBytes(node.getTagsWithIDAsSerializedJSON()));
      puts.add(p);
      counter += 1;
      if (counter % batch == 0) {
        try {
          System.out.print("\rBatch " + counter + " / " + nodes.size());
          nodeTable.put(puts);
          puts.clear();
        } catch (IOException e) {
          System.err.println("Node put failed");
          e.printStackTrace();
        }
      }
    }
    try {
      nodeTable.put(puts);
      puts.clear();
    } catch (IOException e) {
      System.out.println("Node put failed");
      e.printStackTrace();
    }
    System.out.println("Added all nodes!");
  }
  public static void main(String[] args) throws IOException {

    Configuration conf = HBaseClientHelper.loadDefaultConfiguration();

    Connection connection = ConnectionFactory.createConnection(conf);

    try {

      Table table = connection.getTable(TableName.valueOf("testtable"));
      try {
        // 1 Put
        Put p = new Put(Bytes.toBytes("row1"));
        p.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));

        table.put(p);

        // 2 Get
        Get g = new Get(Bytes.toBytes("row1"));
        Result r = table.get(g);
        byte[] value = r.getValue(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
        String valueStr = Bytes.toString(value);
        System.out.println("GET: " + valueStr);

        // 3 Scan
        Scan s = new Scan();
        s.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
        ResultScanner scanner = table.getScanner(s);
        try {
          for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
            System.out.println("Found row: " + rr);
          }

          // The other approach is to use a foreach loop. Scanners are
          // iterable!
          // for (Result rr : scanner) {
          // System.out.println("Found row: " + rr);
          // }
        } finally {
          scanner.close();
        }

        // Close your table and cluster connection.
      } finally {
        if (table != null) table.close();
      }
    } finally {
      connection.close();
    }
  }
    public static void updateExistingEventsToEndAtLastLocation(
        Table VTEvent_Table, long imo, VesselLocation lastlocation) throws IOException {
      // Update existing events that started BEFORE the first new location and that end after it,
      // so that they now end at the last location.

      // Find existing events that started BEFORE the first new location and end after it
      Scan getEventStartedBeforeAndEndAfter = new Scan();
      getEventStartedBeforeAndEndAfter
          .setStartRow(
              Bytes.toBytes(
                  LpadNum(imo, 7)
                      + LpadNum(Long.MAX_VALUE - lastlocation.recordtime, 19)
                      + "0000000000"))
          .setStopRow(Bytes.toBytes(LpadNum(imo, 7) + LpadNum(Long.MAX_VALUE, 19) + "9999999999"))
          .addColumn(details, exittime);
      getEventStartedBeforeAndEndAfter.setCaching(100);

      Filter ExistTimeValuefilter =
          new ValueFilter(
              CompareFilter.CompareOp.GREATER,
              new BinaryComparator(
                  Bytes.toBytes(new DateTime(lastlocation.recordtime).toString(rawformatter))));
      getEventStartedBeforeAndEndAfter.setFilter(ExistTimeValuefilter);

      ResultScanner Result_eventcross = VTEvent_Table.getScanner(getEventStartedBeforeAndEndAfter);

      List<Put> puts = new ArrayList<Put>();
      for (Result res : Result_eventcross) {

        // vessel event table
        // rowkey: imo(7)+timestamp(19 desc)+polygonid(8)
        // qualifier:entrytime,entrycoordinates,exittime,exitcoordinates,destination
        byte[] rowkey = res.getRow();
        Put updateevent = new Put(rowkey);
        updateevent.addColumn(
            details,
            exittime,
            Bytes.toBytes(new DateTime(lastlocation.recordtime).toString(rawformatter)));
        updateevent.addColumn(details, coordinates, Bytes.toBytes(lastlocation.coordinates));
        updateevent.addColumn(details, destination, Bytes.toBytes(lastlocation.destination));
        puts.add(updateevent);
      }

      Result_eventcross.close();
      VTEvent_Table.put(puts);
    }
    // populate tables
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(enabledTables);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      TableName tableName = selected.getTableName();
      try (Table table = connection.getTable(tableName)) {
        ArrayList<HRegionInfo> regionInfos =
            new ArrayList<HRegionInfo>(admin.getTableRegions(selected.getTableName()));
        int numRegions = regionInfos.size();
        // average number of rows to be added per action to each region
        int average_rows = 1;
        int numRows = average_rows * numRegions;
        LOG.info("Adding " + numRows + " rows to table: " + selected);
        for (int i = 0; i < numRows; i++) {
          // nextInt(Integer.MAX_VALUE) to return positive numbers only
          byte[] rowKey =
              Bytes.toBytes(
                  "row-" + String.format("%010d", RandomUtils.nextInt(Integer.MAX_VALUE)));
          HColumnDescriptor cfd = selectFamily(selected);
          if (cfd == null) {
            return;
          }
          byte[] family = cfd.getName();
          byte[] qualifier = Bytes.toBytes("col-" + RandomUtils.nextInt(Integer.MAX_VALUE) % 10);
          byte[] value = Bytes.toBytes("val-" + RandomStringUtils.randomAlphanumeric(10));
          Put put = new Put(rowKey);
          put.addColumn(family, qualifier, value);
          table.put(put);
        }
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        enabledTables.put(tableName, freshTableDesc);
        LOG.info("Added " + numRows + " rows to table: " + selected);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #25
 public void put(Optional<Request> putRequest) {
   if (!valid) {
     Logger.error("CANNOT PUT! NO VALID CONNECTION");
     return;
   }
   putRequest.ifPresent(
       pr ->
           pr.getPut()
               .ifPresent(
                   p -> {
                      try (Table table = connection.getTable(TableName.valueOf(pr.table))) {
                        table.put(p);
                      } catch (IOException e) {
                        e.printStackTrace();
                      }
                   }));
 }
 @Test
 public void test() throws IOException, InterruptedException {
   testUtil
       .getHBaseAdmin()
       .createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
   Table table = testUtil.createTable(tableName, families);
   table.put(
       new Put(Bytes.toBytes("k")).addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
   MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
   List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
   Region region = null;
   for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
     HRegionServer hrs = rsts.get(i).getRegionServer();
     for (Region r : hrs.getOnlineRegions(tableName)) {
       region = r;
       break;
     }
   }
   assertNotNull(region);
   Thread.sleep(2000);
   RegionStoreSequenceIds ids =
       testUtil
           .getHBaseCluster()
           .getMaster()
           .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
   assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
   // This will be the sequenceid just before that of the earliest edit in memstore.
   long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
   assertTrue(storeSequenceId > 0);
   testUtil.getHBaseAdmin().flush(tableName);
   Thread.sleep(2000);
   ids =
       testUtil
           .getHBaseCluster()
           .getMaster()
           .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
   assertTrue(
       ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
       ids.getLastFlushedSequenceId() > storeSequenceId);
   assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
   table.close();
 }
  /**
   * Added by linjy on 2016-01-06.
   *
   * <p>Saves the per-day, per-city job-count analysis results to HBase.
   *
   * @param resultLists one map per result row (total count, city, position name, create date)
   */
  private void saveDayCityPCount(List<Map<String, String>> resultLists) {
    //		String dateTime = DateUtils.getYest0day("yyyyMMdd");
    try {
      Connection conn = HBaseUtils.getConnection();
      Table table = conn.getTable(TableName.valueOf("SalaryInfoResult".getBytes()));
      logger.info("保存每天,每个城市的职位数的数量的结果开始......统计结果数量:" + resultLists.size());
      List<Put> puts = new ArrayList<Put>();
      for (Map<String, String> map : resultLists) {
        String totalCount = map.get("totalcount");
        String city = map.get("city");
        String positionName = map.get("positionname");
        String createDate = map.get("createdate");
        String rowKey = createDate + getPostitionNameSX(positionName) + city;

        Put put = new Put(rowKey.getBytes());
        put.addColumn(
            "ResultInfoFamily".getBytes(), "resultCount".getBytes(), totalCount.getBytes());
        put.addColumn("ResultInfoFamily".getBytes(), "city".getBytes(), city.getBytes());
        put.addColumn(
            "ResultInfoFamily".getBytes(), "positionName".getBytes(), positionName.getBytes());
        put.addColumn(
            "ResultInfoFamily".getBytes(), "createDate".getBytes(), createDate.getBytes());
        put.addColumn(
            "ResultInfoFamily".getBytes(),
            "insertDate".getBytes(),
            (DateUtils.getDateFormat(new Date(), "yyyyMMdd")).getBytes());
        put.addColumn("ResultInfoFamily".getBytes(), "flag".getBytes(), "0".getBytes());

        puts.add(put);

        //				System.out.println(positionName + "    " + rowKey);
      }
      // insert the data
      table.put(puts);
      // close the table (flushes any buffered writes)
      table.close();
      logger.info("Finished saving per-day, per-city job counts.");
    } catch (Exception e) {
      e.printStackTrace();
      logger.error("出入结果数据到HBase总出错,出错原因:" + e.getMessage());
    }
  }
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();

    HBaseHelper helper = HBaseHelper.getHelper(conf);
    helper.dropTable("testtable");
    helper.createTable("testtable", "colfam1");

    Connection connection = ConnectionFactory.createConnection(conf);
    Table table = connection.getTable(TableName.valueOf("testtable"));

    List<Put> puts = new ArrayList<Put>();

    // PutListErrorExample2
    Put put1 = new Put(Bytes.toBytes("row1"));
    put1.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
    puts.add(put1);
    Put put2 = new Put(Bytes.toBytes("row2"));
    put2.addColumn(Bytes.toBytes("BOGUS"), Bytes.toBytes("qual1"), Bytes.toBytes("val2"));
    puts.add(put2);
    Put put3 = new Put(Bytes.toBytes("row2"));
    put3.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val3"));
    puts.add(put3);
    // Add a put with no content at all to the list; it triggers a client-side error.
    Put put4 = new Put(Bytes.toBytes("row2"));
    puts.add(put4);

    try {
      table.put(puts);
    } catch (Exception e) {
      // Catch the local exception; with the old client-side write buffer this is where
      // flushCommits() was used to commit the updates already queued.
      System.err.println("Error: " + e);
    }
    table.close();
    connection.close();
    helper.close();
  }
Example #29
 private static void loadData(
     final Table ht, final byte[][] families, final int rows, final int flushes)
     throws IOException {
   List<Put> puts = new ArrayList<Put>(rows);
   byte[] qualifier = Bytes.toBytes("val");
   for (int i = 0; i < flushes; i++) {
     for (int k = 0; k < rows; k++) {
       byte[] row = Bytes.toBytes(random.nextLong());
       Put p = new Put(row);
       for (int j = 0; j < families.length; ++j) {
         p.add(families[j], qualifier, row);
       }
       puts.add(p);
     }
     ht.put(puts);
     // Table#put(List) writes through immediately; the old HTable#flushCommits() call is not
     // needed and does not exist on the Table interface.
     TEST_UTIL.flush();
     puts.clear();
   }
 }
Example #30
  // insert data
  public static void insert(String tableName, String key, Map<String, Map<String, String>> data) {
    TableName tn = TableName.valueOf(tableName);
    try (Table table = conn.getTable(tn)) {
      // build a single Put for the row key, covering every column family and qualifier
      Put put = new Put(Bytes.toBytes(key));
      for (Entry<String, Map<String, String>> colFamily : data.entrySet()) {
        String colFamilyName = colFamily.getKey();
        for (Entry<String, String> attr : colFamily.getValue().entrySet()) {
          String attrName = attr.getKey();
          String attrValue = attr.getValue();
          put.addColumn(
              Bytes.toBytes(colFamilyName), Bytes.toBytes(attrName), Bytes.toBytes(attrValue));
        }
      }
      // send the fully assembled Put once, then close the table
      table.put(put);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
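  // Hypothetical usage of insert(), showing the shape of the nested map it expects
  // (column family -> qualifier -> value); the table, family, and qualifier names are
  // illustrative only, not taken from the original code.
  public static void insertExample() {
    Map<String, Map<String, String>> data = new HashMap<String, Map<String, String>>();
    Map<String, String> article = new HashMap<String, String>();
    article.put("title", "hello");
    article.put("content", "world");
    data.put("article", article); // column family "article" with two qualifiers
    insert("demo", "row1", data);
  }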