@Before
  public void setUp() throws Exception {
    TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(namespace).build());
    try (Table table =
        TEST_UTIL.createTable(
            tableName, new String[] {Bytes.toString(TEST_FAMILY), Bytes.toString(TEST_FAMILY_2)})) {
      TEST_UTIL.waitTableEnabled(tableName);

      List<Put> puts = new ArrayList<Put>(5);
      Put put_1 = new Put(TEST_ROW);
      put_1.addColumn(TEST_FAMILY, Q1, value1);

      Put put_2 = new Put(TEST_ROW_2);
      put_2.addColumn(TEST_FAMILY, Q2, value2);

      Put put_3 = new Put(TEST_ROW_3);
      put_3.addColumn(TEST_FAMILY_2, Q1, value1);

      puts.add(put_1);
      puts.add(put_2);
      puts.add(put_3);

      table.put(puts);
    }

    assertEquals(1, AccessControlLists.getTablePermissions(conf, tableName).size());
    try {
      assertEquals(
          1, AccessControlClient.getUserPermissions(connection, tableName.toString()).size());
    } catch (Throwable e) {
      LOG.error("Error during call of AccessControlClient.getUserPermissions. ", e);
    }
    // setupOperations();
  }
  @Test
  public void testTableWithCFNameStartWithUnderScore() throws Exception {
    Path dir = util.getDataTestDirOnTestFS("cfNameStartWithUnderScore");
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    String family = "_cf";
    Path familyDir = new Path(dir, family);

    byte[] from = Bytes.toBytes("begin");
    byte[] to = Bytes.toBytes("end");
    Configuration conf = util.getConfiguration();
    String tableName = "mytable_cfNameStartWithUnderScore";
    Table table = util.createTable(TableName.valueOf(tableName), family);
    HFileTestUtil.createHFile(
        conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family), QUALIFIER, from, to, 1000);

    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
    String[] args = {dir.toString(), tableName};
    try {
      loader.run(args);
      assertEquals(1000, util.countRows(table));
    } finally {
      if (null != table) {
        table.close();
      }
    }
  }
Example #3
  @Test(timeout = 300000)
  public void testReplicationWithCellTags() throws Exception {
    LOG.info("testReplicationWithCellTags");
    Put put = new Put(ROW);
    put.setAttribute("visibility", Bytes.toBytes("myTag3"));
    put.add(FAMILY, ROW, ROW);

    htable1 = utility1.getConnection().getTable(TABLE_NAME);
    htable1.put(put);

    Get get = new Get(ROW);
    try {
      for (int i = 0; i < NB_RETRIES; i++) {
        if (i == NB_RETRIES - 1) {
          fail("Waited too much time for put replication");
        }
        Result res = htable2.get(get);
        if (res.size() == 0) {
          LOG.info("Row not available");
          Thread.sleep(SLEEP_TIME);
        } else {
          assertArrayEquals(ROW, res.value());
          assertEquals(1, TestCoprocessorForTagsAtSink.tags.size());
          Tag tag = TestCoprocessorForTagsAtSink.tags.get(0);
          assertEquals(TAG_TYPE, tag.getType());
          break;
        }
      }
    } finally {
      TestCoprocessorForTagsAtSink.tags = null;
    }
  }
Example #4
 public void putBatch(Optional<List<Request>> putRequests, boolean optimize) {
   if (!valid) {
     Logger.error("CANNOT PUT! NO VALID CONNECTION");
     return;
   }
   List<Put> puts = new ArrayList<>();
   if (putRequests.isPresent() && !putRequests.get().isEmpty()) {
     String tableName = putRequests.get().get(0).table;
     putRequests
         .get()
         .forEach(
             pr ->
                 pr.getPut()
                     .ifPresent(
                         p -> {
                           if (optimize) {
                             p.setDurability(Durability.SKIP_WAL);
                           }
                           puts.add(p);
                         }));
     try (Table table = connection.getTable(TableName.valueOf(tableName))) {
       if (optimize && table instanceof HTable) {
         // Buffer writes client-side; they are flushed when the table is closed.
         ((HTable) table).setAutoFlush(false, true);
       }
       table.put(puts);
     } catch (IOException e) {
       e.printStackTrace();
     }
   }
 }
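A minimal usage sketch for putBatch; the wrapper instance (client) and the helper that produces the Request list are illustrative assumptions, not part of the original code.

 // Hypothetical usage of putBatch (client and buildRequests() are assumed names):
 List<Request> requests = buildRequests();
 client.putBatch(Optional.of(requests), true);  // optimize: SKIP_WAL and buffered writes
 client.putBatch(Optional.empty(), false);      // an empty Optional makes the call a no-op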
Example #5
 public Optional<Response> get(Optional<Request> request) {
   if (!valid) {
     Logger.error("CANNOT GET! NO VALID CONNECTION");
     return Optional.empty();
   }
   Response response = new Response();
   if (request.isPresent()) {
     Request r = request.get();
     response.key = r.key;
     response.table = r.table;
     try (Table htable = connection.getTable(TableName.valueOf(r.table))) {
       Result result = htable.get(new Get(r.key));
       if (result == null || result.isEmpty()) {
         return Optional.empty();
       }
       r.columns.forEach(
           c ->
               response.columns.add(
                   new Request.Column(
                       c.family,
                       c.qualifier,
                       result.getValue(c.family.getBytes(), c.qualifier.getBytes()))));
     } catch (IOException e) {
       e.printStackTrace();
     }
   }
   return Optional.of(response);
 }
Example #6
 public Optional<byte[]> get(
     Optional<String> table,
     Optional<String> family,
     Optional<String> qualifier,
     Optional<String> key) {
   if (!valid) {
     Logger.error("CANNOT GET! NO VALID CONNECTION");
     return Optional.empty();
   }
   if (table.isPresent()
       && family.isPresent()
       && qualifier.isPresent()
       && key.isPresent()
       && !key.get().isEmpty()) {
     try (Table htable = connection.getTable(TableName.valueOf(table.get()))) {
       Result result = htable.get(new Get(key.get().getBytes("UTF8")));
       return Optional.ofNullable(
           result.getValue(family.get().getBytes("UTF8"), qualifier.get().getBytes("UTF8")));
     } catch (IOException e) {
       e.printStackTrace();
     }
   }
   return Optional.empty();
 }
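A short, hypothetical usage of the Optional-based get; the wrapper instance, table, family, qualifier, and row key are assumptions for illustration.

 // Hypothetical usage (names are assumptions, not from the original source):
 Optional<byte[]> cell =
     client.get(Optional.of("users"), Optional.of("info"), Optional.of("name"), Optional.of("row1"));
 cell.map(Bytes::toString).ifPresent(System.out::println);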
 private void verify(TableName tableName) throws IOException {
   Table table = UTIL.getConnection().getTable(tableName);
   boolean verified = false;
   long pause = UTIL.getConfiguration().getLong("hbase.client.pause", 5 * 1000);
   int numRetries = UTIL.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
   for (int i = 0; i < numRetries; i++) {
     try {
       LOG.info("Verification attempt #" + i);
       verifyAttempt(table);
       verified = true;
       break;
     } catch (NullPointerException e) {
        // If here, a cell was empty. Presume it's because updates came in
        // after the scanner had been opened. Wait a while and retry.
       LOG.debug("Verification attempt failed: " + e.getMessage());
     }
     try {
       Thread.sleep(pause);
     } catch (InterruptedException e) {
       // continue
     }
   }
    // Close the table before asserting so the handle is released even when verification fails.
    table.close();
    assertTrue(verified);
 }
Example #8
 public Map<String, Long> getRegionSizes(String tableName) {
   Map<String, Long> regions = new HashMap<>();
   try (RegionLocator regionLocator =
           connection.getRegionLocator(TableName.valueOf(tableName));
       Admin admin = connection.getAdmin()) {
     // Collect the names of the regions that belong to the requested table.
     List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
     Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
     for (HRegionLocation regionInfo : tableRegionInfos) {
       tableRegions.add(regionInfo.getRegionInfo().getRegionName());
     }
     ClusterStatus clusterStatus = admin.getClusterStatus();
     Collection<ServerName> servers = clusterStatus.getServers();
     final long megaByte = 1024L * 1024L;
     for (ServerName serverName : servers) {
       ServerLoad serverLoad = clusterStatus.getLoad(serverName);
       for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
         byte[] regionId = regionLoad.getName();
         if (tableRegions.contains(regionId)) {
           long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
           regions.put(regionLoad.getNameAsString(), regionSizeBytes);
         }
       }
     }
   } catch (IOException e) {
     e.printStackTrace();
   }
   return regions;
 }
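A brief usage sketch for getRegionSizes (the client instance and table name are assumptions). Note that the sizes come from RegionLoad.getStorefileSizeMB(), so they reflect on-disk store files at megabyte granularity and exclude data still in the memstore.

 // Hypothetical usage (client and table name are illustrative):
 Map<String, Long> regionSizes = client.getRegionSizes("my_table");
 regionSizes.forEach(
     (region, bytes) -> System.out.println(region + " -> " + bytes / (1024L * 1024L) + " MB"));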
Example #9
  public static int getDynamicTable(Configuration config) {
    /** Connection to the cluster. A single connection shared by all application threads. */
    Connection connection = null;
    /** A lightweight handle to a specific table. Used from a single thread. */
    Table table = null;

    try {
      connection = ConnectionFactory.createConnection(config);
      table = connection.getTable(TABLE_NAME1);
      Get get = new Get(Bytes.toBytes("cloudera"));
      get.addFamily(CF);
      get.setMaxVersions(Integer.MAX_VALUE);
      Result result = table.get(get);

      NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = result.getMap();
      for (Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> columnFamilyEntry :
          map.entrySet()) {
        NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap = columnFamilyEntry.getValue();
        for (Entry<byte[], NavigableMap<Long, byte[]>> columnEntry : columnMap.entrySet()) {
          NavigableMap<Long, byte[]> cellMap = columnEntry.getValue();
          for (Entry<Long, byte[]> cellEntry : cellMap.entrySet()) {
            System.out.println(
                String.format(
                    "Key : %s, Value :%s",
                    Bytes.toString(columnEntry.getKey()), Bytes.toString(cellEntry.getValue())));
          }
        }
      }
    } catch (IOException e) {
      e.printStackTrace();
    } finally {
      // Release the table handle and the cluster connection to avoid leaking resources.
      if (table != null) {
        try {
          table.close();
        } catch (IOException ignored) {
          // ignore failures on close
        }
      }
      if (connection != null) {
        try {
          connection.close();
        } catch (IOException ignored) {
          // ignore failures on close
        }
      }
    }

    return 0;
  }
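The comments above describe the intended lifecycle: a single heavyweight Connection shared across the application and short-lived Table handles used per operation. Below is a minimal sketch of that pattern with try-with-resources; the table, family, and row names are assumptions, and in a long-running application the Connection would normally be created once and reused, with only the Table handles scoped per call.

  // Sketch of the Connection/Table lifecycle described above (names are illustrative):
  try (Connection conn = ConnectionFactory.createConnection(config);
      Table t = conn.getTable(TableName.valueOf("example_table"))) {
    Result r = t.get(new Get(Bytes.toBytes("example_row")));
    System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("col"))));
  } catch (IOException e) {
    e.printStackTrace();
  }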
Example #10
  /**
   * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)}
   * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that the
   * family data block encoding map is correctly serialized into and deserialized from the
   * configuration.
   *
   * @throws IOException
   */
  @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
  @Test
  public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, DataBlockEncoding> familyToDataBlockEncoding =
          getMockColumnFamiliesForDataBlockEncoding(numCfs);
      Table table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding);
      HTableDescriptor tableDescriptor = table.getTableDescriptor();
      HFileOutputFormat2.configureDataBlockEncoding(tableDescriptor, conf);

      // read back family specific data block encoding settings from the
      // configuration
      Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap =
          HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf);

      // test that we have a value for all column families that matches with the
      // used mock values
      for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
        assertEquals(
            "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(),
            entry.getValue(),
            retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes()));
      }
    }
  }
Example #11
 /** @throws java.lang.Exception */
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
   TEST_UTIL.startMiniCluster();
   TEST_UTIL.startMiniMapReduceCluster();
   Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), Bytes.toBytes(COL_FAM));
   writeRows(table);
   table.close();
 }
  private void runTest(
      String testName,
      HTableDescriptor htd,
      BloomType bloomType,
      boolean preCreateTable,
      byte[][] tableSplitKeys,
      byte[][][] hfileRanges)
      throws Exception {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(
          util.getConfiguration(),
          fs,
          new Path(familyDir, "hfile_" + hfileIdx++),
          FAMILY,
          QUALIFIER,
          from,
          to,
          1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
    String[] args = {dir.toString(), tableName.toString()};
    loader.run(args);

    Table table = new HTable(util.getConfiguration(), tableName);
    try {
      assertEquals(expectedRows, util.countRows(table));
    } finally {
      table.close();
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        // Only the "DONOTERASE" marker is expected to remain after the load completes.
        assertTrue(
            "Folder=" + file.getPath() + " is not cleaned up.",
            "DONOTERASE".equals(file.getPath().getName()));
      }
    }

    util.deleteTable(tableName);
  }
Example #13
 public void put(String tablename, Put p) {
   try (Table table = connection.getTable(TableName.valueOf(tablename))) {
     table.put(p);
   } catch (IOException e) {
     e.printStackTrace();
   }
 }
Example #14
  @Test
  public void testStartStopRow() throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testStartStopRow1");
    final TableName TABLENAME2 = TableName.valueOf("testStartStopRow2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");
    final byte[] ROW0 = Bytes.toBytesBinary("\\x01row0");
    final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1");
    final byte[] ROW2 = Bytes.toBytesBinary("\\x01row2");

    Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
    Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

    // put rows into the first table
    Put p = new Put(ROW0);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW1);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW2);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);

    CopyTable copy = new CopyTable();
    assertEquals(
        0,
        ToolRunner.run(
            new Configuration(TEST_UTIL.getConfiguration()),
            copy,
            new String[] {
              "--new.name=" + TABLENAME2,
              "--startrow=\\x01row1",
              "--stoprow=\\x01row2",
              TABLENAME1.getNameAsString()
            }));

    // verify the data was copied into table 2
    // row1 should exist; row0 and row2 should not
    Get g = new Get(ROW1);
    Result r = t2.get(g);
    assertEquals(1, r.size());
    assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));

    g = new Get(ROW0);
    r = t2.get(g);
    assertEquals(0, r.size());

    g = new Get(ROW2);
    r = t2.get(g);
    assertEquals(0, r.size());

    t1.close();
    t2.close();
    TEST_UTIL.deleteTable(TABLENAME1);
    TEST_UTIL.deleteTable(TABLENAME2);
  }
  /**
   * Confirm the data loaded by ImportTsv by reading it back from the online table.
   *
   * @param dataAvailable whether rows are expected to be present in the table
   */
  private static void validateTable(
      Configuration conf,
      TableName tableName,
      String family,
      int valueMultiplier,
      boolean dataAvailable)
      throws IOException {

    LOG.debug("Validating table.");
    Connection connection = ConnectionFactory.createConnection(conf);
    Table table = connection.getTable(tableName);
    boolean verified = false;
    long pause = conf.getLong("hbase.client.pause", 5 * 1000);
    int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
    for (int i = 0; i < numRetries; i++) {
      try {
        Scan scan = new Scan();
        // Scan entire family.
        scan.addFamily(Bytes.toBytes(family));
        if (dataAvailable) {
          ResultScanner resScanner = table.getScanner(scan);
          for (Result res : resScanner) {
            LOG.debug("Getting results " + res.size());
            assertTrue(res.size() == 2);
            List<Cell> kvs = res.listCells();
            assertTrue(CellUtil.matchingRow(kvs.get(0), Bytes.toBytes("KEY")));
            assertTrue(CellUtil.matchingRow(kvs.get(1), Bytes.toBytes("KEY")));
            assertTrue(
                CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier)));
            assertTrue(
                CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier)));
            // Only one result set is expected, so let it loop.
            verified = true;
          }
        } else {
          ResultScanner resScanner = table.getScanner(scan);
          Result[] next = resScanner.next(2);
          assertEquals(0, next.length);
          verified = true;
        }

        break;
      } catch (NullPointerException e) {
        // If here, a cell was empty. Presume it's because updates came in
        // after the scanner had been opened. Wait a while and retry.
      }
      try {
        Thread.sleep(pause);
      } catch (InterruptedException e) {
        // continue
      }
    }
    table.close();
    connection.close();
    assertTrue(verified);
  }
Example #16
 @Override
 protected void closeHTable() {
   if (table != null) {
     try {
       table.close();
     } catch (Exception e) {
       LOG.error("Error in closing the table " + table.getName(), e);
     }
   }
 }
Example #17
 @Test
 public void testJobConfiguration() throws Exception {
   Job job = new Job(util.getConfiguration());
   job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
   Table table = Mockito.mock(Table.class);
   RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
   setupMockStartKeys(regionLocator);
   HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
   assertEquals(4, job.getNumReduceTasks());
 }
Example #18
 public Result get(String table, String family, byte[] key) {
   try (Table htable = connection.getTable(TableName.valueOf(table))) {
     // Restrict the Get to the requested column family; the table handle closes automatically.
     Get get = new Get(key);
     get.addFamily(Bytes.toBytes(family));
     return htable.get(get);
   } catch (IOException e) {
     e.printStackTrace();
   }
   return null;
 }
    // Get all events that start before and end (exit) at or after the last location's record time
    public static Map<Integer, VesselEvent> getAllEventsStartBeforeEndAfterBeforeLocation(
        Table VTEvent_Table, String IMO_str, VesselLocation location) throws IOException {
      Scan getAllEventsWithExistAtLastLocation = new Scan();
      getAllEventsWithExistAtLastLocation
          .setStartRow(
              Bytes.toBytes(
                  IMO_str + LpadNum(Long.MAX_VALUE - location.recordtime, 19) + "0000000000"))
          .setStopRow(Bytes.toBytes(IMO_str + LpadNum(Long.MAX_VALUE, 19) + "9999999999"))
          .addColumn(details, exittime);
      getAllEventsWithExistAtLastLocation.setCaching(100);

      Filter ExistTimeValuefilter =
          new ValueFilter(
              CompareFilter.CompareOp.GREATER_OR_EQUAL,
              new BinaryComparator(
                  Bytes.toBytes(new DateTime(location.recordtime).toString(rawformatter))));
      getAllEventsWithExistAtLastLocation.setFilter(ExistTimeValuefilter);

      ResultScanner Result_event = VTEvent_Table.getScanner(getAllEventsWithExistAtLastLocation);

      Map<Integer, VesselEvent> events = new HashMap<Integer, VesselEvent>();

      for (Result res : Result_event) {

        Get get = new Get(res.getRow());
        get.addColumn(details, entrytime);
        get.addColumn(details, entrycoordinates);

        Result result = VTEvent_Table.get(get);
        String rowkey = Bytes.toString(result.getRow());
        String polygonid = rowkey.substring(26);

        VesselEvent VE = new VesselEvent();
        VE.exittime = location.recordtime;
        VE.exitcoordinates = location.coordinates;
        VE.destination = location.destination;
        VE.polygonid = Integer.parseInt(polygonid);

        for (Cell cell : result.rawCells()) {
          String Qualifier = Bytes.toString(CellUtil.cloneQualifier(cell));
          String Value = Bytes.toString(CellUtil.cloneValue(cell));

          if (Qualifier.equals("entertime")) {
            VE.entrytime = DateTime.parse(Value, rawformatter).getMillis();
          } else if (Qualifier.equals("entercoordinates")) {
            VE.entrycoordinates = Value;
          }
        }

        events.put(VE.polygonid, VE);
      }

      Result_event.close();
      return events;
    }
Example #20
  public static void verifyMobRowCount(
      final HBaseTestingUtility util, final TableName tableName, long expectedRows)
      throws IOException {

    Table table = ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
    try {
      assertEquals(expectedRows, countMobRows(table));
    } finally {
      table.close();
    }
  }
Example #21
  /**
   * Returns the number of rows in a given table. HBase must be up and the table must exist
   * (otherwise the client will keep retrying until it times out).
   *
   * @return # of rows in the specified table
   */
  protected int tableRowCount(Configuration conf, TableName table) throws IOException {
    Table t = TEST_UTIL.getConnection().getTable(table);
    Scan st = new Scan();

    ResultScanner rst = t.getScanner(st);
    int count = 0;
    for (@SuppressWarnings("unused") Result rt : rst) {
      count++;
    }
    t.close();
    return count;
  }
Example #22
 public void doScan() throws IOException {
   Table tableRef = connection.getTable(TableName.valueOf(table));
   Scan scan = new Scan();
   ResultScanner scanner = tableRef.getScanner(scan);
   long now = System.currentTimeMillis();
   if (verbose) System.out.println("Starting scan");
   for (Result res : scanner) {
     if (verbose) System.out.println(res);
   }
   if (verbose) System.out.printf("Scan finished: %d ms\n\n", System.currentTimeMillis() - now);
   tableRef.close();
 }
 static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
     throws Exception {
   List<Put> puts = new ArrayList<Put>();
   for (int i = 0; i < labelExps.length; i++) {
     Put put = new Put(Bytes.toBytes("row" + (i + 1)));
     put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO);
     put.setCellVisibility(new CellVisibility(labelExps[i]));
     puts.add(put);
   }
   Table table = TEST_UTIL.createTable(tableName, TEST_FAMILY);
   table.put(puts);
   return table;
 }
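The helper above attaches a CellVisibility expression to every row. A hedged sketch of how such rows are typically read back with the standard visibility API follows; the label value and the table variable are assumptions.

 // Reading labeled cells back requires matching authorizations (label value is an assumption):
 Scan scan = new Scan();
 scan.setAuthorizations(new Authorizations("secret"));
 try (ResultScanner scanner = table.getScanner(scan)) {
   for (Result r : scanner) {
     System.out.println(Bytes.toString(r.getRow()));
   }
 }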
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();

    HBaseHelper helper = HBaseHelper.getHelper(conf);
    helper.dropTable("testtable");
    helper.createTable("testtable", "colfam1", "colfam2");
    System.out.println("Adding rows to table...");
    helper.fillTable("testtable", 1, 10, 10, "colfam1", "colfam2");

    Connection connection = ConnectionFactory.createConnection(conf);
    Table table = connection.getTable(TableName.valueOf("testtable"));
    // vv SingleColumnValueFilterExample
    SingleColumnValueFilter filter =
        new SingleColumnValueFilter(
            Bytes.toBytes("colfam1"),
            Bytes.toBytes("col-5"),
            CompareFilter.CompareOp.NOT_EQUAL,
            new SubstringComparator("val-5"));
    filter.setFilterIfMissing(true);

    Scan scan = new Scan();
    scan.setFilter(filter);
    ResultScanner scanner = table.getScanner(scan);
    // ^^ SingleColumnValueFilterExample
    System.out.println("Results of scan:");
    // vv SingleColumnValueFilterExample
    for (Result result : scanner) {
      for (Cell cell : result.rawCells()) {
        System.out.println(
            "Cell: "
                + cell
                + ", Value: "
                + Bytes.toString(
                    cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
      }
    }
    scanner.close();

    Get get = new Get(Bytes.toBytes("row-6"));
    get.setFilter(filter);
    Result result = table.get(get);
    System.out.println("Result of get: ");
    for (Cell cell : result.rawCells()) {
      System.out.println(
          "Cell: "
              + cell
              + ", Value: "
              + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
    }
    // ^^ SingleColumnValueFilterExample
  }
Example #25
  /**
   * Import all of the ways from the parsed XML into the HBase table as a segment. Ways are OSM
   * values which consist of a list of nodes. Segments are custom values we use which represent a
   * single node and its neighbors.
   */
  private static void import_ways() {
    System.out.println("Importing ways (segments)...");
    Table segmentTable = Util.get_table("segment");
    if (segmentTable == null) {
      System.err.println("Segment table failed to load.");
      return;
    }

    int counter = 0;
    int batch = 100;
    List<Put> puts = new ArrayList<>();
    for (Way way : ways) {
      Node previousNode = null;
      for (Node node : way.getNodes()) {
        if (previousNode == null) {
          previousNode = node;
          continue;
        }
        Put p = new Put(Bytes.toBytes(previousNode.getGeohash()));
        p.addColumn(
            NODE,
            Bytes.toBytes(node.getGeohash()),
            Bytes.toBytes(String.valueOf(way.getTagsAsSerializedJSON())));
        puts.add(p);
        p = new Put(Bytes.toBytes(node.getGeohash()));
        p.addColumn(
            NODE,
            Bytes.toBytes(previousNode.getGeohash()),
            Bytes.toBytes(String.valueOf(way.getTagsAsSerializedJSON())));
        puts.add(p);
      }
      counter += 1;
      if (counter % batch == 0) {
        try {
          System.out.print("\rBatch " + counter + " / " + ways.size());
          segmentTable.put(puts);
          puts.clear();
        } catch (IOException e) {
          System.out.println("Segment put failed");
          e.printStackTrace();
        }
      }
    }
    try {
      segmentTable.put(puts);
    } catch (IOException e) {
      System.out.println("Segment put failed");
      e.printStackTrace();
    }
    System.out.println("Added all segments!");
  }
Example #26
  private void doCopyTableTest(boolean bulkload) throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testCopyTable1");
    final TableName TABLENAME2 = TableName.valueOf("testCopyTable2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");

    try (Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
        Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY)) {
      // put rows into the first table
      for (int i = 0; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(FAMILY, COLUMN1, COLUMN1);
        t1.put(p);
      }

      CopyTable copy = new CopyTable();

      int code;
      if (bulkload) {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(),
                  "--bulkload",
                  TABLENAME1.getNameAsString()
                });
      } else {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(), TABLENAME1.getNameAsString()
                });
      }
      assertEquals("copy job failed", 0, code);

      // verify the data was copied into table 2
      for (int i = 0; i < 10; i++) {
        Get g = new Get(Bytes.toBytes("row" + i));
        Result r = t2.get(g);
        assertEquals(1, r.size());
        assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));
      }
    } finally {
      TEST_UTIL.deleteTable(TABLENAME1);
      TEST_UTIL.deleteTable(TABLENAME2);
    }
  }
  @Test(timeout = 60000)
  public void testExceptionFromCoprocessorDuringPut() throws Exception {
    // set configure to indicate which cp should be loaded
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BuggyRegionObserver.class.getName());
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
    TEST_UTIL.startMiniCluster(2);
    try {
      // When we try to write to TEST_TABLE, the buggy coprocessor will
      // cause a NullPointerException, which will cause the regionserver (which
      // hosts the region we attempted to write to) to abort.
      final byte[] TEST_FAMILY = Bytes.toBytes("aaa");

      Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, TEST_FAMILY);
      TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);

      // Note which regionServer will abort (after put is attempted).
      final HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);

      try {
        final byte[] ROW = Bytes.toBytes("aaa");
        Put put = new Put(ROW);
        put.add(TEST_FAMILY, ROW, ROW);
        table.put(put);
      } catch (IOException e) {
        // The region server is going to be aborted.
        // We may get an exception if we retry,
        // which is not guaranteed.
      }

      // Wait 10 seconds for the regionserver to abort: expected result is that
      // it will abort.
      boolean aborted = false;
      for (int i = 0; i < 10; i++) {
        aborted = regionServer.isAborted();
        if (aborted) {
          break;
        }
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          fail("Interrupted while waiting for the regionserver to abort.");
        }
      }
      Assert.assertTrue("The region server should have aborted", aborted);
      table.close();
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
Example #28
 @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
 @Test
 public void testJobConfiguration() throws Exception {
   Configuration conf = new Configuration(this.util.getConfiguration());
   conf.set("hbase.fs.tmp.dir", util.getDataTestDir("testJobConfiguration").toString());
   Job job = new Job(conf);
   job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
   Table table = Mockito.mock(Table.class);
   RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
   setupMockStartKeys(regionLocator);
   setupMockTableName(regionLocator);
   HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
   assertEquals(4, job.getNumReduceTasks());
 }
Example #29
  @Test
  public void createTableTest() throws IOException, InterruptedException {
    String testName = "createTableTest";
    String nsName = prefix + "_" + testName;
    LOG.info(testName);

    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(nsName + ":my_table"));
    HColumnDescriptor colDesc = new HColumnDescriptor("my_cf");
    desc.addFamily(colDesc);
    try {
      admin.createTable(desc);
      fail("Expected NamespaceNotFoundException because the namespace does not exist yet");
    } catch (NamespaceNotFoundException ex) {
    }
    // create the namespace, then create the table inside it
    admin.createNamespace(NamespaceDescriptor.create(nsName).build());
    admin.createTable(desc);
    TEST_UTIL.waitTableAvailable(desc.getTableName().getName(), 10000);
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    assertTrue(
        fs.exists(
            new Path(
                master.getMasterFileSystem().getRootDir(),
                new Path(
                    HConstants.BASE_NAMESPACE_DIR,
                    new Path(nsName, desc.getTableName().getQualifierAsString())))));
    assertEquals(1, admin.listTables().length);

    // verify non-empty namespace can't be removed
    try {
      admin.deleteNamespace(nsName);
      fail("Expected non-empty namespace constraint exception");
    } catch (Exception ex) {
      LOG.info("Caught expected exception: " + ex);
    }

    // sanity check try to write and read from table
    Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("my_cf"), Bytes.toBytes("my_col"), Bytes.toBytes("value1"));
    table.put(p);
    // flush and read from disk to make sure directory changes are working
    admin.flush(desc.getTableName());
    Get g = new Get(Bytes.toBytes("row1"));
    assertTrue(table.exists(g));

    // normal case of removing namespace
    TEST_UTIL.deleteTable(desc.getTableName());
    admin.deleteNamespace(nsName);
  }
  public static void main(String[] args) throws IOException {

    Configuration conf = HBaseClientHelper.loadDefaultConfiguration();

    Connection connection = ConnectionFactory.createConnection(conf);

    try {

      Table table = connection.getTable(TableName.valueOf("testtable"));
      try {
        // 1 Put
        Put p = new Put(Bytes.toBytes("row1"));
        p.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));

        table.put(p);

        // 2 Get
        Get g = new Get(Bytes.toBytes("row1"));
        Result r = table.get(g);
        byte[] value = r.getValue(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
        String valueStr = Bytes.toString(value);
        System.out.println("GET: " + valueStr);

        // 3 Scan
        Scan s = new Scan();
        s.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
        ResultScanner scanner = table.getScanner(s);
        try {
          for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
            System.out.println("Found row: " + rr);
          }

          // The other approach is to use a foreach loop. Scanners are
          // iterable!
          // for (Result rr : scanner) {
          // System.out.println("Found row: " + rr);
          // }
        } finally {
          scanner.close();
        }

        // Close your table and cluster connection.
      } finally {
        if (table != null) table.close();
      }
    } finally {
      connection.close();
    }
  }