Example No. 1
 /**
  * This utility method creates a list of Thrift TRowResult "structs" from an array of HBase
  * Result objects. Null or empty Results are skipped, so an empty list is returned when there is
  * no row data.
  *
  * @param in array of HBase Result objects
  * @param sortColumns whether row data is returned in sorted column order: when true, each
  *     TRowResult's sortedColumns member (an ArrayList of TColumn structs) is populated; when
  *     false, its columns member (a map from column name to TCell struct) is populated instead
  * @return list of Thrift TRowResult structs
  */
 public static List<TRowResult> rowResultFromHBase(Result[] in, boolean sortColumns) {
   List<TRowResult> results = new ArrayList<TRowResult>();
   for (Result result_ : in) {
     if (result_ == null || result_.isEmpty()) {
       continue;
     }
     TRowResult result = new TRowResult();
     result.row = ByteBuffer.wrap(result_.getRow());
     if (sortColumns) {
       result.sortedColumns = new ArrayList<TColumn>();
       for (Cell kv : result_.rawCells()) {
         result.sortedColumns.add(
             new TColumn(
                 ByteBuffer.wrap(
                     KeyValue.makeColumn(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv))),
                 new TCell(ByteBuffer.wrap(CellUtil.cloneValue(kv)), kv.getTimestamp())));
       }
     } else {
       result.columns = new TreeMap<ByteBuffer, TCell>();
       for (Cell kv : result_.rawCells()) {
         result.columns.put(
             ByteBuffer.wrap(
                 KeyValue.makeColumn(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv))),
             new TCell(ByteBuffer.wrap(CellUtil.cloneValue(kv)), kv.getTimestamp()));
       }
     }
     results.add(result);
   }
   return results;
 }
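A minimal usage sketch (not part of the original example) showing how the sortColumns flag changes which TRowResult member gets populated; the connection, table name, and row key below are hypothetical assumptions.

 // Hedged sketch: assumes an already-open Connection; "mytable" and "row-1" are placeholders.
 public static void printRowsAsThrift(Connection connection) throws IOException {
   try (Table table = connection.getTable(TableName.valueOf("mytable"))) {
     Result[] rows = table.get(Arrays.asList(new Get(Bytes.toBytes("row-1"))));
     // sortColumns = true populates TRowResult.sortedColumns (ordered TColumn structs)
     List<TRowResult> sorted = rowResultFromHBase(rows, true);
     // sortColumns = false populates TRowResult.columns (columnName -> TCell map)
     List<TRowResult> byName = rowResultFromHBase(rows, false);
     System.out.println(sorted.size() + " / " + byName.size());
   }
 }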
Example No. 2
  /**
   * Get the field out of the row without checking whether parsing is needed. This is called by both
   * getField and getFieldsAsList.
   *
   * @param fieldID The id of the field starting from 0.
   * @return The value of the field
   */
  private Object uncheckedGetField(int fieldID) {

    LazyObjectBase[] fields = getFields();
    boolean[] fieldsInited = getFieldInited();

    if (!fieldsInited[fieldID]) {
      fieldsInited[fieldID] = true;

      ColumnMapping colMap = columnsMapping[fieldID];

      if (!colMap.hbaseRowKey && !colMap.hbaseTimestamp && colMap.qualifierName == null) {
        // it is a column family
        // primitive type for Map<Key, Value> can be stored in binary format. Pass in the
        // qualifier prefix to cherry pick the qualifiers that match the prefix instead of picking
        // up everything
        ((LazyHBaseCellMap) fields[fieldID])
            .init(
                result,
                colMap.familyNameBytes,
                colMap.binaryStorage,
                colMap.qualifierPrefixBytes,
                colMap.isDoPrefixCut());
        return fields[fieldID].getObject();
      }

      if (colMap.hbaseTimestamp) {
        // Get the latest timestamp of all the cells as the row timestamp
        long timestamp = result.rawCells()[0].getTimestamp(); // from hbase-0.96.0
        for (int i = 1; i < result.rawCells().length; i++) {
          timestamp = Math.max(timestamp, result.rawCells()[i].getTimestamp());
        }
        LazyObjectBase lz = fields[fieldID];
        if (lz instanceof LazyTimestamp) {
          ((LazyTimestamp) lz).getWritableObject().setTime(timestamp);
        } else {
          ((LazyLong) lz).getWritableObject().set(timestamp);
        }
        return lz.getObject();
      }

      byte[] bytes;
      if (colMap.hbaseRowKey) {
        bytes = result.getRow();
      } else {
        // it is a column i.e. a column-family with column-qualifier
        bytes = result.getValue(colMap.familyNameBytes, colMap.qualifierNameBytes);
      }
      if (bytes == null || isNull(oi.getNullSequence(), bytes, 0, bytes.length)) {
        fields[fieldID].setNull();
      } else {
        ByteArrayRef ref = new ByteArrayRef();
        ref.setData(bytes);
        fields[fieldID].init(ref, 0, bytes.length);
      }
    }

    return fields[fieldID].getObject();
  }
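The javadoc above names getField and getFieldsAsList as the two callers. A hypothetical sketch of that delegation (not the actual Hive source, which also ensures the row bytes have been parsed before delegating) might look like this:

  // Hypothetical callers; both rely on uncheckedGetField so each field is parsed at most once.
  public Object getField(int fieldID) {
    // the real implementation parses the row first, then delegates
    return uncheckedGetField(fieldID);
  }

  public List<Object> getFieldsAsList() {
    LazyObjectBase[] fields = getFields();
    List<Object> values = new ArrayList<Object>(fields.length);
    for (int i = 0; i < fields.length; i++) {
      values.add(uncheckedGetField(i));
    }
    return values;
  }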
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();

    HBaseHelper helper = HBaseHelper.getHelper(conf);
    helper.dropTable("testtable");
    helper.createTable("testtable", "colfam1", "colfam2");
    System.out.println("Adding rows to table...");
    helper.fillTable("testtable", 1, 10, 10, "colfam1", "colfam2");

    Connection connection = ConnectionFactory.createConnection(conf);
    Table table = connection.getTable(TableName.valueOf("testtable"));
    // vv SingleColumnValueFilterExample
    SingleColumnValueFilter filter =
        new SingleColumnValueFilter(
            Bytes.toBytes("colfam1"),
            Bytes.toBytes("col-5"),
            CompareFilter.CompareOp.NOT_EQUAL,
            new SubstringComparator("val-5"));
    filter.setFilterIfMissing(true);

    Scan scan = new Scan();
    scan.setFilter(filter);
    ResultScanner scanner = table.getScanner(scan);
    // ^^ SingleColumnValueFilterExample
    System.out.println("Results of scan:");
    // vv SingleColumnValueFilterExample
    for (Result result : scanner) {
      for (Cell cell : result.rawCells()) {
        System.out.println(
            "Cell: "
                + cell
                + ", Value: "
                + Bytes.toString(
                    cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
      }
    }
    scanner.close();

    Get get = new Get(Bytes.toBytes("row-6"));
    get.setFilter(filter);
    Result result = table.get(get);
    System.out.println("Result of get: ");
    for (Cell cell : result.rawCells()) {
      System.out.println(
          "Cell: "
              + cell
              + ", Value: "
              + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
    }
    // ^^ SingleColumnValueFilterExample
  }
    public static VesselLocation getLocation(Table VTLocation_Table, String RowKey)
        throws IOException {
      if (RowKey != null) {
        Get get = new Get(Bytes.toBytes(RowKey));
        Result result = VTLocation_Table.get(get);

        VesselLocation VL = new VesselLocation();

        for (Cell cell : result.rawCells()) {
          String Qualifier = Bytes.toString(CellUtil.cloneQualifier(cell));
          String Value = Bytes.toString(CellUtil.cloneValue(cell));

          if (Qualifier.equals("coordinates")) {
            VL.coordinates = Value;
          } else if (Qualifier.equals("speed")) {
            VL.speed = Value;
          } else if (Qualifier.equals("destination")) {
            VL.destination = Value;
          } else if (Qualifier.equals("timestamp")) {
            VL.recordtime = DateTime.parse(Value, rawformatter).getMillis();
          } else if (Qualifier.equals("previouslocation")) {
            VL.previouslocation = Value;
          } else if (Qualifier.equals("nextlocation")) {
            VL.nextlocation = Value;
          }
        }
        return VL;
      } else {
        return null;
      }
    }
    @Override
    public void handleLastResult(Result lastResult) {
      if (lastResult == null) {
        return;
      }

      Cell[] rawCells = lastResult.rawCells();
      Cell last = rawCells[rawCells.length - 1];
      byte[] row = CellUtil.cloneRow(last);
      byte[] originalRow = traceIdRowKeyDistributor.getOriginalKey(row);
      long reverseStartTime =
          BytesUtils.bytesToLong(originalRow, PinpointConstants.APPLICATION_NAME_MAX_LEN);
      this.lastRowTimestamp = TimeUtils.recoveryTimeMillis(reverseStartTime);

      byte[] qualifier = CellUtil.cloneQualifier(last);
      this.lastTransactionId = TransactionIdMapper.parseVarTransactionId(qualifier, 0);
      this.lastTransactionElapsed = BytesUtils.bytesToInt(qualifier, 0);

      if (logger.isDebugEnabled()) {
        logger.debug(
            "lastRowTimestamp={}, lastTransactionId={}, lastTransactionElapsed={}",
            DateUtils.longToDateStr(lastRowTimestamp),
            lastTransactionId,
            lastTransactionElapsed);
      }
    }
Example No. 6
 private static Put resultToPut(ImmutableBytesWritable key, Result result) throws IOException {
   Put put = new Put(key.get());
   for (Cell kv : result.rawCells()) {
     put.add(kv);
   }
   return put;
 }
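A hedged sketch of how a helper like resultToPut is commonly wired into a TableMapper; the class name below is hypothetical, and the job's reducer/output setup is assumed to be configured elsewhere.

 // Hypothetical mapper that converts each scanned Result back into a Put keyed by its row.
 public static class CopyMapper extends TableMapper<ImmutableBytesWritable, Put> {
   @Override
   protected void map(ImmutableBytesWritable key, Result value, Context context)
       throws IOException, InterruptedException {
     context.write(key, resultToPut(key, value));
   }
 }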
Example No. 7
  @Test
  public void TestMap() throws Exception {
    String prefix = "0000";
    final String fileName = "19691231f2cd014ea28f42788214560a21a44cef";
    final String mobFilePath = prefix + fileName;

    ImmutableBytesWritable r = new ImmutableBytesWritable(Bytes.toBytes("r"));
    final KeyValue[] kvList = new KeyValue[1];
    kvList[0] =
        new KeyValue(
            Bytes.toBytes("row"),
            Bytes.toBytes("family"),
            Bytes.toBytes("column"),
            Bytes.toBytes(mobFilePath));

    Result columns = mock(Result.class);
    when(columns.rawCells()).thenReturn(kvList);

    Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(configuration, "1", new DummyMobAbortable());
    TableName tn = TableName.valueOf("testSweepMapper");
    TableName lockName = MobUtils.getTableLockName(tn);
    String znode = ZKUtil.joinZNode(zkw.tableLockZNode, lockName.getNameAsString());
    configuration.set(SweepJob.SWEEP_JOB_ID, "1");
    configuration.set(SweepJob.SWEEP_JOB_TABLE_NODE, znode);
    ServerName serverName = SweepJob.getCurrentServerName(configuration);
    configuration.set(SweepJob.SWEEP_JOB_SERVERNAME, serverName.toString());

    TableLockManager tableLockManager =
        TableLockManager.createTableLockManager(configuration, zkw, serverName);
    TableLock lock = tableLockManager.writeLock(lockName, "Run sweep tool");
    lock.acquire();
    try {
      Mapper<ImmutableBytesWritable, Result, Text, KeyValue>.Context ctx =
          mock(Mapper.Context.class);
      when(ctx.getConfiguration()).thenReturn(configuration);
      SweepMapper map = new SweepMapper();
      doAnswer(
              new Answer<Void>() {

                @Override
                public Void answer(InvocationOnMock invocation) throws Throwable {
                  Text text = (Text) invocation.getArguments()[0];
                  KeyValue kv = (KeyValue) invocation.getArguments()[1];

                  assertEquals(fileName, Bytes.toString(text.getBytes(), 0, text.getLength()));
                  assertEquals(0, Bytes.compareTo(kv.getKey(), kvList[0].getKey()));

                  return null;
                }
              })
          .when(ctx)
          .write(any(Text.class), any(KeyValue.class));

      map.map(r, columns, ctx);
    } finally {
      lock.release();
    }
  }
Example No. 8
  @Test
  public void testStartStopRow() throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testStartStopRow1");
    final TableName TABLENAME2 = TableName.valueOf("testStartStopRow2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");
    final byte[] ROW0 = Bytes.toBytesBinary("\\x01row0");
    final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1");
    final byte[] ROW2 = Bytes.toBytesBinary("\\x01row2");

    Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
    Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

    // put rows into the first table
    Put p = new Put(ROW0);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW1);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW2);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);

    CopyTable copy = new CopyTable();
    assertEquals(
        0,
        ToolRunner.run(
            new Configuration(TEST_UTIL.getConfiguration()),
            copy,
            new String[] {
              "--new.name=" + TABLENAME2,
              "--startrow=\\x01row1",
              "--stoprow=\\x01row2",
              TABLENAME1.getNameAsString()
            }));

    // verify the data was copied into table 2
    // row1 exist, row0, row2 do not exist
    Get g = new Get(ROW1);
    Result r = t2.get(g);
    assertEquals(1, r.size());
    assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));

    g = new Get(ROW0);
    r = t2.get(g);
    assertEquals(0, r.size());

    g = new Get(ROW2);
    r = t2.get(g);
    assertEquals(0, r.size());

    t1.close();
    t2.close();
    TEST_UTIL.deleteTable(TABLENAME1);
    TEST_UTIL.deleteTable(TABLENAME2);
  }
    // Get all events that started at or before the given location's record time and exited at or after it
    public static Map<Integer, VesselEvent> getAllEventsStartBeforeEndAfterBeforeLocation(
        Table VTEvent_Table, String IMO_str, VesselLocation location) throws IOException {
      Scan getAllEventsWithExistAtLastLocation = new Scan();
      getAllEventsWithExistAtLastLocation
          .setStartRow(
              Bytes.toBytes(
                  IMO_str + LpadNum(Long.MAX_VALUE - location.recordtime, 19) + "0000000000"))
          .setStopRow(Bytes.toBytes(IMO_str + LpadNum(Long.MAX_VALUE, 19) + "9999999999"))
          .addColumn(details, exittime);
      getAllEventsWithExistAtLastLocation.setCaching(100);

      Filter ExistTimeValuefilter =
          new ValueFilter(
              CompareFilter.CompareOp.GREATER_OR_EQUAL,
              new BinaryComparator(
                  Bytes.toBytes(new DateTime(location.recordtime).toString(rawformatter))));
      getAllEventsWithExistAtLastLocation.setFilter(ExistTimeValuefilter);

      ResultScanner Result_event = VTEvent_Table.getScanner(getAllEventsWithExistAtLastLocation);

      Map<Integer, VesselEvent> events = new HashMap<Integer, VesselEvent>();

      for (Result res : Result_event) {

        Get get = new Get(res.getRow());
        get.addColumn(details, entrytime);
        get.addColumn(details, entrycoordinates);

        Result result = VTEvent_Table.get(get);
        String rowkey = Bytes.toString(result.getRow());
        String polygonid = rowkey.substring(26);

        VesselEvent VE = new VesselEvent();
        VE.exittime = location.recordtime;
        VE.exitcoordinates = location.coordinates;
        VE.destination = location.destination;
        VE.polygonid = Integer.parseInt(polygonid);

        for (Cell cell : result.rawCells()) {
          String Qualifier = Bytes.toString(CellUtil.cloneQualifier(cell));
          String Value = Bytes.toString(CellUtil.cloneValue(cell));

          if (Qualifier.equals("entertime")) {
            VE.entrytime = DateTime.parse(Value, rawformatter).getMillis();
          } else if (Qualifier.equals("entercoordinates")) {
            VE.entrycoordinates = Value;
          }
        }

        events.put(VE.polygonid, VE);
      }

      Result_event.close();
      return events;
    }
Example No. 10
  private void doCopyTableTest(boolean bulkload) throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testCopyTable1");
    final TableName TABLENAME2 = TableName.valueOf("testCopyTable2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");

    try (Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
        Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY); ) {
      // put rows into the first table
      for (int i = 0; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(FAMILY, COLUMN1, COLUMN1);
        t1.put(p);
      }

      CopyTable copy = new CopyTable();

      int code;
      if (bulkload) {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(),
                  "--bulkload",
                  TABLENAME1.getNameAsString()
                });
      } else {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(), TABLENAME1.getNameAsString()
                });
      }
      assertEquals("copy job failed", 0, code);

      // verify the data was copied into table 2
      for (int i = 0; i < 10; i++) {
        Get g = new Get(Bytes.toBytes("row" + i));
        Result r = t2.get(g);
        assertEquals(1, r.size());
        assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));
      }
    } finally {
      TEST_UTIL.deleteTable(TABLENAME1);
      TEST_UTIL.deleteTable(TABLENAME2);
    }
  }
 private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
     throws IOException {
   for (int i = startRow; i < startRow + numRows; i++) {
     byte[] row = Bytes.toBytes("" + i);
     Get get = new Get(row);
     for (byte[] family : families) {
       get.addColumn(family, qf);
     }
     Result result = newReg.get(get);
     Cell[] raw = result.rawCells();
     assertEquals(families.length, result.size());
     for (int j = 0; j < families.length; j++) {
       assertTrue(CellUtil.matchingRow(raw[j], row));
       assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
       assertTrue(CellUtil.matchingQualifier(raw[j], qf));
     }
   }
 }
    public static List<VesselLocation> getLocationsBetween(
        Table VTLocation_Table, String imo_str, long first_timestamp, long last_timestamp)
        throws IOException {

      // scan
      // 'cdb_vessel:vessel_location',{FILTER=>"(PrefixFilter('0000003162')"}
      Scan GetExistingLocations = new Scan();
      GetExistingLocations.setStartRow(
              Bytes.toBytes(imo_str + LpadNum(Long.MAX_VALUE - last_timestamp, 19)))
          .setStopRow(Bytes.toBytes(imo_str + LpadNum(Long.MAX_VALUE - first_timestamp + 1, 19)));
      GetExistingLocations.setCaching(1000);

      ResultScanner Result_ExistingLocations = VTLocation_Table.getScanner(GetExistingLocations);
      List<VesselLocation> result = new ArrayList<VesselLocation>();

      for (Result res : Result_ExistingLocations) {
        VesselLocation VL = new VesselLocation();

        for (Cell cell : res.rawCells()) {
          String Qualifier = Bytes.toString(CellUtil.cloneQualifier(cell));
          String Value = Bytes.toString(CellUtil.cloneValue(cell));

          if (Qualifier.equals("coordinates")) {
            VL.coordinates = Value;
          } else if (Qualifier.equals("speed")) {
            VL.speed = Value;
          } else if (Qualifier.equals("destination")) {
            VL.destination = Value;
          } else if (Qualifier.equals("timestamp")) {
            VL.recordtime = DateTime.parse(Value, rawformatter).getMillis();
          } else if (Qualifier.equals("previouslocation")) {
            VL.previouslocation = Value;
          } else if (Qualifier.equals("nextlocation")) {
            VL.nextlocation = Value;
          }
        }
        result.add(VL);
      }

      Result_ExistingLocations.close();

      return result;
    }
Example No. 13
  @Override
  public String mapRow(Result result, int rowNum) throws Exception {
    if (result.isEmpty()) {
      return null;
    }
    Cell[] rawCells = result.rawCells();

    if (rawCells.length == 0) {
      return null;
    }

    String[] ret = new String[rawCells.length];
    int index = 0;

    for (Cell cell : rawCells) {
      ret[index++] = BytesUtils.toString(CellUtil.cloneQualifier(cell));
    }

    return ret[0];
  }
Example No. 14
  // Select data whose row key lies between begin and end and whose column matches the given value.
  // See http://blog.csdn.net/cnweike/article/details/42920547 for more detail.
  public static String select(
      String tableName, String begin, String end, String colFamily, String column, String value) {
    String result = "";
    TableName tn = TableName.valueOf(tableName);
    try {
      Table table = conn.getTable(tn);
      Scan scan = new Scan(Bytes.toBytes(begin), Bytes.toBytes(end));

      SingleColumnValueFilter scvf =
          new SingleColumnValueFilter(
              Bytes.toBytes(colFamily),
              Bytes.toBytes(column),
              CompareFilter.CompareOp.EQUAL,
              new SubstringComparator(value));
      scvf.setFilterIfMissing(false);
      scvf.setLatestVersionOnly(true); // OK

      scan.setFilter(scvf);

      ResultScanner scanner = table.getScanner(scan);
      List<Map<String, String>> total = new ArrayList<Map<String, String>>();
      for (Result res : scanner) {
        Map<String, String> map = new HashMap<String, String>();
        for (Cell cell : res.rawCells())
          map.put(
              Bytes.toString(CellUtil.cloneQualifier(cell)),
              Bytes.toString(CellUtil.cloneValue(cell)));
        total.add(map);
      }

      ObjectMapper mapper = new ObjectMapper();
      result = mapper.writeValueAsString(total);

    } catch (IOException e) {
      e.printStackTrace();
    }

    return result;
  }
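A short, hypothetical invocation of the select(...) method above; the table name, row-key range, and column values are placeholders, not values from the original project. Because setFilterIfMissing(false) is used, rows that lack the column are still returned.

  // Hypothetical usage; prints a JSON array of {qualifier: value} maps, one per matching row.
  public static void selectExample() {
    String json = select("envinfo", "2015080100", "2015080723", "info", "city", "beijing");
    System.out.println(json);
  }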
Example No. 15
  /**
   * Get all stock symbols listed on the given exchange and write them to a "<type>Stocks" file.
   *
   * @param type the exchange name to match against each stock's "exchange" field
   */
  public static void GetStocksSymbol(String type) {
    Scan scan = new Scan();
    Filter ColumnFilter =
        new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("20150806195250")));
    scan.setFilter(ColumnFilter);
    Hbase.setStrings("Stocks", "list");
    ResultScanner results = Hbase.getAllData(scan);
    BufferedWriter out = null;
    File LogFile = new File(type + "Stocks");
    try {
      out = new BufferedWriter(new FileWriter(LogFile, false));
    } catch (IOException e) {
      e.printStackTrace();
    }

    for (Result result : results) {
      for (Cell cell : result.rawCells()) {
        try {
          JSONObject info = new JSONObject(new String(CellUtil.cloneValue(cell)));
          if (info.getString("exchange").equals(type)) {
            out.write(new String(CellUtil.cloneRow(cell)) + "\r\n");
          }
        } catch (JSONException e) {
          // TODO Auto-generated catch block
          e.printStackTrace();
        } catch (IOException e) {
          // TODO Auto-generated catch block
          e.printStackTrace();
        }
      }
    }
    try {
      out.flush(); // flush the buffered contents to the file
      out.close();
    } catch (IOException e) {
      // TODO Auto-generated catch block
      e.printStackTrace();
    }
  }
Example No. 16
  public void testPool() throws IOException {
    PoolConfig config = new PoolConfig();
    config.setMaxTotal(20);
    config.setMaxIdle(5);
    config.setMaxWaitMillis(1000);
    config.setTestOnBorrow(true);

    /* properties */
    Properties props = new Properties();
    props.setProperty("hbase.zookeeper.quorum", "host1,host2,host3");
    props.setProperty("hbase.zookeeper.property.clientPort", "2181");
    props.setProperty("hbase.master", "host1:60000");
    props.setProperty("hbase.rootdir", "hdfs://host1:9000/hbase");

    /* connection pool */
    HbaseConnectionPool pool = new HbaseConnectionPool(config, props);
    HTableInterface table = null;

    HConnection conn = pool.getConnection();
    table = conn.getTable(TableName.valueOf("relation"));

    Get get = new Get(Bytes.toBytes("rowKey"));
    Result r = table.get(get);
    for (Cell cell : r.rawCells()) {
      System.out.println(
          "Rowkey : "
              + Bytes.toString(r.getRow())
              + " Familiy:Quilifier : "
              + Bytes.toString(CellUtil.cloneQualifier(cell))
              + " Value : "
              + Bytes.toString(CellUtil.cloneValue(cell)));
    }
    table.close();
    System.out.println(table);
    pool.returnConnection(conn);

    pool.close();
  }
    public static VesselLocation getLocationBefore(
        Table VTLocation_Table, String IMO_str, long timestamp) throws IOException {
      Scan getLastLocation = new Scan();
      getLastLocation.setStartRow(
          Bytes.toBytes(IMO_str + LpadNum(Long.MAX_VALUE - timestamp + 1, 19)));
      getLastLocation.setMaxResultSize(1);

      ResultScanner Result_LastLocation = VTLocation_Table.getScanner(getLastLocation);

      for (Result res : Result_LastLocation) {
        VesselLocation VL = new VesselLocation();

        for (Cell cell : res.rawCells()) {
          String Qualifier = Bytes.toString(CellUtil.cloneQualifier(cell));
          String Value = Bytes.toString(CellUtil.cloneValue(cell));

          if (Qualifier.equals("coordinates")) {
            VL.coordinates = Value;
          } else if (Qualifier.equals("speed")) {
            VL.speed = Value;
          } else if (Qualifier.equals("destination")) {
            VL.destination = Value;
          } else if (Qualifier.equals("timestamp")) {
            VL.recordtime = DateTime.parse(Value, rawformatter).getMillis();
          } else if (Qualifier.equals("previouslocation")) {
            VL.previouslocation = Value;
          } else if (Qualifier.equals("nextlocation")) {
            VL.nextlocation = Value;
          }
        }

        Result_LastLocation.close();
        return VL;
      }

      Result_LastLocation.close();
      return null;
    }
Example No. 18
 // Select data whose row key falls between two values.
 // This works for the envinfo / measureinfo / weatherinfo tables.
 public static String select(String tableName, String begin, String end) {
   String result = "";
   TableName tn = TableName.valueOf(tableName);
   try {
     Table table = conn.getTable(tn);
     Scan scan = new Scan(Bytes.toBytes(begin), Bytes.toBytes(end));
     ResultScanner scanner = table.getScanner(scan);
     List<Map<String, String>> total = new ArrayList<Map<String, String>>();
     for (Result res : scanner) {
       Map<String, String> map = new HashMap<String, String>();
       for (Cell cell : res.rawCells())
         map.put(
             Bytes.toString(CellUtil.cloneQualifier(cell)),
             Bytes.toString(CellUtil.cloneValue(cell)));
       total.add(map);
     }
     ObjectMapper mapper = new ObjectMapper();
     result = mapper.writeValueAsString(total);
   } catch (IOException e) {
     e.printStackTrace();
   }
   return result;
 }
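For reference, the two-argument Scan used above is start-inclusive and stop-exclusive, so a call like the hypothetical one below returns rows with keys in [begin, end). The table name and key format are placeholders and depend on how the rows were written.

 // Hypothetical usage of the range-scan select(...) above.
 public static void selectRangeExample() {
   String json = select("measureinfo", "20150801", "20150808");
   System.out.println(json); // rows with keys in [20150801, 20150808) as a JSON array
 }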
Example No. 19
  private void doIncrementalLoadTest(boolean shouldChangeRegions) throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    byte[][] splitKeys = generateRandomSplitKeys(4);
    util.startMiniCluster();
    try {
      HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
      Admin admin = table.getConnection().getAdmin();
      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = -1;
      try (RegionLocator r = table.getRegionLocator()) {
        numRegions = r.getStartKeys().length;
      }
      assertEquals("Should make 5 regions", numRegions, 5);

      // Generate the bulk load files
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table.getTableDescriptor(), table.getRegionLocator(), testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getName());
        while (util.getMiniHBaseCluster()
            .getMaster()
            .getAssignmentManager()
            .getRegionStates()
            .isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        util.deleteTable(table.getName());
        byte[][] newSplitKeys = generateRandomSplitKeys(14);
        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);

        while (table.getRegionLocator().getAllRegionLocations().size() != 15
            || !admin.isTableAvailable(table.getName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME);
      assertEquals(
          "Data should remain after reopening of regions",
          tableDigestBefore,
          util.checksumRows(table));
    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
Example No. 20
  private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality)
      throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
    int hostCount = 1;
    int regionNum = 5;
    if (shouldKeepLocality) {
      // We should change host count higher than hdfs replica count when MiniHBaseCluster supports
      // explicit hostnames parameter just like MiniDFSCluster does.
      hostCount = 3;
      regionNum = 20;
    }

    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
      hostnames[i] = "datanode_" + i;
    }
    util.startMiniCluster(1, hostCount, hostnames);

    Table table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    try (RegionLocator r = util.getConnection().getRegionLocator(TABLE_NAME);
        Admin admin = util.getConnection().getAdmin(); ) {
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = r.getStartKeys().length;
      assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

      // Generate the bulk load files
      runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getName());
        while (util.getMiniHBaseCluster()
            .getMaster()
            .getAssignmentManager()
            .getRegionStates()
            .isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        util.deleteTable(table.getName());
        byte[][] newSplitKeys = generateRandomSplitKeys(14);
        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);

        while (util.getConnection().getRegionLocator(TABLE_NAME).getAllRegionLocations().size()
                != 15
            || !admin.isTableAvailable(table.getName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Check region locality
      HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
      for (HRegion region : util.getHBaseCluster().getRegions(TABLE_NAME)) {
        hbd.add(region.getHDFSBlocksDistribution());
      }
      for (String hostname : hostnames) {
        float locality = hbd.getBlockLocalityIndex(hostname);
        LOG.info("locality of [" + hostname + "]: " + locality);
        assertEquals(100, (int) (locality * 100));
      }

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME);
      assertEquals(
          "Data should remain after reopening of regions",
          tableDigestBefore,
          util.checksumRows(table));
    } finally {
      testDir.getFileSystem(conf).delete(testDir, true);
      util.deleteTable(TABLE_NAME);
      util.shutdownMiniCluster();
    }
  }