Code Example #1
File: HbaseTest.java Project: calm4wei/bitmap4hadoop
  @Test
  public void insert_rowkey_prefix_date() throws IOException {

    System.out.println(errorTable);
    // Buffer Puts client-side instead of sending each one immediately.
    errorTable.setAutoFlushTo(false);
    int totalPuts = 10000000;
    long t1 = System.currentTimeMillis();
    for (int i = 0; i < totalPuts; i++) {
      String uuid = UUID.randomUUID().toString().replaceAll("-", "").substring(0, 8);
      // Row key: fixed date prefix plus an 8-character random suffix.
      Put put = new Put(Bytes.toBytes("20150705" + "_" + uuid));
      put.add(
          fBytes,
          Bytes.toBytes("stacktrace"),
          Bytes.toBytes("java.io.IOException:file not found" + UUID.randomUUID().toString()));
      errorTable.put(put);
      // Push the buffered Puts to the RegionServers every 10,000 rows.
      if (i % 10000 == 0) {
        errorTable.flushCommits();
      }
    }
    errorTable.flushCommits();
    long t2 = System.currentTimeMillis();
    System.out.println("count=" + totalPuts + ",t2-t1=" + (t2 - t1));
    //        errorTable.close();
  }
Code Example #2
File: TagDao.java Project: rterbush/aq2o
 /*
  * (non-Javadoc)
  *
  * @see com.activequant.archive.IArchiveWriter#commit()
  */
 public void commit() throws IOException {
   synchronized (puts) {
     htable.put(puts);
     puts.clear();
     htable.flushCommits();
   }
 }
Code Example #3
    protected void batchUpdate(DeleteBuffer kvBuff, boolean flushCommits) throws HiveException {
      try {

        HTable htable = HTableFactory.getHTable(configMap);
        // Disable auto flush when specified so in the config map
        if (disableAutoFlush) htable.setAutoFlushTo(false);

        // Overwrite the write buffer size when the config map specifies to do so
        if (writeBufferSizeBytes > 0) htable.setWriteBufferSize(writeBufferSizeBytes);
        System.out.println("deleting " + kvBuff.deleteList + ", size " + kvBuff.deleteList.size());

        numDeleteRecords += kvBuff.deleteList.size();
        if (kvBuff.deleteList.size() > 0)
          LOG.info(
              " Doing Batch Delete "
                  + kvBuff.deleteList.size()
                  + " records; Total delete records = "
                  + numDeleteRecords
                  + " ; Start = "
                  + (new String(kvBuff.deleteList.get(0).getRow()))
                  + " ; End = "
                  + (new String(kvBuff.deleteList.get(kvBuff.deleteList.size() - 1).getRow())));
        else LOG.info(" Doing Batch Delete with ZERO records");

        getReporter()
            .getCounter(BatchDeleteUDAFCounter.NUMBER_OF_SUCCESSFUL_DELETES)
            .increment(kvBuff.deleteList.size());
        getReporter().getCounter(BatchDeleteUDAFCounter.NUMBER_OF_BATCH_OPERATIONS).increment(1);

        // Issue the batch delete first, then flush any buffered mutations if requested.
        htable.delete(kvBuff.deleteList);
        if (flushCommits) htable.flushCommits();
        kvBuff.deleteList.clear();
      } catch (IOException e) {
        throw new HiveException(e);
      }
    }
Code Example #4
  @Test
  public void testPreWALRestoreSkip() throws Exception {
    LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
    TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});

    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

    util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
    while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
      Thread.sleep(100);
    }

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);
    table.flushCommits();

    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(20000); // just to be sure that the kill has fully started.
    util.waitUntilAllRegionsAssigned(tableName);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"getCtPreWALRestore", "getCtPostWALRestore"},
        tableName,
        new Integer[] {0, 0});

    util.deleteTable(tableName);
    table.close();
  }
Code Example #5
 @Test
 public void testCheckAndDeleteHooks() throws IOException {
   TableName tableName =
       TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
   HTable table = util.createTable(tableName, new byte[][] {A, B, C});
   try {
     Put p = new Put(Bytes.toBytes(0));
     p.add(A, A, A);
     table.put(p);
     table.flushCommits();
     Delete d = new Delete(Bytes.toBytes(0));
     table.delete(d);
     verifyMethodResult(
         SimpleRegionObserver.class,
         new String[] {
           "hadPreCheckAndDelete", "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"
         },
         tableName,
         new Boolean[] {false, false, false});
     table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);
     verifyMethodResult(
         SimpleRegionObserver.class,
         new String[] {
           "hadPreCheckAndDelete", "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"
         },
         tableName,
         new Boolean[] {true, true, true});
   } finally {
     util.deleteTable(tableName);
     table.close();
   }
 }
Code Example #6
  /*
   * (non-Javadoc)
   *
   * @see com.hazelcast.core.MapStore#storeAll(java.util.Map)
   */
  @Override
  public void storeAll(Map<String, String> pairs) {
    HTable table = null;
    try {
      List<Put> puts = new ArrayList<Put>(pairs.size());
      for (Map.Entry<String, String> pair : pairs.entrySet()) {
        try {
          byte[] rowId =
              prefixDate ? IdUtil.bucketizeId(pair.getKey()) : Bytes.toBytes(pair.getKey());
          Put p = new Put(rowId);
          if (outputFormatType == StoreFormatType.SMILE) {
            p.add(family, qualifier, jsonSmileConverter.convertToSmile(pair.getValue()));
          } else {
            p.add(family, qualifier, Bytes.toBytes(pair.getValue()));
          }
          puts.add(p);
        } catch (NumberFormatException nfe) {
          LOG.error("Encountered bad key: " + pair.getKey(), nfe);
        }
      }

      table = (HTable) pool.getTable(tableName);
      table.setAutoFlush(false);
      table.put(puts);
      table.flushCommits();
    } catch (IOException e) {
      LOG.error("Error during puts", e);
    } finally {
      if (table != null) {
        pool.putTable(table);
      }
    }
  }
Code Example #7
File: EnNiuHBaseProxy.java Project: joezxh/DATAX-UI
 public void flush() throws IOException {
   if (!buffer.isEmpty()) {
     htable.put(buffer);
     buffer.clear();
   }
   htable.flushCommits();
 }
Code Example #8
File: TestScannerResource.java Project: kfive/hbase
 static int insertData(Configuration conf, TableName tableName, String column, double prob)
     throws IOException {
   Random rng = new Random();
   int count = 0;
   HTable table = new HTable(conf, tableName);
   byte[] k = new byte[3];
   byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
   for (byte b1 = 'a'; b1 < 'z'; b1++) {
     for (byte b2 = 'a'; b2 < 'z'; b2++) {
       for (byte b3 = 'a'; b3 < 'z'; b3++) {
         if (rng.nextDouble() < prob) {
           k[0] = b1;
           k[1] = b2;
           k[2] = b3;
           Put put = new Put(k);
           put.setDurability(Durability.SKIP_WAL);
           put.add(famAndQf[0], famAndQf[1], k);
           table.put(put);
           count++;
         }
       }
     }
   }
   table.flushCommits();
   table.close();
   return count;
 }
Code Example #9
  /**
   * Processes a List of Puts and writes them to an HTable instance in RegionServer buckets via the
   * htable.put method. This will utilize the writeBuffer, thus the writeBuffer flush frequency may
   * be tuned accordingly via htable.setWriteBufferSize. <br>
   * <br>
   * The benefit of submitting Puts in this manner is to minimize the number of RegionServer RPCs in
   * each flush. <br>
   * <br>
   * Assumption #1: Regions have been pre-created for the table. If they haven't, then all of the
   * Puts will go to the same region, defeating the purpose of this utility method. See the Apache
   * HBase book for an explanation of how to do this. <br>
   * Assumption #2: Row-keys are not monotonically increasing. See the Apache HBase book for an
   * explanation of this problem. <br>
   * Assumption #3: The input list of Puts is big enough to be useful (in the thousands or more).
   * The intent of this method is to process larger chunks of data. <br>
   * Assumption #4: htable.setAutoFlush(false) has been set. This is a requirement to use the
   * writeBuffer. <br>
   * <br>
   *
   * @param htable HTable instance for target HBase table
   * @param puts List of Put instances
   * @throws IOException if a remote or network exception occurs
   */
  public static void bucketRsPut(HTable htable, List<Put> puts) throws IOException {

    Map<String, List<Put>> putMap = createRsPutMap(htable, puts);
    for (List<Put> rsPuts : putMap.values()) {
      htable.put(rsPuts);
    }
    htable.flushCommits();
  }
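A minimal usage sketch for bucketRsPut under the assumptions listed in the Javadoc above. The table name "metrics", the column family/qualifier "f"/"q", the row-key scheme, and the wrapper method name are hypothetical; the snippet uses the same pre-1.0 HTable client API as the other examples on this page.

  // Usage sketch (hypothetical names). Assumed imports: org.apache.hadoop.conf.Configuration,
  // org.apache.hadoop.hbase.HBaseConfiguration, org.apache.hadoop.hbase.client.HTable,
  // org.apache.hadoop.hbase.client.Put, org.apache.hadoop.hbase.util.Bytes, java.util.*
  public static void bucketRsPutExample() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable htable = new HTable(conf, "metrics"); // Assumption #1: "metrics" is pre-split into regions
    htable.setAutoFlush(false);                  // Assumption #4: required so Puts use the writeBuffer
    htable.setWriteBufferSize(8 * 1024 * 1024);  // optional: a larger buffer means fewer, bigger flushes

    List<Put> puts = new ArrayList<Put>(100000);
    for (int i = 0; i < 100000; i++) {
      // Assumption #2: random UUID keys avoid monotonically increasing row keys
      Put put = new Put(Bytes.toBytes(UUID.randomUUID().toString()));
      put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
      puts.add(put);
    }

    // Groups the Puts by hosting RegionServer and flushes once at the end,
    // so each flush carries fewer, larger RPCs.
    bucketRsPut(htable, puts);
    htable.close();
  }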
Code Example #10
File: HBaseClient.java Project: jxiang/ycsb
 /**
  * Cleanup any state for this DB. Called once per DB instance; there is one DB instance per client
  * thread.
  */
 public void cleanup() throws DBException {
   try {
     if (_hTable != null) {
       _hTable.flushCommits();
     }
   } catch (IOException e) {
     throw new DBException(e);
   }
 }
Code Example #11
 @Override
 protected void undoState(Iterable<byte[]> rows, int size) throws IOException {
   List<Delete> deletes = Lists.newArrayListWithCapacity(size);
   for (byte[] row : rows) {
     Delete delete = new Delete(keyDistributor.getDistributedKey(row));
     delete.deleteColumns(QueueEntryRow.COLUMN_FAMILY, stateColumnName);
     deletes.add(delete);
   }
   hTable.delete(deletes);
   hTable.flushCommits();
 }
Code Example #12
  @Override
  protected void updateState(Iterable<byte[]> rows, int size, byte[] value) throws IOException {
    List<Put> puts = Lists.newArrayListWithCapacity(size);

    for (byte[] row : rows) {
      Put put = new Put(keyDistributor.getDistributedKey(row));
      put.add(QueueEntryRow.COLUMN_FAMILY, stateColumnName, value);
      puts.add(put);
    }
    hTable.put(puts);
    hTable.flushCommits();
  }
Code Example #13
 public static void SeedData(HBaseConfiguration conf) throws IOException {
   HTable table = new HTable(conf, "people");
   Put put = new Put(Bytes.toBytes("doe-john-m-12345"));
   put.add(Bytes.toBytes("personal"), Bytes.toBytes("givenName"), Bytes.toBytes("John"));
   put.add(Bytes.toBytes("personal"), Bytes.toBytes("mi"), Bytes.toBytes("M"));
   put.add(Bytes.toBytes("personal"), Bytes.toBytes("surame"), Bytes.toBytes("Doe"));
   put.add(
       Bytes.toBytes("contactinfo"),
       Bytes.toBytes("email"),
       Bytes.toBytes("*****@*****.**"));
   table.put(put);
   table.flushCommits();
   table.close();
 }
Code Example #14
File: EnNiuHBaseProxy.java Project: joezxh/DATAX-UI
 public boolean fetchLine(Line line) throws IOException {
   if (null == this.rs) {
      throw new IllegalStateException("HBase client tried to fetch data before the scanner was initialized.");
   }
   Result result = this.rs.next();
   if (null == result) {
     return false;
   }
    if (this.maxversion == -1) { // multi-version records: fetch version data per column according to each column's setting
     NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> nMap = result.getMap();
     this.p = new Put(result.getRow());
     this.p.setDurability(Durability.SKIP_WAL);
     for (int i = 0; i < this.families.length; i++) {
       NavigableMap<Long, byte[]> vmap =
           nMap.get(this.families[i].getBytes()).get(this.columns[i].getBytes());
       byte[] value = null;
       byte[] firstValue = null;
        if (vmap == null || vmap.size() == 0) { // no record for this column; a substitute value may be filled in by the check below
         line.addField(null);
         continue;
        } else if (vmap.size() > 1) { // multiple versions
         if ("1".equalsIgnoreCase(this.column_version[i])) {
           Iterator<Map.Entry<Long, byte[]>> iter = vmap.entrySet().iterator();
           int id = 0;
           while (iter.hasNext()) {
             if (id == 0) {
               firstValue = iter.next().getValue();
               value = firstValue;
             } else {
               value = iter.next().getValue();
             }
             id++;
           }
           if (id > 0) {
             this.p.addColumn(families[i].getBytes(), this.columns[i].getBytes(), value);
           }
          } else { // take the value of the first (newest) version
           value = vmap.entrySet().iterator().next().getValue();
         }
        } else { // single version
         value = vmap.entrySet().iterator().next().getValue();
       }
       if (null == value) {
         line.addField(null);
       } else {
         line.addField(new String(value, encode));
       }
     }
      // Decide whether the HBase value should be substituted and rewritten
      if (ETLStringUtils.isNotEmpty(this.columnProcRule) && this.partRuleId == 1) { // field substitution is required
        if (ETLStringUtils.isEmpty(line.getField(this.partColumnIdx))) { // the field to substitute is empty, so fill it from the backup field
         line.addField(line.getField(this.bakPartColumnIdx), this.partColumnIdx);
         this.p.addColumn(
             families[partColumnIdx].getBytes(),
             this.columns[partColumnIdx].getBytes(),
             line.getField(this.bakPartColumnIdx).getBytes());
       }
     }
     if (this.p.size() > 0) {
       buffer.add(this.p);
     }
     if (buffer.size() >= BUFFER_LINE) {
       htable.put(buffer);
       htable.flushCommits();
       buffer.clear();
     }
   } else {
     for (int i = 0; i < this.families.length; i++) {
       byte[] value = result.getValue(this.families[i].getBytes(), this.columns[i].getBytes());
       if (null == value) {
         line.addField(null);
       } else {
         line.addField(new String(value, encode));
       }
     }
   }
    // Append the row key as the last field.
    line.addField(new String(result.getRow(), encode));
   return true;
 }
Code Example #15
 public void flushCommits() throws IOException {
   super.flushCommits();
 }