/**
   * A setup method to start the test cluster. The AggregateImplementation coprocessor is
   * registered and will be loaded during region startup.
   *
   * @throws Exception if the mini cluster or the test table cannot be set up
   */
  @BeforeClass
  public static void setupBeforeClass() throws Exception {

    conf.set(
        CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

    util.startMiniCluster(2);
    HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
    util.createMultiRegions(
        util.getConfiguration(),
        table,
        TEST_FAMILY,
        new byte[][] {HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1], ROWS[rowSeperator2]});
    /**
     * The test table has one CQ which is always populated and one variable CQ per row:
     *   rowKey1: CF:CQ, CF:CQ1
     *   rowKey2: CF:CQ, CF:CQ2
     */
    for (int i = 0; i < ROWSIZE; i++) {
      Put put = new Put(ROWS[i]);
      put.setWriteToWAL(false);
      Long l = Long.valueOf(i);
      put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(l));
      table.put(put);
      Put p2 = new Put(ROWS[i]);
      p2.setWriteToWAL(false);
      p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(l)), Bytes.toBytes(l * 10));
      table.put(p2);
    }
    table.close();
  }
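
  // A minimal sketch of how a test might exercise the coprocessor registered above, assuming the
  // 0.92-era AggregationClient / LongColumnInterpreter client API; signatures may differ in other
  // HBase versions, so treat this as an illustration rather than the original test body.
  @Test
  public void testSumWithValidRange() throws Throwable {
    AggregationClient aClient = new AggregationClient(conf);
    Scan scan = new Scan();
    scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
    // Each row i holds the long value i in CF:CQ, so the sum is 0 + 1 + ... + (ROWSIZE - 1).
    long sum = aClient.sum(TEST_TABLE, new LongColumnInterpreter(), scan);
    assertEquals((long) ROWSIZE * (ROWSIZE - 1) / 2, sum);
  }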
  /**
   * Creates an HBase {@link Put} from a Storm {@link Tuple}.
   *
   * @param tuple the Storm {@link Tuple} to convert
   * @return the resulting {@link Put}
   */
  public Put getPutFromTuple(final Tuple tuple) {
    byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField));

    long ts = 0;
    if (!tupleTimestampField.isEmpty()) {
      ts = tuple.getLongByField(tupleTimestampField);
    }

    Put p = new Put(rowKey);
    p.setWriteToWAL(writeToWAL);

    if (columnFamilies.size() > 0) {
      for (String cf : columnFamilies.keySet()) {
        byte[] cfBytes = Bytes.toBytes(cf);
        for (String cq : columnFamilies.get(cf)) {
          byte[] cqBytes = Bytes.toBytes(cq);
          byte[] val = Bytes.toBytes(tuple.getStringByField(cq));

          if (ts > 0) {
            p.add(cfBytes, cqBytes, ts, val);
          } else {
            p.add(cfBytes, cqBytes, val);
          }
        }
      }
    }

    return p;
  }
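
  // A minimal usage sketch, assuming this method lives in a Storm bolt that keeps an open HTable
  // in a "table" field and an OutputCollector in a "collector" field; both names are illustrative
  // assumptions, not part of the original code.
  public void execute(final Tuple tuple) {
    try {
      table.put(getPutFromTuple(tuple));
      collector.ack(tuple);
    } catch (IOException e) {
      collector.fail(tuple);
    }
  }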
Example #3
 /*
  * Add a value to each of the regions in .META.  The key is the start row of the
  * region (except it's 'aaa' for the first region).  The actual value is the row name.
  * @param expected the expected number of rows to add
  * @return the number of rows added
  * @throws IOException
  */
 private static int addToEachStartKey(final int expected) throws IOException {
   HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
   HTable meta = new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
   int rows = 0;
   Scan scan = new Scan();
   scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
   ResultScanner s = meta.getScanner(scan);
   for (Result r = null; (r = s.next()) != null; ) {
     byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     if (b == null || b.length <= 0) break;
     HRegionInfo hri = Writables.getHRegionInfo(b);
     // Use the region's start key as the row ('aaa' for the first region).
     byte[] row = getStartKey(hri);
     Put p = new Put(row);
     p.setWriteToWAL(false);
     p.add(getTestFamily(), getTestQualifier(), row);
     t.put(p);
     rows++;
   }
   s.close();
   Assert.assertEquals(expected, rows);
   t.close();
   meta.close();
   return rows;
 }
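
 // The getStartKey() helper used above is not shown here; a plausible sketch, based on the
 // comment that the first region (empty start key) uses 'aaa' instead of its start key:
 private static byte[] getStartKey(final HRegionInfo hri) {
   return Bytes.equals(hri.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)
       ? Bytes.toBytes("aaa")
       : hri.getStartKey();
 }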
Example #4
  private void putData(byte[] cf, String row, String col, long versionStart, long versionEnd)
      throws IOException {
    byte[] columnBytes = Bytes.toBytes(col);
    Put put = new Put(Bytes.toBytes(row));
    put.setWriteToWAL(false);

    for (long version = versionStart; version <= versionEnd; version++) {
      put.add(cf, columnBytes, version, genValue(row, col, version));
    }
    region.put(put);
  }
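
  // A minimal usage sketch: write versions 1 through 5 of one cell and read it back through the
  // region handle used above (the row, column and family names are illustrative assumptions).
  private void putDataExample() throws IOException {
    byte[] cf = Bytes.toBytes("testFamily");
    putData(cf, "testRow", "testCol", 1L, 5L);
    Get get = new Get(Bytes.toBytes("testRow"));
    get.addColumn(cf, Bytes.toBytes("testCol"));
    // By default only the newest version (5) is returned.
    Result result = region.get(get, null);
  }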
Example #5
  public Put constructRow(
      String rowKey, String family, String[] qualifiers, long ts, String[] values)
      throws Exception {
    if (table == null) throw new Exception("No table handle available");

    Put put = new Put(Bytes.toBytes(rowKey));
    put.setWriteToWAL(false);
    for (int i = 0; i < qualifiers.length; i++) {
      if (ts >= 0) {
        put.add(Bytes.toBytes(family), Bytes.toBytes(qualifiers[i]), ts, Bytes.toBytes(values[i]));
      } else {
        put.add(Bytes.toBytes(family), Bytes.toBytes(qualifiers[i]), Bytes.toBytes(values[i]));
      }
    }

    return put;
  }
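
  // A minimal usage sketch (the table is assumed to be the HTable checked for null above; row,
  // family and qualifier names are illustrative assumptions): build a row with an explicit
  // timestamp and write it through the table handle.
  public void writeExampleRow() throws Exception {
    String[] qualifiers = {"q1", "q2"};
    String[] values = {"v1", "v2"};
    Put put = constructRow("exampleRow", "f", qualifiers, System.currentTimeMillis(), values);
    table.put(put);
    table.flushCommits();
  }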
Example #6
 @SuppressWarnings("deprecation")
 @Override
 public void commitRow(byte[] row, long startId, long commitId, boolean isDelete, Integer lockId)
     throws IOException {
   Get get = new Get(row);
   get.setMaxVersions();
   get.addFamily(DominoConst.INNER_FAMILY);
   Result r = region.get(get, lockId);
   if (!containsStatus(r, startId)) {
      // Another transaction may have committed this version of the row.
     LOG.info(
         "Commit: No status found, returning: {}.{}", new String(this.getName()), new String(row));
     return;
   }
   List<KeyValue> versions = r.getColumn(DominoConst.INNER_FAMILY, DominoConst.VERSION_COL);
   Put commit = new Put(row);
   commit.setWriteToWAL(true);
   boolean isFresh = true;
   if (versions.size() >= DominoConst.MAX_VERSION) {
     // We need to clean the earliest version.
     LOG.info(
         "Commit: rolling version window: {}.{}", new String(this.getName()), new String(row));
     isFresh = addClearColumns(commit, versions, r, row, isDelete, commitId, startId, lockId);
   }
   KeyValue clearStatusKV =
       new KeyValue(
           row, DominoConst.INNER_FAMILY, DominoConst.STATUS_COL, startId, KeyValue.Type.Delete);
   commit.add(clearStatusKV);
   byte[] value = DominoConst.versionValue(startId, isDelete);
   if (isFresh) {
     KeyValue commitKV =
         new KeyValue(row, DominoConst.INNER_FAMILY, DominoConst.VERSION_COL, commitId, value);
     commit.add(commitKV);
   }
   // commitNumericModifications(row, startId, lockId, commit);
   mutateRow(commit, lockId);
 }
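
 // The containsStatus() helper used above is not shown here; a plausible sketch, assuming it
 // looks for a status cell written at the transaction's start id (the same cell that the
 // clearStatusKV delete above removes):
 private boolean containsStatus(Result r, long startId) {
   List<KeyValue> statusCells = r.getColumn(DominoConst.INNER_FAMILY, DominoConst.STATUS_COL);
   for (KeyValue kv : statusCells) {
     if (kv.getTimestamp() == startId) {
       return true;
     }
   }
   return false;
 }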
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3);
    REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
    context =
        JAXBContext.newInstance(
            CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class);
    marshaller = context.createMarshaller();
    unmarshaller = context.createUnmarshaller();
    client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    if (!admin.tableExists(TABLE)) {
      HTableDescriptor htd = new HTableDescriptor(TABLE);
      htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
      htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
      admin.createTable(htd);
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
      // Insert first half
      for (byte[] ROW : ROWS_ONE) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_ONE) {
          p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
        }
        table.put(p);
      }
      for (byte[] ROW : ROWS_TWO) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_TWO) {
          p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
        }
        table.put(p);
      }

      // Insert second half (reverse families)
      for (byte[] ROW : ROWS_ONE) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_ONE) {
          p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
        }
        table.put(p);
      }
      for (byte[] ROW : ROWS_TWO) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_TWO) {
          p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
        }
        table.put(p);
      }

      // Delete the second qualifier from all rows and families
      for (byte[] ROW : ROWS_ONE) {
        Delete d = new Delete(ROW);
        d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
        d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
        table.delete(d);
      }
      for (byte[] ROW : ROWS_TWO) {
        Delete d = new Delete(ROW);
        d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
        d.deleteColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
        table.delete(d);
      }
      colsPerRow -= 2;

      // Delete the second rows from both groups, one column at a time
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        Delete d = new Delete(ROWS_ONE[1]);
        d.deleteColumns(FAMILIES[0], QUALIFIER);
        d.deleteColumns(FAMILIES[1], QUALIFIER);
        table.delete(d);
      }
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        Delete d = new Delete(ROWS_TWO[1]);
        d.deleteColumns(FAMILIES[0], QUALIFIER);
        d.deleteColumns(FAMILIES[1], QUALIFIER);
        table.delete(d);
      }
      numRows -= 2;
      table.close();
    }
    admin.close();
  }
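
  // A minimal sketch of how a test might use the REST client and JAXB context configured above,
  // assuming the 0.92-era org.apache.hadoop.hbase.rest.client API (Client.get(path, accept),
  // Response.getBody()) and Constants.MIMETYPE_XML; the asserted row count is an assumption.
  @Test
  public void testGetFirstRowAsXML() throws Exception {
    String path = "/" + Bytes.toString(TABLE) + "/" + Bytes.toString(ROWS_ONE[0]);
    Response response = client.get(path, Constants.MIMETYPE_XML);
    assertEquals(200, response.getCode());
    CellSetModel cellSet =
        (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
    // A single-row GET should come back as a cell set with exactly one row model.
    assertEquals(1, cellSet.getRows().size());
  }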