Example #1
  /**
   * Find all column families that are replicated from this cluster.
   *
   * @return the full list of the replicated column families of this cluster as: tableName, family
   *     name, replicationType
   *     <p>Currently replicationType is always Global. In the future, more replication types may
   *     be supported here, for example: 1) the replication may only apply to selected peers
   *     instead of all peers; 2) the replicationType may indicate that this cluster's servers act
   *     as the slave for the table:columnFam.
   */
  public List<HashMap<String, String>> listReplicated() throws IOException {
    List<HashMap<String, String>> replicationColFams = new ArrayList<HashMap<String, String>>();

    Admin admin = connection.getAdmin();
    HTableDescriptor[] tables;
    try {
      tables = admin.listTables();
    } finally {
      admin.close();
    }

    for (HTableDescriptor table : tables) {
      HColumnDescriptor[] columns = table.getColumnFamilies();
      String tableName = table.getNameAsString();
      for (HColumnDescriptor column : columns) {
        if (column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) {
          // At this moment, the column family is replicated to all peers.
          HashMap<String, String> replicationEntry = new HashMap<String, String>();
          replicationEntry.put(TNAME, tableName);
          replicationEntry.put(CFNAME, column.getNameAsString());
          replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL);
          replicationColFams.add(replicationEntry);
        }
      }
    }

    return replicationColFams;
  }
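
For context, a minimal sketch of how a caller might consume the list returned above; it assumes the same TNAME, CFNAME and REPLICATIONTYPE map keys used in the method, and the printed format is hypothetical:

  // Minimal usage sketch (assumption): print every replicated column family found above.
  List<HashMap<String, String>> replicated = listReplicated();
  for (HashMap<String, String> entry : replicated) {
    // Each entry describes one replicated column family: table name, family name, replication type.
    System.out.println(
        entry.get(TNAME) + ":" + entry.get(CFNAME) + " -> " + entry.get(REPLICATIONTYPE));
  }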
Example #2
 WALPutBenchmark(
     final HRegion region,
     final HTableDescriptor htd,
     final long numIterations,
     final boolean noSync,
     final int syncInterval,
     final double traceFreq) {
   this.numIterations = numIterations;
   this.noSync = noSync;
   this.syncInterval = syncInterval;
   this.numFamilies = htd.getColumnFamilies().length;
   this.region = region;
   this.htd = htd;
   // Configure the HTrace sampler: never sample when no span receivers are configured or
   // traceFreq <= 0, always sample when traceFreq >= 1, otherwise sample probabilistically.
   String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
   if (spanReceivers == null || spanReceivers.isEmpty()) {
     loopSampler = Sampler.NEVER;
   } else {
     if (traceFreq <= 0.0) {
       LOG.warn("Tracing enabled but traceFreq <= 0; tracing disabled for the loop.");
       loopSampler = Sampler.NEVER;
     } else if (traceFreq >= 1.0) {
       loopSampler = Sampler.ALWAYS;
       if (numIterations > 1000) {
         LOG.warn(
             "Full tracing of all iterations will produce a lot of data. Be sure your"
                 + " SpanReceiver can keep up.");
       }
     } else {
       getConf().setDouble("hbase.sampler.fraction", traceFreq);
       loopSampler = new ProbabilitySampler(new HBaseHTraceConfiguration(getConf()));
     }
   }
 }
Example #3

 /**
  * Create a new table descriptor cloning the snapshot table schema.
  *
  * @param snapshotTableDescriptor descriptor of the table captured by the snapshot
  * @param tableName name of the new table the schema is cloned to
  * @return cloned table descriptor
  * @throws IOException if the table schema cannot be cloned
  */
 public static HTableDescriptor cloneTableSchema(
     final HTableDescriptor snapshotTableDescriptor, final byte[] tableName) throws IOException {
   HTableDescriptor htd = new HTableDescriptor(tableName);
   for (HColumnDescriptor hcd : snapshotTableDescriptor.getColumnFamilies()) {
     htd.addFamily(hcd);
   }
   return htd;
 }
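
A minimal usage sketch for the helper above, assuming the pre-1.0 HTableDescriptor/HColumnDescriptor API used throughout these examples; the table and family names are hypothetical:

 // Build a source descriptor, clone its schema under a new name and check the families carried over.
 HTableDescriptor source = new HTableDescriptor(TableName.valueOf("sourceTable"));
 source.addFamily(new HColumnDescriptor("cf1"));
 source.addFamily(new HColumnDescriptor("cf2"));
 HTableDescriptor cloned = cloneTableSchema(source, Bytes.toBytes("clonedTable"));
 assert cloned.getColumnFamilies().length == source.getColumnFamilies().length;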
Example #4
 HLogPutBenchmark(
     final HRegion region,
     final HTableDescriptor htd,
     final long numIterations,
     final boolean noSync) {
   this.numIterations = numIterations;
   this.noSync = noSync;
   this.numFamilies = htd.getColumnFamilies().length;
   this.region = region;
   this.htd = htd;
 }
Example #5
  @Test(timeout = 120000)
  public void testChangeTable() throws Exception {
    HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable");
    hdt.setRegionReplication(NB_SERVERS);
    hdt.addCoprocessor(SlowMeCopro.class.getName());
    Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

    // basic test: it should work.
    Put p = new Put(row);
    p.add(f, row, row);
    table.put(p);

    Get g = new Get(row);
    Result r = table.get(g);
    Assert.assertFalse(r.isStale());

    // Add a CF, it should work.
    HTableDescriptor bHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
    HColumnDescriptor hcd = new HColumnDescriptor(row);
    hdt.addFamily(hcd);
    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.getHBaseAdmin().modifyTable(hdt.getTableName(), hdt);
    HTU.getHBaseAdmin().enableTable(hdt.getTableName());
    HTableDescriptor nHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
    Assert.assertEquals(
        "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
        bHdt.getColumnFamilies().length + 1,
        nHdt.getColumnFamilies().length);

    p = new Put(row);
    p.add(row, row, row);
    table.put(p);

    g = new Get(row);
    r = table.get(g);
    Assert.assertFalse(r.isStale());

    try {
      SlowMeCopro.cdl.set(new CountDownLatch(1));
      g = new Get(row);
      g.setConsistency(Consistency.TIMELINE);
      r = table.get(g);
      Assert.assertTrue(r.isStale());
    } finally {
      SlowMeCopro.cdl.get().countDown();
      SlowMeCopro.sleepTime.set(0);
    }

    Admin admin = HTU.getHBaseAdmin();
    nHdt = admin.getTableDescriptor(hdt.getTableName());
    Assert.assertEquals(
        "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
        bHdt.getColumnFamilies().length + 1,
        nHdt.getColumnFamilies().length);

    admin.disableTable(hdt.getTableName());
    admin.deleteTable(hdt.getTableName());
    admin.close();
  }
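
The test above exercises the standard online schema change flow. As a minimal sketch, assuming an Admin handle from the test utility and a hypothetical table and family name, the disable/modify/enable pattern it relies on looks like this:

  // Add a column family to an existing table: disable it, modify the descriptor, re-enable it.
  Admin admin = HTU.getHBaseAdmin();
  HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf("someTable"));
  desc.addFamily(new HColumnDescriptor("newFamily"));
  admin.disableTable(desc.getTableName());
  admin.modifyTable(desc.getTableName(), desc);
  admin.enableTable(desc.getTableName());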
Example #6
  @Test(timeout = 30000)
  public void testBulkLoad() throws IOException {
    // Create table then get the single region for our new table.
    LOG.debug("Creating test table");
    HTableDescriptor hdt = HTU.createTableDescriptor("testBulkLoad");
    hdt.setRegionReplication(NB_SERVERS);
    hdt.addCoprocessor(SlowMeCopro.class.getName());
    Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

    // create hfiles to load.
    LOG.debug("Creating test data");
    Path dir = HTU.getDataTestDirOnTestFS("testBulkLoad");
    final int numRows = 10;
    final byte[] qual = Bytes.toBytes("qual");
    final byte[] val = Bytes.toBytes("val");
    final List<Pair<byte[], String>> famPaths = new ArrayList<Pair<byte[], String>>();
    for (HColumnDescriptor col : hdt.getColumnFamilies()) {
      Path hfile = new Path(dir, col.getNameAsString());
      TestHRegionServerBulkLoad.createHFile(
          HTU.getTestFileSystem(), hfile, col.getName(), qual, val, numRows);
      famPaths.add(new Pair<byte[], String>(col.getName(), hfile.toString()));
    }

    // bulk load HFiles
    LOG.debug("Loading test data");
    @SuppressWarnings("deprecation")
    final HConnection conn = HTU.getHBaseAdmin().getConnection();
    RegionServerCallable<Void> callable =
        new RegionServerCallable<Void>(
            conn, hdt.getTableName(), TestHRegionServerBulkLoad.rowkey(0)) {
          @Override
          public Void call(int timeout) throws Exception {
            LOG.debug(
                "Going to connect to server "
                    + getLocation()
                    + " for row "
                    + Bytes.toStringBinary(getRow()));
            byte[] regionName = getLocation().getRegionInfo().getRegionName();
            BulkLoadHFileRequest request =
                RequestConverter.buildBulkLoadHFileRequest(famPaths, regionName, true);
            getStub().bulkLoadHFile(null, request);
            return null;
          }
        };
    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(HTU.getConfiguration());
    RpcRetryingCaller<Void> caller = factory.<Void>newCaller();
    caller.callWithRetries(callable, 10000);

    // verify we can read them from the primary
    LOG.debug("Verifying data load");
    for (int i = 0; i < numRows; i++) {
      byte[] row = TestHRegionServerBulkLoad.rowkey(i);
      Get g = new Get(row);
      Result r = table.get(g);
      Assert.assertFalse(r.isStale());
    }

    // verify we can read them from the replica
    LOG.debug("Verifying replica queries");
    try {
      SlowMeCopro.cdl.set(new CountDownLatch(1));
      for (int i = 0; i < numRows; i++) {
        byte[] row = TestHRegionServerBulkLoad.rowkey(i);
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table.get(g);
        Assert.assertTrue(r.isStale());
      }
      SlowMeCopro.cdl.get().countDown();
    } finally {
      SlowMeCopro.cdl.get().countDown();
      SlowMeCopro.sleepTime.set(0);
    }

    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }