/**
   * Creates the tables used by the graph store.
   *
   * @param config Hadoop configuration
   * @param vertexDataHandler vertex storage handler
   * @param edgeDataHandler edge storage handler
   * @param graphDataHandler graph storage handler
   * @param vertexDataTableName vertex data table name
   * @param edgeTableName edge data table name
   * @param graphDataTableName graph data table name
   * @throws IOException if table creation fails
   */
  private static void createTablesIfNotExists(
      final Configuration config,
      final VertexDataHandler vertexDataHandler,
      final EdgeDataHandler edgeDataHandler,
      final GraphDataHandler graphDataHandler,
      final String vertexDataTableName,
      final String edgeTableName,
      final String graphDataTableName)
      throws IOException {
    HTableDescriptor vertexDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(vertexDataTableName));
    HTableDescriptor edgeDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(edgeTableName));
    HTableDescriptor graphDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(graphDataTableName));

    HBaseAdmin admin = new HBaseAdmin(config);
    try {
      if (!admin.tableExists(vertexDataTableDescriptor.getName())) {
        vertexDataHandler.createTable(admin, vertexDataTableDescriptor);
      }
      if (!admin.tableExists(edgeDataTableDescriptor.getName())) {
        edgeDataHandler.createTable(admin, edgeDataTableDescriptor);
      }
      if (!admin.tableExists(graphDataTableDescriptor.getName())) {
        graphDataHandler.createTable(admin, graphDataTableDescriptor);
      }
    } finally {
      admin.close(); // close even if table creation throws
    }
  }
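
A minimal usage sketch for the helper above; the handler classes and the table names here are hypothetical placeholders, not part of any confirmed API:

    // Hypothetical caller: wires concrete handlers and table names into createTablesIfNotExists.
    Configuration config = HBaseConfiguration.create();
    createTablesIfNotExists(
        config,
        new BasicVertexDataHandler(),  // placeholder VertexDataHandler implementation
        new BasicEdgeDataHandler(),    // placeholder EdgeDataHandler implementation
        new BasicGraphDataHandler(),   // placeholder GraphDataHandler implementation
        "vertex_data",                 // illustrative table names
        "edge_data",
        "graph_data");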
  /**
   * Simulates a master crash during the meta migration and verifies that a new master can resume
   * the migration when it takes over. If a master dies mid-migration, some rows of the META
   * catalog family have already been rewritten with PB serialization while others still carry the
   * legacy Writable serialization. When the backup master (or a fresh master) retries the
   * migration, it encounters this mix of already-updated and legacy rows. The test validates that
   * the migration process safely skips the updated rows and migrates only the pending rows at
   * startup.
   *
   * @throws Exception if the migration or its verification fails
   */
  @Test
  public void testMasterCrashDuringMetaMigration() throws Exception {
    final byte[] FAMILY = Bytes.toBytes("family");
    HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration");
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    Configuration conf = TEST_UTIL.getConfiguration();
    // Create 10 new regions (PB serialization).
    createMultiRegionsWithPBSerialization(conf, htd.getName(), 10);
    // Create 10 legacy regions (Writable serialization).
    createMultiRegionsWithWritableSerialization(conf, htd.getName(), 10);
    CatalogTracker ct = TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
    // Erase the current version of root meta for this test.
    undoVersionInRoot(ct);

    MetaReader.fullScanMetaAndPrint(ct);
    LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");

    long numMigratedRows =
        MetaMigrationConvertingToPB.updateMetaIfNecessary(TEST_UTIL.getHBaseCluster().getMaster());
    // Only the 10 legacy (Writable) rows need migration; the PB rows are skipped.
    assertEquals(10, numMigratedRows);

    // Assert that the flag in ROOT is updated to reflect the correct status
    boolean metaUpdated =
        MetaMigrationConvertingToPB.isMetaTableUpdated(
            TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
    assertTrue(metaUpdated);

    verifyMetaRowsAreUpdated(ct);

    LOG.info("END testMasterCrashDuringMetaMigration");
  }
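
For context, a minimal sketch of how a migration pass can tell an already-converted row from a legacy one; it assumes result is a Result from a full META scan and that PB-serialized cells carry the protobuf magic prefix checked by ProtobufUtil.isPBMagicPrefix:

    // Decide per META row whether it still needs migration.
    byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    if (hriBytes != null && ProtobufUtil.isPBMagicPrefix(hriBytes)) {
      return;  // already migrated before the crash; safe to skip
    }
    // Otherwise: deserialize the cell with Writables and rewrite it in PB form.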
  /**
   * Deletes the given tables.
   *
   * @param config cluster configuration
   * @param vertexDataTableName vertex data table name
   * @param edgeDataTableName edge data table name
   * @param graphDataTableName graph data table name
   * @throws IOException if table deletion fails
   */
  private static void deleteTablesIfExists(
      final Configuration config,
      final String vertexDataTableName,
      final String edgeDataTableName,
      final String graphDataTableName)
      throws IOException {
    HTableDescriptor vertexDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(vertexDataTableName));
    HTableDescriptor edgeDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(edgeDataTableName));
    HTableDescriptor graphDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(graphDataTableName));

    HBaseAdmin admin = new HBaseAdmin(config);
    try {
      if (admin.tableExists(vertexDataTableDescriptor.getName())) {
        deleteTable(admin, vertexDataTableDescriptor);
      }
      if (admin.tableExists(edgeDataTableDescriptor.getName())) {
        deleteTable(admin, edgeDataTableDescriptor);
      }
      if (admin.tableExists(graphDataTableDescriptor.getName())) {
        deleteTable(admin, graphDataTableDescriptor);
      }
    } finally {
      admin.close(); // close even if table deletion throws
    }
  }
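
A usage sketch pairing this with the create helper above, e.g. to reset state between runs; the table names are illustrative:

    Configuration config = HBaseConfiguration.create();
    // Drop leftovers from a previous run before recreating the tables.
    deleteTablesIfExists(config, "vertex_data", "edge_data", "graph_data");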
 @Override
 public void add(HTableDescriptor htd) throws IOException {
   if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
     throw new NotImplementedException();
   }
   if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
     throw new NotImplementedException();
   }
   if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
     throw new NotImplementedException();
   }
   if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
   long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
   this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
 }
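
A brief sketch of calling this override; the two-argument FSTableDescriptors constructor is an assumption here, and the table name is illustrative:

    // Persists the descriptor to the filesystem (unless read-only) and refreshes the cache entry.
    FSTableDescriptors descriptors = new FSTableDescriptors(fs, rootdir);  // assumed constructor
    descriptors.add(new HTableDescriptor("users"));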
  @Test
  public void testMetaMigration() throws Exception {
    LOG.info("Starting testMetaMigration");
    final byte[] FAMILY = Bytes.toBytes("family");
    HTableDescriptor htd = new HTableDescriptor("testMetaMigration");
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    Configuration conf = TEST_UTIL.getConfiguration();
    byte[][] regionNames =
        new byte[][] {
          HConstants.EMPTY_START_ROW, Bytes.toBytes("region_a"), Bytes.toBytes("region_b")
        };
    createMultiRegionsWithWritableSerialization(conf, htd.getName(), regionNames);
    CatalogTracker ct = TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
    // Erase the current version of root meta for this test.
    undoVersionInRoot(ct);
    MetaReader.fullScanMetaAndPrint(ct);
    LOG.info("Meta Print completed.testMetaMigration");

    long numMigratedRows =
        MetaMigrationConvertingToPB.updateMeta(TEST_UTIL.getHBaseCluster().getMaster());
    MetaReader.fullScanMetaAndPrint(ct);

    // Every region we created with legacy serialization should have been migrated.
    assertEquals(regionNames.length, numMigratedRows);

    // Assert that the flag in ROOT is updated to reflect the correct status
    boolean metaUpdated =
        MetaMigrationConvertingToPB.isMetaTableUpdated(
            TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
    assertTrue(metaUpdated);
    verifyMetaRowsAreUpdated(ct);
  }
 /**
  * Create a new {@link HRegionInfo} from the snapshot region info. Keep the same startKey, endKey,
  * regionId and split information but change the table name.
  *
  * @param snapshotRegionInfo Info for region to clone.
  * @return the new HRegionInfo instance
  */
 public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
   return new HRegionInfo(
       tableDesc.getName(),
       snapshotRegionInfo.getStartKey(),
       snapshotRegionInfo.getEndKey(),
       snapshotRegionInfo.isSplit(),
       snapshotRegionInfo.getRegionId());
 }
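
A sketch of the restore-style loop this helper supports; snapshotRegionInfos stands in for whatever list of snapshot regions the caller already holds:

    // Mint one clone per snapshot region, each pointing at the target table.
    List<HRegionInfo> clonedRegions = new ArrayList<HRegionInfo>();
    for (HRegionInfo snapshotRegion : snapshotRegionInfos) {  // assumed input list
      clonedRegions.add(cloneRegionInfo(snapshotRegion));
    }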
 /**
  * Updates the table descriptor on the filesystem.
  *
  * @param fs filesystem to write the descriptor to
  * @param rootdir root directory of the HBase installation
  * @param hTableDescriptor descriptor to persist
  * @return Path to the new tableinfo file.
  * @throws IOException if the update fails
  */
 static Path updateHTableDescriptor(FileSystem fs, Path rootdir, HTableDescriptor hTableDescriptor)
     throws IOException {
   Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
   Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir, getTableInfoPath(fs, tableDir));
   if (p == null) throw new IOException("Failed update");
   LOG.info("Updated tableinfo=" + p);
   return p;
 }
  @Override
  public void setUp() throws Exception {
    // setup config values necessary for store
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt("hbase.hstore.compaction.min", minFiles);
    this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
    this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
    this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
    this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

    // Setting up a Store
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("table"));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);

    hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
    region = HRegion.createHRegion(info, basedir, conf, htd);
    HRegion.closeHRegion(region);
    Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
    region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);

    store = new HStore(basedir, region, hcd, fs, conf);
    manager = store.compactionPolicy;

    TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
    fs.create(TEST_FILE).close(); // create the empty file and release the stream
  }
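
For reference, a paraphrase (not a quote of the HBase source) of the ratio rule these settings feed: during minor compaction selection, a store file is considered eligible only while the candidate count stays within the configured min/max file bounds and the file is not too large relative to its newer peers:

    // Paraphrased selection rule, with illustrative variable names:
    // a file is eligible when its size is at most ratio times the total
    // size of the store files newer than it in the candidate list.
    boolean eligible = fileSize <= ratio * sumOfNewerFileSizes;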
 private void initHRegion(
     byte[] tableName, String callingMethod, Configuration conf, byte[]... families)
     throws IOException {
   HTableDescriptor htd = new HTableDescriptor(tableName);
   for (byte[] family : families) {
     htd.addFamily(new HColumnDescriptor(family));
   }
   HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
   Path path = new Path(DIR + callingMethod);
   if (fs.exists(path)) {
     if (!fs.delete(path, true)) {
       throw new IOException("Failed delete of " + path);
     }
   }
   region = HRegion.createHRegion(info, path, conf, htd);
 }
 @Test
 public void testCreateAndUpdate() throws IOException {
   Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
   HTableDescriptor htd = new HTableDescriptor("testCreate");
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
   assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
   FileStatus[] statuses = fs.listStatus(testdir);
   assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
   for (int i = 0; i < 10; i++) {
     FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
   }
   statuses = fs.listStatus(testdir);
   assertTrue(statuses.length == 1);
   Path tmpTableDir = new Path(FSUtils.getTablePath(testdir, htd.getName()), ".tmp");
   statuses = fs.listStatus(tmpTableDir);
   assertTrue(statuses.length == 0);
 }
  /**
   * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}.
   *
   * @param tableName table name
   * @param callingMethod name of the calling method, used to build the region directory
   * @param conf configuration to use
   * @param family base name for the column families; one family is created per bloom type
   * @return created and initialized region.
   * @throws IOException if region creation fails
   */
  private HRegion initHRegion(
      byte[] tableName, String callingMethod, HBaseConfiguration conf, String family)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (BloomType bloomType : BLOOM_TYPE) {
      HColumnDescriptor familyDesc =
          new HColumnDescriptor(family + "_" + bloomType)
              .setBlocksize(1)
              .setBloomFilterType(bloomType);
      htd.addFamily(familyDesc);
    }

    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);
    blockCache = new CacheConfig(conf).getBlockCache();
    return r;
  }
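
A short sketch of exercising the region this helper returns, assuming the test class's conf field; the row, qualifier, and value bytes are illustrative:

    // Write one cell into each bloom-typed family of the freshly created region.
    HRegion r = initHRegion(Bytes.toBytes("testBloom"), "testBloom", conf, "cf");
    Put put = new Put(Bytes.toBytes("row1"));
    for (BloomType bloomType : BLOOM_TYPE) {
      put.add(Bytes.toBytes("cf_" + bloomType), Bytes.toBytes("q"), Bytes.toBytes("v"));
    }
    r.put(put);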
  // Initialize and set up tables
  public static void initialize() throws Exception {

    setupLog4j();

    Configuration config = HBaseConfiguration.create();

    HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin = new HBaseAdmin(config);
    try {
      System.out.println("Creating the table " + TABLE_NAME);
      admin.createTable(desc);
    } catch (TableExistsException e) {
      System.out.println("Table " + TABLE_NAME + " already exists");
    }

    table = new TransactionalTable(config, desc.getName());

    try {
      transactionManager = new TransactionManager(config);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
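
A sketch of driving the transactional table initialized above, assuming the classic hbase-trx style API (beginTransaction / tryCommit); the row and column names are illustrative:

    // One transactional write, committed atomically.
    TransactionState tx = transactionManager.beginTransaction();
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(FAMILY, Bytes.toBytes("col"), Bytes.toBytes("value"));
    table.put(tx, put);
    transactionManager.tryCommit(tx);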
 /**
  * Deletes an HBase table.
  *
  * @param admin HBase admin
  * @param tableDescriptor descriptor for the table to delete
  * @throws IOException if disabling or deleting the table fails
  */
 private static void deleteTable(final HBaseAdmin admin, final HTableDescriptor tableDescriptor)
     throws IOException {
   LOG.info("deleting table: " + tableDescriptor.getNameAsString());
   admin.disableTable(tableDescriptor.getName());
   admin.deleteTable(tableDescriptor.getName());
 }
 @After
 public void tearDown() throws Exception {
   for (HTableDescriptor htd : this.admin.listTables()) {
     TEST_UTIL.deleteTable(htd.getName());
   }
 }