public static boolean createTableOrOverwrite(String tableName, String[] columnFamily) {
    if (tableName == null || columnFamily == null) {
      return false;
    }
    Connection connection = null;
    try {
      connection = ConnectionFactory.createConnection(config);
      Admin admin = connection.getAdmin();
      if (admin.tableExists(TableName.valueOf(tableName))) {
        admin.disableTable(TableName.valueOf(tableName));
        admin.deleteTable(TableName.valueOf(tableName));
      }
      HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));

      for (String cf : columnFamily) {
        table.addFamily(new HColumnDescriptor(cf));
      }

      admin.createTable(table);
      System.out.println("create table successfully.");
      return true;
    } catch (IOException e) {
      e.printStackTrace();
      return false;
    } finally {
      if (connection != null) {
        try {
          connection.close();
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    }
  }
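A minimal usage sketch for the helper above (hypothetical caller; it assumes the enclosing class holds a static Configuration field named config, e.g. built with HBaseConfiguration.create(), as implied by the createConnection(config) call):

  public static void main(String[] args) {
    // Table and column family names here are illustrative only.
    boolean created = createTableOrOverwrite("demo_table", new String[] {"cf1", "cf2"});
    if (!created) {
      System.err.println("Failed to (re)create demo_table.");
    }
  }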
 private void deleteTable() throws IOException {
   admin.disableTable(tn);
   admin.deleteTable(tn);
   if (!admin.tableExists(tn)) {
     System.out.println(tableName + " is deleted!");
   }
 }
Example #3
 /**
  * Set the table's replication switch if it is not already in the desired state.
  *
  * @param tableName name of the table
  * @param isRepEnabled whether replication should be enabled or disabled
  * @throws IOException if a remote or network exception occurs
  */
 private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException {
   Admin admin = null;
   try {
     admin = this.connection.getAdmin();
     HTableDescriptor htd = admin.getTableDescriptor(tableName);
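      // XOR: modify the table only when its current replication state differs from the desired one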
     if (isTableRepEnabled(htd) ^ isRepEnabled) {
       boolean isOnlineSchemaUpdateEnabled =
           this.connection
               .getConfiguration()
               .getBoolean("hbase.online.schema.update.enable", true);
       if (!isOnlineSchemaUpdateEnabled) {
         admin.disableTable(tableName);
       }
       for (HColumnDescriptor hcd : htd.getFamilies()) {
         hcd.setScope(
             isRepEnabled
                 ? HConstants.REPLICATION_SCOPE_GLOBAL
                 : HConstants.REPLICATION_SCOPE_LOCAL);
       }
       admin.modifyTable(tableName, htd);
       if (!isOnlineSchemaUpdateEnabled) {
         admin.enableTable(tableName);
       }
     }
   } finally {
     if (admin != null) {
       try {
         admin.close();
       } catch (IOException e) {
         LOG.warn("Failed to close admin connection.");
         LOG.debug("Details on failure to close admin connection.", e);
       }
     }
   }
 }
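For reference, a plausible sketch of the isTableRepEnabled helper called above (it is not shown in this snippet; this assumes it reports replication as enabled only when every column family has a global replication scope):

 private boolean isTableRepEnabled(HTableDescriptor htd) {
   for (HColumnDescriptor hcd : htd.getFamilies()) {
     // Any family with a non-global scope means replication is not fully enabled.
     if (hcd.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
       return false;
     }
   }
   return true;
 }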
Example #4
 @Test
 public void createTableInDefaultNamespace() throws Exception {
   HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("default_table"));
   HColumnDescriptor colDesc = new HColumnDescriptor("cf1");
   desc.addFamily(colDesc);
   admin.createTable(desc);
    assertEquals(1, admin.listTables().length);
   admin.disableTable(desc.getTableName());
   admin.deleteTable(desc.getTableName());
 }
 @After
 public void tearDown() throws IOException {
   LOG.info("Cleaning up after test.");
   Admin admin = util.getHBaseAdmin();
   if (admin.tableExists(TABLE_NAME)) {
    if (admin.isTableEnabled(TABLE_NAME)) {
      admin.disableTable(TABLE_NAME);
    }
     admin.deleteTable(TABLE_NAME);
   }
   LOG.info("Restoring cluster.");
   util.restoreCluster();
   LOG.info("Cluster restored.");
 }
Example #6
 @Before
 public void beforeMethod() throws IOException {
   for (HTableDescriptor desc : admin.listTables(prefix + ".*")) {
     admin.disableTable(desc.getTableName());
     admin.deleteTable(desc.getTableName());
   }
   for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
     if (ns.getName().startsWith(prefix)) {
       admin.deleteNamespace(ns.getName());
     }
   }
 }
Example #7
 @Test
 public void createTableInSystemNamespace() throws Exception {
   TableName tableName = TableName.valueOf("hbase:createTableInSystemNamespace");
   HTableDescriptor desc = new HTableDescriptor(tableName);
   HColumnDescriptor colDesc = new HColumnDescriptor("cf1");
   desc.addFamily(colDesc);
   admin.createTable(desc);
   assertEquals(0, admin.listTables().length);
   assertTrue(admin.tableExists(tableName));
   admin.disableTable(desc.getTableName());
   admin.deleteTable(desc.getTableName());
 }
Example #8
 public boolean removeTable(String tableName) {
   // try-with-resources ensures the Admin handle is closed even on failure
   try (Admin admin = connection.getAdmin()) {
     TableName t = TableName.valueOf(tableName);
     if (admin.tableExists(t)) {
       admin.disableTable(t);
       admin.deleteTable(t);
       return true;
     }
   } catch (IOException e) {
     e.printStackTrace();
   }
   return false;
 }
  @Before
  public void setUp() throws Exception {
    LOG.info(String.format("Initializing cluster with %d region servers.", REGION_SERVER_COUNT));
    util.initializeCluster(REGION_SERVER_COUNT);
    LOG.info("Cluster initialized");

    Admin admin = util.getHBaseAdmin();
    if (admin.tableExists(TABLE_NAME)) {
      LOG.info(String.format("Deleting existing table %s.", TABLE_NAME));
      if (admin.isTableEnabled(TABLE_NAME)) {
        admin.disableTable(TABLE_NAME);
      }
      admin.deleteTable(TABLE_NAME);
      LOG.info(String.format("Existing table %s deleted.", TABLE_NAME));
    }
    LOG.info("Cluster ready");
  }
 public static boolean dropTable(Admin admin, String table) {
   TableName tableName = TableName.valueOf(table);
   try {
     admin.disableTable(tableName);
     admin.deleteTable(tableName);
     return true;
   } catch (IOException e) {
     e.printStackTrace();
     return false;
   }
 }
    @Override
    void perform() throws IOException {

      HTableDescriptor selected = selectTable(enabledTables);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        TableName tableName = selected.getTableName();
        LOG.info("Disabling table :" + selected);
        admin.disableTable(tableName);
        Assert.assertTrue(
            "Table: " + selected + " was not disabled", admin.isTableDisabled(tableName));
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        disabledTables.put(tableName, freshTableDesc);
        LOG.info("Disabled table :" + freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        // TODO workaround
        // loose restriction for TableNotDisabledException/TableNotEnabledException thrown in sync
        // operations
        // 1) when enable/disable starts, the table state is changed to ENABLING/DISABLING (ZK node
        // in 1.x), which will be further changed to ENABLED/DISABLED once the operation completes
        // 2) if master failover happens in the middle of the enable/disable operation, the new
        // master will try to recover the tables in ENABLING/DISABLING state, as programmed in
        // AssignmentManager#recoverTableInEnablingState() and
        // AssignmentManager#recoverTableInDisablingState()
        // 3) after the new master initialization completes, the procedure tries to re-do the
        // enable/disable operation, which was already done. Ignore those exceptions before change
        // of behaviors of AssignmentManager in presence of PV2
        if (e instanceof TableNotEnabledException) {
          LOG.warn("Caught TableNotEnabledException in action: " + this.getClass());
          e.printStackTrace();
        } else {
          throw e;
        }
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #12
  @Test
  public void createDoubleTest() throws IOException, InterruptedException {
    String testName = "createDoubleTest";
    String nsName = prefix + "_" + testName;
    LOG.info(testName);

    TableName tableName = TableName.valueOf("my_table");
    TableName tableNameFoo = TableName.valueOf(nsName + ":my_table");
    // create namespace and verify
    admin.createNamespace(NamespaceDescriptor.create(nsName).build());
    TEST_UTIL.createTable(tableName, Bytes.toBytes(nsName));
    TEST_UTIL.createTable(tableNameFoo, Bytes.toBytes(nsName));
    assertEquals(2, admin.listTables().length);
    assertNotNull(admin.getTableDescriptor(tableName));
    assertNotNull(admin.getTableDescriptor(tableNameFoo));
    // remove namespace and verify
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
    assertEquals(1, admin.listTables().length);
  }
  public boolean doTest()
      throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException,
          IOException, InterruptedException {
    /* create config */
    conf_h = HBaseConfiguration.create();
    conf_h.set("hbase.zookeeper.quorum", "localhost");
    conf_h.set("hbase.zookeeper.property.clientPort", "2181");
    Connection con_h;
    try {
      con_h = ConnectionFactory.createConnection(conf_h);
    } catch (IOException e) {
      e.printStackTrace();
      return false; // cannot proceed without a connection
    }
    Admin admin = con_h.getAdmin();
    HTableDescriptor tableDesc = new HTableDescriptor(tableName_chi);
    HColumnDescriptor colFamDesc = new HColumnDescriptor("count");
    colFamDesc.setMaxVersions(1);
    tableDesc.addFamily(colFamDesc);
    admin.createTable(tableDesc);

    /* count and insert into chiTable */
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("products"), Bytes.toBytes("product_category_id"));
    scan.addColumn(Bytes.toBytes("orders"), Bytes.toBytes("order_date"));
    // Create a new Job bound to no particular cluster
    Job job = Job.getInstance(conf_h, "Count");
    // Locate the jar by finding where the given class came from
    job.setJarByClass(ChiSquaredTest2_abc.class);
    // Read from the "retail_order" table with Map1 emitting (Text, IntWritable) pairs
    TableMapReduceUtil.initTableMapperJob(
        "retail_order", scan, Map1.class, Text.class, IntWritable.class, job);
    // Write the reducer output to the "chiTable" table
    TableMapReduceUtil.initTableReducerJob("chiTable", Reduce1.class, job);

    // Submit the job to the cluster and wait for it to finish (true = print progress to the user)
    job.waitForCompletion(true);

    /* extract values from chiTable */
    int totalY = 0;
    int totalN = 0;
    ArrayList<CellOfHTable> chiTable = new ArrayList<CellOfHTable>();
    Table table_h = con_h.getTable(tableName_chi);
    Scan s = new Scan();
    s.addFamily(Bytes.toBytes("count"));
    ResultScanner results = table_h.getScanner(s);
    for (Result r : results) {
      CellOfHTable c =
          new CellOfHTable(
              r.getRow(),
              r.getValue(Bytes.toBytes("count"), Bytes.toBytes("Y")) == null
                  ? Bytes.toBytes(0)
                  : r.getValue(Bytes.toBytes("count"), Bytes.toBytes("Y")),
              r.getValue(Bytes.toBytes("count"), Bytes.toBytes("N")) == null
                  ? Bytes.toBytes(0)
                  : r.getValue(
                      Bytes.toBytes("count"), Bytes.toBytes("N"))); // (id, count_Y, count_N)
      chiTable.add(c);
      totalY = totalY + c.countY;
      totalN = totalN + c.countN;
    }

    results.close();
    table_h.close();
    admin.disableTable(tableName_chi);
    admin.deleteTable(tableName_chi);

    // Pearson's chi-squared statistic: sum over all cells of (observed - expected)^2 / expected
    double chisquare = 0.0;
    for (int i = 0; i < chiTable.size(); i++) {
      CellOfHTable c = chiTable.get(i);
      double expectY =
          (double) (c.countY + c.countN) * (double) totalY / (double) (totalY + totalN);
      chisquare =
          chisquare + (((double) c.countY - expectY) * ((double) c.countY - expectY) / expectY);
      double expectN =
          (double) (c.countY + c.countN) * (double) totalN / (double) (totalY + totalN);
      chisquare =
          chisquare + (((double) c.countN - expectN) * ((double) c.countN - expectN) / expectN);
    }

    System.out.println(chisquare);
    ChiSquareDist csd = new ChiSquareDist(chiTable.size() - 1);
    // reject the null hypothesis of independence at significance level alpha
    return chisquare > csd.inverseF(1.0 - alpha);
  }
  private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality)
      throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
    int hostCount = 1;
    int regionNum = 5;
    if (shouldKeepLocality) {
      // We should change host count higher than hdfs replica count when MiniHBaseCluster supports
      // explicit hostnames parameter just like MiniDFSCluster does.
      hostCount = 3;
      regionNum = 20;
    }

    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
      hostnames[i] = "datanode_" + i;
    }
    util.startMiniCluster(1, hostCount, hostnames);

    Table table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    try (RegionLocator r = util.getConnection().getRegionLocator(TABLE_NAME);
        Admin admin = util.getConnection().getAdmin()) {
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = r.getStartKeys().length;
      assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

      // Generate the bulk load files
      runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getName());
        while (util.getMiniHBaseCluster()
            .getMaster()
            .getAssignmentManager()
            .getRegionStates()
            .isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        util.deleteTable(table.getName());
        byte[][] newSplitKeys = generateRandomSplitKeys(14);
        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);

        while (util.getConnection().getRegionLocator(TABLE_NAME).getAllRegionLocations().size()
                != 15
            || !admin.isTableAvailable(table.getName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Check region locality
      HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
      for (HRegion region : util.getHBaseCluster().getRegions(TABLE_NAME)) {
        hbd.add(region.getHDFSBlocksDistribution());
      }
      for (String hostname : hostnames) {
        float locality = hbd.getBlockLocalityIndex(hostname);
        LOG.info("locality of [" + hostname + "]: " + locality);
        assertEquals(100, (int) (locality * 100));
      }

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME);
      assertEquals(
          "Data should remain after reopening of regions",
          tableDigestBefore,
          util.checksumRows(table));
    } finally {
      testDir.getFileSystem(conf).delete(testDir, true);
      util.deleteTable(TABLE_NAME);
      util.shutdownMiniCluster();
    }
  }
 @After
 public void tearDown() throws Exception {
   admin.disableTable(TableName.valueOf(tableName));
   admin.deleteTable(TableName.valueOf(tableName));
   admin.close();
 }
  private void doIncrementalLoadTest(boolean shouldChangeRegions) throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    byte[][] splitKeys = generateRandomSplitKeys(4);
    util.startMiniCluster();
    try {
      HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
      Admin admin = table.getConnection().getAdmin();
      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = -1;
      try (RegionLocator r = table.getRegionLocator()) {
        numRegions = r.getStartKeys().length;
      }
      assertEquals("Should make 5 regions", numRegions, 5);

      // Generate the bulk load files
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table.getTableDescriptor(), table.getRegionLocator(), testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getName());
        while (util.getMiniHBaseCluster()
            .getMaster()
            .getAssignmentManager()
            .getRegionStates()
            .isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        util.deleteTable(table.getName());
        byte[][] newSplitKeys = generateRandomSplitKeys(14);
        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);

        while (table.getRegionLocator().getAllRegionLocations().size() != 15
            || !admin.isTableAvailable(table.getName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME);
      assertEquals(
          "Data should remain after reopening of regions",
          tableDigestBefore,
          util.checksumRows(table));
    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }