Example #1
 @Override
 protected int doWork() throws Exception {
   Connection connection = null;
   Admin admin = null;
   try {
     connection = ConnectionFactory.createConnection(getConf());
     admin = connection.getAdmin();
     HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
     if (snapshotType != null) {
        type = ProtobufUtil.createProtosSnapShotDescType(snapshotType);
     }
     admin.snapshot(
         new SnapshotDescription(snapshotName, tableName, ProtobufUtil.createSnapshotType(type)));
    } catch (Exception e) {
      // surface the failure instead of swallowing it silently
      System.err.println("failed to take the snapshot: " + e.getMessage());
      return -1;
   } finally {
     if (admin != null) {
       admin.close();
     }
     if (connection != null) {
       connection.close();
     }
   }
   return 0;
 }
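A minimal sketch of the same snapshot call using try-with-resources instead of the manual finally block (assuming the same HBase client API and the same snapshotName/tableName fields as above):

 @Override
 protected int doWork() throws Exception {
   // Connection and Admin are both Closeable, so try-with-resources
   // replaces the explicit finally cleanup above.
   try (Connection connection = ConnectionFactory.createConnection(getConf());
       Admin admin = connection.getAdmin()) {
     admin.snapshot(new SnapshotDescription(snapshotName, tableName,
         ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)));
     return 0;
   } catch (Exception e) {
     System.err.println("failed to take the snapshot: " + e.getMessage());
     return -1;
   }
 }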
Example #2
 /**
  * Sweeps the mob files on one column family. It deletes the unused mob files and merges the small
  * mob files into bigger ones.
  *
  * @param tableName The current table name in string format.
  * @param familyName The column family name.
   * @return 0 if success, 2 if the job aborted with an exception, 3 if unable to start due to
   *     another concurrent compaction, 4 if the MR job was unsuccessful
  * @throws IOException
  * @throws InterruptedException
  * @throws ClassNotFoundException
  * @throws KeeperException
  * @throws ServiceException
  */
 int sweepFamily(String tableName, String familyName)
     throws IOException, InterruptedException, ClassNotFoundException, KeeperException,
         ServiceException {
   Configuration conf = getConf();
   // make sure the target HBase exists.
   HBaseAdmin.checkHBaseAvailable(conf);
   Connection connection = ConnectionFactory.createConnection(getConf());
   Admin admin = connection.getAdmin();
   try {
     FileSystem fs = FileSystem.get(conf);
     TableName tn = TableName.valueOf(tableName);
     HTableDescriptor htd = admin.getTableDescriptor(tn);
     HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
     if (family == null || !family.isMobEnabled()) {
       throw new IOException("Column family " + familyName + " is not a MOB column family");
     }
     SweepJob job = new SweepJob(conf, fs);
     // Run the sweeping
     return job.sweep(tn, family);
   } catch (Exception e) {
     System.err.println("Job aborted due to exception " + e);
     return 2; // job failed
   } finally {
     try {
       admin.close();
     } catch (IOException e) {
       System.out.println("Failed to close the HBaseAdmin: " + e.getMessage());
     }
     try {
       connection.close();
     } catch (IOException e) {
       System.out.println("Failed to close the connection: " + e.getMessage());
     }
   }
 }
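For illustration, a hedged usage sketch that maps the documented return codes of sweepFamily to messages (the table and family names are hypothetical, and the call is assumed to sit in a method declaring the same checked exceptions):

 int ret = sweepFamily("mytable", "mobcf"); // hypothetical names
 switch (ret) {
   case 0: System.out.println("sweep succeeded"); break;
   case 2: System.out.println("job aborted with an exception"); break;
   case 3: System.out.println("could not start: another compaction is in progress"); break;
   case 4: System.out.println("MapReduce job was unsuccessful"); break;
   default: System.out.println("unexpected return code: " + ret);
 }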
   private static void createTable() throws Exception {
     try {
       Configuration configuration = HBaseConfiguration.create();
       // Load hbase-site.xml before the connection is created; adding the
       // resources afterwards has no effect on an already-built connection.
       HBaseConfiguration.addHbaseResources(configuration);
       HBaseAdmin.checkHBaseAvailable(configuration);

       // Connection and Admin are Closeable, so scope them with try-with-resources
       try (Connection connection = ConnectionFactory.createConnection(configuration);
           Admin admin = connection.getAdmin()) {

         // Instantiating table descriptor class
         HTableDescriptor stockTableDesc =
             new HTableDescriptor(TableName.valueOf(Constants.STOCK_DATES_TABLE));

         // Adding column families to table descriptor
         HColumnDescriptor stock_0414 = new HColumnDescriptor(Constants.STOCK_DATES_CF);
         stockTableDesc.addFamily(stock_0414);

         // Create the table through admin if it does not already exist
         if (!admin.tableExists(stockTableDesc.getTableName())) {
           admin.createTable(stockTableDesc);
           System.out.println("Stock table created !!!");
         }
       }
     } catch (ServiceException e) {
       log.error("Error occurred while creating HBase tables", e);
       throw new Exception("Error occurred while creating HBase tables", e);
     }
   }
Example #4
 public Map<String, Long> getRegionSizes(String tableName) {
   Map<String, Long> regions = new HashMap<>();
   try {
      // collect the region names of the table; RegionLocator is Closeable
      Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
      try (RegionLocator regionLocator =
          connection.getRegionLocator(TableName.valueOf(tableName))) {
        for (HRegionLocation regionInfo : regionLocator.getAllRegionLocations()) {
          tableRegions.add(regionInfo.getRegionInfo().getRegionName());
        }
      }
      // the Admin is only needed long enough to fetch the cluster status
      ClusterStatus clusterStatus;
      try (Admin admin = connection.getAdmin()) {
        clusterStatus = admin.getClusterStatus();
      }
     Collection<ServerName> servers = clusterStatus.getServers();
     final long megaByte = 1024L * 1024L;
     for (ServerName serverName : servers) {
       ServerLoad serverLoad = clusterStatus.getLoad(serverName);
       for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
         byte[] regionId = regionLoad.getName();
         if (tableRegions.contains(regionId)) {
           long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
           regions.put(regionLoad.getNameAsString(), regionSizeBytes);
         }
       }
     }
   } catch (IOException e) {
     e.printStackTrace();
   }
   return regions;
 }
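A short usage sketch for the method above (assuming an instance of the enclosing class named regionSizeCalculator; the table name is hypothetical). Since the values come from getStorefileSizeMB(), they are only MB-granular:

 Map<String, Long> sizes = regionSizeCalculator.getRegionSizes("my_table");
 for (Map.Entry<String, Long> entry : sizes.entrySet()) {
   System.out.println(entry.getKey() + " => "
       + (entry.getValue() / (1024L * 1024L)) + " MB");
 }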
  public static boolean createTableOrOverwrite(String tableName, String[] columnFamily) {
     if (tableName == null || columnFamily == null) {
      return false;
    }
     Connection connection = null;
     try {
       connection = ConnectionFactory.createConnection(config);
       // scope the Admin with try-with-resources so it is always closed
       try (Admin admin = connection.getAdmin()) {
         if (admin.tableExists(TableName.valueOf(tableName))) {
           admin.disableTable(TableName.valueOf(tableName));
           admin.deleteTable(TableName.valueOf(tableName));
         }
         HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));

         for (String cf : columnFamily) {
           table.addFamily(new HColumnDescriptor(cf));
         }

         admin.createTable(table);
         System.out.println("table created successfully.");
         return true;
       }
     } catch (IOException e) {
       e.printStackTrace();
       return false;
     } finally {
       if (connection != null) {
         try {
           connection.close();
         } catch (IOException e) {
           e.printStackTrace();
         }
       }
     }
  }
Example #6
  /**
   * Query all tables.
   *
   * @return all tables, or null if none exist
   * @throws Exception
   */
 public static List<HTableDescriptor> queryALLTable() throws Exception {
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
     if (admin != null) {
       HTableDescriptor[] listTables = admin.listTables();
       if (null != listTables && listTables.length > 0) {
         return Arrays.asList(listTables);
       }
     }
   } catch (Exception e) {
     e.printStackTrace();
   } finally {
     try {
       if (null != admin) {
         admin.close();
       }
     } catch (IOException e) {
       logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
     }
     try {
       if (null != conn) {
         conn.close();
       }
     } catch (Exception e) {
       logger.error("Connection close exception, errMsg:{}", e.getMessage());
     }
   }
   return null;
 }
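Returning null pushes a null check onto every caller; a sketch of the same lookup that returns an empty list instead and scopes both resources with try-with-resources (the same conf field is assumed, plus an import of java.util.Collections):

 public static List<HTableDescriptor> queryAllTables() throws IOException {
   try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
     HTableDescriptor[] listTables = admin.listTables();
     return listTables == null
         ? Collections.<HTableDescriptor>emptyList()
         : Arrays.asList(listTables);
   }
 }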
Example #7
  /**
   * Delete the table with the given name.
   *
   * @param tableName the table name
   */
  public void deleteTable(byte[] tableName) {
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
      // a table must be disabled before it can be deleted
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
   } catch (Exception e) {
     logger.error("HBaseAdmin deleteTable exception, errMsg:{}", e.getMessage());
   } finally {
     try {
       if (null != admin) {
         admin.close();
       }
     } catch (IOException e) {
       logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
     }
     try {
       if (null != conn) {
         conn.close();
       }
     } catch (Exception e) {
       logger.error("Connection close exception, errMsg:{}", e.getMessage());
     }
   }
 }
Example #8
  /**
   * Delete the column family with the given name.
   *
   * @param tableName the table name
   * @param columnFamilyName the column family name
   */
 public static void deleteFamily(byte[] tableName, String columnFamilyName) {
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
     admin.deleteColumn(tableName, columnFamilyName);
   } catch (Exception e) {
     logger.error("HBaseAdmin deleteColumn exception, errMsg:{}", e.getMessage());
   } finally {
     try {
       if (null != admin) {
         admin.close();
       }
     } catch (IOException e) {
       logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
     }
     try {
       if (null != conn) {
         conn.close();
       }
     } catch (Exception e) {
       logger.error("Connection close exception, errMsg:{}", e.getMessage());
     }
   }
 }
Example #9
  /**
   * Find all column families that are replicated from this cluster
   *
   * @return the full list of the replicated column families of this cluster as: tableName, family
   *     name, replicationType
   *     <p>Currently replicationType is Global. In the future, more replication types may be
   *     extended here. For example 1) the replication may only apply to selected peers instead of
   *     all peers 2) the replicationType may indicate the host Cluster servers as Slave for the
   *     table:columnFam.
   */
  public List<HashMap<String, String>> listReplicated() throws IOException {
    List<HashMap<String, String>> replicationColFams = new ArrayList<HashMap<String, String>>();

    Admin admin = connection.getAdmin();
    HTableDescriptor[] tables;
    try {
      tables = admin.listTables();
    } finally {
       admin.close();
    }

    for (HTableDescriptor table : tables) {
      HColumnDescriptor[] columns = table.getColumnFamilies();
      String tableName = table.getNameAsString();
      for (HColumnDescriptor column : columns) {
        if (column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) {
          // At this moment, the column family is replicated to all peers
          HashMap<String, String> replicationEntry = new HashMap<String, String>();
          replicationEntry.put(TNAME, tableName);
          replicationEntry.put(CFNAME, column.getNameAsString());
          replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL);
          replicationColFams.add(replicationEntry);
        }
      }
    }

    return replicationColFams;
  }
Example #10
 public boolean tableExists(String tableName) {
    try (Admin admin = connection.getAdmin()) {
     return admin.tableExists(TableName.valueOf(tableName));
   } catch (IOException e) {
     e.printStackTrace();
   }
   return false;
 }
 protected HTableDescriptor[] getTables(final Configuration configuration) throws IOException {
   HTableDescriptor[] htbls = null;
   try (Connection connection = ConnectionFactory.createConnection(configuration)) {
     try (Admin admin = connection.getAdmin()) {
       htbls = admin.listTables();
     }
   }
   return htbls;
 }
  public static Admin getAdmin(Connection connection) {
   Admin admin = null;
   try {
     admin = connection.getAdmin();
   } catch (IOException e) {
     e.printStackTrace();
   }
   return admin;
 }
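Returning a possibly-null Admin this way also leaves closing it to the caller; a sketch of a caller-side pattern that scopes the Admin with try-with-resources instead (the table name is hypothetical):

 try (Admin admin = connection.getAdmin()) {
   // closed automatically when the block exits
   System.out.println("exists: " + admin.tableExists(TableName.valueOf("my_table")));
 } catch (IOException e) {
   e.printStackTrace();
 }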
Example #13
 public void createTable(String tableName, List<String> columnFamilies) {
    try (Admin admin = connection.getAdmin()) {
     HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(tableName));
     for (String family : columnFamilies) {
       descriptor.addFamily(new HColumnDescriptor(family));
     }
     admin.createTable(descriptor);
   } catch (IOException e) {
     e.printStackTrace();
   }
 }
 @Before
 public void init() {
   conf = HBaseConfiguration.create();
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = conn.getAdmin();
     tn = TableName.valueOf(tableName);
     table = conn.getTable(tn);
     initData();
   } catch (Exception e) {
     e.printStackTrace();
   }
 }
Example #15
 public boolean removeTable(String tableName) {
    try (Admin admin = connection.getAdmin()) {
     TableName t = TableName.valueOf(tableName);
     if (admin.tableExists(t)) {
       admin.disableTable(t);
       admin.deleteTable(t);
       return true;
     }
   } catch (IOException e) {
     e.printStackTrace();
   }
   return false;
 }
 protected void verifyNamespaces() throws IOException {
   Connection connection = getConnection();
   Admin admin = connection.getAdmin();
   // iterating concurrent map
   for (String nsName : namespaceMap.keySet()) {
     try {
       Assert.assertTrue(
           "Namespace: " + nsName + " in namespaceMap does not exist",
           admin.getNamespaceDescriptor(nsName) != null);
     } catch (NamespaceNotFoundException nsnfe) {
       Assert.fail(
           "Namespace: " + nsName + " in namespaceMap does not exist: " + nsnfe.getMessage());
     }
   }
   admin.close();
 }
Example #17
  @Test(timeout = 60000)
  public void testPriorityRegionIsOpenedWithSeparateThreadPool() throws Exception {
    ThreadPoolExecutor exec =
        getRS().getExecutorService().getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);

    assertEquals(0, exec.getCompletedTaskCount());

    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setPriority(HConstants.HIGH_QOS);
    htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
        Admin admin = connection.getAdmin()) {
      admin.createTable(htd);
    }

    assertEquals(1, exec.getCompletedTaskCount());
  }
  public void manualTest(String[] args) throws Exception {
   Configuration conf = HBaseConfiguration.create();
   util = new HBaseTestingUtility(conf);
   if ("newtable".equals(args[0])) {
     TableName tname = TableName.valueOf(args[1]);
     byte[][] splitKeys = generateRandomSplitKeys(4);
     try (Table table = util.createTable(tname, FAMILIES, splitKeys)) {}
   } else if ("incremental".equals(args[0])) {
     TableName tname = TableName.valueOf(args[1]);
     try (Connection c = ConnectionFactory.createConnection(conf);
         Admin admin = c.getAdmin();
         RegionLocator regionLocator = c.getRegionLocator(tname)) {
       Path outDir = new Path("incremental-out");
       runIncrementalPELoad(conf, admin.getTableDescriptor(tname), regionLocator, outDir);
     }
   } else {
     throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental");
   }
 }
 protected void verifyTables() throws IOException {
   Connection connection = getConnection();
   Admin admin = connection.getAdmin();
   // iterating concurrent map
   for (TableName tableName : enabledTables.keySet()) {
     Assert.assertTrue(
         "Table: " + tableName + " in enabledTables is not enabled",
         admin.isTableEnabled(tableName));
   }
   for (TableName tableName : disabledTables.keySet()) {
     Assert.assertTrue(
         "Table: " + tableName + " in disabledTables is not disabled",
         admin.isTableDisabled(tableName));
   }
   for (TableName tableName : deletedTables.keySet()) {
     Assert.assertFalse(
         "Table: " + tableName + " in deletedTables is not deleted", admin.tableExists(tableName));
   }
   admin.close();
 }
Example #20
  /**
   * Create a table.
   *
   * @param table the table descriptor
   * @throws ParamIsNullException if the parameter is null
   * @throws TableExistsException if the table already exists
   */
  public static void createTable(HTableDescriptor table)
      throws ParamIsNullException, TableExistsException {
    if (null == table) {
      throw new ParamIsNullException("parameter must not be null");
    }
    logger.info("create table begin..., table:{}", table.toString());
   Connection conn = null;
   HBaseAdmin admin = null;
   String tableName = table.getNameAsString();
   try {
     logger.info("获取connection");
     conn = ConnectionFactory.createConnection(conf);
     logger.info("获取admin");
     admin = (HBaseAdmin) conn.getAdmin();
     /** 表已存在 */
     if (admin.tableExists(Bytes.toBytes(tableName))) {
       throw new TableExistsException(tableName);
     }
     logger.info("create...");
     admin.createTable(table);
     logger.info("table create success, tableName:{}", tableName);
   } catch (IOException e) {
     logger.error("table create fail, tableName:{}, errMsg:{}", tableName, e);
   } finally {
     if (null != admin) {
       try {
         admin.close();
       } catch (IOException e) {
         logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
       }
     }
     if (null != conn) {
       try {
         conn.close();
       } catch (IOException e) {
         logger.error("Connection close exception, errMsg:{}", e.getMessage());
       }
     }
   }
 }
Example #21
  /**
   * Add a column family to a table.
   *
   * @param tableName the table name
   * @param family the column family descriptor
   * @throws ParamIsNullException if a parameter is null
   * @throws TableNotFoundException if the table does not exist or the table connection fails
   */
  public static void addColumnFamily(String tableName, HColumnDescriptor family)
      throws ParamIsNullException, TableNotFoundException {
    if (null == tableName) {
      throw new ParamIsNullException("tableName must not be null");
    }
    if (null == family) {
      throw new ParamIsNullException("HColumnDescriptor must not be null");
    }
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
      // fail fast if the table does not exist
      if (!admin.tableExists(Bytes.toBytes(tableName))) {
        throw new TableNotFoundException(tableName);
      }
      // apply the schema change through the Admin API; calling
      // HTableDescriptor.addFamily alone only modifies the local descriptor
      admin.addColumn(Bytes.toBytes(tableName), family);
    } catch (IOException e) {
      logger.error("Exception while obtaining the HBase connection, errMsg:{}", e.getMessage());
   } finally {
     if (null != admin) {
       try {
         admin.close();
       } catch (IOException e) {
         logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
       }
     }
     if (null != conn) {
       try {
         conn.close();
       } catch (IOException e) {
         logger.error("Connection close exception, errMsg:{}", e.getMessage());
       }
     }
   }
 }
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    HBaseHelper helper = HBaseHelper.getHelper(conf);
    helper.dropTable("testtable");
    Connection connection = ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin();

    // vv CreateTableWithNamespaceExample
    /*[*/ NamespaceDescriptor namespace = NamespaceDescriptor.create("testspace").build();
    admin.createNamespace(namespace); /*]*/

    TableName tableName = TableName.valueOf("testspace", "testtable");
    HTableDescriptor desc = new HTableDescriptor(tableName);

    HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("colfam1"));
    desc.addFamily(coldef);

    admin.createTable(desc);
    // ^^ CreateTableWithNamespaceExample

     boolean avail = admin.isTableAvailable(tableName);
     System.out.println("Table available: " + avail);

     admin.close();
     connection.close();
   }
Example #23
 /**
  * Connect to peer and check the table descriptor on peer:
  *
  * <ol>
  *   <li>Create the same table on peer when not exist.
   *   <li>Throw an exception if the table exists on the peer cluster but the descriptors differ.
  * </ol>
  *
  * @param tableName name of the table to sync to the peer
  * @param splits table split keys
  * @throws IOException
  */
 private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[][] splits)
     throws IOException {
   List<ReplicationPeer> repPeers = listValidReplicationPeers();
   if (repPeers == null || repPeers.size() <= 0) {
     throw new IllegalArgumentException("Found no peer cluster for replication.");
   }
   for (ReplicationPeer repPeer : repPeers) {
     Configuration peerConf = repPeer.getConfiguration();
     HTableDescriptor htd = null;
     try (Connection conn = ConnectionFactory.createConnection(peerConf);
         Admin admin = this.connection.getAdmin();
         Admin repHBaseAdmin = conn.getAdmin()) {
       htd = admin.getTableDescriptor(tableName);
       HTableDescriptor peerHtd = null;
       if (!repHBaseAdmin.tableExists(tableName)) {
         repHBaseAdmin.createTable(htd, splits);
       } else {
         peerHtd = repHBaseAdmin.getTableDescriptor(tableName);
         if (peerHtd == null) {
           throw new IllegalArgumentException(
               "Failed to get table descriptor for table "
                   + tableName.getNameAsString()
                   + " from peer cluster "
                   + repPeer.getId());
         } else if (!peerHtd.equals(htd)) {
           throw new IllegalArgumentException(
               "Table "
                   + tableName.getNameAsString()
                   + " exists in peer cluster "
                   + repPeer.getId()
                   + ", but the table descriptors are not same when comapred with source cluster."
                   + " Thus can not enable the table's replication switch.");
         }
       }
     }
   }
 }
  public void convertH()
      throws SQLException, IOException, ClassNotFoundException, IllegalAccessException,
          InstantiationException {
     // create HBase connection
    conf_h = HBaseConfiguration.create();
    conf_h.set("hbase.zookeeper.quorum", "localhost");
    conf_h.set("hbase.zookeeper.property.clientPort", "2181");
     // the method declares IOException, so let a failed connection propagate
     conn_h = ConnectionFactory.createConnection(conf_h);
     // create MySQL connection
    try {
      Class.forName("com.mysql.jdbc.Driver").newInstance();
      // con_sql = DriverManager.getConnection("jdbc:mysql:///retail_db", "root", "cloudera");
      conn_sql = DriverManager.getConnection("jdbc:mysql:///retail_db", "root", "cloudera");
      if (!conn_sql.isClosed()) {
        System.out.println("Successfully connected to MySQL server...");
      }
    } catch (Exception e) {
      System.err.println(e.getMessage());
    }
     // create the table and its column families
    Admin admin = conn_h.getAdmin();

    HTableDescriptor tableDesc = new HTableDescriptor(tableName_retail);
    // create Family Name
    String[] familyName = {"order_items", "orders", "products"};
    for (String name : familyName) {
      HColumnDescriptor colDesc = new HColumnDescriptor(name);
      tableDesc.addFamily(colDesc);
    }
     admin.createTable(tableDesc);
     admin.close();

     // query from MySQL
    if (conn_sql != null) {
      Statement stat = conn_sql.createStatement();
      stat.executeQuery(
          "select * from products "
              + "inner join order_items on product_id = order_item_product_id "
              + "inner join orders on order_item_order_id = order_id ");
       ResultSet rs = stat.getResultSet();
       // open the target table once rather than once per row
       Table table = conn_h.getTable(tableName_retail);
       while (rs.next()) {
        // order_items
        String rowKey = rs.getString("order_item_id");
        Put put = new Put(Bytes.toBytes(rowKey)); // new Put(rowKey)
        put.addColumn(
            Bytes.toBytes("order_items"), Bytes.toBytes("order_item_id"), Bytes.toBytes(rowKey));
        String order_item_order_id = rs.getString("order_item_order_id");
        put.addColumn(
            Bytes.toBytes("order_items"),
            Bytes.toBytes("order_item_order_id"),
            Bytes.toBytes(order_item_order_id));
        String order_item_product_id = rs.getString("order_item_product_id");
        put.addColumn(
            Bytes.toBytes("order_items"),
            Bytes.toBytes("order_item_product_id"),
            Bytes.toBytes(order_item_product_id));
        String order_item_quantity = rs.getString("order_item_quantity");
        put.addColumn(
            Bytes.toBytes("order_items"),
            Bytes.toBytes("order_item_quantity"),
            Bytes.toBytes(order_item_quantity));
        String order_item_subtotal = rs.getString("order_item_subtotal");
        put.addColumn(
            Bytes.toBytes("order_items"),
            Bytes.toBytes("order_item_subtotal"),
            Bytes.toBytes(order_item_subtotal));
        String order_item_product_price = rs.getString("order_item_product_price");
        put.addColumn(
            Bytes.toBytes("order_items"),
            Bytes.toBytes("order_item_product_price"),
            Bytes.toBytes(order_item_product_price));
        // orders
        String order_id = rs.getString("order_id");
        put.addColumn(Bytes.toBytes("orders"), Bytes.toBytes("order_id"), Bytes.toBytes(order_id));
        String order_date = rs.getString("order_date");
        put.addColumn(
            Bytes.toBytes("orders"), Bytes.toBytes("order_date"), Bytes.toBytes(order_date));
        String order_customer_id = rs.getString("order_customer_id");
        put.addColumn(
            Bytes.toBytes("orders"),
            Bytes.toBytes("order_customer_id"),
            Bytes.toBytes(order_customer_id));
        String order_status = rs.getString("order_status");
        put.addColumn(
            Bytes.toBytes("orders"), Bytes.toBytes("order_status"), Bytes.toBytes(order_status));
        // products
        String product_id = rs.getString("product_id");
        put.addColumn(
            Bytes.toBytes("products"), Bytes.toBytes("product_id"), Bytes.toBytes(product_id));
        String product_category_id = rs.getString("product_category_id");
        put.addColumn(
            Bytes.toBytes("products"),
            Bytes.toBytes("product_category_id"),
            Bytes.toBytes(product_category_id));
        String product_name = rs.getString("product_name");
        put.addColumn(
            Bytes.toBytes("products"), Bytes.toBytes("product_name"), Bytes.toBytes(product_name));
        String product_description = rs.getString("product_description");
        put.addColumn(
            Bytes.toBytes("products"),
            Bytes.toBytes("product_description"),
            Bytes.toBytes(product_description));
        String product_price = rs.getString("product_price");
        put.addColumn(
            Bytes.toBytes("products"),
            Bytes.toBytes("product_price"),
            Bytes.toBytes(product_price));
        String product_image = rs.getString("product_image");
        put.addColumn(
            Bytes.toBytes("products"),
            Bytes.toBytes("product_image"),
            Bytes.toBytes(product_image));
         // put the row into the table
         table.put(put);
       }
       table.close();
      rs.close();
      stat.close();
    }
    conn_h.close();
    conn_sql.close();
  }
  public boolean doTest()
      throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException,
          IOException, InterruptedException {
     // create config
    conf_h = HBaseConfiguration.create();
    conf_h.set("hbase.zookeeper.quorum", "localhost");
    conf_h.set("hbase.zookeeper.property.clientPort", "2181");
     // the method declares IOException, so let a failed connection propagate
     Connection con_h = ConnectionFactory.createConnection(conf_h);
    Admin admin = con_h.getAdmin();
    HTableDescriptor tableDesc = new HTableDescriptor(tableName_chi);
    HColumnDescriptor colFamDesc = new HColumnDescriptor("count");
    colFamDesc.setMaxVersions(1);
    tableDesc.addFamily(colFamDesc);
    admin.createTable(tableDesc);

     // count and insert into chiTable
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("products"), Bytes.toBytes("product_category_id"));
    scan.addColumn(Bytes.toBytes("orders"), Bytes.toBytes("order_date"));
    // Creates a new Job with no particular Cluster
    Job job =
        Job.getInstance(conf_h, "Count"); // Job.getInstance(Configuration conf, String JobName)
    job.setJarByClass(
        ChiSquaredTest2_abc.class); // Set the Jar by finding where a given class came from
    // initTableMapperJob(String table, Scan scan, Class<? extends TableMapper> mapper, Class<?>
    // outputKeyClass, Class<?> outputValueClass, org.apache.hadoop.mapreduce.Job job)
    TableMapReduceUtil.initTableMapperJob(
        "retail_order", scan, Map1.class, Text.class, IntWritable.class, job);
    // initTableReducerJob(String table, Class<? extends TableReducer> reducer,
    // org.apache.hadoop.mapreduce.Job job)
    TableMapReduceUtil.initTableReducerJob("chiTable", Reduce1.class, job);

    // boolean waitForCompletion(boolean verbose), verbose - print the progress to the user
    job.waitForCompletion(true); // Submit the job to the cluster and wait for it to finish

     // extract values from chiTable
    int totalY = 0;
    int totalN = 0;
    ArrayList<CellOfHTable> chiTable = new ArrayList<CellOfHTable>();
    Table table_h = con_h.getTable(tableName_chi);
    Scan s = new Scan();
    s.addFamily(Bytes.toBytes("count"));
    ResultScanner results = table_h.getScanner(s);
    for (Result r : results) {
      CellOfHTable c =
          new CellOfHTable(
              r.getRow(),
              r.getValue(Bytes.toBytes("count"), Bytes.toBytes("Y")) == null
                  ? Bytes.toBytes(0)
                  : r.getValue(Bytes.toBytes("count"), Bytes.toBytes("Y")),
              r.getValue(Bytes.toBytes("count"), Bytes.toBytes("N")) == null
                  ? Bytes.toBytes(0)
                  : r.getValue(
                      Bytes.toBytes("count"), Bytes.toBytes("N"))); // (id, count_Y, count_N)
      chiTable.add(c);
      totalY = totalY + c.countY;
      totalN = totalN + c.countN;
    }

    results.close();
    table_h.close();
     admin.disableTable(tableName_chi);
     admin.deleteTable(tableName_chi);
     admin.close();
     con_h.close();

    double chisquare = 0.0;
    for (int i = 0; i < chiTable.size(); i++) {
      CellOfHTable c = chiTable.get(i);
      double expectY =
          (double) (c.countY + c.countN) * (double) totalY / (double) (totalY + totalN);
      chisquare =
          chisquare + (((double) c.countY - expectY) * ((double) c.countY - expectY) / expectY);
      double expectN =
          (double) (c.countY + c.countN) * (double) totalN / (double) (totalY + totalN);
      chisquare =
          chisquare + (((double) c.countN - expectN) * ((double) c.countN - expectN) / expectN);
    }

    System.out.println(chisquare);
    ChiSquareDist csd = new ChiSquareDist((chiTable.size() - 1));
     return chisquare > csd.inverseF(1.0 - alpha);
  }
  /**
   * This test is to test the scenario happened in HBASE-6901. All files are bulk loaded and
   * excluded from minor compaction. Without the fix of HBASE-6901, an
   * ArrayIndexOutOfBoundsException will be thrown.
   */
  @Ignore("Flakey: See HBASE-9051")
  @Test
  public void testExcludeAllFromMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    util.startMiniCluster();
    try (Connection conn = ConnectionFactory.createConnection();
        Admin admin = conn.getAdmin();
        Table table = util.createTable(TABLE_NAME, FAMILIES);
        RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath =
          new Path(
              FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
              new Path(
                  admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
                  Bytes.toString(FAMILIES[0])));
      assertEquals(0, fs.listStatus(storePath).length);

      // Generate two bulk load files
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);

      for (int i = 0; i < 2; i++) {
        Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
        runIncrementalPELoad(
            conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME), testDir);
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
      }

      // Ensure data shows up
      int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME);
      try {
        quickPoll(
            new Callable<Boolean>() {
              @Override
              public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
              }
            },
            5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME);
      quickPoll(
          new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

    } finally {
      util.shutdownMiniCluster();
    }
  }
 public RSGroupAdminClient(Connection conn) throws IOException {
   proxy =
       RSGroupAdminProtos.RSGroupAdminService.newBlockingStub(
           conn.getAdmin().coprocessorService());
 }
  public static void main(String[] args) throws IOException {
    Logger logger = new Logger(CheckConfig.class);
    GenericOptionsParser optionsParser =
        new GenericOptionsParser(HBaseConfiguration.create(), args);
    Configuration fullConfiguration = optionsParser.getConfiguration();

    BigtableOptions options;
    try {
      options = BigtableOptionsFactory.fromConfiguration(fullConfiguration);
    } catch (IOException | RuntimeException exc) {
      logger.warn("Encountered errors attempting to parse configuration.", exc);
      return;
    }

    System.out.println(String.format("User Agent: %s", options.getChannelOptions().getUserAgent()));
    System.out.println(String.format("Project ID: %s", options.getProjectId()));
    System.out.println(String.format("Cluster Name: %s", options.getCluster()));
    System.out.println(String.format("Zone: %s", options.getZone()));
    System.out.println(String.format("Cluster admin host: %s", options.getClusterAdminHost()));
    System.out.println(String.format("Table admin host: %s", options.getTableAdminHost()));
    System.out.println(String.format("Data host: %s", options.getDataHost()));

    Credentials credentials = options.getChannelOptions().getCredential();
    try {
      System.out.println("Attempting credential refresh...");
      credentials.refresh();
    } catch (IOException ioe) {
      logger.warn("Encountered errors attempting to refresh credentials.", ioe);
      return;
    }

    String configuredConnectionClass =
        fullConfiguration.get(HConnection.HBASE_CLIENT_CONNECTION_IMPL);

    boolean isCorrectClassSpecified = false;

    if (!Strings.isNullOrEmpty(configuredConnectionClass)) {
      try {
        Class<?> connectionClass = Class.forName(configuredConnectionClass);
        isCorrectClassSpecified =
            AbstractBigtableConnection.class.isAssignableFrom(connectionClass);
      } catch (Exception e) {
        // Ignore. Problems will be logged in the println below.
      }
    }
    // We can actually determine if this value is correct (disregarding custom subclasses).
    System.out.println(
        String.format(
            "HBase Connection Class = %s %s",
            configuredConnectionClass, isCorrectClassSpecified ? "(OK)" : "(Configuration error)"));

    System.out.println("Opening table admin connection...");
    try (Connection conn = ConnectionFactory.createConnection(fullConfiguration)) {
      try (Admin admin = conn.getAdmin()) {
        System.out.println(String.format("Tables in cluster %s:", options.getCluster()));
        TableName[] tableNames = admin.listTableNames();
        if (tableNames.length == 0) {
          System.out.println("No tables found.");
        } else {
          for (TableName table : tableNames) {
            System.out.println(table.getNameAsString());
          }
        }
      }
      System.out.println("Closing connection...");
    }
  }
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1.setInt("hfile.format.version", 3);
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf1.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        TestCoprocessorForTagsAtSource.class.getName());

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
     // Have to re-get conf1 in case the zk cluster location is different
     // from the default
    conf1 = utility1.getConfiguration();
    replicationAdmin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");

    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.setInt("hfile.format.version", 3);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf2.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        TestCoprocessorForTagsAtSink.class.getName());

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);

    replicationAdmin.addPeer("2", utility2.getClusterKey());

    LOG.info("Setup second Zk");
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);

    HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
    HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
    fam.setMaxVersions(3);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    try (Connection conn = ConnectionFactory.createConnection(conf1);
        Admin admin = conn.getAdmin()) {
      admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    try (Connection conn = ConnectionFactory.createConnection(conf2);
        Admin admin = conn.getAdmin()) {
      admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    htable1 = utility1.getConnection().getTable(TABLE_NAME);
    htable2 = utility2.getConnection().getTable(TABLE_NAME);
  }
Example #30
  public void printStats() throws IOException {
    Admin admin = connection.getAdmin();

    ClusterStatus status =
        admin.getClusterStatus(); // co ClusterStatusExample-1-GetStatus Get the cluster status.
    System.out.println("Cluster Status:\n--------------");
    System.out.println("HBase Version: " + status.getHBaseVersion());
    System.out.println("Version: " + status.getVersion());
    System.out.println("Cluster ID: " + status.getClusterId());
    System.out.println("Master: " + status.getMaster());
    System.out.println("No. Backup Masters: " + status.getBackupMastersSize());
    System.out.println("Backup Masters: " + status.getBackupMasters());
    System.out.println("No. Live Servers: " + status.getServersSize());
    System.out.println("Servers: " + status.getServers());
    System.out.println("No. Dead Servers: " + status.getDeadServers());
    System.out.println("Dead Servers: " + status.getDeadServerNames());
    System.out.println("No. Regions: " + status.getRegionsCount());
    System.out.println("Regions in Transition: " + status.getRegionsInTransition());
    System.out.println("No. Requests: " + status.getRequestsCount());
    System.out.println("Avg Load: " + status.getAverageLoad());
    System.out.println("Balancer On: " + status.getBalancerOn());
    System.out.println("Is Balancer On: " + status.isBalancerOn());
    System.out.println("Master Coprocessors: " + Arrays.asList(status.getMasterCoprocessors()));
    System.out.println("\nServer Info:\n--------------");
    for (ServerName server :
        status
            .getServers()) { // co ClusterStatusExample-2-ServerInfo Iterate over the included
                             // server instances.
      System.out.println("Hostname: " + server.getHostname());
      System.out.println("Host and Port: " + server.getHostAndPort());
      System.out.println("Server Name: " + server.getServerName());
      System.out.println("RPC Port: " + server.getPort());
      System.out.println("Start Code: " + server.getStartcode());
      ServerLoad load =
          status.getLoad(
              server); // co ClusterStatusExample-3-ServerLoad Retrieve the load details for the
                       // current server.
      System.out.println("\nServer Load:\n--------------");
      System.out.println("Info Port: " + load.getInfoServerPort());
      System.out.println("Load: " + load.getLoad());
      System.out.println("Max Heap (MB): " + load.getMaxHeapMB());
      System.out.println("Used Heap (MB): " + load.getUsedHeapMB());
      System.out.println("Memstore Size (MB): " + load.getMemstoreSizeInMB());
      System.out.println("No. Regions: " + load.getNumberOfRegions());
      System.out.println("No. Requests: " + load.getNumberOfRequests());
      System.out.println("Total No. Requests: " + load.getTotalNumberOfRequests());
      System.out.println("No. Requests per Sec: " + load.getRequestsPerSecond());
      System.out.println("No. Read Requests: " + load.getReadRequestsCount());
      System.out.println("No. Write Requests: " + load.getWriteRequestsCount());
      System.out.println("No. Stores: " + load.getStores());
      System.out.println("Store Size Uncompressed (MB): " + load.getStoreUncompressedSizeMB());
      System.out.println("No. Storefiles: " + load.getStorefiles());
      System.out.println("Storefile Size (MB): " + load.getStorefileSizeInMB());
      System.out.println("Storefile Index Size (MB): " + load.getStorefileIndexSizeInMB());
      System.out.println("Root Index Size: " + load.getRootIndexSizeKB());
      System.out.println("Total Bloom Size: " + load.getTotalStaticBloomSizeKB());
      System.out.println("Total Index Size: " + load.getTotalStaticIndexSizeKB());
      System.out.println("Current Compacted Cells: " + load.getCurrentCompactedKVs());
      System.out.println("Total Compacting Cells: " + load.getTotalCompactingKVs());
      System.out.println("Coprocessors1: " + Arrays.asList(load.getRegionServerCoprocessors()));
      System.out.println("Coprocessors2: " + Arrays.asList(load.getRsCoprocessors()));
      System.out.println("Replication Load Sink: " + load.getReplicationLoadSink());
      System.out.println("Replication Load Source: " + load.getReplicationLoadSourceList());
      System.out.println("\nRegion Load:\n--------------");
      for (Map.Entry<byte[], RegionLoad>
          entry : // co ClusterStatusExample-4-Regions Iterate over the region details of the
                  // current server.
          load.getRegionsLoad().entrySet()) {
        System.out.println("Region: " + Bytes.toStringBinary(entry.getKey()));
        RegionLoad regionLoad =
            entry
                .getValue(); // co ClusterStatusExample-5-RegionLoad Get the load details for the
                             // current region.
        System.out.println("Name: " + Bytes.toStringBinary(regionLoad.getName()));
        System.out.println("Name (as String): " + regionLoad.getNameAsString());
        System.out.println("No. Requests: " + regionLoad.getRequestsCount());
        System.out.println("No. Read Requests: " + regionLoad.getReadRequestsCount());
        System.out.println("No. Write Requests: " + regionLoad.getWriteRequestsCount());
        System.out.println("No. Stores: " + regionLoad.getStores());
        System.out.println("No. Storefiles: " + regionLoad.getStorefiles());
        System.out.println("Data Locality: " + regionLoad.getDataLocality());
        System.out.println("Storefile Size (MB): " + regionLoad.getStorefileSizeMB());
        System.out.println("Storefile Index Size (MB): " + regionLoad.getStorefileIndexSizeMB());
        System.out.println("Memstore Size (MB): " + regionLoad.getMemStoreSizeMB());
        System.out.println("Root Index Size: " + regionLoad.getRootIndexSizeKB());
        System.out.println("Total Bloom Size: " + regionLoad.getTotalStaticBloomSizeKB());
        System.out.println("Total Index Size: " + regionLoad.getTotalStaticIndexSizeKB());
        System.out.println("Current Compacted Cells: " + regionLoad.getCurrentCompactedKVs());
        System.out.println("Total Compacting Cells: " + regionLoad.getTotalCompactingKVs());
        System.out.println();
      }
     }
     admin.close();
   }