public void createTable(String tableName, List<String> columnFamilies) {
    HBaseAdmin admin = null;
    try {
      admin = new HBaseAdmin(conf);
      HTableDescriptor tableDescriptor = new HTableDescriptor(Bytes.toBytes(tableName));

      for (String columnFamily : columnFamilies) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamily);
        tableDescriptor.addFamily(columnDescriptor);
      }
      admin.createTable(tableDescriptor);
      admin.close();
    } catch (TableExistsException e) {
      System.out.println("Table already exists: " + tableName);
      try {
        admin.close();
      } catch (IOException e1) {
        System.out.println("Error occurred while closing the HBaseAdmin connection: " + e1);
      }
    } catch (MasterNotRunningException e) {
      throw new RuntimeException("HBase master not running, table creation failed.", e);
    } catch (ZooKeeperConnectionException e) {
      throw new RuntimeException("ZooKeeper not running, table creation failed.", e);
    } catch (IOException e) {
      throw new RuntimeException("IO error, table creation failed.", e);
    }
  }
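A minimal caller sketch for the helper above, assuming the enclosing class already holds the initialized Configuration field conf that the method uses; the table and family names are illustrative:

    // Hypothetical usage: create a "users" table with two column families.
    // Requires java.util.Arrays and java.util.List imports.
    List<String> families = Arrays.asList("info", "stats");
    createTable("users", families);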
Example #2
  /**
   * Creates the tables used by the graph store.
   *
   * @param config Hadoop configuration
   * @param vertexDataHandler vertex storage handler
   * @param edgeDataHandler edge storage handler
   * @param graphDataHandler graph storage handler
   * @param vertexDataTableName vertex data table name
   * @param edgeTableName edge data table name
   * @param graphDataTableName graph data table name
   * @throws IOException
   */
  private static void createTablesIfNotExists(
      final Configuration config,
      final VertexDataHandler vertexDataHandler,
      final EdgeDataHandler edgeDataHandler,
      final GraphDataHandler graphDataHandler,
      final String vertexDataTableName,
      final String edgeTableName,
      final String graphDataTableName)
      throws IOException {
    HTableDescriptor vertexDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(vertexDataTableName));
    HTableDescriptor edgeDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(edgeTableName));
    HTableDescriptor graphDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(graphDataTableName));

    HBaseAdmin admin = new HBaseAdmin(config);

    if (!admin.tableExists(vertexDataTableDescriptor.getName())) {
      vertexDataHandler.createTable(admin, vertexDataTableDescriptor);
    }
    if (!admin.tableExists(edgeDataTableDescriptor.getName())) {
      edgeDataHandler.createTable(admin, edgeDataTableDescriptor);
    }
    if (!admin.tableExists(graphDataTableDescriptor.getName())) {
      graphDataHandler.createTable(admin, graphDataTableDescriptor);
    }

    admin.close();
  }
Example #3
 /**
  * Queries all tables.
  *
  * @return all tables, or null if none exist
  * @throws Exception
  */
 public static List<HTableDescriptor> queryALLTable() throws Exception {
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
     if (admin != null) {
       HTableDescriptor[] listTables = admin.listTables();
       if (null != listTables && listTables.length > 0) {
         return Arrays.asList(listTables);
       }
     }
   } catch (Exception e) {
     e.printStackTrace();
   } finally {
     try {
       if (null != admin) {
         admin.close();
       }
     } catch (IOException e) {
       logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
     }
     try {
       if (null != conn) {
         conn.close();
       }
     } catch (Exception e) {
       logger.error("Connection close exception, errMsg:{}", e.getMessage());
     }
   }
   return null;
 }
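A brief caller sketch for queryALLTable; the printed format is illustrative:

   // Hypothetical usage: print the name of every table in the cluster.
   List<HTableDescriptor> tables = queryALLTable();
   if (tables != null) {
     for (HTableDescriptor td : tables) {
       System.out.println(td.getNameAsString());
     }
   }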
Example #4
 /**
  * Deletes the table with the given name.
  *
  * @param tableName table name
  */
 public void deleteTable(byte[] tableName) {
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
      // A table must be disabled before it can be deleted.
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
   } catch (Exception e) {
     logger.error("HBaseAdmin deleteTable exception, errMsg:{}", e.getMessage());
   } finally {
     try {
       if (null != admin) {
         admin.close();
       }
     } catch (IOException e) {
       logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
     }
     try {
       if (null != conn) {
         conn.close();
       }
     } catch (Exception e) {
       logger.error("Connection close exception, errMsg:{}", e.getMessage());
     }
   }
 }
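A one-line caller sketch; the table name is illustrative:

   // Hypothetical usage: disable and drop the "users" table.
   deleteTable(Bytes.toBytes("users"));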
Example #5
 /**
  * Deletes the column family with the given name.
  *
  * @param tableName table name
  * @param columnFamilyName column family name
  */
 public static void deleteFamily(byte[] tableName, String columnFamilyName) {
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
     admin.deleteColumn(tableName, columnFamilyName);
   } catch (Exception e) {
     logger.error("HBaseAdmin deleteColumn exception, errMsg:{}", e.getMessage());
   } finally {
     try {
       if (null != admin) {
         admin.close();
       }
     } catch (IOException e) {
       logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
     }
     try {
       if (null != conn) {
         conn.close();
       }
     } catch (Exception e) {
       logger.error("Connection close exception, errMsg:{}", e.getMessage());
     }
   }
 }
Example #6
  /**
   * Deletes the given tables.
   *
   * @param config cluster configuration
   * @param vertexDataTableName vertex data table name
   * @param edgeDataTableName edge data table name
   * @param graphDataTableName graph data table name
   * @throws IOException
   */
  private static void deleteTablesIfExists(
      final Configuration config,
      final String vertexDataTableName,
      final String edgeDataTableName,
      final String graphDataTableName)
      throws IOException {
    HTableDescriptor vertexDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(vertexDataTableName));
    HTableDescriptor edgeDataTableDescriptor =
        new HTableDescriptor(TableName.valueOf(edgeDataTableName));
    HTableDescriptor graphsTableDescriptor =
        new HTableDescriptor(TableName.valueOf(graphDataTableName));

    HBaseAdmin admin = new HBaseAdmin(config);

    if (admin.tableExists(vertexDataTableDescriptor.getName())) {
      deleteTable(admin, vertexDataTableDescriptor);
    }
    if (admin.tableExists(edgeDataTableDescriptor.getName())) {
      deleteTable(admin, edgeDataTableDescriptor);
    }
    if (admin.tableExists(graphsTableDescriptor.getName())) {
      deleteTable(admin, graphsTableDescriptor);
    }

    admin.close();
  }
Example #7
  public static void main(String[] args)
      throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set(
        "hbase.zookeeper.quorum", "192.168.10.163:2181,192.168.10.164:2181,192.168.10.165:2181");

    // admin client
    HBaseAdmin admin = new HBaseAdmin(conf);
    // table
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("people"));
    // the "info" column family
    HColumnDescriptor hcd_info = new HColumnDescriptor("info");
    hcd_info.setMaxVersions(3);
    // the "data" column family
    HColumnDescriptor hcd_data = new HColumnDescriptor("data");

    // add the column families to the table descriptor
    htd.addFamily(hcd_info);
    htd.addFamily(hcd_data);

    // create the table
    admin.createTable(htd);

    // close the connection
    admin.close();
  }
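A short continuation sketch that could follow admin.close() inside the same main method, writing one cell with the same pre-1.0 client API used elsewhere in these examples; the row key, qualifier, and value are illustrative:

    // Hypothetical usage: insert one cell into the newly created "people" table.
    HTable table = new HTable(conf, "people");
    Put put = new Put(Bytes.toBytes("row-1"));
    put.add(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes("Alice"));
    table.put(put);
    table.close();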
Example #8
 @BeforeClass
 public static void doBeforeTestSetup() throws Exception {
   HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
   try {
     try {
       admin.disableTable(HBASE_DYNAMIC_COLUMNS_BYTES);
       admin.deleteTable(HBASE_DYNAMIC_COLUMNS_BYTES);
     } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
     }
     ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
     initTableValues();
   } finally {
     admin.close();
   }
 }
Example #9
  public static void createTableInner(String tableName, int splitSize, String splitPrefix)
      throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin hAdmin = new HBaseAdmin(conf);

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(Bytes.toBytes("f"));
    columnDescriptor.setCompressionType(Algorithm.SNAPPY);
    columnDescriptor.setMaxVersions(3);

    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(tableName));
    descriptor.addFamily(columnDescriptor);
    hAdmin.createTable(descriptor, Util.genSplitKeysAlphaDig(splitPrefix, splitSize));

    hAdmin.close();
  }
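A caller sketch for the pre-split helper above; the table name, region count, and prefix are illustrative, and Util.genSplitKeysAlphaDig is assumed to be a project-local utility:

    // Hypothetical usage: create "metrics" pre-split with keys generated from prefix "m".
    createTableInner("metrics", 16, "m");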
Example #10
File: HBase.java Project: baeeq/heritrix3
  @Override
  public synchronized void stop() {
    isRunning = false;
    if (admin != null) {
      try {
        admin.close();
      } catch (IOException e) {
        logger.warning("problem closing HBaseAdmin " + admin + " - " + e);
      }

      admin = null;
    }
    if (conf != null) {
      HConnectionManager.deleteConnection(conf, true);
    }
  }
Example #11
  // Create a table using HBaseAdmin and HTableDescriptor.
  public static void creat(String tablename, String columnFamily) throws Exception {

    HBaseAdmin admin = new HBaseAdmin(configuration);
    if (admin.tableExists(tablename)) {
      try {
        admin.disableTable(tablename);
        admin.deleteTable(tablename);
      } catch (Exception ex) {
        ex.printStackTrace();
      }
    }
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tablename));
    tableDesc.addFamily(new HColumnDescriptor(columnFamily));
    admin.createTable(tableDesc);
    admin.close();
    System.out.println("create table success!");
  }
Example #12
File: CreateTable.java Project: huayl/omid
  public static void createTable(Configuration hbaseConf, String tableName) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(hbaseConf);

    if (!admin.tableExists(tableName)) {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
      HColumnDescriptor datafam = new HColumnDescriptor(HBaseTimestampStorage.TSO_FAMILY);
      datafam.setMaxVersions(3);
      desc.addFamily(datafam);
      admin.createTable(desc);
    }

    if (admin.isTableDisabled(tableName)) {
      admin.enableTable(tableName);
    }
    admin.close();
    LOG.info("Table {} created successfully", tableName);
  }
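A caller sketch for the helper above; the table name here is illustrative, not the project's actual default:

    // Hypothetical usage: ensure the timestamp table exists and is enabled.
    createTable(HBaseConfiguration.create(), "MY_TIMESTAMP_TABLE");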
Example #13
  private synchronized void initialiseTable() {
    if (this.table == null) {
      try {
        HBaseStoreManager hbaseMgr = (HBaseStoreManager) storeMgr;
        Configuration config = hbaseMgr.getHbaseConfig();
        HBaseAdmin admin = new HBaseAdmin(config);
        try {
          if (!admin.tableExists(this.tableName)) {
            if (!storeMgr.getSchemaHandler().isAutoCreateTables()) {
              throw new NucleusUserException(Localiser.msg("040011", tableName));
            }

            NucleusLogger.VALUEGENERATION.debug(
                "IncrementGenerator: Creating Table '" + this.tableName + "'");
            HTableDescriptor ht = new HTableDescriptor(this.tableName);
            HColumnDescriptor hcd = new HColumnDescriptor(INCREMENT_COL_NAME);
            hcd.setCompressionType(Algorithm.NONE);
            hcd.setMaxVersions(1);
            ht.addFamily(hcd);
            admin.createTable(ht);
          }
        } finally {
          admin.close();
        }

        this.table = new HTable(config, this.tableName);
        if (!this.table.exists(new Get(Bytes.toBytes(key)))) {
          long initialValue = 0;
          if (properties.containsKey("key-initial-value")) {
            initialValue = Long.valueOf(properties.getProperty("key-initial-value")) - 1;
          }
          this.table.put(
              new Put(Bytes.toBytes(key))
                  .add(
                      Bytes.toBytes(INCREMENT_COL_NAME),
                      Bytes.toBytes(INCREMENT_COL_NAME),
                      Bytes.toBytes(initialValue)));
        }
      } catch (IOException ex) {
        NucleusLogger.VALUEGENERATION.fatal("Error instantiating IncrementGenerator", ex);
      }
    }
  }
Example #14
 @BeforeClass
 public static void doBeforeTestSetup() throws Exception {
   HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
   try {
     try {
       admin.disableTable(HBASE_NATIVE_BYTES);
       admin.deleteTable(HBASE_NATIVE_BYTES);
     } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
     }
     HTableDescriptor descriptor = new HTableDescriptor(HBASE_NATIVE_BYTES);
     HColumnDescriptor columnDescriptor = new HColumnDescriptor(FAMILY_NAME);
     columnDescriptor.setKeepDeletedCells(true);
     descriptor.addFamily(columnDescriptor);
     admin.createTable(descriptor, SPLITS);
     initTableValues();
   } finally {
     admin.close();
   }
 }
Example #15
 /**
  * Creates a table.
  *
  * @param table table descriptor
  * @throws ParamIsNullException if the parameter is null
  * @throws TableExistsException if the table already exists
  */
 public static void createTable(HTableDescriptor table)
     throws ParamIsNullException, TableExistsException {
   if (null == table) {
      throw new ParamIsNullException("Parameter must not be null");
   }
   logger.info("create table begin... , table:{}", table.toString());
   Connection conn = null;
   HBaseAdmin admin = null;
   String tableName = table.getNameAsString();
   try {
      logger.info("Obtaining connection");
      conn = ConnectionFactory.createConnection(conf);
      logger.info("Obtaining admin");
      admin = (HBaseAdmin) conn.getAdmin();
      // table already exists
     if (admin.tableExists(Bytes.toBytes(tableName))) {
       throw new TableExistsException(tableName);
     }
     logger.info("create...");
     admin.createTable(table);
      logger.info("table created successfully, tableName:{}", tableName);
    } catch (IOException e) {
      logger.error("table creation failed, tableName:{}, errMsg:{}", tableName, e);
   } finally {
     if (null != admin) {
       try {
         admin.close();
       } catch (IOException e) {
         logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
       }
     }
     if (null != conn) {
       try {
         conn.close();
       } catch (IOException e) {
         logger.error("Connection close exception, errMsg:{}", e.getMessage());
       }
     }
   }
 }
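A caller sketch for createTable(HTableDescriptor) above; the table and family names are illustrative:

   // Hypothetical usage: describe a table with one column family, then create it.
   HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("orders"));
   desc.addFamily(new HColumnDescriptor("d"));
   createTable(desc);  // throws TableExistsException if "orders" already exists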
Example #16
 private HTableInterface getTrxMetaTable() throws IOException {
   HTableInterface meta = metaTable.get();
   if (meta != null) {
     return meta;
   }
   synchronized (metaTable) {
     meta = metaTable.get();
     if (meta != null) {
       return meta;
     }
     HBaseAdmin admin = new HBaseAdmin(conf);
     if (!admin.tableExists(DominoConst.TRANSACTION_META)) {
       while (true) {
         try {
           admin.createTable(DominoConst.TRANSACTION_META_DESCRIPTOR);
         } catch (PleaseHoldException phe) {
           LOG.info("Failed to create transaction meta table: Got a PleaseHoldException.");
           try {
             Thread.sleep(200);
           } catch (InterruptedException ie) {
             break;
           }
           continue;
         } catch (IOException e) {
           LOG.warn("Failed to create transaction meta table. ", e);
         }
         break;
       }
     }
     admin.close();
     try {
       meta = env.getTable(DominoConst.TRANSACTION_META.getBytes(DominoConst.META_CHARSET));
       metaTable.set(meta);
     } catch (IOException e) {
       LOG.error("Failed to open transaction meta table: {}.", e.toString());
       throw e;
     }
   }
   return meta;
 }
Example #17
  @Test
  public void testPreSplit() throws Exception {
    byte[][] splits = new byte[][] {Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")};
    DatasetProperties props =
        DatasetProperties.builder().add("hbase.splits", new Gson().toJson(splits)).build();
    String presplittedTable = "presplitted";
    getTableAdmin(CONTEXT1, presplittedTable, props).create();

    HBaseAdmin hBaseAdmin = testHBase.getHBaseAdmin();
    try {
      List<HRegionInfo> regions =
          hBaseTableUtil.getTableRegions(
              hBaseAdmin, TableId.from(NAMESPACE1.getId(), presplittedTable));
      // note: the first region starts at the very first row key, so there is one more region than split keys
      Assert.assertEquals(4, regions.size());
      Assert.assertArrayEquals(Bytes.toBytes("a"), regions.get(1).getStartKey());
      Assert.assertArrayEquals(Bytes.toBytes("b"), regions.get(2).getStartKey());
      Assert.assertArrayEquals(Bytes.toBytes("c"), regions.get(3).getStartKey());
    } finally {
      hBaseAdmin.close();
    }
  }
Example #18
 /**
  * Adds a column family to a table.
  *
  * @param tableName table name
  * @param family column family descriptor
  * @throws ParamIsNullException if a parameter is null
  * @throws TableNotFoundException if the table does not exist or cannot be accessed
  */
 public static void addColumnFamily(String tableName, HColumnDescriptor family)
     throws ParamIsNullException, TableNotFoundException {
   if (null == tableName) {
      throw new ParamIsNullException("tableName must not be null");
   }
   if (null == family) {
      throw new ParamIsNullException("HColumnDescriptor must not be null");
   }
   Connection conn = null;
   HBaseAdmin admin = null;
   try {
     conn = ConnectionFactory.createConnection(conf);
     admin = (HBaseAdmin) conn.getAdmin();
      // table does not exist
     if (!admin.tableExists(Bytes.toBytes(tableName))) {
       throw new TableNotFoundException(tableName);
     }
      // addColumn persists the new family; modifying a fetched descriptor alone does not change the table
      admin.addColumn(Bytes.toBytes(tableName), family);
   } catch (IOException e) {
      logger.error("Exception while obtaining HBase connection, errMsg:{}", e.getMessage());
   } finally {
     if (null != admin) {
       try {
         admin.close();
       } catch (IOException e) {
         logger.error("HBaseAdmin close exception, errMsg:{}", e.getMessage());
       }
     }
     if (null != conn) {
       try {
         conn.close();
       } catch (IOException e) {
         logger.error("Connection close exception, errMsg:{}", e.getMessage());
       }
     }
   }
 }
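A caller sketch for addColumnFamily above; the table and family names are illustrative:

   // Hypothetical usage: add an "ext" family that keeps up to three versions per cell.
   HColumnDescriptor ext = new HColumnDescriptor("ext");
   ext.setMaxVersions(3);
   addColumnFamily("users", ext);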
Example #19
  private void doIncrementalLoadTest(boolean shouldChangeRegions) throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    byte[][] splitKeys = generateRandomSplitKeys(4);
    HBaseAdmin admin = null;
    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
      admin = util.getHBaseAdmin();
      HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = -1;
      try (RegionLocator r = table.getRegionLocator()) {
        numRegions = r.getStartKeys().length;
      }
      assertEquals("Should make 5 regions", 5, numRegions);

      // Generate the bulk load files
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getName());
        while (util.getMiniHBaseCluster()
            .getMaster()
            .getAssignmentManager()
            .getRegionStates()
            .isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        util.deleteTable(table.getName());
        byte[][] newSplitKeys = generateRandomSplitKeys(14);
        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);
        while (table.getRegionLocations().size() != 15
            || !admin.isTableAvailable(table.getName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME);
      assertEquals(
          "Data should remain after reopening of regions",
          tableDigestBefore,
          util.checksumRows(table));
    } finally {
      if (admin != null) admin.close();
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
Example #20
  @Ignore
  @Test
  public void testNonTxToTxTableFailure() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    // Put table in SYSTEM schema to prevent attempts to update the cache after we disable
    // SYSTEM.CATALOG
    conn.createStatement()
        .execute("CREATE TABLE SYSTEM.NON_TX_TABLE(k INTEGER PRIMARY KEY, v VARCHAR)");
    conn.createStatement().execute("UPSERT INTO SYSTEM.NON_TX_TABLE VALUES (1)");
    conn.commit();
    // Reset empty column value to an empty value like it is pre-transactions
    HTableInterface htable =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTable(Bytes.toBytes("SYSTEM.NON_TX_TABLE"));
    Put put = new Put(PInteger.INSTANCE.toBytes(1));
    put.add(
        QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
        QueryConstants.EMPTY_COLUMN_BYTES,
        ByteUtil.EMPTY_BYTE_ARRAY);
    htable.put(put);

    HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
    try {
      // This will succeed initially in updating the HBase metadata, but then will fail when
      // the SYSTEM.CATALOG table is attempted to be updated, exercising the code to restore
      // the coprocessors back to the non transactional ones.
      conn.createStatement().execute("ALTER TABLE SYSTEM.NON_TX_TABLE SET TRANSACTIONAL=true");
      fail();
    } catch (SQLException e) {
      assertTrue(
          e.getMessage().contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " is disabled"));
    } finally {
      admin.enableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
      admin.close();
    }

    ResultSet rs =
        conn.createStatement().executeQuery("SELECT k FROM SYSTEM.NON_TX_TABLE WHERE v IS NULL");
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertFalse(rs.next());

    htable =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTable(Bytes.toBytes("SYSTEM.NON_TX_TABLE"));
    assertFalse(
        htable
            .getTableDescriptor()
            .getCoprocessors()
            .contains(TransactionProcessor.class.getName()));
    assertEquals(
        1,
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("SYSTEM.NON_TX_TABLE"))
            .getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)
            .getMaxVersions());
  }
Example #21
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3);
    REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
    context =
        JAXBContext.newInstance(
            CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class);
    marshaller = context.createMarshaller();
    unmarshaller = context.createUnmarshaller();
    client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    if (!admin.tableExists(TABLE)) {
      HTableDescriptor htd = new HTableDescriptor(TABLE);
      htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
      htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
      admin.createTable(htd);
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
      // Insert first half
      for (byte[] ROW : ROWS_ONE) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_ONE) {
          p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
        }
        table.put(p);
      }
      for (byte[] ROW : ROWS_TWO) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_TWO) {
          p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
        }
        table.put(p);
      }

      // Insert second half (reverse families)
      for (byte[] ROW : ROWS_ONE) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_ONE) {
          p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
        }
        table.put(p);
      }
      for (byte[] ROW : ROWS_TWO) {
        Put p = new Put(ROW);
        p.setWriteToWAL(false);
        for (byte[] QUALIFIER : QUALIFIERS_TWO) {
          p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
        }
        table.put(p);
      }

      // Delete the second qualifier from all rows and families
      for (byte[] ROW : ROWS_ONE) {
        Delete d = new Delete(ROW);
        d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
        d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
        table.delete(d);
      }
      for (byte[] ROW : ROWS_TWO) {
        Delete d = new Delete(ROW);
        d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
        d.deleteColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
        table.delete(d);
      }
      colsPerRow -= 2;

      // Delete the second rows from both groups, one column at a time
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        Delete d = new Delete(ROWS_ONE[1]);
        d.deleteColumns(FAMILIES[0], QUALIFIER);
        d.deleteColumns(FAMILIES[1], QUALIFIER);
        table.delete(d);
      }
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        Delete d = new Delete(ROWS_TWO[1]);
        d.deleteColumns(FAMILIES[0], QUALIFIER);
        d.deleteColumns(FAMILIES[1], QUALIFIER);
        table.delete(d);
      }
      numRows -= 2;
      table.close();
    }
    admin.close();
  }
Example #22
  @Test
  public void testEnableIncrements() throws Exception {
    // set up one table with increments disabled and one with them enabled
    String disableTableName = "incr-disable";
    String enabledTableName = "incr-enable";
    TableId disabledTableId = TableId.from(NAMESPACE1.getId(), disableTableName);
    TableId enabledTableId = TableId.from(NAMESPACE1.getId(), enabledTableName);
    HBaseTableAdmin disabledAdmin =
        getTableAdmin(CONTEXT1, disableTableName, DatasetProperties.EMPTY);
    disabledAdmin.create();
    HBaseAdmin admin = testHBase.getHBaseAdmin();

    DatasetProperties props =
        DatasetProperties.builder().add(Table.PROPERTY_READLESS_INCREMENT, "true").build();
    HBaseTableAdmin enabledAdmin = getTableAdmin(CONTEXT1, enabledTableName, props);
    enabledAdmin.create();

    try {

      try {
        HTableDescriptor htd = hBaseTableUtil.getHTableDescriptor(admin, disabledTableId);
        List<String> cps = htd.getCoprocessors();
        assertFalse(cps.contains(IncrementHandler.class.getName()));

        htd = hBaseTableUtil.getHTableDescriptor(admin, enabledTableId);
        cps = htd.getCoprocessors();
        assertTrue(cps.contains(IncrementHandler.class.getName()));
      } finally {
        admin.close();
      }

      BufferingTable table = getTable(CONTEXT1, enabledTableName, ConflictDetection.COLUMN);
      byte[] row = Bytes.toBytes("row1");
      byte[] col = Bytes.toBytes("col1");
      DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
      Transaction tx = txSystemClient.startShort();
      table.startTx(tx);
      table.increment(row, col, 10);
      table.commitTx();
      // verify that value was written as a delta value
      final byte[] expectedValue =
          Bytes.add(IncrementHandlerState.DELTA_MAGIC_PREFIX, Bytes.toBytes(10L));
      final AtomicBoolean foundValue = new AtomicBoolean();
      byte[] enabledTableNameBytes =
          hBaseTableUtil.getHTableDescriptor(admin, enabledTableId).getName();
      testHBase.forEachRegion(
          enabledTableNameBytes,
          new Function<HRegion, Object>() {
            @Nullable
            @Override
            public Object apply(@Nullable HRegion hRegion) {
              Scan scan = new Scan();
              try {
                RegionScanner scanner = hRegion.getScanner(scan);
                List<Cell> results = Lists.newArrayList();
                boolean hasMore;
                do {
                  hasMore = scanner.next(results);
                  for (Cell cell : results) {
                    if (CellUtil.matchingValue(cell, expectedValue)) {
                      foundValue.set(true);
                    }
                  }
                } while (hasMore);
              } catch (IOException ioe) {
                fail("IOException scanning region: " + ioe.getMessage());
              }
              return null;
            }
          });
      assertTrue(
          "Should have seen the expected encoded delta value in the "
              + enabledTableName
              + " table region",
          foundValue.get());
    } finally {
      disabledAdmin.drop();
      enabledAdmin.drop();
    }
  }
Example #23
 @AfterClass
 public static void shutdownAfterClass() throws Exception {
   testUtil.shutdownMiniCluster();
   hBaseAdmin.close();
 }