Example #1
 // Ends the stored metrics scope opened for the given method; metrics failures are
 // logged and never propagated to the caller.
 private void endMetrics(String method) {
   Metrics metrics = MetricsFactory.getInstance();
   try {
     if (metrics != null) {
       metrics.endStoredScope(method);
     }
   } catch (IOException e) {
     LOG.warn("Error recording metrics", e);
   }
 }
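For context, a minimal sketch of the call that would open the scope ended above, assuming the same Metrics/MetricsFactory API and the same enclosing class as the snippet (the method name startMetrics is illustrative; startStoredScope is the usual pairing for endStoredScope in Hive's Metrics interface):

 private void startMetrics(String method) {
   // Hypothetical mirror of endMetrics: open the stored scope before the timed call.
   Metrics metrics = MetricsFactory.getInstance();
   try {
     if (metrics != null) {
       metrics.startStoredScope(method);
     }
   } catch (IOException e) {
     LOG.warn("Error recording metrics", e);
   }
 }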
  @BeforeClass
  public static void before() throws Exception {
    int port = MetaStoreUtils.findFreePort();

    hiveConf = new HiveConf(TestMetaStoreMetrics.class);
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_METRICS, true);
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);

    MetricsFactory.close();
    MetricsFactory.init(hiveConf);
    metrics = (CodahaleMetrics) MetricsFactory.getInstance();

    // Increments one HMS connection
    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);

    // Increments one HMS connection (Hive.get())
    SessionState.start(new CliSessionState(hiveConf));
    driver = new Driver(hiveConf);
  }
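The matching teardown is not shown on this page; a plausible sketch, assuming only the MetricsFactory API already used in before() (the method name after is illustrative), would release the metrics instance once the class finishes:

   @AfterClass
   public static void after() throws Exception {
     // Release the CodahaleMetrics instance initialized in before().
     MetricsFactory.close();
   }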
Example #3
 // Closes the given operation and, if metrics collection is enabled, decrements the
 // open-operations counter; metrics failures are logged but do not block the close.
 public void closeOperation(OperationHandle opHandle) throws HiveSQLException {
   Operation operation = removeOperation(opHandle);
   if (operation == null) {
     throw new HiveSQLException("Operation does not exist!");
   }
   Metrics metrics = MetricsFactory.getInstance();
   if (metrics != null) {
     try {
       metrics.decrementCounter(MetricsConstant.OPEN_OPERATIONS);
     } catch (Exception e) {
       LOG.warn("Error Reporting close operation to Metrics system", e);
     }
   }
   operation.close();
 }
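For comparison, the open side of this counter would typically be bumped when the operation is first registered; a minimal sketch assuming the same Metrics API and the MetricsConstant.OPEN_OPERATIONS key used above (registerOpenOperation and addOperation are illustrative names, not shown on this page):

 public void registerOpenOperation(Operation operation) {
   // Hypothetical counterpart to closeOperation: track the operation and bump the counter.
   addOperation(operation);
   Metrics metrics = MetricsFactory.getInstance();
   if (metrics != null) {
     try {
       metrics.incrementCounter(MetricsConstant.OPEN_OPERATIONS);
     } catch (Exception e) {
       LOG.warn("Error Reporting open operation to Metrics system", e);
     }
   }
 }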
  @Test
  public void testMetaDataCounts() throws Exception {
    // 1 database created
    driver.run("create database testdb1");

    // 4 tables
    driver.run("create table testtbl1 (key string)");
    driver.run("create table testtblpart (key string) partitioned by (partkey string)");
    driver.run("use testdb1");
    driver.run("create table testtbl2 (key string)");
    driver.run("create table testtblpart2 (key string) partitioned by (partkey string)");

    // 6 partitions
    driver.run("alter table default.testtblpart add partition (partkey='a')");
    driver.run("alter table default.testtblpart add partition (partkey='b')");
    driver.run("alter table default.testtblpart add partition (partkey='c')");
    driver.run("alter table testdb1.testtblpart2 add partition (partkey='a')");
    driver.run("alter table testdb1.testtblpart2 add partition (partkey='b')");
    driver.run("alter table testdb1.testtblpart2 add partition (partkey='c')");

    // create and drop some additional metadata, to test drop counts.
    driver.run("create database tempdb");
    driver.run("use tempdb");

    driver.run("create table delete_by_table (key string) partitioned by (partkey string)");
    driver.run("alter table delete_by_table add partition (partkey='temp')");
    driver.run("drop table delete_by_table");

    driver.run("create table delete_by_part (key string) partitioned by (partkey string)");
    driver.run("alter table delete_by_part add partition (partkey='temp')");
    driver.run("alter table delete_by_part drop partition (partkey='temp')");

    driver.run("create table delete_by_db (key string) partitioned by (partkey string)");
    driver.run("alter table delete_by_db add partition (partkey='temp')");
    driver.run("use default");
    driver.run("drop database tempdb cascade");

    // dump the current metrics as JSON and verify the create/delete counters
    CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();
    String json = metrics.dumpJson();
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_DATABASES, 2);
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_TABLES, 7);
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_PARTITIONS, 9);

    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_DATABASES, 1);
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_TABLES, 3);
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_PARTITIONS, 3);

    // Initialize a fresh HMSHandler to test the initial metadata count gauges.
    hiveConf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, ObjectStore.class.getName());
    HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("test", hiveConf, false);
    baseHandler.init();
    baseHandler.updateMetrics();

    // 1 new db + default
    json = metrics.dumpJson();
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_DATABASES, 2);
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_TABLES, 4);
    MetricsTestUtils.verifyMetricsJson(
        json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_PARTITIONS, 6);
  }