public boolean hasRowKey(String tableName, byte[] rowKey) throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   boolean flag = htable.exists(new Get(rowKey));
   htable.close();
   log.info(tableName + " contained key " + Bytes.toString(rowKey) + ", flag is -[" + flag + "]");
   return flag;
 }
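A note on the pattern above: if exists() throws, htable.close() never runs and the handle leaks. A minimal sketch of the same check with the close guaranteed in a finally block (dataSource is the helper from the enclosing class; the method name is illustrative):

  public boolean hasRowKeySafe(String tableName, byte[] rowKey) throws Exception {
    HTableInterface htable = dataSource.getConnection(tableName);
    try {
      // exists() checks presence without transferring the row
      return htable.exists(new Get(rowKey));
    } finally {
      htable.close(); // released even when exists() throws
    }
  }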
  public static void deleteTest(String tableStr) {
    try {
      Configuration conf = HBaseConfiguration.create();
      byte[] tableName = Bytes.toBytes(tableStr);

      HConnection hConnection = HConnectionManager.createConnection(conf);
      HTableInterface table = hConnection.getTable(tableName);

      byte[] startRow = Bytes.toBytes("rowKey_1");
      byte[] stopRow = Bytes.toBytes("rowKey_3");
      byte[] family = f0;

      Scan scan = new Scan();
      scan.addFamily(family);
      scan.setMaxVersions(1);

      //            scan.setStartRow(startRow);
      //            scan.setStopRow(stopRow);

      ResultScanner scanner = table.getScanner(scan);
      List<Delete> deletes = new ArrayList<Delete>();
      Result result = scanner.next();
      while (result != null) {
        deletes.add(new Delete(result.getRow()));
        result = scanner.next();
      }
      scanner.close(); // release the scanner before issuing the deletes
      table.delete(deletes);
      System.out.println("delete done");
      table.close(); // very important
      hConnection.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
  public static void writeTest(String tableStr) {
    try {
      Configuration conf = HBaseConfiguration.create();
      byte[] tableName = Bytes.toBytes(tableStr);
      HConnection hConnection = HConnectionManager.createConnection(conf);
      HTableInterface table = hConnection.getTable(tableName);
      byte[] family = f0;

      List<Put> puts = new ArrayList<Put>();
      for (int k = 0; k < 10; k++) // write 10 rows of data
      {
        byte[] rowkey = Bytes.toBytes("rowKey_" + k);
        Put p = new Put(rowkey);

        byte[] value_id = Bytes.toBytes("123456");
        byte[] value_user = Bytes.toBytes("mengqinghao" + k);

        p.add(family, qualifier_id, value_id);
        p.add(family, qualifier_user, value_user);

        puts.add(p);
      }
      table.put(puts);
      System.out.println("Puts done: " + puts.size());

      table.close(); // very important
      hConnection.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
  public static void readTest(String tableStr, String row) {
    try {
      Configuration conf = HBaseConfiguration.create();
      byte[] tableName = Bytes.toBytes(tableStr);
      HConnection hConnection = HConnectionManager.createConnection(conf);
      HTableInterface table = hConnection.getTable(tableName);

      byte[] rowkey = Bytes.toBytes(row);
      Get get = new Get(rowkey);
      get.addFamily(f0);

      Result result = table.get(get);
      NavigableMap<byte[], byte[]> m = result.getFamilyMap(f0);

      if (m == null || m.isEmpty()) {
        System.err.println("Empty result for family.");
        table.close();
        hConnection.close();
        return;
      }

      for (Map.Entry<byte[], byte[]> entry : m.entrySet()) {
        String qualifier = Bytes.toString(entry.getKey());
        String value = Bytes.toString(entry.getValue());
        System.out.println(qualifier + ":" + value);
      }
      table.close(); // very important
      hConnection.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
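The HConnectionManager/HTableInterface API used throughout these examples is deprecated as of HBase 1.0 and removed in 2.0. A minimal sketch of the same read with the replacement Connection/Table API and try-with-resources (same tableStr/row parameters assumed):

  public static void readTestModern(String tableStr, String row) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf(tableStr))) {
      Result result = table.get(new Get(Bytes.toBytes(row)));
      // both handles are closed automatically, even on failure
      System.out.println(result);
    }
  }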
Example #5
 @Override
 protected void cleanup(Context context) throws IOException, InterruptedException {
   super.cleanup(context);
   table.flushCommits();
   table.close();
   connection.close();
 }
Example #6
  private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
    HTableInterface hTable =
        services.getTable(
            SchemaUtil.getTableNameAsBytes(
                HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME, HBASE_DYNAMIC_COLUMNS));
    try {
      // Insert rows using standard HBase mechanism with standard HBase "types"
      List<Row> mutations = new ArrayList<Row>();
      byte[] dv = Bytes.toBytes("DV");
      byte[] first = Bytes.toBytes("F");
      byte[] f1v1 = Bytes.toBytes("F1V1");
      byte[] f1v2 = Bytes.toBytes("F1V2");
      byte[] f2v1 = Bytes.toBytes("F2V1");
      byte[] f2v2 = Bytes.toBytes("F2V2");
      byte[] key = Bytes.toBytes("entry1");

      Put put = new Put(key);
      put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
      put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
      put.add(FAMILY_NAME, f1v1, Bytes.toBytes("f1value1"));
      put.add(FAMILY_NAME, f1v2, Bytes.toBytes("f1value2"));
      put.add(FAMILY_NAME2, f2v1, Bytes.toBytes("f2value1"));
      put.add(FAMILY_NAME2, f2v2, Bytes.toBytes("f2value2"));
      mutations.add(put);

      hTable.batch(mutations);

    } finally {
      hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
  }
 // @Override
 public void cleanup() {
   try {
     eventsTable.close();
     tableFactory.cleanup();
   } catch (Exception e) {
     LOG.error("Error closing connections", e);
   }
 }
 public <V> void addObject(String tableName, V v, HbaseMapper<V> mapper) throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   Put put = mapper.mapPut(v);
   log.info("put is -[" + put + "]");
   htable.put(put);
   log.info("put one object to " + tableName + ", put info -[" + put + "]");
   htable.close();
 }
Example #9
 @Override
 public void stop(CoprocessorEnvironment env) throws IOException {
   HTableInterface meta = metaTable.get();
   if (meta != null) meta.close();
   // this.env = null;
   // this.conf = null;
   this.region = null;
 }
 public <V> V execute(String tableName, HbaseCallBack<V> callback) throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   log.info("execute call back tableName is -[" + tableName + "]");
   V v = callback.doInTable(htable);
   htable.close();
   log.info("execute callback from " + tableName + ",result is " + v);
   return v;
 }
 public <V> List<V> getObjects(String tableName, V object, HbaseMapper<V> mapper)
     throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   Scan scan = mapper.mapScan(object);
   log.info("scan is -[" + scan + "]");
   ResultScanner resultScanner = htable.getScanner(scan);
   List<V> result = trasfer(resultScanner, mapper);
   resultScanner.close();
   htable.close();
   log.info("get " + result.size() + " objects from " + tableName);
   return result;
 }
 public <V> void addObjects(String tableName, List<V> objects, HbaseMapper<V> mapper)
     throws Exception {
   if (CollectionUtils.isEmpty(objects)) {
     return;
   }
   HTableInterface htable = dataSource.getConnection(tableName);
   List<Put> puts = new ArrayList<Put>();
   for (V v : objects) {
     puts.add(mapper.mapPut(v));
   }
   htable.put(puts);
   log.info("put " + puts.size() + " objects to " + tableName);
   htable.close();
 }
 public <V> void deleteObjects(String tableName, List<V> objects, HbaseMapper<V> mapper)
     throws Exception {
   if (CollectionUtils.isEmpty(objects)) {
     return;
   }
   HTableInterface htable = dataSource.getConnection(tableName);
   List<Delete> deletes = new ArrayList<Delete>();
   for (V v : objects) {
     deletes.add(mapper.mapDelete(v));
   }
   htable.delete(deletes);
   log.info("delete " + deletes.size() + " objects from " + tableName + "");
   htable.close();
 }
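For orientation, a hypothetical call site for the helpers above (User, UserMapper, and dao are illustrative names; only the HbaseMapper callbacks this class actually invokes are assumed to exist):

  HbaseMapper<User> mapper = new UserMapper(); // supplies mapPut/mapDelete/mapScan/mapGet
  List<User> users = Arrays.asList(new User("u1"), new User("u2"));
  dao.addObjects("user_table", users, mapper);    // one batched put() per call
  dao.deleteObjects("user_table", users, mapper); // one batched delete() per call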
  @Override
  public Map<String, Object> readRow(String tableName, Object keyObject) throws Exception {
    if (!(keyObject instanceof Map)
        && !(keyObject instanceof String)
        && !(keyObject instanceof byte[])) {
      throw new IllegalArgumentException(
          "Unsupported key type - " + keyObject.getClass().getName());
    }

    Map<String, Object> result;

    HCatTable table = hcatClient.getTable("default", tableName);
    String hbaseTableName = HiveUtils.getTableName(table);

    HTableInterface tableInterface = tableFactory.getTable(hbaseConfiguration, hbaseTableName);

    try {
      List<HCatFieldSchema> columns = table.getCols();

      HCatFieldSchema keyColumn = columns.get(0);

      // we use the serializer to build the row key
      HiveSerializer serializer = new HiveSerializer(table);
      final byte[] rowKey;

      if (keyObject instanceof Map) {
        rowKey = serializer.serializeHiveType(keyColumn, null, keyObject, 0);
      } else if (keyObject instanceof String) {
        rowKey = Bytes.toBytes((String) keyObject);
      } else {
        rowKey = (byte[]) keyObject;
      }

      Get get = new Get(rowKey);
      get.setCacheBlocks(true);
      get.setMaxVersions(1);

      Result dbResult = tableInterface.get(get);

      HiveDeserializer deserializer = new HiveDeserializer(table, dbResult);

      result = deserializer.deserialize();

      result.put("__rowkey", rowKey);
    } finally {
      tableInterface.close();
    }

    return result;
  }
 public <V> List<V> list(
     String tableName, byte[] startKey, byte[] stopKey, Filter filter, HbaseMapper<V> mapper)
     throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   Scan scan = new Scan();
   scan.setStartRow(startKey);
   scan.setStopRow(stopKey);
   scan.setFilter(filter);
   log.info("scan is -[" + scan + "]");
   ResultScanner resultScanner = htable.getScanner(scan);
   List<V> result = trasfer(resultScanner, mapper);
   resultScanner.close();
   htable.close();
   log.info("list " + result.size() + " objects from " + tableName);
   return result;
 }
 public <V> void deleteObject(String tableName, V object, HbaseMapper<V> mapper) throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   Delete delete = mapper.mapDelete(object);
   log.info("delete is -[" + delete + "]");
   htable.delete(delete);
   log.info(
       "delete one object from "
           + tableName
           + ",rowKeyString is -["
           + mapper.generateRowKeyString(object)
           + "] ,delete info -["
           + delete
           + "]");
   htable.close();
 }
 public void increment(
     String tableName, byte[] rowKey, byte[] column_family, byte[] qualify, long amount)
     throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   htable.incrementColumnValue(rowKey, column_family, qualify, amount);
   log.info(
       "increment "
           + amount
           + " to table="
           + tableName
           + ",family="
           + Bytes.toString(column_family)
           + ",qualify="
           + Bytes.toString(qualify));
   htable.close();
 }
 public <V> V getObject(String tableName, V v, HbaseMapper<V> mapper) throws Exception {
   HTableInterface htable = dataSource.getConnection(tableName);
   Get get = mapper.mapGet(v);
   log.info("get is -[" + get + "]");
   Result result = htable.get(get);
   V r = null;
   if (!result.isEmpty()) {
     r = mapper.mapApi(result);
     log.info(
         "get one objects from "
             + tableName
             + ",rowKeyString is-["
             + mapper.getRowKeyString(result)
             + "]");
   }
   htable.close();
   return r;
 }
Example #19
    private void loadIPs() {
      dns = new HashMap(100000000);
      unknownHosts = new HashMap(1000000);
      querying = new HashMap(100000);

      try {
        int statsCommit = 500000;

        HConnection connection = HConnectionManager.createConnection(HBaseConfiguration.create());
        HTableInterface fetchFailTable = connection.getTable("fetchFail");
        Scan scan = new Scan();
        scan.setCaching(statsCommit);

        List<Filter> filters = new ArrayList<Filter>();
        Filter filter = new ColumnPrefixFilter(Bytes.toBytes("ip"));
        filters.add(filter);
        FilterList filterList = new FilterList(filters);
        scan.setFilter(filterList);

        ResultScanner rs = fetchFailTable.getScanner(scan);
        long cnt = 0;
        for (Result r : rs) {
          NavigableMap<byte[], byte[]> map = r.getFamilyMap(Bytes.toBytes("cf"));
          String ip = Bytes.toString(map.get(Bytes.toBytes("ip")));
          String host = Bytes.toString(r.getRow()).split("��")[0]; // NOTE: the split delimiter is mis-encoded in the source
          if (host != null && ip != null) {
            dns.put(host, ip);
          }

          if (++cnt % statsCommit == 0) {
            LOG.info("loadIPs url=" + Bytes.toString(r.getRow()) + " cnt=" + cnt);
          }
        }
        rs.close();
        fetchFailTable.close();
        LOG.info("load hostip cache=" + dns.size());

        connection.close();
      } catch (Exception e) {
        e.printStackTrace();
      } finally {
        //
      }
    }
Example #20
  public static void scanTest(String tableStr) {
    try {
      Configuration conf = HBaseConfiguration.create();
      byte[] tableName = Bytes.toBytes(tableStr);
      HConnection hConnection = HConnectionManager.createConnection(conf);
      HTableInterface table = hConnection.getTable(tableName);

      byte[] startRow = Bytes.toBytes("rowKey_0");
      byte[] stopRow = Bytes.toBytes("rowKey_6");
      byte[] family = f0;

      Scan scan = new Scan();
      scan.addFamily(family);
      scan.setMaxVersions(1);

      //            scan.setStartRow(startRow);
      //            scan.setStopRow(stopRow);
      int count = 0;
      ResultScanner scanner = table.getScanner(scan);
      Result result = scanner.next();
      while (result != null) {
        String rowKey = Bytes.toString(result.getRow());
        NavigableMap<byte[], byte[]> m = result.getFamilyMap(family);

        if (m == null || m.isEmpty()) {
          System.err.println("Empty result for family.");
          break;
        }
        for (Map.Entry<byte[], byte[]> entry : m.entrySet()) {
          String qualifier = Bytes.toString(entry.getKey());
          String value = Bytes.toString(entry.getValue());
          System.out.println(rowKey + ":" + qualifier + ":" + value);
        }
        result = scanner.next();
        count++;
        System.out.println("-----------------------------");
      }
      scanner.close();
      table.close(); // very important
      hConnection.close();
      System.out.println("count:" + count);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
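The commented-out setStartRow/setStopRow lines above bound the scan to a key range; a minimal sketch of the bounded variant (same table and family; stopRow is exclusive):

      Scan bounded = new Scan();
      bounded.addFamily(family);
      bounded.setStartRow(Bytes.toBytes("rowKey_0"));
      bounded.setStopRow(Bytes.toBytes("rowKey_6")); // returns rows >= rowKey_0 and < rowKey_6
      ResultScanner boundedScanner = table.getScanner(bounded);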
Example #21
  public static void sxr(String file) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTablePool tablePool = new HTablePool(conf, 100);
    HTableInterface cardTable = tablePool.getTable("dis_sxbzxr_new_card");
    HTableInterface nameTable = tablePool.getTable("dis_sxbzxr_new_name");

    File f = new File(file);
    BufferedReader br = new BufferedReader(new FileReader(f));
    String line;
    String[] subs;
    String name, card;
    long cnt = 0;
    long findCard = 0;
    long findName = 0;
    while ((line = br.readLine()) != null) {
      subs = line.split(",");
      name = subs[0];
      card = subs[1];
      ++cnt;

      Get get = new Get(Bytes.toBytes(card));
      get.addFamily(Bytes.toBytes("f"));
      Result result = cardTable.get(get);
      if (!result.isEmpty()) {
        ++findCard;
        LOG.info("find card: " + card + ", line: " + cnt);
      }

      Get nGet = new Get(Bytes.toBytes(name));
      nGet.addFamily(Bytes.toBytes("f"));
      Result nResult = nameTable.get(nGet);
      if (!nResult.isEmpty()) {
        ++findName;
        LOG.info("find name: " + name);
      }
    }
    br.close();
    LOG.info("line: " + cnt + ", find card: " + findCard + ", find name: " + findName);

    cardTable.close();
    nameTable.close();
    tablePool.close();
  }
Example #22
  public void testPool() throws IOException {
    PoolConfig config = new PoolConfig();
    config.setMaxTotal(20);
    config.setMaxIdle(5);
    config.setMaxWaitMillis(1000);
    config.setTestOnBorrow(true);

    /* properties */
    Properties props = new Properties();
    props.setProperty("hbase.zookeeper.quorum", "host1,host2,host3");
    props.setProperty("hbase.zookeeper.property.clientPort", "2181");
    props.setProperty("hbase.master", "host1:60000");
    props.setProperty("hbase.rootdir", "hdfs://host1:9000/hbase");

    /* connection pool */
    HbaseConnectionPool pool = new HbaseConnectionPool(config, props);
    HTableInterface table = null;

    HConnection conn = pool.getConnection();
    table = conn.getTable(TableName.valueOf("relation"));

    Get get = new Get(Bytes.toBytes("rowKey"));
    Result r = table.get(get);
    for (Cell cell : r.rawCells()) {
      System.out.println(
          "Rowkey : "
              + Bytes.toString(r.getRow())
              + " Familiy:Quilifier : "
              + Bytes.toString(CellUtil.cloneQualifier(cell))
              + " Value : "
              + Bytes.toString(CellUtil.cloneValue(cell)));
    }
    table.close();
    pool.returnConnection(conn);

    pool.close();
  }
Example #23
  private PageBean getDataMapTemp(SearchParam searchParam, Integer currentPage, Integer pageSize)
      throws IOException {
    String tableName = searchParam.getTableName();
    String startRow = searchParam.getStartKey();
    String stopRow = searchParam.getEndKey();

    List<Map<String, String>> mapList = new LinkedList<Map<String, String>>();
    ResultScanner scanner = null;
    // wrapper object created for pagination; its fields are given below
    PageBean tbData = new PageBean();
    Scan scan = null;
    HbaseWhereEngine whereEngine = null;
    HTableInterface table = HbaseTool.getTable(tableName); // get the table handle
    if ("ca_summary_optimize".equals(tableName) && compoundFieldMap.isEmpty()) {
      compoundFieldMap = CompoundFieldConfigService.getParentComField(tableName);
    }
    try {
      if (StringUtils.isNotEmpty(searchParam.getWhere())) {
        whereEngine =
            new HbaseWhereEngine(tableName, searchParam.getFamily(), searchParam.getWhere());
      }

      String[] selectArray = null;
      if (StringUtils.isNotEmpty(searchParam.getSelect())) {
        selectArray = searchParam.getSelect().split(",");
      }
      byte[] cf = Bytes.toBytes(searchParam.getFamily());

      // if keys are provided, we know exactly which records to select
      if (StringUtils.isNotEmpty(searchParam.getKeys())) {
        List<Get> getKeysList = new ArrayList<Get>();

        for (String key : searchParam.getKeys().split(",")) {
          Get get = new Get(Bytes.toBytes(key));
          getKeysList.add(get);

          if (selectArray != null) {
            for (String field : selectArray) {
              String temp[] = processField(field, searchParam.getFamily());
              if ("ca_summary_optimize".equals(tableName)
                  && compoundFieldMap.containsKey(temp[1])) {
                get.addColumn(Bytes.toBytes(temp[0]), Bytes.toBytes(compoundFieldMap.get(temp[1])));
              } else {
                get.addColumn(Bytes.toBytes(temp[0]), Bytes.toBytes(temp[1]));
              }
            }
          }

          if (selectArray != null && whereEngine != null) {
            Set<String> varSet = whereEngine.getVarSet();
            for (String var : varSet) {
              get.addColumn(cf, Bytes.toBytes(var));
            }
          }
        }

        Result[] resultsFromKeys = table.get(getKeysList);

        for (Result rr : resultsFromKeys) {
          if (!rr.isEmpty()) {
            if (whereEngine != null && !whereEngine.meetCondition(rr)) {
              continue;
            }
            Map<String, String> map = new TreeMap<String, String>();

            map.put("_id", Bytes.toString(rr.getRow()));
            for (String field : selectArray) {
              String value = HbaseWhereEngine.getColumnValue(tableName, cf, rr, field);
              if (!field.equals("id")) {
                map.put(field, value);
              }
            }
            mapList.add(map);
          }
        }
        pageSize = mapList.size();
        tbData.setCurrentPage(currentPage);
        tbData.setLength(pageSize);
        tbData.setTotalRecords(mapList.size());
      } else { // if keys is null, select records between startKey and stopKey, up to pageSize
        // maximum number of results to return
        if (pageSize == null || pageSize == 0) pageSize = 100;
        if (currentPage == null || currentPage == 0) currentPage = 1;
        // compute the start and end offsets of the requested page

        Integer firstPage = (currentPage - 1) * pageSize;

        Integer endPage = firstPage + pageSize;

        scan = getScan(startRow, stopRow);
        // attach the filters to the scan (true enables paging; the method is given below)
        scan.setFilter(packageFilters(searchParam, true));

        // fetch 1000 rows per RPC
        scan.setCaching(1000);
        scan.setCacheBlocks(false);

        if (selectArray != null) {
          for (String field : selectArray) {
            String temp[] = processField(field, searchParam.getFamily());
            if ("ca_summary_optimize".equals(tableName) && compoundFieldMap.containsKey(temp[1])) {
              scan.addColumn(Bytes.toBytes(temp[0]), Bytes.toBytes(compoundFieldMap.get(temp[1])));
            } else {
              scan.addColumn(Bytes.toBytes(temp[0]), Bytes.toBytes(temp[1]));
            }
          }
        }

        if (selectArray != null && whereEngine != null) {
          Set<String> varSet = whereEngine.getVarSet();
          for (String var : varSet) {
            scan.addColumn(cf, Bytes.toBytes(var));
          }
        }

        scanner = table.getScanner(scan);
        int i = 0;
        List<byte[]> rowList = new LinkedList<byte[]>();
        // walk the scanner and collect the row keys that fall within the requested page
        for (Result result : scanner) {
          String row = toStr(result.getRow());
          if (i >= firstPage && i < endPage) { // filter firstPage
            rowList.add(getBytes(row));
          }
          if (i >= endPage) {
            break;
          }
          i++;
        }
        // build Get objects for the collected row keys
        List<Get> getList = getList(rowList, cf, selectArray, tableName);
        Result[] results = table.get(getList);
        for (Result result : results) {
          if (whereEngine != null && !whereEngine.meetCondition(result)) {
            continue;
          }
          Map<byte[], byte[]> fmap = packFamilyMap(tableName, result, cf, selectArray);
          Map<String, String> rmap = packRowMap(fmap);
          rmap.put("_id", toStr(result.getRow()));
          mapList.add(rmap);
        }

        // populate the pagination object

        tbData.setCurrentPage(currentPage);
        tbData.setLength(pageSize); // (pageSize);
        tbData.setTotalRecords(i);
      }
      tbData.setResults(mapList);
    } catch (Exception e) {
      e.printStackTrace(); // don't swallow failures silently
    } finally {
      if (table != null) {
        table.close();
      }
      closeScanner(scanner);
    }
    compoundFieldMap.clear();
    return tbData;
  }
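The method above pages by walking the scanner and discarding rows before the requested offset. Where only a row-count cap is needed, HBase's PageFilter can push the limit server-side; a minimal sketch (table and pageSize as above; PageFilter applies per region, so the client still enforces the global cap):

    Scan pagedScan = new Scan();
    pagedScan.setFilter(new PageFilter(pageSize)); // per-region row limit
    ResultScanner pagedScanner = table.getScanner(pagedScan);
    int fetched = 0;
    for (Result r : pagedScanner) {
      if (++fetched > pageSize) {
        break; // enforce the global page size on the client
      }
      // process r ...
    }
    pagedScanner.close();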
Example #24
  @SuppressWarnings("deprecation")
  public void commit() throws SQLException {
    int i = 0;
    byte[] tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    long[] serverTimeStamps = validate();
    Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> iterator =
        this.mutations.entrySet().iterator();
    List<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> committedList =
        Lists.newArrayListWithCapacity(this.mutations.size());

    // add tracing for this operation
    TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables");
    Span span = trace.getSpan();
    while (iterator.hasNext()) {
      Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> entry = iterator.next();
      Map<ImmutableBytesPtr, Map<PColumn, byte[]>> valuesMap = entry.getValue();
      TableRef tableRef = entry.getKey();
      PTable table = tableRef.getTable();
      table.getIndexMaintainers(tempPtr);
      boolean hasIndexMaintainers = tempPtr.getLength() > 0;
      boolean isDataTable = true;
      long serverTimestamp = serverTimeStamps[i++];
      Iterator<Pair<byte[], List<Mutation>>> mutationsIterator =
          addRowMutations(tableRef, valuesMap, serverTimestamp, false);
      while (mutationsIterator.hasNext()) {
        Pair<byte[], List<Mutation>> pair = mutationsIterator.next();
        byte[] htableName = pair.getFirst();
        List<Mutation> mutations = pair.getSecond();

        // create a span per target table
        // TODO maybe we can be smarter about the table name to string here?
        Span child =
            Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));

        int retryCount = 0;
        boolean shouldRetry = false;
        do {
          ServerCache cache = null;
          if (hasIndexMaintainers && isDataTable) {
            byte[] attribValue = null;
            byte[] uuidValue;
            if (IndexMetaDataCacheClient.useIndexMetadataCache(
                connection, mutations, tempPtr.getLength())) {
              IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
              cache = client.addIndexMetadataCache(mutations, tempPtr);
              child.addTimelineAnnotation("Updated index metadata cache");
              uuidValue = cache.getId();
              // If we haven't retried yet, retry for this case only, as it's possible that
              // a split will occur after we send the index metadata cache to all known
              // region servers.
              shouldRetry = true;
            } else {
              attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
              uuidValue = ServerCacheClient.generateId();
            }
            // Either set the UUID to be able to access the index metadata from the cache
            // or set the index metadata directly on the Mutation
            for (Mutation mutation : mutations) {
              if (tenantId != null) {
                mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
              }
              mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
              if (attribValue != null) {
                mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
              }
            }
          }

          SQLException sqlE = null;
          HTableInterface hTable = connection.getQueryServices().getTable(htableName);
          try {
            if (logger.isDebugEnabled()) logMutationSize(hTable, mutations);
            long startTime = System.currentTimeMillis();
            child.addTimelineAnnotation("Attempt " + retryCount);
            hTable.batch(mutations);
            child.stop();
            shouldRetry = false;
            if (logger.isDebugEnabled())
              logger.debug(
                  "Total time for batch call of  "
                      + mutations.size()
                      + " mutations into "
                      + table.getName().getString()
                      + ": "
                      + (System.currentTimeMillis() - startTime)
                      + " ms");
            committedList.add(entry);
          } catch (Exception e) {
            SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
            if (inferredE != null) {
              if (shouldRetry
                  && retryCount == 0
                  && inferredE.getErrorCode()
                      == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
                // Swallow this exception once, as it's possible that we split after sending the
                // index metadata
                // and one of the region servers doesn't have it. This will cause it to have it the
                // next go around.
                // If it fails again, we don't retry.
                String msg =
                    "Swallowing exception and retrying after clearing meta cache on connection. "
                        + inferredE;
                logger.warn(msg);
                connection.getQueryServices().clearTableRegionCache(htableName);

                // add a new child span as this one failed
                child.addTimelineAnnotation(msg);
                child.stop();
                child = Tracing.child(span, "Failed batch, attempting retry");

                continue;
              }
              e = inferredE;
            }
            // Throw to client with both what was committed so far and what is left to be committed.
            // That way, client can either undo what was done or try again with what was not done.
            sqlE =
                new CommitException(
                    e,
                    this,
                    new MutationState(
                        committedList, this.sizeOffset, this.maxSize, this.connection));
          } finally {
            try {
              hTable.close();
            } catch (IOException e) {
              if (sqlE != null) {
                sqlE.setNextException(ServerUtil.parseServerException(e));
              } else {
                sqlE = ServerUtil.parseServerException(e);
              }
            } finally {
              try {
                if (cache != null) {
                  cache.close();
                }
              } finally {
                if (sqlE != null) {
                  throw sqlE;
                }
              }
            }
          }
        } while (shouldRetry && retryCount++ < 1);
        isDataTable = false;
      }
      numRows -= entry.getValue().size();
      iterator.remove(); // Remove batches as we process them
    }
    trace.close();
    assert (numRows == 0);
    assert (this.mutations.isEmpty());
  }
  private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
    HTableInterface hTable =
        services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
    try {
      // Insert rows using standard HBase mechanism with standard HBase "types"
      List<Row> mutations = new ArrayList<Row>();
      byte[] family = Bytes.toBytes("1");
      byte[] uintCol = Bytes.toBytes("UINT_COL");
      byte[] ulongCol = Bytes.toBytes("ULONG_COL");
      byte[] key, bKey;
      Put put;

      key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a"));
      put = new Put(key);
      put.add(family, uintCol, ts - 2, Bytes.toBytes(5));
      put.add(family, ulongCol, ts - 2, Bytes.toBytes(50L));
      mutations.add(put);
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(10));
      put.add(family, ulongCol, ts, Bytes.toBytes(100L));
      mutations.add(put);

      bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b"));
      put = new Put(key);
      put.add(family, uintCol, ts - 4, Bytes.toBytes(5000));
      put.add(family, ulongCol, ts - 4, Bytes.toBytes(50000L));
      mutations.add(put);
      @SuppressWarnings(
          "deprecation") // FIXME: Remove when unintentionally deprecated method is fixed
                         // (HBASE-7870).
      // FIXME: the version of the Delete constructor without the lock args was introduced
      // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
      // of the client.
      Delete del = new Delete(key, ts - 2, null);
      mutations.add(del);
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(2000));
      put.add(family, ulongCol, ts, Bytes.toBytes(20000L));
      mutations.add(put);

      key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c"));
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(3000));
      put.add(family, ulongCol, ts, Bytes.toBytes(30000L));
      mutations.add(put);

      key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d"));
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(4000));
      put.add(family, ulongCol, ts, Bytes.toBytes(40000L));
      mutations.add(put);

      hTable.batch(mutations);

      Result r = hTable.get(new Get(bKey));
      assertFalse(r.isEmpty());
    } finally {
      hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_NATIVE, null, ts + 1);
  }