Example 1
 public Map<String, Long> getRegionSizes(String tableName) {
   Map<String, Long> regions = new HashMap<>();
   try {
     final Table table = connection.getTable(TableName.valueOf(tableName));
     RegionLocator regionLocator = connection.getRegionLocator(table.getName());
     List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
     Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
      for (HRegionLocation location : tableRegionInfos) {
        tableRegions.add(location.getRegionInfo().getRegionName());
      }
     ClusterStatus clusterStatus = connection.getAdmin().getClusterStatus();
     Collection<ServerName> servers = clusterStatus.getServers();
     final long megaByte = 1024L * 1024L;
     for (ServerName serverName : servers) {
       ServerLoad serverLoad = clusterStatus.getLoad(serverName);
       for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
         byte[] regionId = regionLoad.getName();
         if (tableRegions.contains(regionId)) {
           long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
           regions.put(regionLoad.getNameAsString(), regionSizeBytes);
         }
       }
     }
   } catch (IOException e) {
     e.printStackTrace();
   }
   return regions;
 }
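A minimal usage sketch for the method above; the table name is a placeholder and the snippet assumes it runs inside the same class (with its open connection). The division simply inverts the megabyte scaling applied inside getRegionSizes:

  Map<String, Long> sizes = getRegionSizes("my_table"); // hypothetical table name
  long totalBytes = 0L;
  for (Map.Entry<String, Long> entry : sizes.entrySet()) {
    // Keys are region names; values are store file sizes in bytes
    System.out.println(entry.getKey() + " -> " + entry.getValue() / (1024L * 1024L) + " MB");
    totalBytes += entry.getValue();
  }
  System.out.println("Total store file size: " + totalBytes + " bytes");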
Example 2
  /**
   * Buffers the put request in its corresponding queue and retries it up to
   * {@code maxAttempts} times before dropping it. Returns false if the queue is already full.
   *
   * @return true if the request was accepted by its corresponding buffer queue.
   */
  public boolean put(final TableName tableName, final Put put, int maxAttempts) {
    if (maxAttempts <= 0) {
      return false;
    }

    try {
      HTable.validatePut(put, maxKeyValueSize);
      // Allow mocking to get at the connection, but don't expose the connection to users.
      ClusterConnection conn = (ClusterConnection) getConnection();
      // AsyncProcess in the FlushWorker should take care of refreshing the location cache
      // as necessary. We shouldn't have to do that here.
      HRegionLocation loc = conn.getRegionLocation(tableName, put.getRow(), false);
      if (loc != null) {
        // Add the put pair into its corresponding queue.
        LinkedBlockingQueue<PutStatus> queue = getQueue(loc);

        // Generate a MultiPutStatus object and offer it into the queue
        PutStatus s = new PutStatus(loc.getRegionInfo(), put, maxAttempts);

        return queue.offer(s);
      }
    } catch (IOException e) {
      LOG.debug("Cannot process the put " + put, e);
    }
    return false;
  }
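A hedged usage sketch for put(): build a Put, offer it with a bounded number of attempts, and handle rejection. The table, family, and qualifier names are placeholders:

  Put p = new Put(Bytes.toBytes("row-1"));
  p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
  boolean accepted = put(TableName.valueOf("my_table"), p, 3); // at most 3 attempts
  if (!accepted) {
    // Queue full, invalid put, or no region location found; the caller decides whether
    // to retry later or fall back to a synchronous Table.put()
  }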
Example 3
    private void initialize(Map<HRegionLocation, FlushWorker> serverToFlushWorkerMap) {
      if (serverToFlushWorkerMap == null) {
        return;
      }

      long averageCalcSum = 0;
      int averageCalcCount = 0;
      for (Map.Entry<HRegionLocation, FlushWorker> entry : serverToFlushWorkerMap.entrySet()) {
        HRegionLocation addr = entry.getKey();
        FlushWorker worker = entry.getValue();

        long bufferedCounter = worker.getTotalBufferedCount();
        long failedCounter = worker.getTotalFailedCount();
        long serverMaxLatency = worker.getMaxLatency();
        AtomicAverageCounter averageCounter = worker.getAverageLatencyCounter();
        // Get sum and count pieces separately to compute overall average
        SimpleEntry<Long, Integer> averageComponents = averageCounter.getComponents();
        long serverAvgLatency = averageCounter.getAndReset();

        this.totalBufferedPutCounter += bufferedCounter;
        this.totalFailedPutCounter += failedCounter;
        if (serverMaxLatency > this.maxLatency) {
          this.maxLatency = serverMaxLatency;
        }
        averageCalcSum += averageComponents.getKey();
        averageCalcCount += averageComponents.getValue();

        this.serverToBufferedCounterMap.put(addr.getHostnamePort(), bufferedCounter);
        this.serverToFailedCounterMap.put(addr.getHostnamePort(), failedCounter);
        this.serverToAverageLatencyMap.put(addr.getHostnamePort(), serverAvgLatency);
        this.serverToMaxLatencyMap.put(addr.getHostnamePort(), serverMaxLatency);
      }
      this.overallAverageLatency = averageCalcCount != 0 ? averageCalcSum / averageCalcCount : 0;
    }
  /**
   * Do the commit. This is the 2nd phase of the 2-phase protocol.
   *
   * @param transactionState
   * @throws CommitUnsuccessfulException
   */
  void doCommit(final TransactionState transactionState) throws CommitUnsuccessfulException {
    try {
      LOG.trace("Commiting [" + transactionState.getTransactionId() + "]");

      transactionLogger.setStatusForTransaction(
          transactionState.getTransactionId(), TransactionLogger.TransactionStatus.COMMITTED);

      for (HRegionLocation location : transactionState.getParticipatingRegions()) {
        if (transactionState.getRegionsToIgnore().contains(location)) {
          continue;
        }
        TransactionalRegionInterface transactionalRegionServer =
            (TransactionalRegionInterface)
                connection.getHRegionConnection(location.getServerAddress());
        transactionalRegionServer.commit(
            location.getRegionInfo().getRegionName(), transactionState.getTransactionId());
      }
    } catch (Exception e) {
      LOG.info(
          "Commit of transaction [" + transactionState.getTransactionId() + "] was unsucsessful",
          e);
      // This happens on a NSRE that is triggered by a split
      try {
        abort(transactionState);
      } catch (Exception abortException) {
        LOG.warn("Exeption durring abort", abortException);
      }
      throw new CommitUnsuccessfulException(e);
    }
    transactionLogger.forgetTransaction(transactionState.getTransactionId());
  }
  /**
   * Abort a transaction.
   *
   * @param transactionState
   * @throws IOException
   */
  public void abort(final TransactionState transactionState) throws IOException {
    transactionLogger.setStatusForTransaction(
        transactionState.getTransactionId(), TransactionLogger.TransactionStatus.ABORTED);

    for (HRegionLocation location : transactionState.getParticipatingRegions()) {
      if (transactionState.getRegionsToIgnore().contains(location)) {
        continue;
      }
      try {
        TransactionalRegionInterface transactionalRegionServer =
            (TransactionalRegionInterface)
                connection.getHRegionConnection(location.getServerAddress());

        transactionalRegionServer.abortTransaction(
            location.getRegionInfo().getRegionName(), transactionState.getTransactionId());
      } catch (UnknownTransactionException e) {
        LOG.info(
            "Got unknown transaciton exception durring abort. Transaction: ["
                + transactionState.getTransactionId()
                + "], region: ["
                + location.getRegionInfo().getRegionNameAsString()
                + "]. Ignoring.");
      } catch (NotServingRegionException e) {
        LOG.info(
            "Got NSRE durring abort. Transaction: ["
                + transactionState.getTransactionId()
                + "], region: ["
                + location.getRegionInfo().getRegionNameAsString()
                + "]. Ignoring.");
      }
    }
    transactionLogger.forgetTransaction(transactionState.getTransactionId());
  }
Example 6
 static boolean canUpdate(HRegionLocation loc, HRegionLocation oldLoc) {
    // No need to update if there is no cached location, if the cached location is newer,
    // or if the cached location is on a different server
   return oldLoc != null
       && oldLoc.getSeqNum() <= loc.getSeqNum()
       && oldLoc.getServerName().equals(loc.getServerName());
 }
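A small illustration of canUpdate's semantics under hypothetical values: an update is allowed only when a cached location exists, is not newer than the incoming one, and points at the same server:

  HRegionInfo info = new HRegionInfo(TableName.valueOf("t"));
  ServerName server = ServerName.valueOf("host1,16020,1000");
  HRegionLocation cached = new HRegionLocation(info, server, 5L);
  HRegionLocation fresh = new HRegionLocation(info, server, 7L);
  assert canUpdate(fresh, cached);  // cached seqNum 5 <= 7 and same server: update allowed
  assert !canUpdate(cached, fresh); // cached copy (seqNum 7) is newer: keep it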
 /**
  * Remove the cached table from all region servers
  *
  * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable,
  *     Scan, Set)})
  * @param servers list of servers upon which table was cached (filled in by {@link
  *     #addHashCache(HTable, Scan, Set)})
  * @throws SQLException
  * @throws IllegalStateException if hashed table cannot be removed on any region server on which
  *     it was added
  */
 private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers)
     throws SQLException {
   ConnectionQueryServices services = connection.getQueryServices();
   Throwable lastThrowable = null;
   TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
   byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
   HTableInterface iterateOverTable = services.getTable(tableName);
   List<HRegionLocation> locations = services.getAllTableRegions(tableName);
   Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
    /*
     * Allow for the possibility that the region we used to decide where to send our cache has
     * split and been relocated to another region server *after* we sent it, but before we
     * removed it. To accommodate this, we iterate through the current metadata boundaries and
     * remove the cache once for each server that we originally sent to.
     */
   if (LOG.isDebugEnabled()) {
     LOG.debug("Removing Cache " + cacheId + " from servers.");
   }
   for (HRegionLocation entry : locations) {
     if (remainingOnServers.contains(entry)) { // Call once per server
       try {
         byte[] key = entry.getRegionInfo().getStartKey();
         iterateOverTable.coprocessorService(
             ServerCachingService.class,
             key,
             key,
             new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
               @Override
               public RemoveServerCacheResponse call(ServerCachingService instance)
                   throws IOException {
                 ServerRpcController controller = new ServerRpcController();
                 BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback =
                     new BlockingRpcCallback<RemoveServerCacheResponse>();
                 RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
                 if (connection.getTenantId() != null) {
                   builder.setTenantId(
                       HBaseZeroCopyByteString.wrap(connection.getTenantId().getBytes()));
                 }
                 builder.setCacheId(HBaseZeroCopyByteString.wrap(cacheId));
                 instance.removeServerCache(controller, builder.build(), rpcCallback);
                 if (controller.getFailedOn() != null) {
                   throw controller.getFailedOn();
                 }
                 return rpcCallback.get();
               }
             });
         remainingOnServers.remove(entry);
       } catch (Throwable t) {
         lastThrowable = t;
         LOG.error("Error trying to remove hash cache for " + entry, t);
       }
     }
   }
   if (!remainingOnServers.isEmpty()) {
     LOG.warn("Unable to remove hash cache for " + remainingOnServers, lastThrowable);
   }
 }
Example 8
  /**
   * Returns statistics on this table as a tabular result set. Used for the SHOW TABLE STATS
   * statement. The schema of the returned TResultSet is set inside this method.
   */
  public TResultSet getTableStats() {
    TResultSet result = new TResultSet();
    TResultSetMetadata resultSchema = new TResultSetMetadata();
    result.setSchema(resultSchema);
    resultSchema.addToColumns(new TColumn("Region Location", ColumnType.STRING.toThrift()));
    resultSchema.addToColumns(new TColumn("Start RowKey", ColumnType.STRING.toThrift()));
    resultSchema.addToColumns(new TColumn("Est. #Rows", ColumnType.BIGINT.toThrift()));
    resultSchema.addToColumns(new TColumn("Size", ColumnType.STRING.toThrift()));

    // TODO: Consider fancier stats maintenance techniques for speeding up this process.
    // Currently, we list all regions and perform a mini-scan of each of them to
    // estimate the number of rows, the data size, etc., which is rather expensive.
    try {
      long totalNumRows = 0;
      long totalHdfsSize = 0;
      List<HRegionLocation> regions =
          HBaseTable.getRegionsInRange(
              hTable_, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (HRegionLocation region : regions) {
        TResultRowBuilder rowBuilder = new TResultRowBuilder();
        HRegionInfo regionInfo = region.getRegionInfo();
        Pair<Long, Long> estRowStats =
            getEstimatedRowStats(regionInfo.getStartKey(), regionInfo.getEndKey());

        long numRows = estRowStats.first.longValue();
        long hdfsSize = getHdfsSize(regionInfo);
        totalNumRows += numRows;
        totalHdfsSize += hdfsSize;

        // Add the region location, start rowkey, number of rows and raw Hdfs size.
        rowBuilder
            .add(region.getHostname())
            .add(Bytes.toString(regionInfo.getStartKey()))
            .add(numRows)
            .addBytes(hdfsSize);
        result.addToRows(rowBuilder.get());
      }

      // Total num rows and raw Hdfs size.
      if (regions.size() > 1) {
        TResultRowBuilder rowBuilder = new TResultRowBuilder();
        rowBuilder.add("Total").add("").add(totalNumRows).addBytes(totalHdfsSize);
        result.addToRows(rowBuilder.get());
      }
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    return result;
  }
 private void addLocation(final TransactionState transactionState, HRegionLocation location) {
   if (LOG.isTraceEnabled()) LOG.trace("addLocation ENTRY");
   if (transactionState.addRegion(location)) {
     if (LOG.isTraceEnabled())
       LOG.trace(
           "addLocation added region ["
               + location.getRegionInfo().getRegionNameAsString()
               + " endKey: "
               + Hex.encodeHexString(location.getRegionInfo().getEndKey())
               + " to TS. Beginning txn "
               + transactionState.getTransactionId()
               + " on server");
   }
   if (LOG.isTraceEnabled()) LOG.trace("addLocation EXIT");
 }
Example 10
  private static Map<String, List<Row>> createRsRowMap(HTable htable, List<Row> rows)
      throws IOException {

    Map<String, List<Row>> rowMap = new HashMap<String, List<Row>>();
    for (Row row : rows) {
      HRegionLocation rl = htable.getRegionLocation(row.getRow());
      String hostname = rl.getHostname();
      List<Row> recs = rowMap.get(hostname);
      if (recs == null) {
        recs = new ArrayList<Row>(INITIAL_LIST_SIZE);
        rowMap.put(hostname, recs);
      }
      recs.add(row);
    }
    return rowMap;
  }
Example 11
  private static Map<String, List<Put>> createRsPutMap(HTable htable, List<Put> puts)
      throws IOException {

    Map<String, List<Put>> putMap = new HashMap<String, List<Put>>();
    for (Put put : puts) {
      HRegionLocation rl = htable.getRegionLocation(put.getRow());
      String hostname = rl.getHostname();
      List<Put> recs = putMap.get(hostname);
      if (recs == null) {
        recs = new ArrayList<Put>(INITIAL_LIST_SIZE);
        putMap.put(hostname, recs);
      }
      recs.add(put);
    }
    return putMap;
  }
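Both helpers above follow the same group-by-hostname pattern; on Java 8+ the null-check boilerplate collapses into Map.computeIfAbsent. A sketch of the Row variant under that assumption (the method name is illustrative):

  private static Map<String, List<Row>> createRsRowMapJava8(HTable htable, List<Row> rows)
      throws IOException {
    Map<String, List<Row>> rowMap = new HashMap<>();
    for (Row row : rows) {
      String hostname = htable.getRegionLocation(row.getRow()).getHostname();
      // Lazily create the per-server list, then append the record to it
      rowMap.computeIfAbsent(hostname, k -> new ArrayList<>(INITIAL_LIST_SIZE)).add(row);
    }
    return rowMap;
  }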
Example 12
 void updateCachedLocation(HRegionLocation loc, Throwable exception) {
   if (loc.getRegionInfo().isMetaTable()) {
     metaRegionLocator.updateCachedLocation(loc, exception);
   } else {
     nonMetaRegionLocator.updateCachedLocation(loc, exception);
   }
 }
 public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException {
   List<HRegionLocation> regions = tableSplits.get(Bytes.toString(tableName));
   if (regions != null) {
     for (HRegionLocation region : regions) {
        // An empty end key means the region is unbounded above
        if (Bytes.compareTo(region.getRegionInfo().getStartKey(), row) <= 0
            && (region.getRegionInfo().getEndKey().length == 0
                || Bytes.compareTo(region.getRegionInfo().getEndKey(), row) > 0)) {
         return region;
       }
     }
   }
   return new HRegionLocation(
       new HRegionInfo(
           TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW),
       SERVER_NAME,
       -1);
 }
Example 14
  private static List<HBaseMRRowRange> binRanges(
      final List<HBaseMRRowRange> inputRanges,
      final Map<HRegionLocation, Map<HRegionInfo, List<HBaseMRRowRange>>> binnedRanges,
      final RegionLocator regionLocator)
      throws IOException {

    // Loop through the ranges, getting the RegionLocation and RegionInfo for each range's
    // start key, clipping the range to that region's extent, and leaving the remainder
    // in the list to be binned on a later pass
    final ListIterator<HBaseMRRowRange> i = inputRanges.listIterator();
    while (i.hasNext()) {
      final HBaseMRRowRange range = i.next();
      final HRegionLocation location = regionLocator.getRegionLocation(range.getStart().getBytes());

      Map<HRegionInfo, List<HBaseMRRowRange>> regionInfoMap = binnedRanges.get(location);
      if (regionInfoMap == null) {
        regionInfoMap = new HashMap<HRegionInfo, List<HBaseMRRowRange>>();
        binnedRanges.put(location, regionInfoMap);
      }

      final HRegionInfo regionInfo = location.getRegionInfo();
      List<HBaseMRRowRange> rangeList = regionInfoMap.get(regionInfo);
      if (rangeList == null) {
        rangeList = new ArrayList<HBaseMRRowRange>();
        regionInfoMap.put(regionInfo, rangeList);
      }

      if (regionInfo.containsRange(range.getStart().getBytes(), range.getEnd().getBytes())) {
        rangeList.add(range);
        i.remove();
      } else {
        final ByteArrayRange overlappingRange =
            range.intersection(
                new ByteArrayRange(
                    new ByteArrayId(regionInfo.getStartKey()),
                    new ByteArrayId(regionInfo.getEndKey())));
        rangeList.add(new HBaseMRRowRange(overlappingRange));

        final HBaseMRRowRange uncoveredRange =
            new HBaseMRRowRange(
                new ByteArrayId(HBaseUtils.getNextPrefix(regionInfo.getEndKey())), range.getEnd());
        i.add(uncoveredRange);
      }
    }

    return inputRanges;
  }
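The heart of binRanges is clipping a row range to one region's key extent and carrying the uncovered remainder forward. A self-contained sketch of just that clipping step on plain byte[] boundaries (the helper name is illustrative, not part of the code above):

  // Returns {clippedStart, clippedEnd}: the part of [start, end) that falls inside
  // [regionStart, regionEnd). An empty regionEnd means the region is unbounded above.
  static byte[][] clipToRegion(byte[] start, byte[] end, byte[] regionStart, byte[] regionEnd) {
    byte[] clippedStart = Bytes.compareTo(start, regionStart) >= 0 ? start : regionStart;
    boolean regionUnbounded = regionEnd.length == 0;
    byte[] clippedEnd =
        regionUnbounded || Bytes.compareTo(end, regionEnd) <= 0 ? end : regionEnd;
    return new byte[][] {clippedStart, clippedEnd};
  }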
  protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey)
      throws IOException {

    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);

    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      regions = rl.getAllRegionLocations();
    }

    for (HRegionLocation e : regions) {
      HRegionInfo hri = e.getRegionInfo();
      ServerName hsa = e.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
        fs.delete(p, true);

        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + hsa.toString());
    }

    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
  }
Example 16
  /**
   * Buffers the put request in its corresponding queue and retries it up to {@code retry}
   * times before dropping it. Returns false if the queue is already full.
   *
   * @return true if the request was accepted by its corresponding buffer queue.
   */
  public boolean put(final TableName tableName, final Put put, int retry) {
    if (retry <= 0) {
      return false;
    }

    try {
      HTable.validatePut(put, maxKeyValueSize);
      HRegionLocation loc = conn.getRegionLocation(tableName, put.getRow(), false);
      if (loc != null) {
        // Add the put pair into its corresponding queue.
        LinkedBlockingQueue<PutStatus> queue = getQueue(loc);

        // Generate a MultiPutStatus object and offer it into the queue
        PutStatus s = new PutStatus(loc.getRegionInfo(), put, retry);

        return queue.offer(s);
      }
    } catch (IOException e) {
      LOG.debug("Cannot process the put " + put, e);
    }
    return false;
  }
Example 17
 /**
  * Gets the corresponding regions for an arbitrary range of keys.
  *
  * <p>This is copied from org.apache.hadoop.hbase.client.HTable. The only difference is that it
  * does not use the cache when calling getRegionLocation. TODO: Remove this function and use
  * HTable.getRegionsInRange when the non-cache version has been ported to CDH (DISTRO-477).
  *
  * @param startKey Starting row in range, inclusive
  * @param endKey Ending row in range, exclusive
  * @return A list of HRegionLocations corresponding to the regions that contain the specified
  *     range
  * @throws IOException if a remote or network exception occurs
  */
 public static List<HRegionLocation> getRegionsInRange(
     HTable hbaseTbl, final byte[] startKey, final byte[] endKey) throws IOException {
   final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW);
   if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
     throw new IllegalArgumentException(
         "Invalid range: "
             + Bytes.toStringBinary(startKey)
             + " > "
             + Bytes.toStringBinary(endKey));
   }
   final List<HRegionLocation> regionList = new ArrayList<HRegionLocation>();
   byte[] currentKey = startKey;
   // Make sure only one thread is accessing the hbaseTbl.
   synchronized (hbaseTbl) {
     do {
       // always reload region location info.
       HRegionLocation regionLocation = hbaseTbl.getRegionLocation(currentKey, true);
       regionList.add(regionLocation);
       currentKey = regionLocation.getRegionInfo().getEndKey();
     } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW)
         && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0));
   }
   return regionList;
 }
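A hedged usage sketch: enumerate the regions spanning a key range and print where each one lives. The configuration and all names are placeholders:

  Configuration conf = HBaseConfiguration.create();
  HTable table = new HTable(conf, "my_table"); // hypothetical table name
  List<HRegionLocation> regions =
      getRegionsInRange(table, Bytes.toBytes("a"), Bytes.toBytes("z"));
  for (HRegionLocation region : regions) {
    System.out.println(
        region.getRegionInfo().getRegionNameAsString() + " on " + region.getHostnamePort());
  }
  table.close();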
Example 18
 static void updateCachedLocation(
     HRegionLocation loc,
     Throwable exception,
     Function<HRegionLocation, HRegionLocation> cachedLocationSupplier,
     Consumer<HRegionLocation> addToCache,
     Consumer<HRegionLocation> removeFromCache) {
   HRegionLocation oldLoc = cachedLocationSupplier.apply(loc);
   if (LOG.isDebugEnabled()) {
     LOG.debug("Try updating " + loc + ", the old value is " + oldLoc, exception);
   }
   if (!canUpdate(loc, oldLoc)) {
     return;
   }
   Throwable cause = findException(exception);
   if (LOG.isDebugEnabled()) {
     LOG.debug("The actual exception when updating " + loc, cause);
   }
   if (cause == null || !isMetaClearingException(cause)) {
     if (LOG.isDebugEnabled()) {
       LOG.debug(
           "Will not update "
               + loc
               + " because the exception is null or not the one we care about");
     }
     return;
   }
   if (cause instanceof RegionMovedException) {
     RegionMovedException rme = (RegionMovedException) cause;
     HRegionLocation newLoc =
         new HRegionLocation(loc.getRegionInfo(), rme.getServerName(), rme.getLocationSeqNum());
     if (LOG.isDebugEnabled()) {
       LOG.debug(
           "Try updating " + loc + " with the new location " + newLoc + " constructed by " + rme);
     }
     addToCache.accept(newLoc);
   } else {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Try removing " + loc + " from cache");
     }
     removeFromCache.accept(loc);
   }
 }
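One way a locator might wire the three functional parameters, assuming a hypothetical cache keyed by region start key (the map and wrapper shown here are a sketch, not fields of the surrounding class):

  private final ConcurrentNavigableMap<byte[], HRegionLocation> cache =
      new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);

  void updateCachedLocation(HRegionLocation loc, Throwable exception) {
    updateCachedLocation(
        loc,
        exception,
        l -> cache.get(l.getRegionInfo().getStartKey()),
        newLoc -> cache.put(newLoc.getRegionInfo().getStartKey(), newLoc),
        removed -> cache.remove(removed.getRegionInfo().getStartKey(), removed));
  }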
Example 19
 @Override
 public int compareTo(HRegionLocation o) {
   return serverName.compareTo(o.getServerName());
 }
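Note that this ordering compares server names only, so two different regions hosted on the same server compare as equal. A short sketch of the effect, assuming a RegionLocator named regionLocator:

  List<HRegionLocation> locations = new ArrayList<>(regionLocator.getAllRegionLocations());
  // Groups the locations by hosting server; regions on the same server keep no particular order
  Collections.sort(locations);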
Example 20
  public ServerCache addServerCache(
      ScanRanges keyRanges,
      final ImmutableBytesWritable cachePtr,
      final byte[] txState,
      final ServerCacheFactory cacheFactory,
      final TableRef cacheUsingTableRef)
      throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /** Execute EndPoint in parallel on each server to send compressed hash cache */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
      final PTable cacheUsingTable = cacheUsingTableRef.getTable();
      List<HRegionLocation> locations =
          services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
      int nRegions = locations.size();
      // Size these based on worst case
      futures = new ArrayList<Future<Boolean>>(nRegions);
      Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
      for (HRegionLocation entry : locations) {
        // Keep track of servers we've sent to and only send once
        byte[] regionStartKey = entry.getRegionInfo().getStartKey();
        byte[] regionEndKey = entry.getRegionInfo().getEndKey();
        if (!servers.contains(entry)
            && keyRanges.intersects(
                regionStartKey,
                regionEndKey,
                cacheUsingTable.getIndexType() == IndexType.LOCAL
                    ? ScanUtil.getRowKeyOffset(regionStartKey, regionEndKey)
                    : 0,
                true)) {
          // Call RPC once per server
          servers.add(entry);
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
          }
          final byte[] key = entry.getRegionInfo().getStartKey();
          final HTableInterface htable =
              services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
          closeables.add(htable);
          futures.add(
              executor.submit(
                  new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                      final Map<byte[], AddServerCacheResponse> results;
                      try {
                        results =
                            htable.coprocessorService(
                                ServerCachingService.class,
                                key,
                                key,
                                new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
                                  @Override
                                  public AddServerCacheResponse call(ServerCachingService instance)
                                      throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback =
                                        new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder =
                                        AddServerCacheRequest.newBuilder();
                                    if (connection.getTenantId() != null) {
                                      try {
                                        byte[] tenantIdBytes =
                                            ScanUtil.getTenantIdBytes(
                                                cacheUsingTable.getRowKeySchema(),
                                                cacheUsingTable.getBucketNum() != null,
                                                connection.getTenantId(),
                                                cacheUsingTable.isMultiTenant());
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                      } catch (SQLException e) {
                                        throw new IOException(e);
                                      }
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(
                                        org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder
                                        svrCacheFactoryBuilder =
                                            ServerCacheFactoryProtos.ServerCacheFactory
                                                .newBuilder();
                                    svrCacheFactoryBuilder.setClassName(
                                        cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                    builder.setTxState(HBaseZeroCopyByteString.wrap(txState));
                                    instance.addServerCache(
                                        controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                      throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                  }
                                });
                      } catch (Throwable t) {
                        throw new Exception(t);
                      }
                      if (results != null && results.size() == 1) {
                        return results.values().iterator().next().getReturn();
                      }
                      return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                      return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                      return NO_OP_INSTANCE;
                    }
                  }));
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                addCustomAnnotations(
                    "NOT adding cache entry to be sent for "
                        + entry
                        + " since one already exists for that entry",
                    connection));
          }
        }
      }

      hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
      // Execute in parallel
      int timeoutMs =
          services
              .getProps()
              .getInt(
                  QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                  QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
      for (Future<Boolean> future : futures) {
        future.get(timeoutMs, TimeUnit.MILLISECONDS);
      }

      cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
      success = true;
    } catch (SQLException e) {
      firstException = e;
    } catch (Exception e) {
      firstException = new SQLException(e);
    } finally {
      try {
        if (!success) {
          SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
          for (Future<Boolean> future : futures) {
            future.cancel(true);
          }
        }
      } finally {
        try {
          Closeables.closeAll(closeables);
        } catch (IOException e) {
          if (firstException == null) {
            firstException = new SQLException(e);
          }
        } finally {
          if (firstException != null) {
            throw firstException;
          }
        }
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
  }
  /**
   * Prepare to commit a transaction.
   *
   * @param transactionState
   * @return commitStatusCode (see {@link TransactionalRegionInterface})
   * @throws IOException
   * @throws CommitUnsuccessfulException
   */
  public int prepareCommit(final TransactionState transactionState)
      throws CommitUnsuccessfulException, IOException {
    boolean allReadOnly = true;
    try {

      for (HRegionLocation location : transactionState.getParticipatingRegions()) {

        TransactionalRegionInterface transactionalRegionServer =
            (TransactionalRegionInterface)
                connection.getHRegionConnection(location.getServerAddress());
        int commitStatus =
            transactionalRegionServer.commitRequest(
                location.getRegionInfo().getRegionName(), transactionState.getTransactionId());
        boolean canCommit = true;
        switch (commitStatus) {
          case TransactionalRegionInterface.COMMIT_OK:
            allReadOnly = false;
            break;
          case TransactionalRegionInterface.COMMIT_OK_READ_ONLY:
            transactionState.addRegionToIgnore(location); // No need to doCommit for read-onlys
            break;
          case TransactionalRegionInterface.COMMIT_UNSUCESSFUL:
            canCommit = false;
            transactionState.addRegionToIgnore(location); // No need to re-abort.
            break;
          default:
            throw new CommitUnsuccessfulException(
                "Unexpected return code from prepareCommit: " + commitStatus);
        }

        if (LOG.isTraceEnabled()) {
          LOG.trace(
              "Region ["
                  + location.getRegionInfo().getRegionNameAsString()
                  + "] votes "
                  + (canCommit ? "to commit" : "to abort")
                  + " transaction "
                  + transactionState.getTransactionId());
        }

        if (!canCommit) {
          LOG.debug("Aborting [" + transactionState.getTransactionId() + "]");
          abort(transactionState);
          return TransactionalRegionInterface.COMMIT_UNSUCESSFUL;
        }
      }
    } catch (Exception e) {
      LOG.debug(
          "Commit of transaction [" + transactionState.getTransactionId() + "] was unsucsessful",
          e);
      // This happens on a NSRE that is triggered by a split
      try {
        abort(transactionState);
      } catch (Exception abortException) {
        LOG.warn("Exeption durring abort", abortException);
      }
      throw new CommitUnsuccessfulException(e);
    }
    return allReadOnly
        ? TransactionalRegionInterface.COMMIT_OK_READ_ONLY
        : TransactionalRegionInterface.COMMIT_OK;
  }
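A minimal sketch of how the two phases fit together, assuming this class exposes prepareCommit and doCommit as shown above:

  public void tryCommit(final TransactionState transactionState)
      throws CommitUnsuccessfulException, IOException {
    // Phase 1: every participating region votes
    int status = prepareCommit(transactionState);
    if (status == TransactionalRegionInterface.COMMIT_OK) {
      // Phase 2: commit on every region that voted to commit
      doCommit(transactionState);
    } else if (status == TransactionalRegionInterface.COMMIT_OK_READ_ONLY) {
      // Read-only transactions need no second phase; just forget them
      transactionLogger.forgetTransaction(transactionState.getTransactionId());
    }
    // COMMIT_UNSUCESSFUL: prepareCommit has already aborted the transaction
  }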
    public void append(
        TableName tableName, byte[] encodedRegionName, byte[] row, List<Entry> entries)
        throws IOException {

      if (disabledAndDroppedTables.getIfPresent(tableName) != null) {
        if (LOG.isTraceEnabled()) {
          LOG.trace(
              "Skipping "
                  + entries.size()
                  + " entries because table "
                  + tableName
                  + " is cached as a disabled or dropped table");
          for (Entry entry : entries) {
            LOG.trace("Skipping : " + entry);
          }
        }
        sink.getSkippedEditsCounter().addAndGet(entries.size());
        return;
      }

      // If the table is disabled or dropped, we should skip replaying the entries. However,
      // we might not know whether the table is disabled until we invalidate the cache and
      // check from meta
      RegionLocations locations = null;
      boolean useCache = true;
      while (true) {
        // get the replicas of the primary region
        try {
          locations =
              RegionReplicaReplayCallable.getRegionLocations(
                  connection, tableName, row, useCache, 0);

          if (locations == null) {
            throw new HBaseIOException(
                "Cannot locate locations for " + tableName + ", row:" + Bytes.toStringBinary(row));
          }
        } catch (TableNotFoundException e) {
          if (LOG.isTraceEnabled()) {
            LOG.trace(
                "Skipping "
                    + entries.size()
                    + " entries because table "
                    + tableName
                    + " is dropped. Adding table to cache.");
            for (Entry entry : entries) {
              LOG.trace("Skipping : " + entry);
            }
          }
          disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored
          // skip this entry
          sink.getSkippedEditsCounter().addAndGet(entries.size());
          return;
        }

        // check whether we should still replay this entry. If the regions are changed, or the
        // entry is not coming from the primary region, filter it out.
        HRegionLocation primaryLocation = locations.getDefaultRegionLocation();
        if (!Bytes.equals(
            primaryLocation.getRegionInfo().getEncodedNameAsBytes(), encodedRegionName)) {
          if (useCache) {
            useCache = false;
            continue; // this will retry location lookup
          }
          if (LOG.isTraceEnabled()) {
            LOG.trace(
                "Skipping "
                    + entries.size()
                    + " entries in table "
                    + tableName
                    + " because located region "
                    + primaryLocation.getRegionInfo().getEncodedName()
                    + " is different than the original region "
                    + Bytes.toStringBinary(encodedRegionName)
                    + " from WALEdit");
            for (Entry entry : entries) {
              LOG.trace("Skipping : " + entry);
            }
          }
          sink.getSkippedEditsCounter().addAndGet(entries.size());
          return;
        }
        break;
      }

      if (locations.size() == 1) {
        return;
      }

      ArrayList<Future<ReplicateWALEntryResponse>> tasks =
          new ArrayList<Future<ReplicateWALEntryResponse>>(locations.size() - 1);

      // All passed entries should belong to one region because it is coming from the EntryBuffers
      // split per region. But the regions might split and merge (unlike log recovery case).
      for (int replicaId = 0; replicaId < locations.size(); replicaId++) {
        HRegionLocation location = locations.getRegionLocation(replicaId);
        if (!RegionReplicaUtil.isDefaultReplica(replicaId)) {
          HRegionInfo regionInfo =
              location == null
                  ? RegionReplicaUtil.getRegionInfoForReplica(
                      locations.getDefaultRegionLocation().getRegionInfo(), replicaId)
                  : location.getRegionInfo();
          RegionReplicaReplayCallable callable =
              new RegionReplicaReplayCallable(
                  connection,
                  rpcControllerFactory,
                  tableName,
                  location,
                  regionInfo,
                  row,
                  entries,
                  sink.getSkippedEditsCounter());
          Future<ReplicateWALEntryResponse> task =
              pool.submit(
                  new RetryingRpcCallable<ReplicateWALEntryResponse>(
                      rpcRetryingCallerFactory, callable, operationTimeout));
          tasks.add(task);
        }
      }

      boolean tasksCancelled = false;
      for (Future<ReplicateWALEntryResponse> task : tasks) {
        try {
          task.get();
        } catch (InterruptedException e) {
          throw new InterruptedIOException(e.getMessage());
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            // The table can be disabled or dropped at this time. For disabled tables, we have no
            // cheap mechanism to detect this case because meta does not contain this information.
            // HConnection.isTableDisabled() is a zk call which we cannot do for every replay RPC.
            // So instead we start the replay RPC with retries and
            // check whether the table is dropped or disabled which might cause
            // SocketTimeoutException, or RetriesExhaustedException or similar if we get IOE.
            if (cause instanceof TableNotFoundException || connection.isTableDisabled(tableName)) {
              if (LOG.isTraceEnabled()) {
                LOG.trace(
                    "Skipping "
                        + entries.size()
                        + " entries in table "
                        + tableName
                        + " because received exception for dropped or disabled table",
                    cause);
                for (Entry entry : entries) {
                  LOG.trace("Skipping : " + entry);
                }
              }
              disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later.
              if (!tasksCancelled) {
                sink.getSkippedEditsCounter().addAndGet(entries.size());
                tasksCancelled = true; // so that we do not add to skipped counter again
              }
              continue;
            }
            // otherwise rethrow
            throw (IOException) cause;
          }
          // unexpected exception
          throw new IOException(cause);
        }
      }
    }
Example 23
 public int getPgPort(HRegionLocation loc) {
   return pgPortMap.get(loc.getHostnamePort());
 }
Example 24
    @Override
    public void run() {
      int failedCount = 0;
      try {
        long start = EnvironmentEdgeManager.currentTime();

        // drain all the queued puts into the tmp list
        processingList.clear();
        queue.drainTo(processingList);
        if (processingList.size() == 0) {
          // Nothing to flush
          return;
        }

        currentProcessingCount.set(processingList.size());
        // failedCount is decremented whenever a Put succeeds or is resubmitted.
        failedCount = processingList.size();

        List<Action<Row>> retainedActions = new ArrayList<>(processingList.size());
        MultiAction<Row> actions = new MultiAction<>();
        for (int i = 0; i < processingList.size(); i++) {
          PutStatus putStatus = processingList.get(i);
          Action<Row> action = new Action<Row>(putStatus.put, i);
          actions.add(putStatus.regionInfo.getRegionName(), action);
          retainedActions.add(action);
        }

        // Process this multi-put request
        List<PutStatus> failed = null;
        Object[] results = new Object[actions.size()];
        ServerName server = addr.getServerName();
        Map<ServerName, MultiAction<Row>> actionsByServer =
            Collections.singletonMap(server, actions);
        try {
          AsyncRequestFuture arf =
              ap.submitMultiActions(
                  null,
                  retainedActions,
                  0L,
                  null,
                  results,
                  true,
                  null,
                  null,
                  actionsByServer,
                  null);
          arf.waitUntilDone();
          if (arf.hasError()) {
            // We just log and ignore the exception here since failed Puts will be resubmitted.
            LOG.debug(
                "Caught some exceptions when flushing puts to region server "
                    + addr.getHostnamePort(),
                arf.getErrors());
          }
        } finally {
          for (int i = 0; i < results.length; i++) {
            if (results[i] instanceof Result) {
              failedCount--;
            } else {
              if (failed == null) {
                failed = new ArrayList<PutStatus>();
              }
              failed.add(processingList.get(i));
            }
          }
        }

        if (failed != null) {
          // Resubmit failed puts
          for (PutStatus putStatus : failed) {
            if (resubmitFailedPut(putStatus, this.addr)) {
              failedCount--;
            }
          }
        }

        long elapsed = EnvironmentEdgeManager.currentTime() - start;
        // Update latency counters
        averageLatency.add(elapsed);
        if (elapsed > maxLatency.get()) {
          maxLatency.set(elapsed);
        }

        // Log some basic info
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "Processed "
                  + currentProcessingCount
                  + " put requests for "
                  + addr.getHostnamePort()
                  + " and "
                  + failedCount
                  + " failed"
                  + ", latency for this send: "
                  + elapsed);
        }

        // Reset the current processing put count
        currentProcessingCount.set(0);
      } catch (RuntimeException e) {
        // To make findbugs happy
        // Log all the exceptions and move on
        LOG.debug(
            "Caught some exceptions "
                + e
                + " when flushing puts to region server "
                + addr.getHostnamePort(),
            e);
      } catch (Exception e) {
        if (e instanceof InterruptedException) {
          Thread.currentThread().interrupt();
        }
        // Log all the exceptions and move on
        LOG.debug(
            "Caught some exceptions "
                + e
                + " when flushing puts to region server "
                + addr.getHostnamePort(),
            e);
      } finally {
        // Update the totalFailedCount
        this.totalFailedPutCount.addAndGet(failedCount);
      }
    }
Example 25
  /**
   * Get an estimate of the number of rows and bytes per row in regions between startRowKey and
   * endRowKey. The more store files there are, the more this estimate will be off. Also, this
   * does not take into account any rows that are in the memstore.
   *
   * <p>The values computed here should be cached so that in high-QPS workloads the NameNode is
   * not overwhelmed; this could be done in load(). Synchronized to make sure that only one
   * thread at a time is using the htable.
   *
   * @param startRowKey First row key in the range
   * @param endRowKey Last row key in the range
   * @return The estimated number of rows in the regions between the row keys (first) and the
   *     estimated row size in bytes (second).
   */
  public synchronized Pair<Long, Long> getEstimatedRowStats(byte[] startRowKey, byte[] endRowKey) {
    Preconditions.checkNotNull(startRowKey);
    Preconditions.checkNotNull(endRowKey);

    long rowSize = 0;
    long rowCount = 0;
    long hdfsSize = 0;
    boolean isCompressed = false;

    try {
      // Check to see if things are compressed.
      // If they are we'll estimate a compression factor.
      if (columnFamilies_ == null) {
        columnFamilies_ = hTable_.getTableDescriptor().getColumnFamilies();
      }
      Preconditions.checkNotNull(columnFamilies_);
      for (HColumnDescriptor desc : columnFamilies_) {
        isCompressed |= desc.getCompression() != Compression.Algorithm.NONE;
      }

      // For every region in the range.
      List<HRegionLocation> locations = getRegionsInRange(hTable_, startRowKey, endRowKey);
      for (HRegionLocation location : locations) {
        long currentHdfsSize = 0;
        long currentRowSize = 0;
        long currentRowCount = 0;

        HRegionInfo info = location.getRegionInfo();
        // Get the size on hdfs
        currentHdfsSize += getHdfsSize(info);

        Scan s = new Scan(info.getStartKey());
        // Get a small sample of rows
        s.setBatch(ROW_COUNT_ESTIMATE_BATCH_SIZE);
        // Try and get every version so the row's size can be used to estimate.
        s.setMaxVersions(Short.MAX_VALUE);
        // Don't cache the blocks as we don't think these are
        // necessarily important blocks.
        s.setCacheBlocks(false);
        // Try and get deletes too so their size can be counted.
        s.setRaw(true);
        ResultScanner rs = hTable_.getScanner(s);
        try {
          // And get the ROW_COUNT_ESTIMATE_BATCH_SIZE fetched rows
          // for a representative sample
          for (int i = 0; i < ROW_COUNT_ESTIMATE_BATCH_SIZE; i++) {
            Result r = rs.next();
            if (r == null) break;
            currentRowCount += 1;
            for (KeyValue kv : r.list()) {
              // some extra row size added to make up for shared overhead
              currentRowSize +=
                  kv.getRowLength() // row key
                      + 4 // row key length field
                      + kv.getFamilyLength() // Column family bytes
                      + 4 // family length field
                      + kv.getQualifierLength() // qualifier bytes
                      + 4 // qualifier length field
                      + kv.getValueLength() // length of the value
                      + 4 // value length field
                      + 10; // extra overhead for hfile index, checksums, metadata, etc
            }
          }
          // add these values to the cumulative totals in one shot just
          // in case there was an error in between getting the hdfs
          // size and the row/column sizes.
          hdfsSize += currentHdfsSize;
          rowCount += currentRowCount;
          rowSize += currentRowSize;
        } finally {
          rs.close();
        }
      }
    } catch (IOException ioe) {
      // Print the stack trace, but we'll ignore it
      // as this is just an estimate.
      // TODO: Put this into the per query log.
      LOG.error("Error computing HBase row count estimate", ioe);
    }

    // If there are no rows then no need to estimate.
    if (rowCount == 0) return new Pair<Long, Long>(0L, 0L);

    // if something went wrong then set a signal value.
    if (rowSize <= 0 || hdfsSize <= 0) return new Pair<Long, Long>(-1L, -1L);

    // estimate the number of rows.
    double bytesPerRow = rowSize / (double) rowCount;
    long estimatedRowCount = (long) ((isCompressed ? 2 : 1) * (hdfsSize / bytesPerRow));

    return new Pair<Long, Long>(estimatedRowCount, (long) bytesPerRow);
  }
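A worked illustration of the final arithmetic above, with hypothetical sampled values:

  // Sampled 100 rows totaling 50,000 bytes -> 500 bytes per row. The region data
  // occupies 1,000,000,000 bytes on HDFS and is compressed, so the row count
  // estimate is doubled: 2 * 1e9 / 500 = 4,000,000 rows.
  double bytesPerRow = 50_000 / (double) 100;
  long estimatedRowCount = (long) (2 * (1_000_000_000L / bytesPerRow));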
  /**
   * Deletes a list of rows as part of the given transaction, delegating to the
   * TransactionalRegion-side implementation.
   *
   * @param transactionState
   * @param deletes
   * @throws IOException
   */
  public void delete(final TransactionState transactionState, List<Delete> deletes)
      throws IOException {
    long transactionId = transactionState.getTransactionId();
    if (LOG.isTraceEnabled())
      LOG.trace(
          "Enter TransactionalTable.delete[] <List> size: "
              + deletes.size()
              + ", transid: "
              + transactionId);
    // collect all rows from same region
    final Map<TransactionRegionLocation, List<Delete>> rows =
        new HashMap<TransactionRegionLocation, List<Delete>>();
    HRegionLocation hlocation = null;
    TransactionRegionLocation location = null;
    List<Delete> list = null;
    int size = 0;
    for (Delete del : deletes) {
      hlocation = this.getRegionLocation(del.getRow(), false);
      location =
          new TransactionRegionLocation(hlocation.getRegionInfo(), hlocation.getServerName());
      if (LOG.isTraceEnabled())
        LOG.trace(
            "delete <List> with trRegion ["
                + location.getRegionInfo().getEncodedName()
                + "], endKey: "
                + Hex.encodeHexString(location.getRegionInfo().getEndKey())
                + " and transaction ["
                + transactionId
                + "], delete number: "
                + size);
      if (!rows.containsKey(location)) {
        if (LOG.isTraceEnabled())
          LOG.trace(
              "delete adding new <List> for region ["
                  + location.getRegionInfo().getRegionNameAsString()
                  + "], endKey: "
                  + Hex.encodeHexString(location.getRegionInfo().getEndKey())
                  + " and transaction ["
                  + transactionId
                  + "], delete number: "
                  + size);
        list = new ArrayList<Delete>();
        rows.put(location, list);
      } else {
        list = rows.get(location);
      }
      list.add(del);
      size++;
    }

    final List<Delete> rowsInSameRegion = new ArrayList<Delete>();
    for (Map.Entry<TransactionRegionLocation, List<Delete>> entry : rows.entrySet()) {
      rowsInSameRegion.clear();
      rowsInSameRegion.addAll(entry.getValue());
      final String regionName = entry.getKey().getRegionInfo().getRegionNameAsString();

      Batch.Call<TrxRegionService, DeleteMultipleTransactionalResponse> callable =
          new Batch.Call<TrxRegionService, DeleteMultipleTransactionalResponse>() {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<DeleteMultipleTransactionalResponse> rpcCallback =
                new BlockingRpcCallback<DeleteMultipleTransactionalResponse>();

            @Override
            public DeleteMultipleTransactionalResponse call(TrxRegionService instance)
                throws IOException {
              org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos
                      .DeleteMultipleTransactionalRequest.Builder
                  builder = DeleteMultipleTransactionalRequest.newBuilder();
              builder.setTransactionId(transactionState.getTransactionId());
              builder.setRegionName(ByteString.copyFromUtf8(regionName));

              for (Delete delete : rowsInSameRegion) {
                MutationProto m1 = ProtobufUtil.toMutation(MutationType.DELETE, delete);
                builder.addDelete(m1);
              }

              instance.deleteMultiple(controller, builder.build(), rpcCallback);
              return rpcCallback.get();
            }
          };

      DeleteMultipleTransactionalResponse result = null;
      try {
        int retryCount = 0;
        boolean retry = false;
        do {
          Iterator<Map.Entry<byte[], DeleteMultipleTransactionalResponse>> it =
              super.coprocessorService(
                      TrxRegionService.class,
                      entry.getValue().get(0).getRow(),
                      entry.getValue().get(0).getRow(),
                      callable)
                  .entrySet()
                  .iterator();
          if (it.hasNext()) {
            result = it.next().getValue();
            retry = false;
          }

          if (result == null || result.getException().contains("closing region")) {
            Thread.sleep(TransactionalTable.delay);
            retry = true;
            transactionState.setRetried(true);
            retryCount++;
          }
        } while (retryCount < TransactionalTable.retries && retry);

      } catch (Throwable e) {
        if (LOG.isErrorEnabled()) LOG.error("ERROR while calling deleteMultipleTransactional ", e);
        throw new IOException("ERROR while calling deleteMultipleTransactional", e);
      }

      if (result == null) throw new IOException(retryErrMsg);
      else if (result.hasException()) throw new IOException(result.getException());
    }
  }