    public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr,
            final byte[] txState, final ServerCacheFactory cacheFactory,
            final TableRef cacheUsingTableRef) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
        List<Closeable> closeables = new ArrayList<Closeable>();
        closeables.add(chunk);
        ServerCache hashCacheSpec = null;
        SQLException firstException = null;
        final byte[] cacheId = generateId();
        /**
         * Execute EndPoint in parallel on each server to send compressed hash cache
         */
        // TODO: generalize and package as a per region server EndPoint caller
        // (ideally this would be functionality provided by the coprocessor framework)
        boolean success = false;
        ExecutorService executor = services.getExecutor();
        List<Future<Boolean>> futures = Collections.emptyList();
        try {
            final PTable cacheUsingTable = cacheUsingTableRef.getTable();
            List<HRegionLocation> locations =
                    services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
            int nRegions = locations.size();
            // Size these based on worst case
            futures = new ArrayList<Future<Boolean>>(nRegions);
            Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
            for (HRegionLocation entry : locations) {
                // Keep track of servers we've sent to and only send once
                byte[] regionStartKey = entry.getRegionInfo().getStartKey();
                byte[] regionEndKey = entry.getRegionInfo().getEndKey();
                if (!servers.contains(entry)
                        && keyRanges.intersects(regionStartKey, regionEndKey,
                                cacheUsingTable.getIndexType() == IndexType.LOCAL
                                        ? ScanUtil.getRowKeyOffset(regionStartKey, regionEndKey)
                                        : 0,
                                true)) {
                    // Call RPC once per server
                    servers.add(entry);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(addCustomAnnotations(
                                "Adding cache entry to be sent for " + entry, connection));
                    }
                    final byte[] key = entry.getRegionInfo().getStartKey();
                    final HTableInterface htable = services.getTable(
                            cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                    closeables.add(htable);
                    futures.add(executor.submit(new JobCallable<Boolean>() {

                        @Override
                        public Boolean call() throws Exception {
                            final Map<byte[], AddServerCacheResponse> results;
                            try {
                                results = htable.coprocessorService(ServerCachingService.class,
                                        key, key,
                                        new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
                                            @Override
                                            public AddServerCacheResponse call(ServerCachingService instance)
                                                    throws IOException {
                                                ServerRpcController controller = new ServerRpcController();
                                                BlockingRpcCallback<AddServerCacheResponse> rpcCallback =
                                                        new BlockingRpcCallback<AddServerCacheResponse>();
                                                AddServerCacheRequest.Builder builder =
                                                        AddServerCacheRequest.newBuilder();
                                                if (connection.getTenantId() != null) {
                                                    try {
                                                        byte[] tenantIdBytes = ScanUtil.getTenantIdBytes(
                                                                cacheUsingTable.getRowKeySchema(),
                                                                cacheUsingTable.getBucketNum() != null,
                                                                connection.getTenantId(),
                                                                cacheUsingTable.isMultiTenant());
                                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                                    } catch (SQLException e) {
                                                        // Propagate the failure instead of silently dropping it
                                                        throw new IOException(e);
                                                    }
                                                }
                                                builder.setCacheId(ByteStringer.wrap(cacheId));
                                                builder.setCachePtr(
                                                        org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                                ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder =
                                                        ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                                svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
                                                builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                                builder.setTxState(HBaseZeroCopyByteString.wrap(txState));
                                                instance.addServerCache(controller, builder.build(), rpcCallback);
                                                if (controller.getFailedOn() != null) {
                                                    throw controller.getFailedOn();
                                                }
                                                return rpcCallback.get();
                                            }
                                        });
                            } catch (Throwable t) {
                                throw new Exception(t);
                            }
                            if (results != null && results.size() == 1) {
                                return results.values().iterator().next().getReturn();
                            }
                            return false;
                        }

                        /**
                         * Defines the grouping for round robin behavior. All threads spawned to process
                         * this scan will be grouped together and time sliced with other simultaneously
                         * executing parallel scans.
                         */
                        @Override
                        public Object getJobId() {
                            return ServerCacheClient.this;
                        }

                        @Override
                        public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                            return NO_OP_INSTANCE;
                        }
                    }));
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(addCustomAnnotations(
                                "NOT adding cache entry to be sent for " + entry
                                        + " since one already exists for that entry",
                                connection));
                    }
                }
            }

            hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
            // Execute in parallel
            int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                    QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
            for (Future<Boolean> future : futures) {
                future.get(timeoutMs, TimeUnit.MILLISECONDS);
            }

            cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
            success = true;
        } catch (SQLException e) {
            firstException = e;
        } catch (Exception e) {
            firstException = new SQLException(e);
        } finally {
            try {
                if (!success) {
                    SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                    for (Future<Boolean> future : futures) {
                        future.cancel(true);
                    }
                }
            } finally {
                try {
                    Closeables.closeAll(closeables);
                } catch (IOException e) {
                    if (firstException == null) {
                        firstException = new SQLException(e);
                    }
                } finally {
                    if (firstException != null) {
                        throw firstException;
                    }
                }
            }
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug(addCustomAnnotations(
                    "Cache " + cacheId + " successfully added to servers.", connection));
        }
        return hashCacheSpec;
    }