Example #1
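  /**
   * Sends the serialized cache (cachePtr) to every region server hosting a region of the
   * given table that intersects the supplied key ranges, via the ServerCachingService
   * coprocessor endpoint. Returns a ServerCache handle recording the cache id and the set
   * of servers that received the cache.
   */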
  public ServerCache addServerCache(
      ScanRanges keyRanges,
      final ImmutableBytesWritable cachePtr,
      final byte[] txState,
      final ServerCacheFactory cacheFactory,
      final TableRef cacheUsingTableRef)
      throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    // Execute the endpoint in parallel on each server to send the compressed hash cache
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
      final PTable cacheUsingTable = cacheUsingTableRef.getTable();
      List<HRegionLocation> locations =
          services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
      int nRegions = locations.size();
      // Size these based on worst case
      futures = new ArrayList<Future<Boolean>>(nRegions);
      Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
      for (HRegionLocation entry : locations) {
        // Keep track of servers we've sent to and only send once
        byte[] regionStartKey = entry.getRegionInfo().getStartKey();
        byte[] regionEndKey = entry.getRegionInfo().getEndKey();
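        // For a local index, region boundaries carry a row key offset that must be
        // accounted for when checking intersection with the scan's key ranges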
        if (!servers.contains(entry)
            && keyRanges.intersects(
                regionStartKey,
                regionEndKey,
                cacheUsingTable.getIndexType() == IndexType.LOCAL
                    ? ScanUtil.getRowKeyOffset(regionStartKey, regionEndKey)
                    : 0,
                true)) {
          // Call RPC once per server
          servers.add(entry);
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
          }
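          // The region start key routes the coprocessor RPC to the server hosting this region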
          final byte[] key = entry.getRegionInfo().getStartKey();
          final HTableInterface htable =
              services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
          closeables.add(htable);
          futures.add(
              executor.submit(
                  new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
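                      // Invoke the ServerCachingService endpoint on the single region whose
                      // start key is 'key'; the result map is keyed by region name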
                      final Map<byte[], AddServerCacheResponse> results;
                      try {
                        results =
                            htable.coprocessorService(
                                ServerCachingService.class,
                                key,
                                key,
                                new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
                                  @Override
                                  public AddServerCacheResponse call(ServerCachingService instance)
                                      throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback =
                                        new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder =
                                        AddServerCacheRequest.newBuilder();
                                    if (connection.getTenantId() != null) {
                                      try {
                                        byte[] tenantIdBytes =
                                            ScanUtil.getTenantIdBytes(
                                                cacheUsingTable.getRowKeySchema(),
                                                cacheUsingTable.getBucketNum() != null,
                                                connection.getTenantId(),
                                                cacheUsingTable.isMultiTenant());
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                      } catch (SQLException e) {
                                        throw new IOException(e);
                                      }
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(
                                        org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder
                                        svrCacheFactoryBuilder =
                                            ServerCacheFactoryProtos.ServerCacheFactory
                                                .newBuilder();
                                    svrCacheFactoryBuilder.setClassName(
                                        cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                    builder.setTxState(HBaseZeroCopyByteString.wrap(txState));
                                    instance.addServerCache(
                                        controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                      throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                  }
                                });
                      } catch (Throwable t) {
                        throw new Exception(t);
                      }
                      if (results != null && results.size() == 1) {
                        return results.values().iterator().next().getReturn();
                      }
                      return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                      return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                      return NO_OP_INSTANCE;
                    }
                  }));
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                addCustomAnnotations(
                    "NOT adding cache entry to be sent for "
                        + entry
                        + " since one already exists for that entry",
                    connection));
          }
        }
      }

      hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
      // Wait (bounded by the thread timeout) for the parallel RPCs to complete
      int timeoutMs =
          services
              .getProps()
              .getInt(
                  QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                  QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
      for (Future<Boolean> future : futures) {
        future.get(timeoutMs, TimeUnit.MILLISECONDS);
      }

      cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
      success = true;
    } catch (SQLException e) {
      firstException = e;
    } catch (Exception e) {
      firstException = new SQLException(e);
    } finally {
      try {
        if (!success) {
          SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
          for (Future<Boolean> future : futures) {
            future.cancel(true);
          }
        }
      } finally {
        try {
          Closeables.closeAll(closeables);
        } catch (IOException e) {
          if (firstException == null) {
            firstException = new SQLException(e);
          }
        } finally {
          if (firstException != null) {
            throw firstException;
          }
        }
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
  }
Example #2
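  /**
   * Attempts to compile the given SELECT against the supplied index table. Returns the
   * resulting QueryPlan if the index is active and can serve the query (for hinted plans,
   * possibly by rewriting the query as an IN-subquery against the data table), or null if
   * the index cannot be used.
   */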
  private static QueryPlan addPlan(
      PhoenixStatement statement,
      SelectStatement select,
      PTable index,
      List<? extends PDatum> targetColumns,
      ParallelIteratorFactory parallelIteratorFactory,
      QueryPlan dataPlan,
      boolean isHinted)
      throws SQLException {
    int nColumns = dataPlan.getProjector().getColumnCount();
    String tableAlias = dataPlan.getTableRef().getTableAlias();
    String alias =
        tableAlias == null
            ? null
            : '"' + tableAlias + '"'; // double quote in case it's case sensitive
    String schemaName = index.getParentSchemaName().getString();
    schemaName = schemaName.length() == 0 ? null : '"' + schemaName + '"';

    String tableName = '"' + index.getTableName().getString() + '"';
    TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName));
    SelectStatement indexSelect = FACTORY.select(select, table);
    ColumnResolver resolver =
        FromCompiler.getResolverForQuery(indexSelect, statement.getConnection());
    // Do tuple projection if and only if the data plan does
    boolean isProjected =
        dataPlan.getContext().getResolver().getTables().get(0).getTable().getType()
            == PTableType.PROJECTED;
    // Check the index state of the (potentially just updated) index table to make sure it's active
    if (PIndexState.ACTIVE.equals(resolver.getTables().get(0).getTable().getIndexState())) {
      try {
        // Translate parse nodes that match indexed expressions into references to the
        // corresponding index columns
        indexSelect =
            ParseNodeRewriter.rewrite(
                indexSelect,
                new IndexExpressionParseNodeRewriter(
                    index, statement.getConnection(), indexSelect.getUdfParseNodes()));
        QueryCompiler compiler =
            new QueryCompiler(
                statement,
                indexSelect,
                resolver,
                targetColumns,
                parallelIteratorFactory,
                dataPlan.getContext().getSequenceManager(),
                isProjected);

        QueryPlan plan = compiler.compile();
        // If the query has no WHERE clause and some projected columns are missing from the
        // index, each local index row would need its missing columns fetched from the data
        // table, effectively a full scan of both the local index and the data table, which
        // is inefficient; in that case we don't use the index. If the index covers all
        // projected columns, we can use it even without a WHERE clause.
        if (index.getIndexType() == IndexType.LOCAL
            && indexSelect.getWhere() == null
            && !plan.getContext().getDataColumns().isEmpty()) {
          return null;
        }
        // Checking the number of columns handles the wildcard cases correctly: in that case
        // the index must contain all columns from the data table to be usable.
        if (plan.getTableRef().getTable().getIndexState() == PIndexState.ACTIVE) {
          if (plan.getProjector().getColumnCount() == nColumns) {
            return plan;
          } else if (index.getIndexType() == IndexType.GLOBAL) {
            throw new ColumnNotFoundException("*");
          }
        }
      } catch (ColumnNotFoundException e) {
        /* A column is being used that's not in our index.
         * Since we currently don't keep stats, we don't know the selectivity of the index.
         * For now, if this is a hinted plan, we will try rewriting the query as a subquery;
         * otherwise we just don't use this index (as opposed to trying to join back from
         * the index table to the data table).
         */
        SelectStatement dataSelect = (SelectStatement) dataPlan.getStatement();
        ParseNode where = dataSelect.getWhere();
        if (isHinted && where != null) {
          StatementContext context = new StatementContext(statement, resolver);
          WhereConditionRewriter whereRewriter =
              new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context);
          where = where.accept(whereRewriter);
          if (where != null) {
            PTable dataTable = dataPlan.getTableRef().getTable();
            List<PColumn> pkColumns = dataTable.getPKColumns();
            List<AliasedNode> aliasedNodes =
                Lists.<AliasedNode>newArrayListWithExpectedSize(pkColumns.size());
            List<ParseNode> nodes = Lists.<ParseNode>newArrayListWithExpectedSize(pkColumns.size());
            boolean isSalted = dataTable.getBucketNum() != null;
            boolean isTenantSpecific =
                dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null;
            int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0);
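            // Skip the salt byte and tenant ID, which occupy the leading PK positions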
            for (int i = posOffset; i < pkColumns.size(); i++) {
              PColumn column = pkColumns.get(i);
              String indexColName = IndexUtil.getIndexColumnName(column);
              ParseNode indexColNode =
                  new ColumnParseNode(null, '"' + indexColName + '"', indexColName);
              PDataType indexColType = IndexUtil.getIndexColumnDataType(column);
              PDataType dataColType = column.getDataType();
              if (indexColType != dataColType) {
                indexColNode = FACTORY.cast(indexColNode, dataColType, null, null);
              }
              aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode));
              nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"'));
            }
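            // Rewrite as: SELECT ... FROM data WHERE (pk1, ..., pkN) IN
            //   (SELECT pk1', ..., pkN' FROM index WHERE <conditions the index can satisfy>)
            // optionally ANDed with conditions extracted from the original WHERE clause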
            SelectStatement innerSelect =
                FACTORY.select(
                    indexSelect.getFrom(),
                    indexSelect.getHint(),
                    false,
                    aliasedNodes,
                    where,
                    null,
                    null,
                    null,
                    null,
                    null,
                    indexSelect.getBindCount(),
                    false,
                    indexSelect.hasSequence(),
                    Collections.<SelectStatement>emptyList(),
                    indexSelect.getUdfParseNodes());
            ParseNode outerWhere =
                FACTORY.in(
                    nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes),
                    FACTORY.subquery(innerSelect, false),
                    false,
                    true);
            ParseNode extractedCondition = whereRewriter.getExtractedCondition();
            if (extractedCondition != null) {
              outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition));
            }
            HintNode hint =
                HintNode.combine(
                    HintNode.subtract(
                        indexSelect.getHint(),
                        new Hint[] {Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION}),
                    FACTORY.hint("NO_INDEX"));
            SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere);
            ColumnResolver queryResolver =
                FromCompiler.getResolverForQuery(query, statement.getConnection());
            query = SubqueryRewriter.transform(query, queryResolver, statement.getConnection());
            queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
            query = StatementNormalizer.normalize(query, queryResolver);
            QueryPlan plan =
                new QueryCompiler(
                        statement,
                        query,
                        queryResolver,
                        targetColumns,
                        parallelIteratorFactory,
                        dataPlan.getContext().getSequenceManager(),
                        isProjected)
                    .compile();
            return plan;
          }
        }
      }
    }
    return null;
  }