Example #1
 @Override
 public RelNode convert(RelNode rel) {
   LogicalJoin join = (LogicalJoin) rel;
   final List<RelNode> newInputs = new ArrayList<>();
   for (RelNode input : join.getInputs()) {
     if (input.getConvention() != getOutTrait()) {
       input = convert(input, input.getTraitSet().replace(out));
     }
     newInputs.add(input);
   }
   if (!canJoinOnCondition(join.getCondition())) {
     return null;
   }
   try {
     return new JdbcJoin(
         join.getCluster(),
         join.getTraitSet().replace(out),
         newInputs.get(0),
         newInputs.get(1),
         join.getCondition(),
         join.getVariablesSet(),
         join.getJoinType());
   } catch (InvalidRelException e) {
     LOGGER.fine(e.toString());
     return null;
   }
 }
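
The convert method above follows Calcite's ConverterRule pattern: each input is first converted to the target calling convention, and null is returned when the rule cannot produce a JDBC join. A minimal sketch of the rule declaration that would surround it (the class name and constructor wiring are assumptions, not taken from the source):

// Hypothetical sketch: a ConverterRule matching LogicalJoin in Convention.NONE
// and handing it to the convert(RelNode) method above.
class JdbcJoinRule extends ConverterRule {
  JdbcJoinRule(JdbcConvention out) {
    super(LogicalJoin.class, Convention.NONE, out, "JdbcJoinRule");
  }
  // ... the convert(RelNode) override shown above goes here
}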
Example #2
  /**
   * Creates a relational expression that projects the given fields of the input.
   *
   * <p>Optimizes if the fields are the identity projection.
   *
   * @param relBuilder RelBuilder
   * @param child Input relational expression
   * @param posList Source of each projected field
   * @return Relational expression that projects given fields
   */
  public static RelNode createProject(
      final RelBuilder relBuilder, final RelNode child, final List<Integer> posList) {
    RelDataType rowType = child.getRowType();
    final List<String> fieldNames = rowType.getFieldNames();
    final RexBuilder rexBuilder = child.getCluster().getRexBuilder();
    return createProject(
        child,
        new AbstractList<RexNode>() {
          public int size() {
            return posList.size();
          }

          public RexNode get(int index) {
            final int pos = posList.get(index);
            return rexBuilder.makeInputRef(child, pos);
          }
        },
        new AbstractList<String>() {
          public int size() {
            return posList.size();
          }

          public String get(int index) {
            final int pos = posList.get(index);
            return fieldNames.get(pos);
          }
        },
        true,
        relBuilder);
  }
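
A minimal usage sketch (relBuilder and input assumed in scope): keep only columns 2 and 0 of the input, preserving their original field names. Because the final argument above is true, an identity posList such as [0, 1, ..., n-1] would simply return the input unchanged.

RelNode projected = createProject(relBuilder, input, ImmutableList.of(2, 0));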
Example #3
  /**
   * Infers predicates for an Aggregate.
   *
   * <p>Pulls up predicates that contain only references to columns in the group set. For example:
   *
   * <pre>
   * childPullUpExprs : { a &gt; 7, b + c &lt; 10, a + e = 9}
   * groupSet         : { a, b}
   * pulledUpExprs    : { a &gt; 7}
   * </pre>
   */
  public RelOptPredicateList getPredicates(Aggregate agg) {
    RelNode child = agg.getInput();
    RelOptPredicateList childInfo = RelMetadataQuery.getPulledUpPredicates(child);

    List<RexNode> aggPullUpPredicates = new ArrayList<RexNode>();

    ImmutableBitSet groupKeys = agg.getGroupSet();
    Mapping m =
        Mappings.create(
            MappingType.PARTIAL_FUNCTION,
            child.getRowType().getFieldCount(),
            agg.getRowType().getFieldCount());

    int i = 0;
    for (int j : groupKeys) {
      m.set(j, i++);
    }

    for (RexNode r : childInfo.pulledUpPredicates) {
      ImmutableBitSet rCols = RelOptUtil.InputFinder.bits(r);
      if (groupKeys.contains(rCols)) {
        r = r.accept(new RexPermuteInputsShuttle(m, child));
        aggPullUpPredicates.add(r);
      }
    }
    return RelOptPredicateList.of(aggPullUpPredicates);
  }
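
A worked sketch of the group-key mapping above, with assumed child columns a=0, b=1, c=2, e=3 and groupSet {a, b}:

Mapping m = Mappings.create(MappingType.PARTIAL_FUNCTION, 4, 2);
m.set(0, 0); // a -> position 0 of the Aggregate's output
m.set(1, 1); // b -> position 1
// "a > 7" references only mapped columns, so RexPermuteInputsShuttle rewrites it
// against the Aggregate's row type and it is pulled up; "a + e = 9" references e
// (unmapped) and is dropped.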
Example #4
 @Test
 public void testExplainAsXml() {
   String sql = "select 1 + 2, 3 from (values (true))";
   final RelNode rel = tester.convertSqlToRel(sql).rel;
   StringWriter sw = new StringWriter();
   PrintWriter pw = new PrintWriter(sw);
   RelXmlWriter planWriter = new RelXmlWriter(pw, SqlExplainLevel.EXPPLAN_ATTRIBUTES);
   rel.explain(planWriter);
   pw.flush();
   TestUtil.assertEqualsVerbose(
       "<RelNode type=\"LogicalProject\">\n"
           + "\t<Property name=\"EXPR$0\">\n"
           + "\t\t+(1, 2)\t</Property>\n"
           + "\t<Property name=\"EXPR$1\">\n"
           + "\t\t3\t</Property>\n"
           + "\t<Inputs>\n"
           + "\t\t<RelNode type=\"LogicalValues\">\n"
           + "\t\t\t<Property name=\"tuples\">\n"
           + "\t\t\t\t[{ true }]\t\t\t</Property>\n"
           + "\t\t\t<Inputs/>\n"
           + "\t\t</RelNode>\n"
           + "\t</Inputs>\n"
           + "</RelNode>\n",
       Util.toLinux(sw.toString()));
 }
Example #5
 public static List<ExprNodeDesc> getExprNodes(
     List<Integer> inputRefs, RelNode inputRel, String inputTabAlias) {
   List<ExprNodeDesc> exprNodes = new ArrayList<ExprNodeDesc>();
   List<RexNode> rexInputRefs = getInputRef(inputRefs, inputRel);
   List<RexNode> exprs = inputRel.getChildExps();
   // TODO: Change ExprNodeConverter to be independent of Partition Expr
   ExprNodeConverter exprConv =
       new ExprNodeConverter(
           inputTabAlias,
           inputRel.getRowType(),
           new HashSet<Integer>(),
           inputRel.getCluster().getTypeFactory());
   for (int index = 0; index < rexInputRefs.size(); index++) {
     // The following check is only a guard against failures.
     // TODO: Knowing which expr is constant in GBY's aggregation function
     // arguments could be better done using Metadata provider of Calcite.
     if (exprs != null && index < exprs.size() && exprs.get(index) instanceof RexLiteral) {
       ExprNodeDesc exprNodeDesc = exprConv.visitLiteral((RexLiteral) exprs.get(index));
       exprNodes.add(exprNodeDesc);
     } else {
       RexNode iRef = rexInputRefs.get(index);
       exprNodes.add(iRef.accept(exprConv));
     }
   }
   return exprNodes;
 }
Example #6
 public RelNode convert(RelNode rel) {
   final LogicalMinus minus = (LogicalMinus) rel;
   if (minus.all) {
     return null; // EXCEPT ALL not implemented
   }
   final RelTraitSet traitSet = rel.getTraitSet().replace(out);
   return new JdbcMinus(rel.getCluster(), traitSet, convertList(minus.getInputs(), out), false);
 }
Example #7
    public RelNode convert(RelNode rel) {
      final LogicalFilter filter = (LogicalFilter) rel;

      return new JdbcFilter(
          rel.getCluster(),
          rel.getTraitSet().replace(out),
          convert(filter.getInput(), filter.getInput().getTraitSet().replace(out)),
          filter.getCondition());
    }
Example #8
    public RelNode convert(RelNode rel) {
      final LogicalProject project = (LogicalProject) rel;

      return new JdbcProject(
          rel.getCluster(),
          rel.getTraitSet().replace(out),
          convert(project.getInput(), project.getInput().getTraitSet().replace(out)),
          project.getProjects(),
          project.getRowType());
    }
Example #9
  /**
   * Infers predicates for a project.
   *
   * <ol>
   *   <li>Create a mapping from the input to the projection. Map only positions that directly
   *       reference an input column.
   *   <li>Expressions that contain only the above columns are retained in the Project's
   *       pullExpressions list.
   *   <li>For example, the expression 'a + e = 9' below will not be pulled up because 'e' is not
   *       in the projection list.
   *       <pre>
   * childPullUpExprs:      {a &gt; 7, b + c &lt; 10, a + e = 9}
   * projectionExprs:       {a, b, c, e / 2}
   * projectionPullupExprs: {a &gt; 7, b + c &lt; 10}
   * </pre>
   * </ol>
   */
  public RelOptPredicateList getPredicates(Project project) {
    RelNode child = project.getInput();
    final RexBuilder rexBuilder = project.getCluster().getRexBuilder();
    RelOptPredicateList childInfo = RelMetadataQuery.getPulledUpPredicates(child);

    List<RexNode> projectPullUpPredicates = new ArrayList<RexNode>();

    ImmutableBitSet.Builder columnsMappedBuilder = ImmutableBitSet.builder();
    Mapping m =
        Mappings.create(
            MappingType.PARTIAL_FUNCTION,
            child.getRowType().getFieldCount(),
            project.getRowType().getFieldCount());

    for (Ord<RexNode> o : Ord.zip(project.getProjects())) {
      if (o.e instanceof RexInputRef) {
        int sIdx = ((RexInputRef) o.e).getIndex();
        m.set(sIdx, o.i);
        columnsMappedBuilder.set(sIdx);
      }
    }

    // Go over childPullUpPredicates. If a predicate only contains columns in
    // 'columnsMapped' construct a new predicate based on mapping.
    final ImmutableBitSet columnsMapped = columnsMappedBuilder.build();
    for (RexNode r : childInfo.pulledUpPredicates) {
      ImmutableBitSet rCols = RelOptUtil.InputFinder.bits(r);
      if (columnsMapped.contains(rCols)) {
        r = r.accept(new RexPermuteInputsShuttle(m, child));
        projectPullUpPredicates.add(r);
      }
    }

    // Project can also generate constants. We need to include them.
    for (Ord<RexNode> expr : Ord.zip(project.getProjects())) {
      if (RexLiteral.isNullLiteral(expr.e)) {
        projectPullUpPredicates.add(
            rexBuilder.makeCall(
                SqlStdOperatorTable.IS_NULL, rexBuilder.makeInputRef(project, expr.i)));
      } else if (RexUtil.isConstant(expr.e)) {
        final List<RexNode> args =
            ImmutableList.of(rexBuilder.makeInputRef(project, expr.i), expr.e);
        final SqlOperator op =
            args.get(0).getType().isNullable() || args.get(1).getType().isNullable()
                ? SqlStdOperatorTable.IS_NOT_DISTINCT_FROM
                : SqlStdOperatorTable.EQUALS;
        projectPullUpPredicates.add(rexBuilder.makeCall(op, args));
      }
    }
    return RelOptPredicateList.of(projectPullUpPredicates);
  }
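
The final loop means a Project can introduce predicates of its own. A minimal sketch, for a hypothetical project whose output field 1 is the non-nullable literal 5:

RexNode ref = rexBuilder.makeInputRef(project, 1);
RexNode lit = rexBuilder.makeExactLiteral(BigDecimal.valueOf(5));
// Yields "=($1, 5)"; IS NOT DISTINCT FROM would be used if either side were nullable.
projectPullUpPredicates.add(rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, ref, lit));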
Example #10
    public RelNode convert(RelNode rel) {
      final LogicalCalc calc = (LogicalCalc) rel;

      // If there's a multiset, let FarragoMultisetSplitter work on it
      // first.
      if (RexMultisetUtil.containsMultiset(calc.getProgram())) {
        return null;
      }

      return new JdbcCalc(
          rel.getCluster(),
          rel.getTraitSet().replace(out),
          convert(calc.getInput(), calc.getTraitSet().replace(out)),
          calc.getProgram());
    }
Example #11
  /**
   * Gets the top-level select starting from the root. The assumption is that the root can only be
   * a Sort or a Project, and that the top Project is at most two levels below the Sort, i.e.
   * Sort(Limit)-Sort(OB)-Select.
   *
   * @param rootRel root of the plan tree
   * @return pair of the parent of the top-level Project and the Project itself (null if not found)
   */
  public static Pair<RelNode, RelNode> getTopLevelSelect(final RelNode rootRel) {
    RelNode tmpRel = rootRel;
    RelNode parentOforiginalProjRel = rootRel;
    HiveProject originalProjRel = null;

    while (tmpRel != null) {
      if (tmpRel instanceof HiveProject) {
        originalProjRel = (HiveProject) tmpRel;
        break;
      }
      parentOforiginalProjRel = tmpRel;
      tmpRel = tmpRel.getInput(0);
    }

    return new Pair<RelNode, RelNode>(parentOforiginalProjRel, originalProjRel);
  }
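
A usage sketch for a hypothetical plan of the shape Sort <- HiveProject <- scan: the walk stops at the first HiveProject, so the pair is (the Sort, the Project). If no HiveProject is found, the right element is null.

Pair<RelNode, RelNode> p = getTopLevelSelect(rootSort); // rootSort assumed in scope
RelNode parentOfSelect = p.left;            // the Sort
HiveProject select = (HiveProject) p.right; // the top-level Project, or null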
Example #12
 public static ImmutableList<RexNode> getInputRef(List<Integer> inputRefs, RelNode inputRel) {
   ImmutableList.Builder<RexNode> bldr = ImmutableList.<RexNode>builder();
   for (int i : inputRefs) {
     bldr.add(new RexInputRef(i, inputRel.getRowType().getFieldList().get(i).getType()));
   }
   return bldr.build();
 }
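
A one-line usage sketch (inputRel assumed in scope), building the references [$0, $2] with types taken from inputRel's row type:

ImmutableList<RexNode> refs = getInputRef(ImmutableList.of(0, 2), inputRel);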
Example #13
  /** Applies join-order optimizations using the Hep planner. */
  private RelNode getLoptJoinOrderTree(
      RelNode root,
      Class<? extends Join> joinClass,
      RelFactories.JoinFactory joinFactory,
      RelFactories.FilterFactory filterFactory,
      RelFactories.ProjectFactory projectFactory) {
    final HepProgramBuilder hepPgmBldr =
        new HepProgramBuilder()
            .addMatchOrder(HepMatchOrder.BOTTOM_UP)
            .addRuleInstance(new JoinToMultiJoinRule(joinClass))
            .addRuleInstance(new LoptOptimizeJoinRule(joinFactory, projectFactory, filterFactory))
            .addRuleInstance(ProjectRemoveRule.INSTANCE);
    // .addRuleInstance(new ProjectMergeRule(true, projectFactory));

    // .addRuleInstance(DrillMergeProjectRule.getInstance(true, projectFactory,
    // this.context.getFunctionRegistry()));

    final HepProgram hepPgm = hepPgmBldr.build();
    final HepPlanner hepPlanner = new HepPlanner(hepPgm);

    final List<RelMetadataProvider> list = Lists.newArrayList();
    list.add(DrillDefaultRelMetadataProvider.INSTANCE);
    hepPlanner.registerMetadataProviders(list);
    final RelMetadataProvider cachingMetaDataProvider =
        new CachingRelMetadataProvider(ChainedRelMetadataProvider.of(list), hepPlanner);

    // Modify RelMetaProvider for every RelNode in the SQL operator Rel tree.
    root.accept(new MetaDataProviderModifier(cachingMetaDataProvider));

    hepPlanner.setRoot(root);

    RelNode calciteOptimizedPlan = hepPlanner.findBestExp();

    return calciteOptimizedPlan;
  }
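
The three rules run bottom-up: JoinToMultiJoinRule collapses the join tree into a single MultiJoin, LoptOptimizeJoinRule re-orders it, and ProjectRemoveRule cleans up trivial projects afterwards. For reference, a minimal sketch of driving a HepPlanner with a single rule (root assumed in scope):

HepProgram program = new HepProgramBuilder()
    .addRuleInstance(ProjectRemoveRule.INSTANCE)
    .build();
HepPlanner planner = new HepPlanner(program);
planner.setRoot(root);
RelNode optimized = planner.findBestExp();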
Example #14
  public static List<String> getFieldNames(List<Integer> inputRefs, RelNode inputRel) {
    List<String> fieldNames = new ArrayList<String>();
    List<String> schemaNames = inputRel.getRowType().getFieldNames();
    for (Integer iRef : inputRefs) {
      fieldNames.add(schemaNames.get(iRef));
    }

    return fieldNames;
  }
Example #15
 public RelNode convert(RelNode rel) {
   final LogicalIntersect intersect = (LogicalIntersect) rel;
   if (intersect.all) {
     return null; // INTERSECT ALL not implemented
   }
   final RelTraitSet traitSet = intersect.getTraitSet().replace(out);
   return new JdbcIntersect(
       rel.getCluster(), traitSet, convertList(intersect.getInputs(), out), false);
 }
Example #16
 /** Creates a SemiJoin. */
 public static SemiJoin create(
     RelNode left,
     RelNode right,
     RexNode condition,
     ImmutableIntList leftKeys,
     ImmutableIntList rightKeys) {
   final RelOptCluster cluster = left.getCluster();
   return new SemiJoin(
       cluster, cluster.traitSetOf(Convention.NONE), left, right, condition, leftKeys, rightKeys);
 }
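
A usage sketch, assuming left, right, and an equi-join condition left.$0 = right.$0 are in scope; the key lists give the key positions within each input:

SemiJoin semiJoin = SemiJoin.create(
    left, right,
    condition,               // RexNode over the concatenated left and right fields
    ImmutableIntList.of(0),  // key columns of the left input
    ImmutableIntList.of(0)); // key columns of the right input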
Example #17
 public RelNode convert(RelNode rel) {
   final Sort sort = (Sort) rel;
   if (sort.offset != null || sort.fetch != null) {
     // Cannot implement "OFFSET n FETCH n" currently.
     return null;
   }
   final RelTraitSet traitSet = sort.getTraitSet().replace(out);
   return new JdbcSort(
       rel.getCluster(), traitSet, convert(sort.getInput(), traitSet), sort.getCollation());
 }
Example #18
  private RelNode convertToRel(SqlNode node) throws RelConversionException {
    final RelNode convertedNode = planner.convert(node);

    final RelMetadataProvider provider = convertedNode.getCluster().getMetadataProvider();

    // Register RelMetadataProvider with HepPlanner.
    final List<RelMetadataProvider> list = Lists.newArrayList(provider);
    hepPlanner.registerMetadataProviders(list);
    final RelMetadataProvider cachingMetaDataProvider =
        new CachingRelMetadataProvider(ChainedRelMetadataProvider.of(list), hepPlanner);
    convertedNode.accept(new MetaDataProviderModifier(cachingMetaDataProvider));

    // HepPlanner is specifically used for Window Function planning only.
    hepPlanner.setRoot(convertedNode);
    RelNode rel = hepPlanner.findBestExp();

    rel.accept(new MetaDataProviderModifier(provider));
    return rel;
  }
Example #19
  public static ExprNodeDesc getExprNode(
      Integer inputRefIndx, RelNode inputRel, ExprNodeConverter exprConv) {
    RexNode rexInputRef =
        new RexInputRef(
            inputRefIndx, inputRel.getRowType().getFieldList().get(inputRefIndx).getType());
    return rexInputRef.accept(exprConv);
  }
Example #20
  public static RexNode convert(
      RelOptCluster cluster,
      ExprNodeDesc joinCondnExprNode,
      List<RelNode> inputRels,
      LinkedHashMap<RelNode, RowResolver> relToHiveRR,
      Map<RelNode, ImmutableMap<String, Integer>> relToHiveColNameCalcitePosMap,
      boolean flattenExpr)
      throws SemanticException {
    List<InputCtx> inputCtxLst = new ArrayList<InputCtx>();

    int offSet = 0;
    for (RelNode r : inputRels) {
      inputCtxLst.add(
          new InputCtx(
              r.getRowType(), relToHiveColNameCalcitePosMap.get(r), relToHiveRR.get(r), offSet));
      offSet += r.getRowType().getFieldCount();
    }

    return (new RexNodeConverter(cluster, inputCtxLst, flattenExpr)).convert(joinCondnExprNode);
  }
Example #21
 private List<Double> averageJoinColumnSizes(Join rel, RelMetadataQuery mq, boolean semijoin) {
   final RelNode left = rel.getLeft();
   final RelNode right = rel.getRight();
   final List<Double> lefts = mq.getAverageColumnSizes(left);
   final List<Double> rights = semijoin ? null : mq.getAverageColumnSizes(right);
   if (lefts == null && rights == null) {
     return null;
   }
   final int fieldCount = rel.getRowType().getFieldCount();
   Double[] sizes = new Double[fieldCount];
   if (lefts != null) {
     lefts.toArray(sizes);
   }
   if (rights != null) {
     final int leftCount = left.getRowType().getFieldCount();
     for (int i = 0; i < rights.size(); i++) {
       sizes[leftCount + i] = rights.get(i);
     }
   }
   return ImmutableNullableList.copyOf(sizes);
 }
Example #22
 public Double getDistinctRowCount(Union rel, ImmutableBitSet groupKey, RexNode predicate) {
   Double rowCount = 0.0;
   int[] adjustments = new int[rel.getRowType().getFieldCount()];
   RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
   for (RelNode input : rel.getInputs()) {
     // convert the predicate to reference the types of the union child
     RexNode modifiedPred;
     if (predicate == null) {
       modifiedPred = null;
     } else {
       modifiedPred =
           predicate.accept(
               new RelOptUtil.RexInputConverter(
                   rexBuilder, null, input.getRowType().getFieldList(), adjustments));
     }
     Double partialRowCount = RelMetadataQuery.getDistinctRowCount(input, groupKey, modifiedPred);
     if (partialRowCount == null) {
       return null;
     }
     rowCount += partialRowCount;
   }
   return rowCount;
 }
Example #23
 public static List<RexNode> getProjsFromBelowAsInputRef(final RelNode rel) {
   List<RexNode> projectList =
       Lists.transform(
           rel.getRowType().getFieldList(),
           new Function<RelDataTypeField, RexNode>() {
             @Override
             public RexNode apply(RelDataTypeField field) {
               return rel.getCluster()
                   .getRexBuilder()
                   .makeInputRef(field.getType(), field.getIndex());
             }
           });
   return projectList;
 }
Example #24
 /**
  * Catch-all implementation for {@link BuiltInMetadata.Size#averageRowSize()}, invoked using
  * reflection.
  *
  * @see org.apache.calcite.rel.metadata.RelMetadataQuery#getAverageRowSize
  */
 public Double averageRowSize(RelNode rel, RelMetadataQuery mq) {
   final List<Double> averageColumnSizes = mq.getAverageColumnSizes(rel);
   if (averageColumnSizes == null) {
     return null;
   }
   Double d = 0d;
   final List<RelDataTypeField> fields = rel.getRowType().getFieldList();
   for (Pair<Double, RelDataTypeField> p : Pair.zip(averageColumnSizes, fields)) {
     if (p.left == null) {
       d += averageFieldValueSize(p.right);
     } else {
       d += p.left;
     }
   }
   return d;
 }
Example #25
  private RelNode preprocessNode(RelNode rel) throws SqlUnsupportedException {
    /*
     * Traverse the tree to do the following pre-processing tasks:
     *
     * 1. Replace the convert_from / convert_to functions with their actual implementations,
     * e.g. convert_from(EXPR, 'JSON') becomes convert_fromjson(EXPR). TODO: Ideally all
     * function rewrites would move here instead of DrillOptiq.
     *
     * 2. Check whether the tree contains unsupported functions; throw SqlUnsupportedException
     * if there are any.
     */

    PreProcessLogicalRel visitor =
        PreProcessLogicalRel.createVisitor(
            planner.getTypeFactory(), context.getDrillOperatorTable());
    try {
      rel = rel.accept(visitor);
    } catch (UnsupportedOperationException ex) {
      visitor.convertException();
      throw ex;
    }

    return rel;
  }
Example #26
 public JdbcTableModify(
     RelOptCluster cluster,
     RelTraitSet traitSet,
     RelOptTable table,
     Prepare.CatalogReader catalogReader,
     RelNode input,
     Operation operation,
     List<String> updateColumnList,
     boolean flattened) {
   super(cluster, traitSet, table, catalogReader, input, operation, updateColumnList, flattened);
   assert input.getConvention() instanceof JdbcConvention;
   assert getConvention() instanceof JdbcConvention;
   final ModifiableTable modifiableTable = table.unwrap(ModifiableTable.class);
   if (modifiableTable == null) {
     throw new AssertionError(); // TODO: user error in validator
   }
   this.expression = table.getExpression(Queryable.class);
   if (expression == null) {
     throw new AssertionError(); // TODO: user error in validator
   }
 }
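
The two assertions and null checks encode the table capabilities this constructor relies on. A minimal sketch of the unwrap pattern, for some RelOptTable table:

ModifiableTable mt = table.unwrap(ModifiableTable.class); // null if the table is not modifiable
Expression expr = table.getExpression(Queryable.class);   // null if it cannot be queried as a Queryable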
Example #27
  /**
   * Does logical planning using both the VolcanoPlanner and the LOPT HepPlanner.
   *
   * @param relNode root of the plan to optimize
   * @return the logical plan after Volcano conversion and LOPT join-order optimization
   * @throws RelConversionException if the plan cannot be transformed
   * @throws SqlUnsupportedException if the query uses unsupported constructs
   */
  private RelNode logicalPlanningVolcanoAndLopt(RelNode relNode)
      throws RelConversionException, SqlUnsupportedException {

    final RelNode convertedRelNode =
        planner.transform(
            DrillSqlWorker.LOGICAL_CONVERT_RULES,
            relNode.getTraitSet().plus(DrillRel.DRILL_LOGICAL),
            relNode);
    log("VolCalciteRel", convertedRelNode, logger);

    final RelNode loptNode =
        getLoptJoinOrderTree(
            convertedRelNode,
            DrillJoinRel.class,
            DrillRelFactories.DRILL_LOGICAL_JOIN_FACTORY,
            DrillRelFactories.DRILL_LOGICAL_FILTER_FACTORY,
            DrillRelFactories.DRILL_LOGICAL_PROJECT_FACTORY);

    log("HepCalciteRel", loptNode, logger);

    return loptNode;
  }
Example #28
 public RelNode convert(RelNode rel) {
   final LogicalAggregate agg = (LogicalAggregate) rel;
   if (agg.getGroupSets().size() != 1) {
     // GROUPING SETS not supported; see
     // [CALCITE-734] Push GROUPING SETS to underlying SQL via JDBC adapter
     return null;
   }
   final RelTraitSet traitSet = agg.getTraitSet().replace(out);
   try {
     return new JdbcAggregate(
         rel.getCluster(),
         traitSet,
         convert(agg.getInput(), out),
         agg.indicator,
         agg.getGroupSet(),
         agg.getGroupSets(),
         agg.getAggCallList());
   } catch (InvalidRelException e) {
     LOGGER.fine(e.toString());
     return null;
   }
 }
Example #29
 public void visit(RelNode node, int ordinal, RelNode parent) {
   if (!node.isValid(true)) {
     ++invalidCount;
   }
   super.visit(node, ordinal, parent);
 }
Example #30
  /**
   * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for {@link
   * org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin}.
   */
  public TrimResult trimFields(
      HiveMultiJoin join, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
    final int fieldCount = join.getRowType().getFieldCount();
    final RexNode conditionExpr = join.getCondition();

    // Add in fields used in the condition.
    final Set<RelDataTypeField> combinedInputExtraFields =
        new LinkedHashSet<RelDataTypeField>(extraFields);
    RelOptUtil.InputFinder inputFinder = new RelOptUtil.InputFinder(combinedInputExtraFields);
    inputFinder.inputBitSet.addAll(fieldsUsed);
    conditionExpr.accept(inputFinder);
    final ImmutableBitSet fieldsUsedPlus = inputFinder.inputBitSet.build();

    int inputStartPos = 0;
    int changeCount = 0;
    int newFieldCount = 0;
    List<RelNode> newInputs = new ArrayList<RelNode>();
    List<Mapping> inputMappings = new ArrayList<Mapping>();
    for (RelNode input : join.getInputs()) {
      final RelDataType inputRowType = input.getRowType();
      final int inputFieldCount = inputRowType.getFieldCount();

      // Compute required mapping.
      ImmutableBitSet.Builder inputFieldsUsed = ImmutableBitSet.builder();
      for (int bit : fieldsUsedPlus) {
        if (bit >= inputStartPos && bit < inputStartPos + inputFieldCount) {
          inputFieldsUsed.set(bit - inputStartPos);
        }
      }

      Set<RelDataTypeField> inputExtraFields = Collections.<RelDataTypeField>emptySet();
      TrimResult trimResult = trimChild(join, input, inputFieldsUsed.build(), inputExtraFields);
      newInputs.add(trimResult.left);
      if (trimResult.left != input) {
        ++changeCount;
      }

      final Mapping inputMapping = trimResult.right;
      inputMappings.add(inputMapping);

      // Move offset to point to start of next input.
      inputStartPos += inputFieldCount;
      newFieldCount += inputMapping.getTargetCount();
    }

    Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, newFieldCount);
    int offset = 0;
    int newOffset = 0;
    for (int i = 0; i < inputMappings.size(); i++) {
      Mapping inputMapping = inputMappings.get(i);
      for (IntPair pair : inputMapping) {
        mapping.set(pair.source + offset, pair.target + newOffset);
      }
      offset += inputMapping.getSourceCount();
      newOffset += inputMapping.getTargetCount();
    }

    if (changeCount == 0 && mapping.isIdentity()) {
      return new TrimResult(join, Mappings.createIdentity(fieldCount));
    }

    // Build new join.
    final RexVisitor<RexNode> shuttle =
        new RexPermuteInputsShuttle(mapping, newInputs.toArray(new RelNode[newInputs.size()]));
    RexNode newConditionExpr = conditionExpr.accept(shuttle);

    final RelDataType newRowType =
        RelOptUtil.permute(join.getCluster().getTypeFactory(), join.getRowType(), mapping);
    final RelNode newJoin =
        new HiveMultiJoin(
            join.getCluster(),
            newInputs,
            newConditionExpr,
            newRowType,
            join.getJoinInputs(),
            join.getJoinTypes(),
            join.getJoinFilters());

    return new TrimResult(newJoin, mapping);
  }
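
A worked sketch of the offset bookkeeping in the mapping-composition loop above, for hypothetical inputs of 3 and 2 fields where trimming keeps fields {0, 2} of the first input and {1} of the second:

// 3 + 2 = 5 source columns, 2 + 1 = 3 target columns.
Mapping combined = Mappings.create(MappingType.INVERSE_SURJECTION, 5, 3);
combined.set(0, 0);         // input 0, field 0
combined.set(2, 1);         // input 0, field 2
combined.set(3 + 1, 2 + 0); // input 1, field 1: source offset 3, target offset 2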