/**
   * Returns the type of the row which results when two relations are joined.
   *
   * <p>The resulting row type consists of the system fields (if any), followed by the fields of
   * the left type, followed by the fields of the right type. Duplicate field names are renamed so
   * that every field name in the result is unique. The field name list, if present, overrides the
   * original names of the fields.
   *
   * @param typeFactory Type factory
   * @param leftType Type of left input to join
   * @param rightType Type of right input to join; may be null, in which case the result contains
   *     only the system and left fields
   * @param fieldNameList If not null, overrides the original names of the fields
   * @param systemFieldList List of system fields that will be prefixed to the output row type;
   *     typically empty but must not be null
   * @return Type of the row which results when the two relations are joined
   */
  public static RelDataType createJoinType(
      RelDataTypeFactory typeFactory,
      RelDataType leftType,
      RelDataType rightType,
      List<String> fieldNameList,
      List<RelDataTypeField> systemFieldList) {
    assert (fieldNameList == null)
        || (fieldNameList.size()
            == (systemFieldList.size() + leftType.getFieldCount() + rightType.getFieldCount()));
    List<String> nameList = new ArrayList<>();
    final List<RelDataType> typeList = new ArrayList<>();

    // Use a set to keep track of the field names; this is needed
    // so that the contains() check for name uniqueness runs in
    // constant time (or logarithmic time for the case-insensitive
    // TreeSet); doing a contains() on a list would be linear, which
    // is expensive when the number of fields is large.
    final Set<String> uniqueNameList =
        typeFactory.getTypeSystem().isSchemaCaseSensitive()
            ? new HashSet<String>()
            : new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
    addFields(systemFieldList, typeList, nameList, uniqueNameList);
    addFields(leftType.getFieldList(), typeList, nameList, uniqueNameList);
    if (rightType != null) {
      addFields(rightType.getFieldList(), typeList, nameList, uniqueNameList);
    }
    if (fieldNameList != null) {
      assert fieldNameList.size() == nameList.size();
      nameList = fieldNameList;
    }
    return typeFactory.createStructType(typeList, nameList);
  }
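
  // A minimal usage sketch, not part of the original class. It assumes
  // Calcite's SqlTypeFactoryImpl, RelDataTypeSystem and SqlTypeName are on
  // the classpath; the field names (EMPNO, DEPTNO, DNAME) are illustrative
  // only.
  private static RelDataType exampleJoinType() {
    final RelDataTypeFactory typeFactory =
        new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    final RelDataType leftType =
        typeFactory.builder()
            .add("EMPNO", SqlTypeName.INTEGER)
            .add("DEPTNO", SqlTypeName.INTEGER)
            .build();
    final RelDataType rightType =
        typeFactory.builder()
            .add("DEPTNO", SqlTypeName.INTEGER)
            .add("DNAME", SqlTypeName.VARCHAR, 20)
            .build();
    // No renames and no system fields; the duplicate DEPTNO on the right is
    // uniquified (e.g. to DEPTNO0), giving (EMPNO, DEPTNO, DEPTNO0, DNAME).
    return createJoinType(
        typeFactory, leftType, rightType, null, Collections.emptyList());
  }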
  /**
   * Returns the Java type used to represent values of the given SQL type: boxed classes for
   * nullable types, primitives where possible for NOT NULL types, or {@code null} if there is no
   * known mapping.
   */
  public Type getJavaClass(RelDataType type) {
   if (type instanceof JavaType) {
     JavaType javaType = (JavaType) type;
     return javaType.getJavaClass();
   }
   if (type.isStruct() && type.getFieldCount() == 1) {
     return getJavaClass(type.getFieldList().get(0).getType());
   }
   if (type instanceof BasicSqlType || type instanceof IntervalSqlType) {
     switch (type.getSqlTypeName()) {
       case VARCHAR:
       case CHAR:
         return String.class;
       case DATE:
       case TIME:
       case INTEGER:
       case INTERVAL_YEAR_MONTH:
         return type.isNullable() ? Integer.class : int.class;
       case TIMESTAMP:
       case BIGINT:
       case INTERVAL_DAY_TIME:
         return type.isNullable() ? Long.class : long.class;
       case SMALLINT:
         return type.isNullable() ? Short.class : short.class;
       case TINYINT:
         return type.isNullable() ? Byte.class : byte.class;
       case DECIMAL:
         return BigDecimal.class;
       case BOOLEAN:
         return type.isNullable() ? Boolean.class : boolean.class;
       case DOUBLE:
        case FLOAT: // sic; SQL FLOAT is deliberately represented as a Java double
         return type.isNullable() ? Double.class : double.class;
       case REAL:
         return type.isNullable() ? Float.class : float.class;
       case BINARY:
       case VARBINARY:
         return ByteString.class;
       case ARRAY:
         return Array.class;
       case ANY:
         return Object.class;
     }
   }
   switch (type.getSqlTypeName()) {
     case ROW:
       assert type instanceof RelRecordType;
       if (type instanceof JavaRecordType) {
         return ((JavaRecordType) type).clazz;
       } else {
         return createSyntheticType((RelRecordType) type);
       }
     case MAP:
       return Map.class;
     case ARRAY:
     case MULTISET:
       return List.class;
   }
   return null;
 }
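
  // Hedged usage sketch, not in the original source. It assumes this method
  // lives on a Calcite type factory such as JavaTypeFactoryImpl, so that
  // createSqlType and createTypeWithNullability are available; it shows how
  // nullability selects boxed versus primitive classes.
  private void exampleGetJavaClass() {
    final RelDataType intNotNull = createSqlType(SqlTypeName.INTEGER);
    final RelDataType intNullable = createTypeWithNullability(intNotNull, true);
    assert getJavaClass(intNotNull) == int.class;       // NOT NULL -> primitive
    assert getJavaClass(intNullable) == Integer.class;  // nullable -> boxed
    assert getJavaClass(createSqlType(SqlTypeName.VARCHAR, 10)) == String.class;
  }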
  /**
   * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for {@link
   * org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin}.
   */
  public TrimResult trimFields(
      HiveMultiJoin join, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
    final int fieldCount = join.getRowType().getFieldCount();
    final RexNode conditionExpr = join.getCondition();

    // Add in fields used in the condition.
    final Set<RelDataTypeField> combinedInputExtraFields = new LinkedHashSet<>(extraFields);
    RelOptUtil.InputFinder inputFinder = new RelOptUtil.InputFinder(combinedInputExtraFields);
    inputFinder.inputBitSet.addAll(fieldsUsed);
    conditionExpr.accept(inputFinder);
    final ImmutableBitSet fieldsUsedPlus = inputFinder.inputBitSet.build();

    int inputStartPos = 0;
    int changeCount = 0;
    int newFieldCount = 0;
    List<RelNode> newInputs = new ArrayList<>();
    List<Mapping> inputMappings = new ArrayList<>();
    for (RelNode input : join.getInputs()) {
      final RelDataType inputRowType = input.getRowType();
      final int inputFieldCount = inputRowType.getFieldCount();

      // Compute required mapping.
      ImmutableBitSet.Builder inputFieldsUsed = ImmutableBitSet.builder();
      for (int bit : fieldsUsedPlus) {
        if (bit >= inputStartPos && bit < inputStartPos + inputFieldCount) {
          inputFieldsUsed.set(bit - inputStartPos);
        }
      }

      Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
      TrimResult trimResult = trimChild(join, input, inputFieldsUsed.build(), inputExtraFields);
      newInputs.add(trimResult.left);
      if (trimResult.left != input) {
        ++changeCount;
      }

      final Mapping inputMapping = trimResult.right;
      inputMappings.add(inputMapping);

      // Move offset to point to start of next input.
      inputStartPos += inputFieldCount;
      newFieldCount += inputMapping.getTargetCount();
    }

    // Compose the per-input mappings into a single mapping over the join's
    // row type, offsetting each by the preceding inputs' source and target
    // field counts.
    Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, newFieldCount);
    int offset = 0;
    int newOffset = 0;
    for (int i = 0; i < inputMappings.size(); i++) {
      Mapping inputMapping = inputMappings.get(i);
      for (IntPair pair : inputMapping) {
        mapping.set(pair.source + offset, pair.target + newOffset);
      }
      offset += inputMapping.getSourceCount();
      newOffset += inputMapping.getTargetCount();
    }

    // If no input changed and the mapping is the identity, there is nothing
    // to trim.
    if (changeCount == 0 && mapping.isIdentity()) {
      return new TrimResult(join, Mappings.createIdentity(fieldCount));
    }

    // Build new join.
    final RexVisitor<RexNode> shuttle =
        new RexPermuteInputsShuttle(mapping, newInputs.toArray(new RelNode[0]));
    RexNode newConditionExpr = conditionExpr.accept(shuttle);

    final RelDataType newRowType =
        RelOptUtil.permute(join.getCluster().getTypeFactory(), join.getRowType(), mapping);
    final RelNode newJoin =
        new HiveMultiJoin(
            join.getCluster(),
            newInputs,
            newConditionExpr,
            newRowType,
            join.getJoinInputs(),
            join.getJoinTypes(),
            join.getJoinFilters());

    return new TrimResult(newJoin, mapping);
  }
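
  // Hedged worked example, not in the original source: it shows how the
  // per-input mappings compose into the join-level mapping built above.
  // Suppose the join has two inputs with 3 and 2 fields (5 in total) and
  // only join fields 1 and 4 survive trimming.
  private static Mapping exampleComposedMapping() {
    final Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, 5, 2);
    // Input 0 keeps its local field 1; offset and newOffset are both 0.
    mapping.set(1 + 0, 0 + 0);
    // Input 1 keeps its local field 1; its sources start at offset 3 and
    // its targets at newOffset 1, exactly as in the loop above.
    mapping.set(1 + 3, 0 + 1);
    return mapping; // maps source 1 -> target 0 and source 4 -> target 1
  }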