// The output table's column names always follow the left table,
// and the output type is chosen based on Drill's implicit casting rules.
private void inferOutputFields() {
  outputFields = Lists.newArrayList();
  leftSchema = leftSide.getRecordBatch().getSchema();
  rightSchema = rightSide.getRecordBatch().getSchema();
  Iterator<MaterializedField> leftIter = leftSchema.iterator();
  Iterator<MaterializedField> rightIter = rightSchema.iterator();

  int index = 1;
  while (leftIter.hasNext() && rightIter.hasNext()) {
    MaterializedField leftField = leftIter.next();
    MaterializedField rightField = rightIter.next();

    if (hasSameTypeAndMode(leftField, rightField)) {
      outputFields.add(MaterializedField.create(leftField.getPath(), leftField.getType()));
    } else {
      // If the two types differ, cast the column of one of the tables
      // to the least restrictive data type.
      MinorType outputMinorType;
      if (leftField.getType().getMinorType() == rightField.getType().getMinorType()) {
        outputMinorType = leftField.getType().getMinorType();
      } else {
        List<MinorType> types = Lists.newLinkedList();
        types.add(leftField.getType().getMinorType());
        types.add(rightField.getType().getMinorType());
        outputMinorType = TypeCastRules.getLeastRestrictiveType(types);
        if (outputMinorType == null) {
          throw new DrillRuntimeException("Type mismatch between "
              + leftField.getType().getMinorType().toString()
              + " on the left side and "
              + rightField.getType().getMinorType().toString()
              + " on the right side in column " + index + " of UNION ALL");
        }
      }

      // The output data mode should be as flexible as the more flexible
      // of the two input tables.
      List<DataMode> dataModes = Lists.newLinkedList();
      dataModes.add(leftField.getType().getMode());
      dataModes.add(rightField.getType().getMode());
      DataMode dataMode = TypeCastRules.getLeastRestrictiveDataMode(dataModes);

      MajorType.Builder builder = MajorType.newBuilder();
      builder.setMinorType(outputMinorType);
      builder.setMode(dataMode);
      outputFields.add(MaterializedField.create(leftField.getPath(), builder.build()));
    }
    ++index;
  }

  assert !leftIter.hasNext() && !rightIter.hasNext()
      : "Mismatch of column count should have been detected when validating sqlNode at planning";
}
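
To make the "least restrictive type" idea concrete, here is a minimal, self-contained sketch of that resolution step. It does not call Drill's TypeCastRules; the enum SketchType and its precedence ordering are illustrative assumptions only, loosely mirroring an implicit-cast order in which later entries can represent more values.

// A hedged, standalone sketch of least-restrictive type resolution.
// SketchType and its ordering are hypothetical, not Drill's real rules.
import java.util.Arrays;
import java.util.List;

public class LeastRestrictiveSketch {
  // Assumed precedence: a later entry is "less restrictive" than an
  // earlier one, i.e. every earlier type can be implicitly cast to it.
  enum SketchType { INT, BIGINT, FLOAT4, FLOAT8, VARCHAR }

  // Pick the least restrictive type from the candidates, as
  // inferOutputFields() does for the two sides of a UNION ALL.
  static SketchType leastRestrictive(List<SketchType> types) {
    SketchType result = null;
    for (SketchType t : types) {
      if (result == null || t.ordinal() > result.ordinal()) {
        result = t;
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // An INT column unioned with a FLOAT8 column is widened to FLOAT8,
    // so neither side loses information.
    System.out.println(leastRestrictive(
        Arrays.asList(SketchType.INT, SketchType.FLOAT8)));  // FLOAT8
  }
}

In the real method, a null result from TypeCastRules.getLeastRestrictiveType means no common type exists, which is why that case throws a DrillRuntimeException naming the offending column.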
public void updateColumnMetaData(String catalogName, String schemaName,
                                 String tableName, BatchSchema schema,
                                 List<Class<?>> getObjectClasses) {
  final List<ColumnMetaData> newColumns = new ArrayList<>(schema.getFieldCount());
  for (int colOffset = 0; colOffset < schema.getFieldCount(); colOffset++) {
    final MaterializedField field = schema.getColumn(colOffset);
    Class<?> objectClass = getObjectClasses.get(colOffset);

    final String columnName = field.getPath();
    final MajorType rpcDataType = field.getType();
    final AvaticaType bundledSqlDataType = getAvaticaType(rpcDataType);
    final String columnClassName = objectClass.getName();

    final int nullability;
    switch (field.getDataMode()) {
      case OPTIONAL:
        nullability = ResultSetMetaData.columnNullable;
        break;
      case REQUIRED:
        nullability = ResultSetMetaData.columnNoNulls;
        break;
      // Should REPEATED still map to columnNoNulls, or to columnNullable?
      case REPEATED:
        nullability = ResultSetMetaData.columnNoNulls;
        break;
      default:
        throw new AssertionError(
            "Unexpected new DataMode value '" + field.getDataMode().name() + "'");
    }

    final boolean isSigned = Types.isJdbcSignedType(rpcDataType);

    // TODO(DRILL-3355): TODO(DRILL-3356): When string lengths, precisions,
    // interval kinds, etc., are available from RPC-level data, implement:
    // - precision for ResultSetMetaData.getPrecision(...) (like
    //   getColumns()'s COLUMN_SIZE),
    // - scale for getScale(...), and
    // - displaySize for getColumnDisplaySize(...).
    final int precision = rpcDataType.hasPrecision() ? rpcDataType.getPrecision() : 0;
    final int scale = rpcDataType.hasScale() ? rpcDataType.getScale() : 0;
    final int displaySize = 10;

    ColumnMetaData col = new ColumnMetaData(
        colOffset,    // ordinal (zero-based, for Java arrays/lists)
        false,        /* autoIncrement */
        false,        /* caseSensitive */
        true,         /* searchable */
        false,        /* currency */
        nullability,
        isSigned,
        displaySize,
        columnName,   /* label */
        columnName,   /* columnName */
        schemaName,
        precision,
        scale,
        tableName,
        catalogName,
        bundledSqlDataType,
        true,         /* readOnly */
        false,        /* writable */
        false,        /* definitelyWritable */
        columnClassName);
    newColumns.add(col);
  }
  columns = newColumns;
}
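
The nullability switch above is the one piece of logic worth isolating. Below is a standalone sketch of that mapping; the DataMode enum here is a stand-in for Drill's class, while the constants come from the real java.sql.ResultSetMetaData API.

// A hedged sketch of the DataMode-to-JDBC-nullability mapping.
// DataMode is a hypothetical stand-in; the JDBC constants are real.
import java.sql.ResultSetMetaData;

public class NullabilitySketch {
  enum DataMode { OPTIONAL, REQUIRED, REPEATED }

  static int toJdbcNullability(DataMode mode) {
    switch (mode) {
      case OPTIONAL:
        return ResultSetMetaData.columnNullable;  // 1: NULLs allowed
      case REQUIRED:
        return ResultSetMetaData.columnNoNulls;   // 0: NULLs not allowed
      case REPEATED:
        // A repeated (array) column is treated as never null at the top
        // level, hence columnNoNulls, though the source comment above
        // questions whether that is the right choice.
        return ResultSetMetaData.columnNoNulls;
      default:
        throw new AssertionError("Unexpected DataMode: " + mode);
    }
  }

  public static void main(String[] args) {
    System.out.println(toJdbcNullability(DataMode.OPTIONAL));  // 1
    System.out.println(toJdbcNullability(DataMode.REQUIRED));  // 0
  }
}

Keeping the default branch that throws an AssertionError mirrors the method above: if a new DataMode value is ever added, the mapping fails loudly instead of silently reporting a wrong nullability.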