/**
 * Builds results for a DESCRIBE <table> command. This consists of the column
 * definition for each column in the table.
 */
private static TDescribeTableResult describeTableMinimal(Table table) {
  TDescribeTableResult descResult = new TDescribeTableResult();
  descResult.results = Lists.newArrayList();

  // Get description of all the table's columns (includes partition columns).
  for (Column column: table.getColumnsInHiveOrder()) {
    TColumnValue colNameCol = new TColumnValue();
    colNameCol.setString_val(column.getName());
    TColumnValue dataTypeCol = new TColumnValue();
    dataTypeCol.setString_val(column.getType().toString().toLowerCase());
    TColumnValue commentCol = new TColumnValue();
    commentCol.setString_val(column.getComment() != null ? column.getComment() : "");
    descResult.results.add(
        new TResultRow(Lists.newArrayList(colNameCol, dataTypeCol, commentCol)));
  }
  return descResult;
}
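// Illustrative sketch only (not part of the original class): the per-column row
// construction above could be factored into a helper like the one below. It uses only
// the calls that already appear in describeTableMinimal(); the helper's name is
// hypothetical.
private static TResultRow buildColumnDescriptionRow(Column column) {
  TColumnValue colNameCol = new TColumnValue();
  colNameCol.setString_val(column.getName());
  TColumnValue dataTypeCol = new TColumnValue();
  dataTypeCol.setString_val(column.getType().toString().toLowerCase());
  TColumnValue commentCol = new TColumnValue();
  commentCol.setString_val(column.getComment() != null ? column.getComment() : "");
  return new TResultRow(Lists.newArrayList(colNameCol, dataTypeCol, commentCol));
}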
/**
 * Performs three final parts of the analysis:
 * 1. Checks type compatibility between all expressions and their targets.
 *
 * 2. Populates partitionKeyExprs_ with type-compatible expressions, in Hive
 *    partition-column order, for all partition columns.
 *
 * 3. Populates resultExprs_ with type-compatible expressions, in Hive column order,
 *    for all expressions in the select-list. Unmentioned columns are assigned NULL
 *    literal expressions.
 *
 * If necessary, adds casts to the expressions to make them compatible with the type
 * of the corresponding column.
 *
 * @throws AnalysisException If an expression is not compatible with its target column.
 */
private void prepareExpressions(List<Column> selectExprTargetColumns,
    List<Expr> selectListExprs, Table tbl, Analyzer analyzer) throws AnalysisException {
  // Temporary lists of partition key exprs and names in an arbitrary order.
  List<Expr> tmpPartitionKeyExprs = new ArrayList<Expr>();
  List<String> tmpPartitionKeyNames = new ArrayList<String>();

  int numClusteringCols = (tbl instanceof HBaseTable) ? 0 : tbl.getNumClusteringCols();

  // Check dynamic partition columns for type compatibility.
  for (int i = 0; i < selectListExprs.size(); ++i) {
    Column targetColumn = selectExprTargetColumns.get(i);
    Expr compatibleExpr = checkTypeCompatibility(targetColumn, selectListExprs.get(i));
    if (targetColumn.getPosition() < numClusteringCols) {
      // This is a dynamic clustering column.
      tmpPartitionKeyExprs.add(compatibleExpr);
      tmpPartitionKeyNames.add(targetColumn.getName());
    }
    selectListExprs.set(i, compatibleExpr);
  }

  // Check static partition columns. Dynamic entries in partitionKeyValues_ are already
  // in selectExprTargetColumns and are therefore ignored in this loop.
  if (partitionKeyValues_ != null) {
    for (PartitionKeyValue pkv: partitionKeyValues_) {
      if (pkv.isStatic()) {
        // tableColumn is guaranteed to exist after the earlier analysis checks.
        Column tableColumn = table_.getColumn(pkv.getColName());
        Expr compatibleExpr = checkTypeCompatibility(tableColumn, pkv.getValue());
        tmpPartitionKeyExprs.add(compatibleExpr);
        tmpPartitionKeyNames.add(pkv.getColName());
      }
    }
  }

  // Reorder the partition key exprs and names to be consistent with the target table
  // declaration. We need those exprs in the original order to create the corresponding
  // Hdfs folder structure correctly.
  for (Column c: table_.getColumns()) {
    for (int j = 0; j < tmpPartitionKeyNames.size(); ++j) {
      if (c.getName().equals(tmpPartitionKeyNames.get(j))) {
        partitionKeyExprs_.add(tmpPartitionKeyExprs.get(j));
        break;
      }
    }
  }
  Preconditions.checkState(partitionKeyExprs_.size() == numClusteringCols);

  // Make sure we have stats for partitionKeyExprs_.
  for (Expr expr: partitionKeyExprs_) {
    expr.analyze(analyzer);
  }

  // Finally, 'undo' the permutation so that the selectListExprs are in Hive column
  // order, and add NULL expressions for all missing columns.
  for (Column tblColumn: table_.getColumnsInHiveOrder()) {
    boolean matchFound = false;
    for (int i = 0; i < selectListExprs.size(); ++i) {
      if (selectExprTargetColumns.get(i).getName().equals(tblColumn.getName())) {
        resultExprs_.add(selectListExprs.get(i));
        matchFound = true;
        break;
      }
    }
    // If no match is found, either the column is a clustering column with a static
    // value, or it was unmentioned and therefore should have a NULL select-list
    // expression.
    if (!matchFound) {
      if (tblColumn.getPosition() >= numClusteringCols) {
        // Unmentioned non-clustering columns get NULL literals with the appropriate
        // target type because Parquet cannot handle NULL_TYPE (IMPALA-617).
        resultExprs_.add(NullLiteral.create(tblColumn.getType()));
      }
    }
  }
  // TODO: Check that HBase row-key columns are not NULL? See IMPALA-406.
  if (needsGeneratedQueryStatement_) {
    // Build a query statement that returns NULL for every column.
    List<SelectListItem> selectListItems = Lists.newArrayList();
    for (Expr e: resultExprs_) {
      selectListItems.add(new SelectListItem(e, null));
    }
    SelectList selectList = new SelectList(selectListItems);
    queryStmt_ = new SelectStmt(selectList, null, null, null, null, null, null);
    queryStmt_.analyze(analyzer);
  }
}
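// Illustrative sketch only (not part of the original class): the partition-key
// reordering step above, isolated over plain names and values. Given keys collected in
// an arbitrary order, it returns the values in the target table's declaration order,
// which is what determines the HDFS folder structure. The method name and generic
// signature are assumptions made for this example.
private static <T> List<T> reorderByDeclaration(
    List<String> declaredOrder, List<String> names, List<T> values) {
  List<T> ordered = new ArrayList<T>();
  for (String declared: declaredOrder) {
    // Linear scan mirrors the nested loop in prepareExpressions(); partition-key
    // counts are small, so quadratic cost is acceptable here.
    for (int j = 0; j < names.size(); ++j) {
      if (declared.equals(names.get(j))) {
        ordered.add(values.get(j));
        break;
      }
    }
  }
  return ordered;
}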