@Override
public void analyze(Analyzer analyzer) throws AnalysisException,
    AuthorizationException {
  super.analyze(analyzer);
  Table t = getTargetTable();
  String tableName = getDb() + "." + getTbl();

  // Verify there are no conflicts with partition columns.
  for (FieldSchema fs : t.getMetaStoreTable().getPartitionKeys()) {
    if (fs.getName().toLowerCase().equals(colName.toLowerCase())) {
      throw new AnalysisException("Cannot modify partition column: " + colName);
    }
    if (fs.getName().toLowerCase().equals(newColDef.getColName().toLowerCase())) {
      throw new AnalysisException(
          "Column name conflicts with existing partition column: " +
          newColDef.getColName());
    }
  }

  // Verify the column being modified exists in the table.
  if (t.getColumn(colName) == null) {
    throw new AnalysisException(String.format(
        "Column '%s' does not exist in table: %s", colName, tableName));
  }

  // Check that the new column def's name is valid.
  newColDef.analyze();

  // Verify that if the column name is being changed, the new name doesn't conflict
  // with an existing column.
  if (!colName.toLowerCase().equals(newColDef.getColName().toLowerCase()) &&
      t.getColumn(newColDef.getColName()) != null) {
    throw new AnalysisException("Column already exists: " + newColDef.getColName());
  }
}
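// Sketch, not part of the original source: the repeated lowercased comparisons
// above implement case-insensitive identifier matching (Hive metastore column
// names are matched without regard to case). They could be factored into a
// hypothetical helper like the one below, which uses String.equalsIgnoreCase
// to the same effect on ASCII identifiers.
private static boolean sameColumnName(String a, String b) {
  // Column names are compared case-insensitively.
  return a.equalsIgnoreCase(b);
}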
/*
 * Builds a TDescribeTableResult that contains the result of a DESCRIBE FORMATTED
 * <table> command. For the formatted describe output the goal is to be exactly the
 * same as what Hive (via HiveServer2) outputs, for compatibility reasons. To do
 * this, Hive's MetadataFormatUtils class is used to build the results.
 */
private static TDescribeTableResult describeTableFormatted(Table table) {
  TDescribeTableResult descResult = new TDescribeTableResult();
  descResult.results = Lists.newArrayList();

  org.apache.hadoop.hive.metastore.api.Table msTable =
      table.getMetaStoreTable().deepCopy();
  // Fixup the metastore table so the output of DESCRIBE FORMATTED matches Hive's.
  // This is to distinguish between empty comments and no comments (value is null).
  for (FieldSchema fs : msTable.getSd().getCols()) {
    fs.setComment(table.getColumn(fs.getName()).getComment());
  }
  for (FieldSchema fs : msTable.getPartitionKeys()) {
    fs.setComment(table.getColumn(fs.getName()).getComment());
  }

  // To avoid initializing any of the SerDe classes in the metastore table Thrift
  // struct, create the ql.metadata.Table object by calling the empty c'tor and
  // then calling setTTable().
  org.apache.hadoop.hive.ql.metadata.Table hiveTable =
      new org.apache.hadoop.hive.ql.metadata.Table();
  hiveTable.setTTable(msTable);

  StringBuilder sb = new StringBuilder();
  // First add all the columns (includes partition columns).
  sb.append(MetaDataFormatUtils.getAllColumnsInformation(
      msTable.getSd().getCols(), msTable.getPartitionKeys()));
  // Add the extended table metadata information.
  sb.append(MetaDataFormatUtils.getTableInformation(hiveTable));

  for (String line : sb.toString().split("\n")) {
    // To match Hive's HiveServer2 output, split each line into multiple column
    // values based on the field delimiter.
    String[] columns = line.split(MetaDataFormatUtils.FIELD_DELIM);
    TResultRow resultRow = new TResultRow();
    for (int i = 0; i < NUM_DESC_FORMATTED_RESULT_COLS; ++i) {
      TColumnValue colVal = new TColumnValue();
      colVal.setString_val(null);
      if (columns.length > i) {
        // Add the column value.
        colVal.setString_val(columns[i]);
      }
      resultRow.addToColVals(colVal);
    }
    descResult.results.add(resultRow);
  }
  return descResult;
}
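// Usage sketch, not part of the original source: every row produced above holds
// one TColumnValue per output column (string_val is left null when a line has
// fewer fields than NUM_DESC_FORMATTED_RESULT_COLS), mirroring Hive's
// col_name/data_type/comment layout. A hypothetical caller in the same class
// could flatten the result back to text like this; "describeFormattedAsText"
// is illustrative only, and "table" is assumed to be an already-loaded catalog
// Table.
private static String describeFormattedAsText(Table table) {
  StringBuilder out = new StringBuilder();
  for (TResultRow row : describeTableFormatted(table).results) {
    for (TColumnValue val : row.getColVals()) {
      // Thrift string fields may be unset; guard against null.
      out.append(val.getString_val() == null ? "" : val.getString_val());
      out.append('\t');
    }
    out.append('\n');
  }
  return out.toString();
}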