Example #1
  /** Compute file paths and key values based on key ranges. */
  @Override
  public void finalize(Analyzer analyzer) throws InternalException {
    Preconditions.checkNotNull(keyRanges);

    for (HdfsPartition p : tbl.getPartitions()) {
      if (p.getFileDescriptors().size() == 0) {
        // No point scanning partitions that have no data
        continue;
      }

      Preconditions.checkState(p.getPartitionValues().size() == tbl.getNumClusteringCols());
      // check partition key values against key ranges, if set
      Preconditions.checkState(keyRanges.size() == p.getPartitionValues().size());
      boolean matchingPartition = true;
      for (int i = 0; i < keyRanges.size(); ++i) {
        ValueRange keyRange = keyRanges.get(i);
        if (keyRange != null && !keyRange.isInRange(analyzer, p.getPartitionValues().get(i))) {
          matchingPartition = false;
          break;
        }
      }
      if (!matchingPartition) {
        // skip this partition, it's outside the key ranges
        continue;
      }
      // HdfsPartition is immutable, so it's ok to copy by reference
      partitions.add(p);
    }
  }
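A minimal sketch of the bounds check that keyRange.isInRange() performs above, assuming a
simplified range type: the real ValueRange evaluates partition-key exprs through the
Analyzer, which is elided here, and the class name and null-means-unbounded convention
are illustrative assumptions.

  // Hypothetical, simplified stand-in for ValueRange.isInRange().
  class SimpleLongRange {
    private final Long lowerBound;  // null means unbounded below
    private final Long upperBound;  // null means unbounded above

    SimpleLongRange(Long lowerBound, Long upperBound) {
      this.lowerBound = lowerBound;
      this.upperBound = upperBound;
    }

    /** Returns true if 'value' lies within [lowerBound, upperBound]. */
    boolean isInRange(long value) {
      return (lowerBound == null || value >= lowerBound)
          && (upperBound == null || value <= upperBound);
    }
  }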
Example #2
  @Test
  public void TestPartitions() throws TableLoadingException {
    HdfsTable table = (HdfsTable) catalog_.getDb("functional").getTable("AllTypes");
    List<HdfsPartition> partitions = table.getPartitions();

    // check that partition keys cover the date range 1/1/2009-12/31/2010
    // and that we have one file per partition, plus the default partition
    assertEquals(25, partitions.size());
    Set<Long> months = Sets.newHashSet();
    for (HdfsPartition p : partitions) {
      if (p.getId() == DEFAULT_PARTITION_ID) {
        continue;
      }

      assertEquals(2, p.getPartitionValues().size());

      LiteralExpr key1Expr = p.getPartitionValues().get(0);
      assertTrue(key1Expr instanceof IntLiteral);
      long key1 = ((IntLiteral) key1Expr).getValue();
      assertTrue(key1 == 2009 || key1 == 2010);

      LiteralExpr key2Expr = p.getPartitionValues().get(1);
      assertTrue(key2Expr instanceof IntLiteral);
      long key2 = ((IntLiteral) key2Expr).getValue();
      assertTrue(key2 >= 1 && key2 <= 12);

      months.add(key1 * 100 + key2);

      assertEquals(1, p.getFileDescriptors().size());
    }
    assertEquals(24, months.size());
  }
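The months set above packs each (year, month) pair into a single long as year * 100 +
month, so 24 distinct packed values imply every month of 2009-2010 appears exactly once.
A tiny worked example (not part of the original test):

  long packed = 2009 * 100 + 12;  // year=2009, month=12 -> 200912
  assertEquals(200912, packed);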
Example #3
 // This table has metadata set so the escape is \n, which is also the tuple delim. This
 // test validates that our representation of the catalog fixes this and removes the
 // escape char.
 @Test
 public void TestTableWithBadEscapeChar() throws TableLoadingException {
   HdfsTable table = (HdfsTable) catalog_.getDb("functional").getTable("escapechartesttable");
   List<HdfsPartition> partitions = table.getPartitions();
   for (HdfsPartition p : partitions) {
     HdfsStorageDescriptor desc = p.getInputFormatDescriptor();
     assertEquals(desc.getEscapeChar(), HdfsStorageDescriptor.DEFAULT_ESCAPE_CHAR);
   }
 }
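A hedged sketch of the fix-up the comment above describes: if the configured escape
character collides with the tuple or field delimiter, fall back to the default. The
helper name and signature are illustrative, not the actual HdfsStorageDescriptor code.

  static char sanitizeEscapeChar(char escapeChar, char tupleDelim, char fieldDelim) {
    // An escape char that doubles as a delimiter would corrupt parsing, so
    // replace it with the default.
    if (escapeChar == tupleDelim || escapeChar == fieldDelim) {
      return HdfsStorageDescriptor.DEFAULT_ESCAPE_CHAR;
    }
    return escapeChar;
  }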
Example #4
  /** Return scan ranges (hdfs splits) plus their storage locations, including volume ids. */
  @Override
  public List<TScanRangeLocations> getScanRangeLocations(long maxScanRangeLength) {
    List<TScanRangeLocations> result = Lists.newArrayList();
    List<HdfsTable.BlockMetadata> blockMetadata = HdfsTable.getBlockMetadata(partitions);
    for (HdfsTable.BlockMetadata block : blockMetadata) {
      // collect all locations for block
      String[] blockHostPorts = null;
      try {
        // Use getNames() to get port number as well
        blockHostPorts = block.getLocation().getNames();
        // uncomment if you need to see detailed block locations
        // LOG.info(Arrays.toString(blockHostPorts));
      } catch (IOException e) {
        // this shouldn't happen; getNames() does not throw in practice
        String errorMsg = "BlockLocation.getNames() failed:\n" + e.getMessage();
        LOG.error(errorMsg);
        throw new IllegalStateException(errorMsg);
      }

      if (blockHostPorts.length == 0) {
        // we didn't get locations for this block; for now, just ignore the block
        // TODO: do something meaningful with that
        continue;
      }

      // record host/ports and volume ids
      Preconditions.checkState(blockHostPorts.length > 0);
      List<TScanRangeLocation> locations = Lists.newArrayList();
      for (int i = 0; i < blockHostPorts.length; ++i) {
        TScanRangeLocation location = new TScanRangeLocation();
        String hostPort = blockHostPorts[i];
        location.setServer(addressToTHostPort(hostPort));
        location.setVolume_id(block.getVolumeId(i));
        locations.add(location);
      }

      // create scan ranges, taking into account maxScanRangeLength
      BlockLocation blockLocation = block.getLocation();
      long currentOffset = blockLocation.getOffset();
      long remainingLength = blockLocation.getLength();
      while (remainingLength > 0) {
        long currentLength = remainingLength;
        if (maxScanRangeLength > 0 && remainingLength > maxScanRangeLength) {
          currentLength = maxScanRangeLength;
        }
        TScanRange scanRange = new TScanRange();
        scanRange.setHdfs_file_split(
            new THdfsFileSplit(
                block.getFileName(), currentOffset, currentLength, block.getPartition().getId()));
        TScanRangeLocations scanRangeLocations = new TScanRangeLocations();
        scanRangeLocations.scan_range = scanRange;
        scanRangeLocations.locations = locations;
        result.add(scanRangeLocations);
        remainingLength -= currentLength;
        currentOffset += currentLength;
      }
    }
    return result;
  }
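To make the splitting loop above concrete, here is a worked example in test form (a
sketch, not from the original suite): a 300 MB block split with maxScanRangeLength =
128 MB yields ranges of 128 MB, 128 MB, and 44 MB.

  @Test
  public void testScanRangeSplitting() {
    final long mb = 1024L * 1024;
    long remainingLength = 300 * mb;
    long maxScanRangeLength = 128 * mb;
    List<Long> lengths = Lists.newArrayList();
    while (remainingLength > 0) {
      long currentLength = remainingLength;
      if (maxScanRangeLength > 0 && remainingLength > maxScanRangeLength) {
        currentLength = maxScanRangeLength;
      }
      lengths.add(currentLength);
      remainingLength -= currentLength;
    }
    assertEquals(3, lengths.size());
    assertEquals(128 * mb, (long) lengths.get(0));
    assertEquals(44 * mb, (long) lengths.get(2));
  }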
Example #5
 /** Adds the partition to its HdfsTable. Returns the table with an updated catalog version. */
 public Table addPartition(HdfsPartition partition) throws CatalogException {
   Preconditions.checkNotNull(partition);
   HdfsTable hdfsTable = partition.getTable();
   Db db = getDb(hdfsTable.getDb().getName());
   // Locking the catalog here because this accesses the hdfsTable's partition list and
   // updates its catalog version.
   // TODO: Fix this locking pattern.
   catalogLock_.writeLock().lock();
   try {
     hdfsTable.addPartition(partition);
     hdfsTable.setCatalogVersion(incrementAndGetCatalogVersion());
     db.addTable(hdfsTable);
   } finally {
     catalogLock_.writeLock().unlock();
   }
   return hdfsTable;
 }
Example #6
 /**
  * Drops the partition from its HdfsTable. If the HdfsTable does not exist, an exception is
  * thrown. If the partition having the given partition spec does not exist, null is returned.
  * Otherwise, the table with an updated catalog version is returned.
  */
 public Table dropPartition(TableName tableName, List<TPartitionKeyValue> partitionSpec)
     throws CatalogException {
   Preconditions.checkNotNull(partitionSpec);
   Table tbl = getOrLoadTable(tableName.getDb(), tableName.getTbl());
   if (tbl == null) {
     throw new TableNotFoundException("Table not found: " + tableName);
   }
   if (!(tbl instanceof HdfsTable)) {
     throw new CatalogException("Table " + tbl.getFullName() + " is not an Hdfs table");
   }
   HdfsTable hdfsTable = (HdfsTable) tbl;
   // Locking the catalog here because this accesses hdfsTable's partition list and
   // updates its catalog version.
   // TODO: Fix this locking pattern.
   catalogLock_.writeLock().lock();
   try {
     HdfsPartition hdfsPartition = hdfsTable.dropPartition(partitionSpec);
     if (hdfsPartition == null) return null;
     return replaceTableIfUnchanged(hdfsTable, hdfsTable.getCatalogVersion());
   } finally {
     catalogLock_.writeLock().unlock();
   }
 }
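A hypothetical caller sketch for dropPartition(), showing how the null return
distinguishes a missing partition from a successful drop. The TPartitionKeyValue
(name, value) constructor and the 'catalog' reference are assumptions.

  List<TPartitionKeyValue> partitionSpec = Lists.newArrayList(
      new TPartitionKeyValue("year", "2010"),
      new TPartitionKeyValue("month", "1"));
  Table updated = catalog.dropPartition(
      new TableName("functional", "alltypes"), partitionSpec);
  if (updated == null) {
    // Partition did not exist; the catalog version is unchanged.
  } else {
    // 'updated' carries the incremented catalog version.
  }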
Example #7
  // TODO: All Hive-stats related tests are temporarily disabled because of an unknown,
  // sporadic issue causing stats of some columns to be absent in Jenkins runs.
  // Investigate this issue further.
  // @Test
  public void testColStatsColTypeMismatch() throws Exception {
    // First load a table that has column stats.
    // catalog_.refreshTable("functional", "alltypesagg", false);
    HdfsTable table = (HdfsTable) catalog_.getDb("functional").getTable("alltypesagg");

    // Now attempt to update a column's stats with mismatched stats data and ensure
    // we get the expected results.
    MetaStoreClient client = catalog_.getMetaStoreClient();
    try {
      // Load some string stats data and use it to update the stats of different
      // typed columns.
      ColumnStatisticsData stringColStatsData =
          client
              .getHiveClient()
              .getTableColumnStatistics("functional", "alltypesagg", "string_col")
              .getStatsObj()
              .get(0)
              .getStatsData();

      assertTrue(!table.getColumn("int_col").updateStats(stringColStatsData));
      assertStatsUnknown(table.getColumn("int_col"));

      assertTrue(!table.getColumn("double_col").updateStats(stringColStatsData));
      assertStatsUnknown(table.getColumn("double_col"));

      assertTrue(!table.getColumn("bool_col").updateStats(stringColStatsData));
      assertStatsUnknown(table.getColumn("bool_col"));

      // Do the same thing, but apply bigint stats to a string column.
      ColumnStatisticsData bigIntCol =
          client
              .getHiveClient()
              .getTableColumnStatistics("functional", "alltypes", "bigint_col")
              .getStatsObj()
              .get(0)
              .getStatsData();
      assertTrue(!table.getColumn("string_col").updateStats(bigIntCol));
      assertStatsUnknown(table.getColumn("string_col"));

      // Now try to apply a matching column stats data and ensure it succeeds.
      assertTrue(table.getColumn("string_col").updateStats(stringColStatsData));
      assertEquals(1178, table.getColumn("string_col").getStats().getNumDistinctValues());
    } finally {
      // Make sure to invalidate the metadata so the next test isn't using bad col stats
      // catalog_.refreshTable("functional", "alltypesagg", false);
      client.release();
    }
  }
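The test relies on an assertStatsUnknown() helper that is not shown above. A plausible
sketch follows; the getter names and the -1 "unknown" sentinels are assumptions.

  private void assertStatsUnknown(Column col) {
    assertEquals(-1, col.getStats().getNumDistinctValues());
    assertEquals(-1, col.getStats().getNumNulls());
  }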
Example #8
  // TODO: All Hive-stats related tests are temporarily disabled because of an unknown,
  // sporadic issue causing stats of some columns to be absent in Jenkins runs.
  // Investigate this issue further.
  // @Test
  public void testStats() throws TableLoadingException {
    // make sure the stats for functional.alltypesagg look correct
    HdfsTable table = (HdfsTable) catalog_.getDb("functional").getTable("AllTypesAgg");

    Column idCol = table.getColumn("id");
    assertEquals(
        idCol.getStats().getAvgSerializedSize() - PrimitiveType.INT.getSlotSize(),
        PrimitiveType.INT.getSlotSize(),
        0.0001);
    assertEquals(idCol.getStats().getMaxSize(), PrimitiveType.INT.getSlotSize());
    assertTrue(!idCol.getStats().hasNulls());

    Column boolCol = table.getColumn("bool_col");
    assertEquals(
        boolCol.getStats().getAvgSerializedSize() - PrimitiveType.BOOLEAN.getSlotSize(),
        PrimitiveType.BOOLEAN.getSlotSize(),
        0.0001);
    assertEquals(boolCol.getStats().getMaxSize(), PrimitiveType.BOOLEAN.getSlotSize());
    assertTrue(!boolCol.getStats().hasNulls());

    Column tinyintCol = table.getColumn("tinyint_col");
    assertEquals(
        tinyintCol.getStats().getAvgSerializedSize() - PrimitiveType.TINYINT.getSlotSize(),
        PrimitiveType.TINYINT.getSlotSize(),
        0.0001);
    assertEquals(tinyintCol.getStats().getMaxSize(), PrimitiveType.TINYINT.getSlotSize());
    assertTrue(tinyintCol.getStats().hasNulls());

    Column smallintCol = table.getColumn("smallint_col");
    assertEquals(
        smallintCol.getStats().getAvgSerializedSize() - PrimitiveType.SMALLINT.getSlotSize(),
        PrimitiveType.SMALLINT.getSlotSize(),
        0.0001);
    assertEquals(smallintCol.getStats().getMaxSize(), PrimitiveType.SMALLINT.getSlotSize());
    assertTrue(smallintCol.getStats().hasNulls());

    Column intCol = table.getColumn("int_col");
    assertEquals(
        intCol.getStats().getAvgSerializedSize() - PrimitiveType.INT.getSlotSize(),
        PrimitiveType.INT.getSlotSize(),
        0.0001);
    assertEquals(intCol.getStats().getMaxSize(), PrimitiveType.INT.getSlotSize());
    assertTrue(intCol.getStats().hasNulls());

    Column bigintCol = table.getColumn("bigint_col");
    assertEquals(
        bigintCol.getStats().getAvgSerializedSize() - PrimitiveType.BIGINT.getSlotSize(),
        PrimitiveType.BIGINT.getSlotSize(),
        0.0001);
    assertEquals(bigintCol.getStats().getMaxSize(), PrimitiveType.BIGINT.getSlotSize());
    assertTrue(bigintCol.getStats().hasNulls());

    Column floatCol = table.getColumn("float_col");
    assertEquals(
        floatCol.getStats().getAvgSerializedSize() - PrimitiveType.FLOAT.getSlotSize(),
        PrimitiveType.FLOAT.getSlotSize(),
        0.0001);
    assertEquals(floatCol.getStats().getMaxSize(), PrimitiveType.FLOAT.getSlotSize());
    assertTrue(floatCol.getStats().hasNulls());

    Column doubleCol = table.getColumn("double_col");
    assertEquals(
        doubleCol.getStats().getAvgSerializedSize() - PrimitiveType.DOUBLE.getSlotSize(),
        PrimitiveType.DOUBLE.getSlotSize(),
        0.0001);
    assertEquals(doubleCol.getStats().getMaxSize(), PrimitiveType.DOUBLE.getSlotSize());
    assertTrue(doubleCol.getStats().hasNulls());

    Column timestampCol = table.getColumn("timestamp_col");
    assertEquals(
        timestampCol.getStats().getAvgSerializedSize() - PrimitiveType.TIMESTAMP.getSlotSize(),
        PrimitiveType.TIMESTAMP.getSlotSize(),
        0.0001);
    assertEquals(timestampCol.getStats().getMaxSize(), PrimitiveType.TIMESTAMP.getSlotSize());
    // This column has no nulls, so it's not clear why this assertion passes.
    // TODO: investigate and re-enable
    // assertTrue(timestampCol.getStats().hasNulls());

    Column stringCol = table.getColumn("string_col");
    assertTrue(stringCol.getStats().getAvgSerializedSize() >= PrimitiveType.STRING.getSlotSize());
    assertTrue(stringCol.getStats().getAvgSerializedSize() > 0);
    assertTrue(stringCol.getStats().getMaxSize() > 0);
    assertTrue(!stringCol.getStats().hasNulls());
  }
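The per-column blocks above differ only in the column name, the primitive type, and
whether nulls are expected; a helper along these lines (a refactoring sketch, not part
of the original test) would remove the repetition:

  private void assertFixedLenColStats(Column col, PrimitiveType type, boolean expectNulls) {
    assertEquals(
        col.getStats().getAvgSerializedSize() - type.getSlotSize(),
        type.getSlotSize(),
        0.0001);
    assertEquals(col.getStats().getMaxSize(), type.getSlotSize());
    assertEquals(expectNulls, col.getStats().hasNulls());
  }
  // Usage: assertFixedLenColStats(table.getColumn("int_col"), PrimitiveType.INT, true);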
Example #9
  /**
   * Sets table_ based on targetTableName_ and performs table-type specific analysis:
   * - Partition clause is invalid for unpartitioned Hdfs tables and HBase tables
   * - Overwrite is invalid for HBase tables
   * - Check INSERT privileges as well as write access to Hdfs paths
   * - Cannot insert into a view
   * Adds table_ to the analyzer's descriptor table if analysis succeeds.
   */
  private void setTargetTable(Analyzer analyzer) throws AnalysisException {
    // If the table has not yet been set, load it from the Catalog. This allows for
    // callers to set a table to analyze that may not actually be created in the Catalog.
    // One example use case is CREATE TABLE AS SELECT which must run analysis on the
    // INSERT before the table has actually been created.
    if (table_ == null) {
      if (!targetTableName_.isFullyQualified()) {
        targetTableName_ = new TableName(analyzer.getDefaultDb(), targetTableName_.getTbl());
      }
      table_ = analyzer.getTable(targetTableName_, Privilege.INSERT);
    } else {
      targetTableName_ = new TableName(table_.getDb().getName(), table_.getName());
      PrivilegeRequestBuilder pb = new PrivilegeRequestBuilder();
      analyzer.registerPrivReq(
          pb.onTable(table_.getDb().getName(), table_.getName())
              .allOf(Privilege.INSERT)
              .toRequest());
    }

    // We do not support inserting into views.
    if (table_ instanceof View) {
      throw new AnalysisException(
          String.format("Impala does not support inserting into views: %s", table_.getFullName()));
    }

    boolean isHBaseTable = (table_ instanceof HBaseTable);
    int numClusteringCols = isHBaseTable ? 0 : table_.getNumClusteringCols();

    if (partitionKeyValues_ != null && numClusteringCols == 0) {
      if (isHBaseTable) {
        throw new AnalysisException(
            "PARTITION clause is not valid for INSERT into "
                + "HBase tables. '"
                + targetTableName_
                + "' is an HBase table");

      } else {
        // Unpartitioned table, but INSERT has PARTITION clause
        throw new AnalysisException(
            "PARTITION clause is only valid for INSERT into "
                + "partitioned table. '"
                + targetTableName_
                + "' is not partitioned");
      }
    }

    if (table_ instanceof HdfsTable) {
      HdfsTable hdfsTable = (HdfsTable) table_;
      if (!hdfsTable.hasWriteAccess()) {
        throw new AnalysisException(
            String.format(
                "Unable to INSERT into target table "
                    + "(%s) because Impala does not have WRITE access to at least one HDFS path"
                    + ": %s",
                targetTableName_, hdfsTable.getFirstLocationWithoutWriteAccess()));
      }

      for (int colIdx = 0; colIdx < numClusteringCols; ++colIdx) {
        Column col = hdfsTable.getColumns().get(colIdx);
        // Hive has a number of issues handling BOOLEAN partition columns (see HIVE-6590).
        // Instead of working around the Hive bugs, INSERT is disabled for BOOLEAN
        // partitions in Impala. Once the Hive JIRA is resolved, we can remove this
        // analysis check.
        if (col.getType() == Type.BOOLEAN) {
          throw new AnalysisException(
              String.format(
                  "INSERT into table with BOOLEAN " + "partition column (%s) is not supported: %s",
                  col.getName(), targetTableName_));
        }
      }
    }

    if (isHBaseTable && overwrite_) {
      throw new AnalysisException("HBase doesn't have a way to perform INSERT OVERWRITE");
    }

    // Add target table to descriptor table.
    analyzer.getDescTbl().addReferencedTable(table_);
  }
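Statements along these lines would trip each check above; they are written in the style
of Impala's analyzer tests, and AnalysisError() is assumed to be a test helper that
expects an AnalysisException containing the given message. Table and view names are
assumed test-warehouse objects.

  AnalysisError("insert into functional.alltypes_view select 1",
      "Impala does not support inserting into views");
  AnalysisError("insert into functional_hbase.alltypes partition(year=2009) select 1",
      "PARTITION clause is not valid for INSERT into HBase tables");
  AnalysisError("insert overwrite functional_hbase.alltypes select 1",
      "HBase doesn't have a way to perform INSERT OVERWRITE");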
Example #10
  /**
   * Loads the table metadata, reusing cached metadata to speed up the load. If the
   * lastDdlTime has not changed, the Hive metastore metadata has not changed, so the
   * old Hive partition metadata from cachedEntry is reused. To speed up Hdfs metadata
   * loading, if a file's mtime has not changed, the old file block metadata is reused
   * as well.
   *
   * <p>There are several cases where cachedEntry might be reused incorrectly:
   * 1. An ALTER TABLE ADD PARTITION or dynamic partition insert is executed through
   *    Hive. This does not update the lastDdlTime.
   * 2. The Hdfs rebalancer is executed. This changes the block locations but does not
   *    update the mtime (file modification time).
   * If either of these occurs, the user has to execute "invalidate metadata" to
   * invalidate the table's metadata cache and trigger a fresh load.
   */
  @Override
  public void load(
      Table cachedEntry,
      HiveMetaStoreClient client,
      org.apache.hadoop.hive.metastore.api.Table msTbl)
      throws TableLoadingException {
    numHdfsFiles_ = 0;
    totalHdfsBytes_ = 0;
    LOG.debug("load table: " + db_.getName() + "." + name_);
    // turn all exceptions into TableLoadingException
    try {
      // set nullPartitionKeyValue from the hive conf.
      nullPartitionKeyValue_ =
          client.getConfigValue("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__");

      // set NULL indicator string from table properties
      nullColumnValue_ = msTbl.getParameters().get(serdeConstants.SERIALIZATION_NULL_FORMAT);
      if (nullColumnValue_ == null) nullColumnValue_ = DEFAULT_NULL_COLUMN_VALUE;

      // populate with both partition keys and regular columns
      List<FieldSchema> partKeys = msTbl.getPartitionKeys();
      List<FieldSchema> tblFields = Lists.newArrayList();
      String inputFormat = msTbl.getSd().getInputFormat();
      if (HdfsFileFormat.fromJavaClassName(inputFormat) == HdfsFileFormat.AVRO) {
        tblFields.addAll(client.getFields(db_.getName(), name_));
      } else {
        tblFields.addAll(msTbl.getSd().getCols());
      }
      List<FieldSchema> fieldSchemas =
          new ArrayList<FieldSchema>(partKeys.size() + tblFields.size());
      fieldSchemas.addAll(partKeys);
      fieldSchemas.addAll(tblFields);
      // The number of clustering columns is the number of partition keys.
      numClusteringCols_ = partKeys.size();
      loadColumns(fieldSchemas, client);

      // Collect the list of partitions to use for the table. Partitions may be reused
      // from the existing cached table entry (if one exists), read from the metastore,
      // or a mix of both. Whether or not a partition is reused depends on whether
      // the table or partition has been modified.
      List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions = Lists.newArrayList();
      if (cachedEntry == null
          || !(cachedEntry instanceof HdfsTable)
          || cachedEntry.lastDdlTime_ != lastDdlTime_) {
        msPartitions.addAll(client.listPartitions(db_.getName(), name_, Short.MAX_VALUE));
      } else {
        // The table was already in the metadata cache and it has not been modified.
        Preconditions.checkArgument(cachedEntry instanceof HdfsTable);
        HdfsTable cachedHdfsTableEntry = (HdfsTable) cachedEntry;
        // Names of partitions that need to be (re)loaded from the metastore. This
        // starts out as the full set of partition names; unmodified cached
        // partitions are removed from it below.
        Set<String> modifiedPartitionNames = Sets.newHashSet();
        // If these are not the exact same object, look up the set of partition names in
        // the metastore. This is to support the special case of CTAS which creates a
        // "temp" table that doesn't actually exist in the metastore.
        if (cachedEntry != this) {
          // Since the table has not been modified, we might be able to reuse some of the
          // old partition metadata if the individual partitions have not been modified.
          // First get a list of all the partition names for this table from the
          // metastore, this is much faster than listing all the Partition objects.
          modifiedPartitionNames.addAll(
              client.listPartitionNames(db_.getName(), name_, Short.MAX_VALUE));
        }

        int totalPartitions = modifiedPartitionNames.size();
        // Get all the partitions from the cached entry that have not been modified.
        for (HdfsPartition cachedPart : cachedHdfsTableEntry.getPartitions()) {
          // Skip the default partition and any partitions that have been modified.
          if (cachedPart.isDirty()
              || cachedPart.getMetaStorePartition() == null
              || cachedPart.getId() == DEFAULT_PARTITION_ID) {
            continue;
          }
          org.apache.hadoop.hive.metastore.api.Partition cachedMsPart =
              cachedPart.getMetaStorePartition();
          Preconditions.checkNotNull(cachedMsPart);

          // This is a partition we already know about and it hasn't been modified.
          // No need to reload the metadata.
          String cachedPartName = cachedPart.getPartitionName();
          if (modifiedPartitionNames.contains(cachedPartName)) {
            msPartitions.add(cachedMsPart);
            modifiedPartitionNames.remove(cachedPartName);
          }
        }
        LOG.info(
            String.format(
                "Incrementally refreshing %d/%d partitions.",
                modifiedPartitionNames.size(), totalPartitions));

        // No need to make the metastore call if no partitions are to be updated.
        if (modifiedPartitionNames.size() > 0) {
          // Now reload the remaining partitions.
          msPartitions.addAll(
              client.getPartitionsByNames(
                  db_.getName(), name_, Lists.newArrayList(modifiedPartitionNames)));
        }
      }
      Map<String, FileDescriptor> oldFileDescMap = null;
      if (cachedEntry != null && cachedEntry instanceof HdfsTable) {
        oldFileDescMap = ((HdfsTable) cachedEntry).fileDescMap_;
      }
      loadPartitions(msPartitions, msTbl, oldFileDescMap);

      // load table stats
      numRows_ = getRowCount(msTbl.getParameters());
      LOG.debug("table #rows=" + Long.toString(numRows_));

      // For unpartitioned tables set the numRows in its partitions
      // to the table's numRows.
      if (numClusteringCols_ == 0 && !partitions_.isEmpty()) {
        // Unpartitioned tables have a 'dummy' partition and a default partition.
        // Temp tables used in CTAS statements have one partition.
        Preconditions.checkState(partitions_.size() == 2 || partitions_.size() == 1);
        for (HdfsPartition p : partitions_) {
          p.setNumRows(numRows_);
        }
      }

      // populate Avro schema if necessary
      if (HdfsFileFormat.fromJavaClassName(inputFormat) == HdfsFileFormat.AVRO) {
        // Look for the schema in TBLPROPERTIES and in SERDEPROPERTIES, with the latter
        // taking precedence.
        List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
        schemaSearchLocations.add(getMetaStoreTable().getSd().getSerdeInfo().getParameters());
        schemaSearchLocations.add(getMetaStoreTable().getParameters());
        avroSchema_ = HdfsTable.getAvroSchema(schemaSearchLocations, getFullName(), true);
      }
    } catch (TableLoadingException e) {
      throw e;
    } catch (Exception e) {
      throw new TableLoadingException("Failed to load metadata for table: " + name_, e);
    }
  }
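A minimal sketch of the mtime-based reuse the javadoc describes (not the actual
loadPartitions() code): a cached FileDescriptor is reused only while the file's
modification time is unchanged. getModificationTime() on FileDescriptor and the
loadBlockMetadata() helper are assumptions.

  private FileDescriptor getOrLoadFd(FileStatus status,
      Map<String, FileDescriptor> oldFileDescMap) {
    String path = status.getPath().toString();
    FileDescriptor cached =
        (oldFileDescMap == null) ? null : oldFileDescMap.get(path);
    if (cached != null
        && cached.getModificationTime() == status.getModificationTime()) {
      return cached;  // mtime unchanged: reuse the old block metadata
    }
    return loadBlockMetadata(status);  // hypothetical: re-read block locations
  }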