public static void baseSetup() throws Exception {
    MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
    fs = dfs.getFileSystem();
    baseDfsDir = new Path(new Path(fs.getUri()), "/base");
    fs.mkdirs(baseDfsDir);
    warehouseDir = new Path(baseDfsDir, "warehouse");
    fs.mkdirs(warehouseDir);
    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());

    // Strip the Windows drive letter (tests are assumed to run on the C: or D: drive).
    dataFileDir =
        conf.get("test.data.files")
            .replace('\\', '/')
            .replace("c:", "")
            .replace("C:", "")
            .replace("D:", "")
            .replace("d:", "");
    dataFilePath = new Path(dataFileDir, "kv1.txt");

    // Set up scratch directory
    Path scratchDir = new Path(baseDfsDir, "scratchdir");
    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString());

    // set hive conf vars
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
    // Start a standalone metastore on a free port.
    int port = MetaStoreUtils.findFreePort();
    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());

    SessionState.start(new CliSessionState(conf));
    driver = new Driver(conf);
    setupDataTable();
  }
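
The snippet above has no matching teardown; a minimal sketch of one, assuming dfs is promoted from a local variable to a static field so it can be shut down later (the field and the method name are illustrative, not part of the original test):

  public static void baseTearDown() throws Exception {
    // Close the Driver first so no query is still touching HDFS.
    if (driver != null) {
      driver.close();
    }
    // Remove the test tree, then stop the embedded DFS cluster.
    if (fs != null) {
      fs.delete(baseDfsDir, true);
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }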
Example #2
 /**
  * Returns the archiving level, i.e. how many partition fields were set in the
  * partial specification that ARCHIVE was run for.
  */
 public static int getArchivingLevel(Partition p) throws HiveException {
   try {
     return MetaStoreUtils.getArchivingLevel(p.getTPartition());
   } catch (MetaException ex) {
     throw new HiveException(ex.getMessage(), ex);
   }
 }
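
A caller can compare the archiving level against the table's partition key count to tell whether ARCHIVE ran on a partial specification; a small sketch (the Partition p is assumed to be fully loaded):

  int level = getArchivingLevel(p);
  int numKeys = p.getTable().getPartitionKeys().size();
  if (level < numKeys) {
    // Archived at a coarser level than the partition itself.
    System.out.println("Archived at level " + level + " of " + numKeys);
  }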
Example #3

  @Override
  public void preCreateTable(Table tbl) throws MetaException {

    boolean isExternal = MetaStoreUtils.isExternalTable(tbl);
    if (isExternal) {
      Log.info("Creating External table for Splice...");
    }

    String inputTableName = tbl.getParameters().get(MRConstants.SPLICE_TABLE_NAME);
    if (inputTableName == null)
      throw new MetaException(
          "Missing required table parameter " + MRConstants.SPLICE_TABLE_NAME);

    // We could support a user-defined column mapping, but it is not currently
    // necessary: all columns are mapped from the Splice table to the Hive table.
    String connStr = tbl.getParameters().get(MRConstants.SPLICE_JDBC_STR);
    if (connStr == null)
      throw new MetaException("Missing required table parameter " + MRConstants.SPLICE_JDBC_STR);
    if (sqlUtil == null) sqlUtil = SMSQLUtil.getInstance(connStr);

    // inputTableName was checked for null above, so no further guard is needed.
    checkTableExists(inputTableName.trim());
  }
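
For reference, a sketch of the two table parameters this hook requires; putToParameters is the standard thrift setter on the metastore Table, and the values shown are placeholders only:

  org.apache.hadoop.hive.metastore.api.Table tbl =
      new org.apache.hadoop.hive.metastore.api.Table();
  // Both parameters are required by preCreateTable(); values are examples.
  tbl.putToParameters(MRConstants.SPLICE_TABLE_NAME, "MYSCHEMA.MYTABLE");
  tbl.putToParameters(MRConstants.SPLICE_JDBC_STR,
      "jdbc:splice://localhost:1527/splicedb;user=app;password=app");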
Example #4
 public void analyze() throws AnalysisException {
   // Check whether the column name meets the Metastore's requirements.
   if (!MetaStoreUtils.validateName(colName_)) {
     throw new AnalysisException("Invalid column name: " + colName_);
   }
   colType_.analyze();
 }
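
MetaStoreUtils.validateName applies the same pattern the metastore enforces on DDL, essentially letters, digits, and underscores; a standalone sketch (older releases take only the name, newer ones also take a Configuration):

  assert MetaStoreUtils.validateName("total_price");    // valid
  assert !MetaStoreUtils.validateName("total-price");   // '-' is rejected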
Example #5
  private static Properties getSerdeProperties(HCatTableInfo info, HCatSchema s)
      throws SerDeException {
    Properties props = new Properties();
    List<FieldSchema> fields = HCatUtil.getFieldSchemaList(s.getFields());
    props.setProperty(
        org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMNS,
        MetaStoreUtils.getColumnNamesFromFieldSchema(fields));
    props.setProperty(
        org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMN_TYPES,
        MetaStoreUtils.getColumnTypesFromFieldSchema(fields));

    // setting these props to match LazySimpleSerde
    props.setProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_FORMAT, "\\N");
    props.setProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");

    // add props from params set in table schema
    props.putAll(info.getStorerInfo().getProperties());

    return props;
  }
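
Properties assembled this way are typically fed straight into a SerDe; a hedged sketch using LazySimpleSerDe (the class the comment above says these settings mimic), assuming live info and schema objects:

  Properties props = getSerdeProperties(info, schema);
  org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe serde =
      new org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe();
  // Older SerDe API: initialize(Configuration, Properties).
  serde.initialize(new org.apache.hadoop.conf.Configuration(), props);
  ObjectInspector inspector = serde.getObjectInspector();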
Example #6
  /**
   * drop
   *
   * <p>Deletes the schema for this table and, optionally, the data. Note that the data is
   * actually moved to the Trash, not deleted outright.
   *
   * @exception MetaException if any problems are encountered while dropping the table
   */
  @SuppressWarnings("nls")
  public void drop() throws MetaException {

    if (this.o_rdonly_) {
      throw new RuntimeException("cannot perform write operation on a read-only table");
    }

    MetaStoreUtils.deleteWHDirectory(this.whPath_, this.conf_, this.use_trash_);
    try {
      this.store_.drop(this.parent_, this.tableName_);
    } catch (IOException e) {
      throw new MetaException(e.getMessage());
    }
    this.o_rdonly_ = true; // the table is dropped, so can only do reads now
  }
Example #7
  /**
   * create
   *
   * @exception MetaException if any problems are encountered during creation
   */
  public static Table create(DB parent, String tableName, Properties schema, Configuration conf)
      throws MetaException {
    Table newTable = new Table();
    newTable.parent_ = parent;
    newTable.tableName_ = tableName;
    newTable.conf_ = conf;
    newTable.o_rdonly_ = false;
    newTable.schema_ = schema;
    newTable.store_ = new FileStore(conf);

    if (!MetaStoreUtils.validateName(tableName)) {
      throw new MetaException(
          "Invalid table name: " + tableName + " - allowed characters are \\w and _");
    }

    String location = schema.getProperty(Constants.META_TABLE_LOCATION);

    if (location == null) {
      newTable.whPath_ = parent.getDefaultTablePath(tableName, (String) null);
      newTable.schema_.setProperty(
          Constants.META_TABLE_LOCATION, newTable.whPath_.toUri().toASCIIString());
    } else {
      newTable.whPath_ = new Path(location);
    }

    try {
      if (newTable.whPath_.getFileSystem(conf).exists(newTable.whPath_)) {
        // Ideally this would be an error, but current unit tests rely on creating
        // tables over pre-existing directories:
        // throw new MetaException("for new table: " + tableName + " "
        //     + newTable.whPath_ + " already exists, cannot create");
      } else {
        newTable.whPath_.getFileSystem(conf).mkdirs(newTable.whPath_);
      }
    } catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw new MetaException(e.getMessage());
    }

    newTable.save(false);
    return newTable;
  }
Example #8
  @SuppressWarnings("nls")
  public void truncate(String partition) throws MetaException {

    if (this.o_rdonly_) {
      throw new RuntimeException("cannot perform write operation on a read-only table");
    }

    try {
      MetaStoreUtils.deleteWHDirectory(
          (partition == null || partition.length() == 0)
              ? this.whPath_
              : new Path(this.whPath_, partition),
          this.conf_,
          this.use_trash_);
      // ensure the directory is re-made
      if (partition == null || partition.length() == 0) {
        this.whPath_.getFileSystem(this.conf_).mkdirs(this.whPath_);
      }
    } catch (IOException e) {
      throw new MetaException(e.getMessage());
    }
  }
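
Taken together, create, truncate, and drop form the write lifecycle of this legacy Table class; a minimal sketch of how a caller might exercise them (the parentDb handle and table name are placeholders):

  Properties schema = new Properties();
  // No META_TABLE_LOCATION set, so create() picks the DB's default path.
  Table t = Table.create(parentDb, "events", schema, conf);
  t.truncate(null);   // null partition: clear and re-create the whole table dir
  t.drop();           // move the data to Trash; the handle becomes read-only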
Example #9
 /**
  * Determines whether a partition has been archived.
  *
  * @param p the partition to check
  * @return true if the partition has been archived
  */
 public static boolean isArchived(Partition p) {
   return MetaStoreUtils.isArchived(p.getTPartition());
 }
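
A typical use is to skip archived partitions during maintenance work, since their files live inside a Hadoop archive and cannot be rewritten in place; an illustrative sketch (the loop and the compact() helper are hypothetical):

  for (Partition part : partitions) {
    if (isArchived(part)) {
      continue;   // data is packed in a HAR; leave it alone
    }
    compact(part);  // placeholder for the real per-partition work
  }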
Example #10
  private List<HiveLockObj> getLockObjects(
      QueryPlan plan, Database db, Table t, Partition p, HiveLockMode mode) throws LockException {
    List<HiveLockObj> locks = new LinkedList<HiveLockObj>();

    HiveLockObject.HiveLockObjectData lockData =
        new HiveLockObject.HiveLockObjectData(
            plan.getQueryId(),
            String.valueOf(System.currentTimeMillis()),
            "IMPLICIT",
            plan.getQueryStr());

    if (db != null) {
      locks.add(new HiveLockObj(new HiveLockObject(db.getName(), lockData), mode));
      return locks;
    }

    if (t != null) {
      locks.add(new HiveLockObj(new HiveLockObject(t, lockData), mode));
      mode = HiveLockMode.SHARED;
      locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), lockData), mode));
      return locks;
    }

    if (p != null) {
      if (!(p instanceof DummyPartition)) {
        locks.add(new HiveLockObj(new HiveLockObject(p, lockData), mode));
      }

      // All the parents are locked in shared mode
      mode = HiveLockMode.SHARED;

      // For dummy partitions, only partition name is needed
      String name = p.getName();

      if (p instanceof DummyPartition) {
        // Dummy partition names have the form "dbName@tableName@partitionName";
        // keep only the partition-name component.
        name = p.getName().split("@")[2];
      }

      // Walk the partition path one component at a time, locking each ancestor
      // partial specification in shared mode.
      String partialName = "";
      String[] partns = name.split("/");
      int len = p instanceof DummyPartition ? partns.length : partns.length - 1;
      Map<String, String> partialSpec = new LinkedHashMap<String, String>();
      for (int idx = 0; idx < len; idx++) {
        String partn = partns[idx];
        partialName += partn;
        String[] nameValue = partn.split("=");
        assert (nameValue.length == 2);
        partialSpec.put(nameValue[0], nameValue[1]);
        try {
          locks.add(
              new HiveLockObj(
                  new HiveLockObject(
                      new DummyPartition(
                          p.getTable(),
                          p.getTable().getDbName()
                              + "/"
                              + MetaStoreUtils.encodeTableName(p.getTable().getTableName())
                              + "/"
                              + partialName,
                          partialSpec),
                      lockData),
                  mode));
          partialName += "/";
        } catch (HiveException e) {
          throw new LockException(e.getMessage());
        }
      }

      locks.add(new HiveLockObj(new HiveLockObject(p.getTable(), lockData), mode));
      locks.add(new HiveLockObj(new HiveLockObject(p.getTable().getDbName(), lockData), mode));
    }
    return locks;
  }
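
A caller would typically collect these objects for each output entity and hand them to the lock manager in one batch; a hedged sketch (the lockMgr field is an assumption, obtained elsewhere from the transaction manager):

  List<HiveLockObj> lockObjs =
      getLockObjects(plan, null, t, null, HiveLockMode.EXCLUSIVE);
  // Parents come back in SHARED mode; acquire everything in one call.
  List<HiveLock> locks = lockMgr.lock(lockObjs, false);
  if (locks == null) {
    throw new LockException("Unable to acquire locks for query " + plan.getQueryId());
  }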