Example #1
public void generateTestData() throws Exception {

    // remove data from previous runs.
    cleanDir(DB_DIR);
    cleanDir(WH_DIR);

    HiveConf conf = new HiveConf();

    conf.set(
        "javax.jdo.option.ConnectionURL",
        String.format("jdbc:derby:;databaseName=%s;create=true", DB_DIR));
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    conf.set("hive.metastore.warehouse.dir", WH_DIR);

    // Start the session with the same conf so the Derby metastore and warehouse
    // settings above actually take effect (a fresh HiveConf would ignore them).
    SessionState ss = new SessionState(conf);
    SessionState.start(ss);
    hiveDriver = new Driver(conf);

    // generate (key, value) test data
    String testDataFile = generateTestDataFile();

    createTableAndLoadData("default", "kv", testDataFile);
    executeQuery("CREATE DATABASE IF NOT EXISTS db1");
    createTableAndLoadData("db1", "kv_db1", testDataFile);

    ss.close();
  }
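
The helpers createTableAndLoadData and executeQuery are referenced above but not shown. A minimal sketch of what they might look like (hypothetical; the real implementations may differ), assuming hiveDriver is the Driver created above and that executeQuery fails fast on a non-zero response code:

  // Hypothetical helper sketches; needs org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
  private void createTableAndLoadData(String dbName, String tblName, String dataFile)
      throws Exception {
    executeQuery(String.format("USE %s", dbName));
    executeQuery(
        String.format(
            "CREATE TABLE IF NOT EXISTS %s(key INT, value STRING) "
                + "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE",
            tblName));
    executeQuery(
        String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE %s", dataFile, tblName));
  }

  private void executeQuery(String query) throws Exception {
    CommandProcessorResponse response = hiveDriver.run(query);
    if (response.getResponseCode() != 0) {
      throw new Exception(
          "Failed to execute \"" + query + "\": " + response.getErrorMessage());
    }
  }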
Example #2
    public void preTest(HiveConf conf) throws Exception {

      if (zooKeeperCluster == null) {
        // create temp dir
        String tmpBaseDir = System.getProperty("test.tmp.dir");
        File tmpDir = Utilities.createTempDir(tmpBaseDir);

        zooKeeperCluster = new MiniZooKeeperCluster();
        zkPort = zooKeeperCluster.startup(tmpDir);
      }

      if (zooKeeper != null) {
        zooKeeper.close();
      }

      int sessionTimeout =
          (int)
              conf.getTimeVar(
                  HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
      zooKeeper =
          new ZooKeeper(
              "localhost:" + zkPort,
              sessionTimeout,
              new Watcher() {
                @Override
                public void process(WatchedEvent arg0) {}
              });

      String zkServer = "localhost";
      conf.set("hive.zookeeper.quorum", zkServer);
      conf.set("hive.zookeeper.client.port", "" + zkPort);
    }
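
preTest leaves both the ZooKeeper client and the mini cluster running, so a matching teardown is needed to keep state from leaking between tests. A possible sketch (hypothetical; the field names follow the example above):

    // Hypothetical companion teardown for the preTest above.
    public void tearDown() throws Exception {
      if (zooKeeper != null) {
        zooKeeper.close();
        zooKeeper = null;
      }
      if (zooKeeperCluster != null) {
        zooKeeperCluster.shutdown();
        zooKeeperCluster = null;
      }
    }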
Example #3
  public static HiveConf getHiveConf(Configuration conf) throws IOException {

    HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);

    // copy the hive conf into the job conf and restore it
    // in the backend context
    if (conf.get(HCatConstants.HCAT_KEY_HIVE_CONF) == null) {
      conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, HCatUtil.serialize(hiveConf.getAllProperties()));
    } else {
      // Copy configuration properties into the hive conf
      Properties properties =
          (Properties) HCatUtil.deserialize(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));

      for (Map.Entry<Object, Object> prop : properties.entrySet()) {
        if (prop.getValue() instanceof String) {
          hiveConf.set((String) prop.getKey(), (String) prop.getValue());
        } else if (prop.getValue() instanceof Integer) {
          hiveConf.setInt((String) prop.getKey(), (Integer) prop.getValue());
        } else if (prop.getValue() instanceof Boolean) {
          hiveConf.setBoolean((String) prop.getKey(), (Boolean) prop.getValue());
        } else if (prop.getValue() instanceof Long) {
          hiveConf.setLong((String) prop.getKey(), (Long) prop.getValue());
        } else if (prop.getValue() instanceof Float) {
          hiveConf.setFloat((String) prop.getKey(), (Float) prop.getValue());
        }
      }
    }

    if (conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
      hiveConf.set(
          "hive.metastore.token.signature", conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE));
    }

    return hiveConf;
  }
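
The round trip works because the first call stores the serialized Hive properties in the job conf under HCAT_KEY_HIVE_CONF, while later calls (typically on the backend) take the else branch and replay them onto a fresh HiveConf. A hedged usage sketch, assuming the method lives in HCatUtil as the calls above suggest:

  // Front end: serializes hiveConf's properties into jobConf as a side effect.
  Configuration jobConf = new Configuration();
  HiveConf frontendConf = HCatUtil.getHiveConf(jobConf);
  // ... jobConf ships to the backend with the job ...
  // Back end: rebuilds an equivalent HiveConf from the stored properties.
  HiveConf backendConf = HCatUtil.getHiveConf(jobConf);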
Example #4
  private CliSessionState startSessionState() throws IOException {

    HiveConf.setVar(
        conf,
        HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
        "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator");

    String execEngine = conf.get("hive.execution.engine");
    conf.set("hive.execution.engine", "mr");
    CliSessionState ss = new CliSessionState(conf);
    assert ss != null;
    ss.in = System.in;
    ss.out = System.out;
    ss.err = System.out;

    SessionState oldSs = SessionState.get();
    if (oldSs != null && clusterType == MiniClusterType.tez) {
      oldSs.close();
    }
    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
      oldSs.out.close();
    }
    SessionState.start(ss);

    isSessionStateStarted = true;

    conf.set("hive.execution.engine", execEngine);
    return ss;
  }
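
Note the save/restore pattern around "hive.execution.engine": the method forces "mr" while the session starts, then puts the previous value back. If an exception escapes between the set and the restore, the conf stays modified; a try/finally variant avoids that (a sketch, not the original code):

    // Hypothetical hardened variant of the engine save/restore above.
    String execEngine = conf.get("hive.execution.engine");
    conf.set("hive.execution.engine", "mr");
    try {
      SessionState.start(new CliSessionState(conf));
    } finally {
      if (execEngine != null) {
        conf.set("hive.execution.engine", execEngine);
      } else {
        conf.unset("hive.execution.engine");
      }
    }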
Example #5
  private HCatClient getHCatClient(URI uri, Configuration conf, String user)
      throws HCatAccessorException {
    final HiveConf hiveConf = new HiveConf(conf, this.getClass());
    String serverURI = getMetastoreConnectURI(uri);
    if (!serverURI.equals("")) {
      hiveConf.set("hive.metastore.local", "false");
    }
    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, serverURI);
    try {
      XLog.getLog(HCatURIHandler.class)
          .info(
              "Creating HCatClient for user [{0}] login_user [{1}] and server [{2}] ",
              user, UserGroupInformation.getLoginUser(), serverURI);

      // HiveMetastoreClient (hive 0.9) currently does not work if UGI has doAs
      // We are good to connect as the oozie user since listPartitions does not require
      // authorization
      /*
      UserGroupInformation ugi = ugiService.getProxyUser(user);
      return ugi.doAs(new PrivilegedExceptionAction<HCatClient>() {
          public HCatClient run() throws Exception {
              return HCatClient.create(hiveConf);
          }
      });
      */

      return HCatClient.create(hiveConf);
    } catch (HCatException e) {
      throw new HCatAccessorException(ErrorCode.E1501, e);
    } catch (IOException e) {
      throw new HCatAccessorException(ErrorCode.E1501, e);
    }
  }
Example #6
 /**
  * Create dirs & session paths for this session:
  *   1. HDFS scratch dir
  *   2. Local scratch dir
  *   3. Local downloaded resource dir
  *   4. HDFS session path
  *   5. Local session path
  *   6. HDFS temp table space
  *
  * @param userName the user the session directories are created for
  * @throws IOException
  */
 private void createSessionDirs(String userName) throws IOException {
   HiveConf conf = getConf();
   Path rootHDFSDirPath = createRootHDFSDir(conf);
   // Now create session specific dirs
   String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
   Path path;
   // 1. HDFS scratch dir
   path = new Path(rootHDFSDirPath, userName);
   hdfsScratchDirURIString = path.toUri().toString();
   createPath(conf, path, scratchDirPermission, false, false);
   // 2. Local scratch dir
   path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
   createPath(conf, path, scratchDirPermission, true, false);
   // 3. Download resources dir
   path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
   createPath(conf, path, scratchDirPermission, true, false);
   // Finally, create session paths for this session
   // Local & non-local tmp location is configurable; however, it is the same across
   // all external file systems
   String sessionId = getSessionId();
   // 4. HDFS session path
   hdfsSessionPath = new Path(hdfsScratchDirURIString, sessionId);
   createPath(conf, hdfsSessionPath, scratchDirPermission, false, true);
   conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
   // 5. Local session path
   localSessionPath =
       new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId);
   createPath(conf, localSessionPath, scratchDirPermission, true, true);
   conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
   // 6. HDFS temp table space
   hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
   createPath(conf, hdfsTmpTableSpace, scratchDirPermission, false, true);
   conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
 }
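
createPath is called with (conf, path, permission, isLocal, isCleanUp) arguments but is not shown. A plausible sketch modeled on what the call sites imply (hypothetical; the real implementation may differ):

  // Hypothetical helper matching the call sites above.
  private void createPath(HiveConf conf, Path path, String permission, boolean isLocal,
      boolean isCleanUp) throws IOException {
    FsPermission fsPermission = new FsPermission(permission);
    FileSystem fs = isLocal ? FileSystem.getLocal(conf) : path.getFileSystem(conf);
    if (!fs.exists(path) && !fs.mkdirs(path, fsPermission)) {
      throw new IOException("Failed to create directory " + path);
    }
    if (isCleanUp) {
      // session-scoped dirs get removed when the file system is closed
      fs.deleteOnExit(path);
    }
  }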
Example #7
 public void init() throws Exception {
   testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
   String execEngine = conf.get("hive.execution.engine");
   conf.set("hive.execution.engine", "mr");
   SessionState.start(conf);
   conf.set("hive.execution.engine", execEngine);
   db = Hive.get(conf);
   pd = new ParseDriver();
   sem = new SemanticAnalyzer(conf);
 }
Example #8
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
   tableName = "TestOperationLoggingLayout_table";
   hiveConf = new HiveConf();
   hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "execution");
   // We need to set the below parameter to test performance level logging
   hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
   miniHS2 = new MiniHS2(hiveConf);
   confOverlay = new HashMap<String, String>();
   confOverlay.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
   miniHS2.start(confOverlay);
 }
Example #9
  private static HiveConf createHiveConf(
      Configuration conf, String metastoreUrl, String metastorePrincipal, String hive2Principal)
      throws IOException {
    HiveConf hcatConf = new HiveConf(conf, HiveConf.class);

    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUrl);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.set(
        HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");

    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    if (StringUtils.isNotEmpty(metastorePrincipal)) {
      hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, metastorePrincipal);
      hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
      hcatConf.set(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true");
    }
    if (StringUtils.isNotEmpty(hive2Principal)) {
      hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname, hive2Principal);
      hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname, "kerberos");
    }

    return hcatConf;
  }
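
The two principal parameters toggle the secure setup: a non-empty metastorePrincipal turns on SASL for the metastore connection, and a non-empty hive2Principal switches HiveServer2 to Kerberos. A usage sketch (the URIs and principals are placeholders, not real endpoints):

  // Hypothetical usage of createHiveConf; values are illustrative only.
  HiveConf insecureConf =
      createHiveConf(new Configuration(), "thrift://localhost:9083", null, null);
  HiveConf kerberizedConf =
      createHiveConf(
          new Configuration(),
          "thrift://metastore.example.com:9083",
          "hive/_HOST@EXAMPLE.COM",
          "hive/_HOST@EXAMPLE.COM");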
Example #10
  @Override
  protected void setUp() throws Exception {

    super.setUp();
    System.setProperty("hive.metastore.init.hooks", DummyMetaStoreInitListener.class.getName());
    int port = MetaStoreUtils.findFreePort();
    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
    hiveConf = new HiveConf(this.getClass());
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    SessionState.start(new CliSessionState(hiveConf));
    msc = new HiveMetaStoreClient(hiveConf);
    driver = new Driver(hiveConf);
  }
Example #11
  @Override
  protected void setUp() throws Exception {

    super.setUp();
    System.setProperty(ConfVars.METASTORE_EVENT_LISTENERS.varname, DummyListener.class.getName());
    System.setProperty(
        ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, DummyPreListener.class.getName());
    Thread t = new Thread(new RunMS());
    t.start();
    // crude wait for the standalone metastore thread to come up before connecting
    Thread.sleep(40000);
    hiveConf = new HiveConf(this.getClass());
    hiveConf.setBoolVar(ConfVars.METASTORE_MODE, false);
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    SessionState.start(new CliSessionState(hiveConf));
    msc = new HiveMetaStoreClient(hiveConf, null);
    driver = new Driver(hiveConf);
  }
Example #12
  private void obtainTokenAndAddIntoUGI(UserGroupInformation clientUgi, String tokenSig)
      throws Exception {
    // obtain a token by directly invoking the metastore operation(without going
    // through the thrift interface). Obtaining a token makes the secret manager
    // aware of the user and that it gave the token to the user
    String tokenStrForm;
    if (tokenSig == null) {
      tokenStrForm = HiveMetaStore.getDelegationToken(clientUgi.getShortUserName());
    } else {
      tokenStrForm = HiveMetaStore.getDelegationToken(clientUgi.getShortUserName(), tokenSig);
      conf.set("hive.metastore.token.signature", tokenSig);
    }

    Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
    t.decodeFromUrlString(tokenStrForm);
    // add the token to the clientUgi for securely talking to the metastore
    clientUgi.addToken(t);
    // Create the metastore client as the clientUgi. Doing so this
    // way will give the client access to the token that was added earlier
    // in the clientUgi
    HiveMetaStoreClient hiveClient =
        clientUgi.doAs(
            new PrivilegedExceptionAction<HiveMetaStoreClient>() {
              public HiveMetaStoreClient run() throws Exception {
                HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(conf);
                return hiveClient;
              }
            });

    assertTrue("Couldn't connect to metastore", hiveClient != null);

    // try out some metastore operations
    createDBAndVerifyExistence(hiveClient);
    hiveClient.close();

    // Now cancel the delegation token
    HiveMetaStore.cancelDelegationToken(tokenStrForm);

    // now metastore connection should fail
    hiveClient =
        clientUgi.doAs(
            new PrivilegedExceptionAction<HiveMetaStoreClient>() {
              public HiveMetaStoreClient run() {
                try {
                  HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(conf);
                  return hiveClient;
                } catch (MetaException e) {
                  return null;
                }
              }
            });
    assertTrue("Expected metastore operations to fail", hiveClient == null);
  }
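
createDBAndVerifyExistence is exercised above but not shown; a minimal sketch of what such a helper could look like (hypothetical), using the metastore client API:

  // Hypothetical sketch; needs org.apache.hadoop.hive.metastore.api.Database
  private void createDBAndVerifyExistence(HiveMetaStoreClient client) throws Exception {
    String dbName = "simpdb";
    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);
    Database fetched = client.getDatabase(dbName);
    assertTrue("Database names do not match", dbName.equals(fetched.getName()));
  }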
Example #13
  @BeforeClass
  public static void startMetaStoreServer() throws Exception {

    Thread t = new Thread(new RunMS());
    t.start();
    // crude wait for the metastore thread to come up before configuring clients
    Thread.sleep(40000);

    securityManager = System.getSecurityManager();
    System.setSecurityManager(new NoExitSecurityManager());
    hcatConf = new HiveConf(TestHCatClient.class);
    hcatConf.set("hive.metastore.local", "false");
    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.set(
        HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }
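
RunMS is the runnable that hosts the standalone metastore in these tests; it is not shown here. One hedged way to write it is to invoke HiveMetaStore.main with the chosen port (a hypothetical sketch; the real class may differ):

  // Hypothetical sketch of the RunMS runnable used above.
  static class RunMS implements Runnable {
    @Override
    public void run() {
      try {
        HiveMetaStore.main(new String[] {"-v", "-p", String.valueOf(msPort)});
      } catch (Throwable t) {
        LOG.error("Metastore thread exited with exception", t);
      }
    }
  }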
Example #14
  private static void initializeSetup() throws Exception {

    hiveConf = new HiveConf(mrConf, TestHCatMultiOutputFormat.class);
    hiveConf.set("hive.metastore.local", "false");
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
    hiveConf.set(
        HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");

    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousedir.toString());
    try {
      hmsc = new HiveMetaStoreClient(hiveConf, null);
      initalizeTables();
    } catch (Throwable e) {
      LOG.error("Exception encountered while setting up testcase", e);
      throw new Exception(e);
    } finally {
      // guard against NPE if the client constructor itself threw
      if (hmsc != null) {
        hmsc.close();
      }
    }
  }
Example #15
    /**
     * Separate from constructor, because initialize() may need to be called in a separate thread.
     */
    synchronized void initialize() {
      assertState(QueryState.CREATED);
      this.hiveConf = new HiveConf(Driver.class);

      // Update configuration with user/group info.
      if (query.hadoop_user == null) {
        throw new RuntimeException("User must be specified.");
      }

      // Update scratch dir (to have one per user)
      File scratchDir = new File("/tmp/hive-beeswax-" + query.hadoop_user);
      hiveConf.set(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.getPath());
      // Create the temporary directory if necessary.
      // If mapred.job.tracker is set to local, this is used by MapRedTask.
      if (!scratchDir.isDirectory()) {
        if (scratchDir.exists() || !scratchDir.mkdirs()) {
          LOG.warn("Could not create tmp dir:" + scratchDir);
        }
      }

      driver = new Driver(hiveConf);
      ClassLoader loader = hiveConf.getClassLoader();
      String auxJars = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVEAUXJARS);
      if (StringUtils.isNotBlank(auxJars)) {
        try {
          loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
        } catch (Exception e) {
          LOG.error("Failed to add jars to class loader: " + auxJars, e);
        }
      }
      hiveConf.setClassLoader(loader);
      Thread.currentThread().setContextClassLoader(loader);
      SessionState.start(hiveConf); // this is thread-local
      this.sessionState = SessionState.get();

      // If this work has a LogContext, associate the children output to the logContext
      OutputStream lcOutStream = null;
      if (this.logContext != null) lcOutStream = this.logContext.getOutputStream();

      // A copy of everything goes to the LogContext.
      // In addition, stderr goes to errStream for error reporting.
      // Note that child output is explicitly tee to System.{out,err},
      // otherwise it'll be swallowed by outStream.
      this.sessionState.out = new PrintStream(new TeeOutputStream(lcOutStream, this.outStream));
      this.sessionState.err = new PrintStream(new TeeOutputStream(lcOutStream, this.errStream));
      this.sessionState.childOut =
          new PrintStream(new TeeOutputStream(System.out, sessionState.out));
      this.sessionState.childErr =
          new PrintStream(new TeeOutputStream(System.err, sessionState.err));

      this.state = QueryState.INITIALIZED;
    }
Example #16
  /**
   * Given a Hive Configuration object - generate a command line fragment for passing such
   * configuration information to ExecDriver.
   */
  public static String generateCmdLine(HiveConf hconf, Context ctx) throws IOException {
    HiveConf tempConf = new HiveConf();
    Path hConfFilePath = new Path(ctx.getLocalTmpPath(), JOBCONF_FILENAME);
    OutputStream out = null;

    Properties deltaP = hconf.getChangedProperties();
    boolean hadoopLocalMode = ShimLoader.getHadoopShims().isLocalMode(hconf);
    String hadoopSysDir = "mapred.system.dir";
    String hadoopWorkDir = "mapred.local.dir";

    for (Object one : deltaP.keySet()) {
      String oneProp = (String) one;

      if (hadoopLocalMode && (oneProp.equals(hadoopSysDir) || oneProp.equals(hadoopWorkDir))) {
        continue;
      }
      tempConf.set(oneProp, hconf.get(oneProp));
    }

    // Multiple concurrent local mode job submissions can cause collisions in
    // working dirs and system dirs
    // Workaround is to rename map red working dir to a temp dir in such cases
    if (hadoopLocalMode) {
      tempConf.set(hadoopSysDir, hconf.get(hadoopSysDir) + "/" + Utilities.randGen.nextInt());
      tempConf.set(hadoopWorkDir, hconf.get(hadoopWorkDir) + "/" + Utilities.randGen.nextInt());
    }

    try {
      out = FileSystem.getLocal(hconf).create(hConfFilePath);
      tempConf.writeXml(out);
    } finally {
      if (out != null) {
        out.close();
      }
    }
    return " -jobconffile " + hConfFilePath.toString();
  }
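
On the receiving side, the child process can rebuild an equivalent configuration from the written XML using plain Hadoop APIs. A hedged consumption sketch (jobConfFilePath stands for the value parsed from the -jobconffile argument):

  // Sketch: reading the -jobconffile XML back into a fresh configuration.
  Configuration childConf = new Configuration(false); // skip default resources
  childConf.addResource(new Path(jobConfFilePath));
  HiveConf rebuiltConf = new HiveConf(childConf, ExecDriver.class);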
Example #17
  /**
   * If authorization mode is v2, then pass it through authorizer so that it can apply any security
   * configuration changes.
   */
  public void applyAuthorizationPolicy() throws HiveException {
    if (!isAuthorizationModeV2()) {
      // auth v1 interface does not have this functionality
      return;
    }

    // avoid processing the same config multiple times, check marker
    if (conf.get(CONFIG_AUTHZ_SETTINGS_APPLIED_MARKER, "").equals(Boolean.TRUE.toString())) {
      return;
    }

    authorizerV2.applyAuthorizationConfigPolicy(conf);
    // set a marker that this conf has been processed.
    conf.set(CONFIG_AUTHZ_SETTINGS_APPLIED_MARKER, Boolean.TRUE.toString());
  }
Example #18
  public static HiveConf buildHiveConf() {

    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    HiveConf hiveConf = new HiveConf();
    hiveConf.set(
        HiveConf.ConfVars.HIVE_TXN_MANAGER.varname,
        "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
    hiveConf.set("hive.root.logger", "DEBUG,console");
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    return hiveConf;
  }
Example #19
 /**
  * Set a config property as both a system property and a HiveConf entry, recording the key so it
  * can be reset later.
  *
  * @param propertyKey property name
  * @param propertyValue property value
  */
 public void setConfProperty(String propertyKey, String propertyValue) {
   System.setProperty(propertyKey, propertyValue);
   hiveConf.set(propertyKey, propertyValue);
   addedProperties.add(propertyKey);
 }
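
Because every key goes through addedProperties, a matching reset can undo these overrides between tests. A possible sketch (hypothetical), recovering defaults from a pristine HiveConf:

  // Hypothetical cleanup that undoes setConfProperty between tests.
  public void resetConfProperties() {
    HiveConf defaults = new HiveConf();
    for (String key : addedProperties) {
      System.clearProperty(key);
      String defaultValue = defaults.get(key);
      if (defaultValue != null) {
        hiveConf.set(key, defaultValue);
      } else {
        hiveConf.unset(key);
      }
    }
    addedProperties.clear();
  }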
Example #20
  public static void main(String[] args) throws Exception {
    OptionsProcessor oproc = new OptionsProcessor();
    if (!oproc.process_stage1(args)) {
      System.exit(1);
    }

    SessionState.initHiveLog4j();

    CliSessionState ss = new CliSessionState(new HiveConf(SessionState.class));
    ss.in = System.in;
    try {
      ss.out = new PrintStream(System.out, true, "UTF-8");
      ss.err = new PrintStream(System.err, true, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      System.exit(3);
    }

    if (!oproc.process_stage2(ss)) {
      System.exit(2);
    }

    HiveConf conf = ss.getConf();
    for (Map.Entry<Object, Object> item : ss.cmdProperties.entrySet()) {
      conf.set((String) item.getKey(), (String) item.getValue());
    }

    if (!ShimLoader.getHadoopShims().usesJobShell()) {
      ClassLoader loader = conf.getClassLoader();
      String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);
      if (StringUtils.isNotBlank(auxJars)) {
        loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
      }
      conf.setClassLoader(loader);
      Thread.currentThread().setContextClassLoader(loader);
    }

    SessionState.start(ss);

    CliDriver cli = new CliDriver();

    Hive hive = Hive.get();

    String username = ss.getUserName();
    String passwd = ss.passwd;
    if (!hive.isAUser(username, passwd)) {
      System.out.println("User or passwd is wrong!");
      System.exit(0);
    } else {
      System.out.println("Connect to TDW successfully!");
    }

    if (ss.getDbName() == null) {
      ss.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME);
    }
    if (ss.execString != null) {
      System.exit(cli.processLine(ss.execString));
    }

    try {
      if (ss.fileName != null) {
        System.exit(cli.processReader(new BufferedReader(new FileReader(ss.fileName))));
      }
    } catch (FileNotFoundException e) {
      System.err.println("Could not open input file for reading. (" + e.getMessage() + ")");
      System.exit(3);
    }

    ConsoleReader reader = new ConsoleReader();
    reader.setBellEnabled(false);

    List<SimpleCompletor> completors = new LinkedList<SimpleCompletor>();
    completors.add(
        new SimpleCompletor(
            new String[] {"set", "from", "create", "load", "describe", "quit", "exit"}));
    reader.addCompletor(new ArgumentCompletor(completors));

    String line;
    final String HISTORYFILE = ".hivehistory";
    String historyFile = System.getProperty("user.home") + File.separator + HISTORYFILE;
    reader.setHistory(new History(new File(historyFile)));
    int ret = 0;

    String prefix = "";
    String curPrompt = prompt;
    while ((line = reader.readLine(curPrompt + "> ")) != null) {
      if (!prefix.equals("")) {
        prefix += '\n';
      }
      if (line.trim().endsWith(";") && !line.trim().endsWith("\\;")) {
        line = prefix + line;
        ret = cli.processLine(line);
        prefix = "";
        curPrompt = prompt;
      } else {
        prefix = prefix + line;
        curPrompt = prompt2;
        continue;
      }
    }

    System.exit(ret);
  }
Example #21
  public HiveTestUtil(
      String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer)
      throws Exception {
    this.outDir = outDir;
    this.logDir = logDir;
    if (confDir != null && !confDir.isEmpty()) {
      HiveConf.setHiveSiteLocation(
          new URL("file://" + new File(confDir).toURI().getPath() + "/hive-site.xml"));
      LOG.info("Setting hive-site: " + HiveConf.getHiveSiteLocation());
    }
    conf = new HiveConf();
    String tmpBaseDir = System.getProperty("test.tmp.dir");
    if (tmpBaseDir == null || tmpBaseDir.isEmpty()) {
      tmpBaseDir = System.getProperty("java.io.tmpdir");
    }
    String metaStoreURL =
        "jdbc:derby:" + tmpBaseDir + File.separator + "metastore_dbtest;" + "create=true";
    conf.set(ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
    System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);

    // set where derby logs
    File derbyLogFile = new File(tmpBaseDir + "/derby.log");
    derbyLogFile.createNewFile();
    System.setProperty("derby.stream.error.file", derbyLogFile.getPath());

    this.hadoopVer = getHadoopMainVersion(hadoopVer);
    qMap = new TreeMap<String, String>();
    qSkipSet = new HashSet<String>();
    qSortSet = new HashSet<String>();
    qSortQuerySet = new HashSet<String>();
    qHashQuerySet = new HashSet<String>();
    qSortNHashQuerySet = new HashSet<String>();
    qJavaVersionSpecificOutput = new HashSet<String>();
    this.clusterType = clusterType;

    // Using randomUUID for dfs cluster
    System.setProperty("test.build.data", "target/test-data/hive-" + UUID.randomUUID().toString());

    HadoopShims shims = ShimLoader.getHadoopShims();
    int numberOfDataNodes = 4;

    if (clusterType != MiniClusterType.none) {
      dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
      FileSystem fs = dfs.getFileSystem();
      String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
      if (clusterType == MiniClusterType.tez) {
        mr = shims.getMiniTezCluster(conf, 4, uriString, 1);
      } else {
        mr = shims.getMiniMrCluster(conf, 4, uriString, 1);
      }
    }

    initConf();

    // Use the current directory if it is not specified
    String dataDir = conf.get("test.data.files");
    if (dataDir == null) {
      dataDir = new File(".").getAbsolutePath() + "/data/files";
    }

    testFiles = dataDir;

    // Use the current directory if it is not specified
    String scriptsDir = conf.get("test.data.scripts");
    if (scriptsDir == null) {
      scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
    }
    if (!initScript.isEmpty()) {
      this.initScript = scriptsDir + "/" + initScript;
    }
    if (!cleanupScript.isEmpty()) {
      this.cleanupScript = scriptsDir + "/" + cleanupScript;
    }

    overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));

    setup = new HiveTestSetup();
    setup.preTest(conf);
    init();
  }
Example #22
  @Override
  protected void runReportal() throws Exception {
    System.out.println("Reportal Hive: Setting up Hive");
    HiveConf conf = new HiveConf(SessionState.class);

    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      conf.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }

    File tempTSVFile = new File("./temp.tsv");
    OutputStream tsvTempOutputStream =
        new BoundedOutputStream(
            new BufferedOutputStream(new FileOutputStream(tempTSVFile)), outputCapacity);
    PrintStream logOut = System.out;

    // NOTE: It is critical to do this here so that log4j is reinitialized
    // before any of the other core hive classes are loaded
    // [email protected]: I disabled this because it appears to swallow
    // all future logging (even outside of hive).
    // SessionState.initHiveLog4j();

    String orig = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);

    CliSessionState sessionState = new CliSessionState(conf);
    sessionState.in = System.in;
    sessionState.out = new PrintStream(tsvTempOutputStream, true, "UTF-8");
    sessionState.err = new PrintStream(logOut, true, "UTF-8");

    OptionsProcessor oproc = new OptionsProcessor();

    // Feed in Hive Args
    String[] args = buildHiveArgs();
    if (!oproc.process_stage1(args)) {
      throw new Exception("unable to parse options stage 1");
    }

    if (!oproc.process_stage2(sessionState)) {
      throw new Exception("unable to parse options stage 2");
    }

    // Set all properties specified via command line
    for (Map.Entry<Object, Object> item : sessionState.cmdProperties.entrySet()) {
      conf.set((String) item.getKey(), (String) item.getValue());
    }

    SessionState.start(sessionState);

    String expanded = expandHiveAuxJarsPath(orig);
    if (orig == null || orig.equals(expanded)) {
      System.out.println("Hive aux jars variable not expanded");
    } else {
      System.out.println("Expanded aux jars variable from [" + orig + "] to [" + expanded + "]");
      HiveConf.setVar(conf, HiveConf.ConfVars.HIVEAUXJARS, expanded);
    }

    if (!ShimLoader.getHadoopShims().usesJobShell()) {
      // hadoop-20 and above - we need to augment classpath using hiveconf
      // components
      // see also: code in ExecDriver.java
      ClassLoader loader = conf.getClassLoader();
      String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);

      System.out.println("Got auxJars = " + auxJars);

      if (StringUtils.isNotBlank(auxJars)) {
        loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
      }
      conf.setClassLoader(loader);
      Thread.currentThread().setContextClassLoader(loader);
    }

    CliDriver cli = new CliDriver();
    int returnValue = 0;
    String prefix = "";

    returnValue = cli.processLine("set hive.cli.print.header=true;");
    String[] queries = jobQuery.split("\n");
    for (String line : queries) {
      if (!prefix.isEmpty()) {
        prefix += '\n';
      }
      if (line.trim().endsWith(";") && !line.trim().endsWith("\\;")) {
        line = prefix + line;
        line = injectVariables(line);
        System.out.println("Reportal Hive: Running Hive Query: " + line);
        System.out.println(
            "Reportal Hive: HiveConf HIVEAUXJARS: "
                + HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS));
        returnValue = cli.processLine(line);
        prefix = "";
      } else {
        prefix = prefix + line;
        continue;
      }
    }

    tsvTempOutputStream.close();

    // convert tsv to csv and write it to disk
    System.out.println("Reportal Hive: Converting output");
    InputStream tsvTempInputStream = new BufferedInputStream(new FileInputStream(tempTSVFile));
    Scanner rowScanner = new Scanner(tsvTempInputStream);
    PrintStream csvOutputStream = new PrintStream(outputStream);
    while (rowScanner.hasNextLine()) {
      String tsvLine = rowScanner.nextLine();
      // strip all quotes, and then quote the columns
      csvOutputStream.println("\"" + tsvLine.replace("\"", "").replace("\t", "\",\"") + "\"");
    }
    rowScanner.close();
    csvOutputStream.close();

    // Remove the temp file now that its contents have been converted
    tempTSVFile.delete();

    if (returnValue != 0) {
      throw new Exception("Hive query finished with a non zero return code");
    }

    System.out.println("Reportal Hive: Ended successfully");
  }
Example #23
  private void generateTestData() throws Exception {
    HiveConf conf = new HiveConf(SessionState.class);

    conf.set(
        "javax.jdo.option.ConnectionURL",
        String.format("jdbc:derby:;databaseName=%s;create=true", dbDir));
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    conf.set("hive.metastore.warehouse.dir", whDir);
    conf.set("mapred.job.tracker", "local");
    conf.set(ConfVars.SCRATCHDIR.varname, getTempDir("scratch_dir"));
    conf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));
    conf.set(ConfVars.DYNAMICPARTITIONINGMODE.varname, "nonstrict");

    SessionState ss = new SessionState(conf);
    SessionState.start(ss);
    Driver hiveDriver = new Driver(conf);

    // generate (key, value) test data
    String testDataFile = generateTestDataFile();

    // Create a (key, value) schema table with Text SerDe which is available in hive-serdes.jar
    executeQuery(
        hiveDriver,
        "CREATE TABLE IF NOT EXISTS default.kv(key INT, value STRING) "
            + "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE");
    executeQuery(
        hiveDriver,
        "LOAD DATA LOCAL INPATH '" + testDataFile + "' OVERWRITE INTO TABLE default.kv");

    // Create a (key, value) schema table in non-default database with RegexSerDe which is available
    // in hive-contrib.jar
    // Table with RegExSerde is expected to have columns of STRING type only.
    executeQuery(hiveDriver, "CREATE DATABASE IF NOT EXISTS db1");
    executeQuery(
        hiveDriver,
        "CREATE TABLE db1.kv_db1(key STRING, value STRING) "
            + "ROW FORMAT SERDE 'org.apache.hadoop.hive.contrib.serde2.RegexSerDe' "
            + "WITH SERDEPROPERTIES ("
            + "  \"input.regex\" = \"([0-9]*), (.*_[0-9]*)\", "
            + "  \"output.format.string\" = \"%1$s, %2$s\""
            + ") ");
    executeQuery(hiveDriver, "INSERT INTO TABLE db1.kv_db1 SELECT * FROM default.kv");

    // Create an Avro format based table backed by schema in a separate file
    final String avroCreateQuery =
        String.format(
            "CREATE TABLE db1.avro "
                + "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' "
                + "STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' "
                + "OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' "
                + "TBLPROPERTIES ('avro.schema.url'='file:///%s')",
            BaseTestQuery.getPhysicalFileFromResource("avro_test_schema.json").replace('\\', '/'));

    executeQuery(hiveDriver, avroCreateQuery);
    executeQuery(hiveDriver, "INSERT INTO TABLE db1.avro SELECT * FROM default.kv");

    executeQuery(hiveDriver, "USE default");

    // create a table with no data
    executeQuery(hiveDriver, "CREATE TABLE IF NOT EXISTS empty_table(a INT, b STRING)");
    // delete the table location of empty table
    File emptyTableLocation = new File(whDir, "empty_table");
    if (emptyTableLocation.exists()) {
      FileUtils.forceDelete(emptyTableLocation);
    }

    // create a Hive table that has columns with data types which are supported for reading in
    // Drill.
    testDataFile = generateAllTypesDataFile();
    executeQuery(
        hiveDriver,
        "CREATE TABLE IF NOT EXISTS readtest ("
            + "  binary_field BINARY,"
            + "  boolean_field BOOLEAN,"
            + "  tinyint_field TINYINT,"
            + "  decimal0_field DECIMAL,"
            + "  decimal9_field DECIMAL(6, 2),"
            + "  decimal18_field DECIMAL(15, 5),"
            + "  decimal28_field DECIMAL(23, 1),"
            + "  decimal38_field DECIMAL(30, 3),"
            + "  double_field DOUBLE,"
            + "  float_field FLOAT,"
            + "  int_field INT,"
            + "  bigint_field BIGINT,"
            + "  smallint_field SMALLINT,"
            + "  string_field STRING,"
            + "  varchar_field VARCHAR(50),"
            + "  timestamp_field TIMESTAMP,"
            + "  date_field DATE"
            + ") PARTITIONED BY ("
            + "  binary_part BINARY,"
            + "  boolean_part BOOLEAN,"
            + "  tinyint_part TINYINT,"
            + "  decimal0_part DECIMAL,"
            + "  decimal9_part DECIMAL(6, 2),"
            + "  decimal18_part DECIMAL(15, 5),"
            + "  decimal28_part DECIMAL(23, 1),"
            + "  decimal38_part DECIMAL(30, 3),"
            + "  double_part DOUBLE,"
            + "  float_part FLOAT,"
            + "  int_part INT,"
            + "  bigint_part BIGINT,"
            + "  smallint_part SMALLINT,"
            + "  string_part STRING,"
            + "  varchar_part VARCHAR(50),"
            + "  timestamp_part TIMESTAMP,"
            + "  date_part DATE"
            + ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' "
            + "TBLPROPERTIES ('serialization.null.format'='') ");

    // Add a partition to table 'readtest'
    executeQuery(
        hiveDriver,
        "ALTER TABLE readtest ADD IF NOT EXISTS PARTITION ( "
            + "  binary_part='binary', "
            + "  boolean_part='true', "
            + "  tinyint_part='64', "
            + "  decimal0_part='36.9', "
            + "  decimal9_part='36.9', "
            + "  decimal18_part='3289379872.945645', "
            + "  decimal28_part='39579334534534.35345', "
            + "  decimal38_part='363945093845093890.9', "
            + "  double_part='8.345', "
            + "  float_part='4.67', "
            + "  int_part='123456', "
            + "  bigint_part='234235', "
            + "  smallint_part='3455', "
            + "  string_part='string', "
            + "  varchar_part='varchar', "
            + "  timestamp_part='2013-07-05 17:01:00', "
            + "  date_part='2013-07-05')");

    // Add a second partition to table 'readtest' which contains the same values as the first
    // partition except for the boolean_part partition column
    executeQuery(
        hiveDriver,
        "ALTER TABLE readtest ADD IF NOT EXISTS PARTITION ( "
            + "  binary_part='binary', "
            + "  boolean_part='false', "
            + "  tinyint_part='64', "
            + "  decimal0_part='36.9', "
            + "  decimal9_part='36.9', "
            + "  decimal18_part='3289379872.945645', "
            + "  decimal28_part='39579334534534.35345', "
            + "  decimal38_part='363945093845093890.9', "
            + "  double_part='8.345', "
            + "  float_part='4.67', "
            + "  int_part='123456', "
            + "  bigint_part='234235', "
            + "  smallint_part='3455', "
            + "  string_part='string', "
            + "  varchar_part='varchar', "
            + "  timestamp_part='2013-07-05 17:01:00', "
            + "  date_part='2013-07-05')");

    // Load data into table 'readtest'
    executeQuery(
        hiveDriver,
        String.format(
            "LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE default.readtest PARTITION ("
                + "  binary_part='binary', "
                + "  boolean_part='true', "
                + "  tinyint_part='64', "
                + "  decimal0_part='36.9', "
                + "  decimal9_part='36.9', "
                + "  decimal18_part='3289379872.945645', "
                + "  decimal28_part='39579334534534.35345', "
                + "  decimal38_part='363945093845093890.9', "
                + "  double_part='8.345', "
                + "  float_part='4.67', "
                + "  int_part='123456', "
                + "  bigint_part='234235', "
                + "  smallint_part='3455', "
                + "  string_part='string', "
                + "  varchar_part='varchar', "
                + "  timestamp_part='2013-07-05 17:01:00', "
                + "  date_part='2013-07-05')",
            testDataFile));

    // create a table that has all Hive types. This is to test how hive tables metadata is
    // populated in Drill's INFORMATION_SCHEMA.
    executeQuery(
        hiveDriver,
        "CREATE TABLE IF NOT EXISTS infoschematest("
            + "booleanType BOOLEAN, "
            + "tinyintType TINYINT, "
            + "smallintType SMALLINT, "
            + "intType INT, "
            + "bigintType BIGINT, "
            + "floatType FLOAT, "
            + "doubleType DOUBLE, "
            + "dateType DATE, "
            + "timestampType TIMESTAMP, "
            + "binaryType BINARY, "
            + "decimalType DECIMAL(38, 2), "
            + "stringType STRING, "
            + "varCharType VARCHAR(20), "
            + "listType ARRAY<STRING>, "
            + "mapType MAP<STRING,INT>, "
            + "structType STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>, "
            + "uniontypeType UNIONTYPE<int, double, array<string>>)");

    // create a Hive view to test how its metadata is populated in Drill's INFORMATION_SCHEMA
    executeQuery(hiveDriver, "CREATE VIEW IF NOT EXISTS hiveview AS SELECT * FROM kv");

    executeQuery(
        hiveDriver,
        "CREATE TABLE IF NOT EXISTS "
            + "partition_pruning_test_loadtable(a DATE, b TIMESTAMP, c INT, d INT, e INT) "
            + "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE");
    executeQuery(
        hiveDriver,
        String.format(
            "LOAD DATA LOCAL INPATH '%s' INTO TABLE partition_pruning_test_loadtable",
            generateTestDataFileForPartitionInput()));

    // create partitioned hive table to test partition pruning
    executeQuery(
        hiveDriver,
        "CREATE TABLE IF NOT EXISTS partition_pruning_test(a DATE, b TIMESTAMP) "
            + "partitioned by (c INT, d INT, e INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE");
    executeQuery(
        hiveDriver,
        "INSERT OVERWRITE TABLE partition_pruning_test PARTITION(c, d, e) "
            + "SELECT a, b, c, d, e FROM partition_pruning_test_loadtable");

    // Add a partition with custom location
    executeQuery(
        hiveDriver,
        String.format(
            "ALTER TABLE partition_pruning_test ADD PARTITION (c=99, d=98, e=97) LOCATION '%s'",
            getTempDir("part1")));
    executeQuery(
        hiveDriver,
        String.format(
            "INSERT INTO TABLE partition_pruning_test PARTITION(c=99, d=98, e=97) "
                + "SELECT '%s', '%s' FROM kv LIMIT 1",
            new Date(System.currentTimeMillis()).toString(),
            new Timestamp(System.currentTimeMillis()).toString()));

    executeQuery(hiveDriver, "DROP TABLE partition_pruning_test_loadtable");

    ss.close();
  }
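
getTempDir supplies the scratch and partition locations above but is not shown; a plausible sketch (hypothetical) using java.nio:

  // Hypothetical sketch of the getTempDir helper used above.
  private String getTempDir(String dirName) throws IOException {
    File dir = java.nio.file.Files.createTempDirectory(dirName).toFile();
    dir.deleteOnExit();
    return dir.getAbsolutePath();
  }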
Example #24
  public static HiveServer create(
      Map<String, String> properties,
      File baseDir,
      File confDir,
      File logDir,
      FileSystem fileSystem)
      throws Exception {

    if (!properties.containsKey(WAREHOUSE_DIR)) {
      LOGGER.info("fileSystem " + fileSystem.getClass().getSimpleName());
      if (fileSystem instanceof DistributedFileSystem) {
        String dfsUri = FileSystem.getDefaultUri(fileSystem.getConf()).toString();
        LOGGER.info("dfsUri " + dfsUri);
        properties.put(WAREHOUSE_DIR, dfsUri + "/data");
        fileSystem.mkdirs(new Path("/data/"), new FsPermission((short) 0777));
      } else {
        properties.put(WAREHOUSE_DIR, new File(baseDir, "warehouse").getPath());
        fileSystem.mkdirs(new Path("/", "warehouse"), new FsPermission((short) 0777));
      }
    }
    LOGGER.info("Setting an readable path to hive.exec.scratchdir");
    properties.put("hive.exec.scratchdir", new File(baseDir, "scratchdir").getPath());

    if (!properties.containsKey(METASTORE_CONNECTION_URL)) {
      properties.put(
          METASTORE_CONNECTION_URL,
          String.format(
              "jdbc:derby:;databaseName=%s;create=true", new File(baseDir, "metastore").getPath()));
    }
    if (!properties.containsKey(HS2_PORT)) {
      properties.put(HS2_PORT, String.valueOf(findPort()));
    }
    if (!properties.containsKey(SUPPORT_CONCURRENCY)) {
      properties.put(SUPPORT_CONCURRENCY, "false");
    }
    if (!properties.containsKey(HADOOPBIN)) {
      properties.put(HADOOPBIN, "./target/test-classes/hadoop");
    }

    // Modify the test resource to have executable permission
    java.nio.file.Path hadoopPath =
        FileSystems.getDefault().getPath("target/test-classes", "hadoop");
    if (hadoopPath != null) {
      hadoopPath.toFile().setExecutable(true);
    }

    properties.put(METASTORE_SETUGI, "true");
    properties.put(METASTORE_CLIENT_TIMEOUT, "100");
    properties.put(ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true");

    properties.put(ConfVars.HIVESTATSAUTOGATHER.varname, "false");
    properties.put(ConfVars.HIVE_STATS_COLLECT_SCANCOLS.varname, "true");
    String hadoopBinPath = properties.get(HADOOPBIN);
    Assert.assertNotNull(hadoopBinPath, "Hadoop Bin");
    File hadoopBin = new File(hadoopBinPath);
    if (!hadoopBin.isFile()) {
      Assert.fail(
          "Path to hadoop bin "
              + hadoopBin.getPath()
              + " is invalid. "
              + "Perhaps you missed the download-hadoop profile.");
    }

    /*
     * This hack, setting the hiveSiteURL field removes a previous hack involving
     * setting of system properties for each property. Although both are hacks,
     * I prefer this hack because once the system properties are set they can
     * affect later tests unless those tests clear them. This hack allows for
     * a clean switch to a new set of defaults when a new HiveConf object is created.
     */
    Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(null);
    HiveConf hiveConf = new HiveConf();
    for (Map.Entry<String, String> entry : properties.entrySet()) {
      LOGGER.info(entry.getKey() + " => " + entry.getValue());
      hiveConf.set(entry.getKey(), entry.getValue());
    }
    File hiveSite = new File(confDir, "hive-site.xml");

    hiveConf.set(HIVESERVER2_IMPERSONATION, "false");
    OutputStream out = new FileOutputStream(hiveSite);
    hiveConf.writeXml(out);
    out.close();

    Reflection.staticField("hiveSiteURL")
        .ofType(URL.class)
        .in(HiveConf.class)
        .set(hiveSite.toURI().toURL());

    LOGGER.info("Creating InternalHiveServer");
    return new InternalHiveServer(hiveConf);
  }
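
findPort is not shown; a common implementation binds an ephemeral server socket and returns the OS-assigned port. A sketch (hypothetical helper):

  // Hypothetical sketch of the findPort helper used above.
  private static int findPort() throws IOException {
    try (java.net.ServerSocket socket = new java.net.ServerSocket(0)) {
      return socket.getLocalPort(); // note: the port may be reused before the server binds it
    }
  }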