public HiveAuthFactory(HiveConf conf) throws TTransportException {
   this.conf = conf;
   saslMessageLimit = conf.getIntVar(ConfVars.HIVE_THRIFT_SASL_MESSAGE_LIMIT);
   String transTypeStr = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE);
   String authTypeStr = conf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION);
   transportType = TransTypes.valueOf(transTypeStr.toUpperCase());
   authType =
       authTypeStr == null
           ? transportType.getDefaultAuthType()
           : AuthTypes.valueOf(authTypeStr.toUpperCase());
    if (transportType == TransTypes.BINARY
        // compare the resolved enum rather than authTypeStr, which may be null here
        && authType == AuthTypes.KERBEROS
        && ShimLoader.getHadoopShims().isSecureShimImpl()) {
     saslServer =
         ShimLoader.getHadoopThriftAuthBridge()
             .createServer(
                 conf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB),
                 conf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL));
     // start delegation token manager
     try {
       saslServer.startDelegationTokenSecretManager(conf, null, ServerMode.HIVESERVER2);
     } catch (Exception e) {
       throw new TTransportException("Failed to start token manager", e);
     }
   } else {
     saslServer = null;
   }
 }
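For context, a minimal usage sketch of this factory; getAuthTransFactory() is assumed to be the companion accessor on this class, and the config values are illustrative:

// Sketch, assuming an unsecured binary transport:
HiveConf conf = new HiveConf();
conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");
conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION, "NOSASL");
HiveAuthFactory authFactory = new HiveAuthFactory(conf);
TTransportFactory transportFactory = authFactory.getAuthTransFactory(); // assumed accessor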
Example #2
 @VisibleForTesting
 public static String initHiveLog4jCommon(HiveConf conf, ConfVars confVarName)
     throws LogInitializationException {
    String log4jFileName = HiveConf.getVar(conf, confVarName);
    if (log4jFileName.isEmpty()) {
      // if the log4j configuration file is not set, use the default settings
      return initHiveLog4jDefault(conf, "", confVarName);
    } else {
      // a log4j configuration file was configured; use the HiveConf property value
      File log4jConfigFile = new File(log4jFileName);
     boolean fileExists = log4jConfigFile.exists();
     if (!fileExists) {
        // the property-specified file was not found on the local file system;
        // fall back to the default settings
       return initHiveLog4jDefault(
           conf, "Not able to find conf file: " + log4jConfigFile, confVarName);
     } else {
        // the property-specified file exists on the local file system;
        // use the specified file
       if (confVarName == HiveConf.ConfVars.HIVE_EXEC_LOG4J_FILE) {
         String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID);
         if (queryId == null || (queryId = queryId.trim()).isEmpty()) {
           queryId = "unknown-" + System.currentTimeMillis();
         }
         System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);
       }
       final boolean async = checkAndSetAsyncLogging(conf);
       Configurator.initialize(null, log4jFileName);
       logConfigLocation(conf);
       return "Logging initialized using configuration in " + log4jConfigFile + " Async: " + async;
     }
   }
 }
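As a usage sketch, a caller would initialize service logging like this; HIVE_LOG4J_FILE is the ConfVars entry for the service-level log4j file, and the fallback path above is exercised whenever it is unset:

HiveConf conf = new HiveConf();
String initMsg = initHiveLog4jCommon(conf, HiveConf.ConfVars.HIVE_LOG4J_FILE);
// illustrative output:
// "Logging initialized using configuration in /etc/hive/conf/hive-log4j2.properties Async: true"
System.err.println(initMsg);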
  // Check if this write entity needs to be skipped
 private boolean filterWriteEntity(WriteEntity writeEntity) throws AuthorizationException {
   // skip URI validation for session scratch file URIs
   if (writeEntity.isTempURI()) {
     return true;
   }
   try {
     if (writeEntity.getTyp().equals(Type.DFS_DIR)
         || writeEntity.getTyp().equals(Type.LOCAL_DIR)) {
       HiveConf conf = SessionState.get().getConf();
       String warehouseDir = conf.getVar(ConfVars.METASTOREWAREHOUSE);
       URI scratchURI =
           new URI(PathUtils.parseDFSURI(warehouseDir, conf.getVar(HiveConf.ConfVars.SCRATCHDIR)));
       URI requestURI =
           new URI(PathUtils.parseDFSURI(warehouseDir, writeEntity.getLocation().getPath()));
       LOG.debug("scratchURI = " + scratchURI + ", requestURI = " + requestURI);
       if (PathUtils.impliesURI(scratchURI, requestURI)) {
         return true;
       }
       URI localScratchURI =
           new URI(PathUtils.parseLocalURI(conf.getVar(HiveConf.ConfVars.LOCALSCRATCHDIR)));
       URI localRequestURI = new URI(PathUtils.parseLocalURI(writeEntity.getLocation().getPath()));
       LOG.debug(
           "localScratchURI = " + localScratchURI + ", localRequestURI = " + localRequestURI);
       if (PathUtils.impliesURI(localScratchURI, localRequestURI)) {
         return true;
       }
     }
   } catch (Exception e) {
     throw new AuthorizationException("Failed to extract uri details", e);
   }
   return false;
 }
Example #4
 /**
   * Create dirs & session paths for this session:
   *   1. HDFS scratch dir
   *   2. Local scratch dir
   *   3. Local downloaded resource dir
   *   4. HDFS session path
   *   5. Local session path
   *   6. HDFS temp table space
   *
   * @param userName user for whom the session directories are created
   * @throws IOException
  */
 private void createSessionDirs(String userName) throws IOException {
   HiveConf conf = getConf();
   Path rootHDFSDirPath = createRootHDFSDir(conf);
   // Now create session specific dirs
   String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
   Path path;
   // 1. HDFS scratch dir
   path = new Path(rootHDFSDirPath, userName);
   hdfsScratchDirURIString = path.toUri().toString();
   createPath(conf, path, scratchDirPermission, false, false);
   // 2. Local scratch dir
   path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
   createPath(conf, path, scratchDirPermission, true, false);
   // 3. Download resources dir
   path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
   createPath(conf, path, scratchDirPermission, true, false);
   // Finally, create session paths for this session
    // Local & non-local tmp locations are configurable; however, they are the same across
    // all external file systems
   String sessionId = getSessionId();
   // 4. HDFS session path
   hdfsSessionPath = new Path(hdfsScratchDirURIString, sessionId);
   createPath(conf, hdfsSessionPath, scratchDirPermission, false, true);
   conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
   // 5. Local session path
   localSessionPath =
       new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId);
   createPath(conf, localSessionPath, scratchDirPermission, true, true);
   conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
   // 6. HDFS temp table space
   hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
   createPath(conf, hdfsTmpTableSpace, scratchDirPermission, false, true);
   conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
 }
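For orientation, the six createPath calls above produce a layout roughly like the following; the concrete values depend on configuration, and the _tmp_space.db suffix is an assumption about TMP_PREFIX:

// Illustrative layout for user "alice" with session id "s1":
//   <hive.exec.scratchdir>/alice                    1. HDFS scratch dir
//   <hive.exec.local.scratchdir>                    2. local scratch dir
//   <hive.downloaded.resources.dir>                 3. downloaded resources dir
//   <hive.exec.scratchdir>/alice/s1                 4. HDFS session path
//   <hive.exec.local.scratchdir>/s1                 5. local session path
//   <hive.exec.scratchdir>/alice/s1/_tmp_space.db   6. HDFS temp table space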
 public ThriftHttpServlet(
     TProcessor processor,
     TProtocolFactory protocolFactory,
     String authType,
     UserGroupInformation serviceUGI,
     UserGroupInformation httpUGI,
     HiveAuthFactory hiveAuthFactory) {
   super(processor, protocolFactory);
   this.authType = authType;
   this.serviceUGI = serviceUGI;
   this.httpUGI = httpUGI;
   this.hiveAuthFactory = hiveAuthFactory;
   this.isCookieAuthEnabled =
       hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED);
   // Initialize the cookie based authentication related variables.
   if (isCookieAuthEnabled) {
     // Generate the signer with secret.
     String secret = Long.toString(RAN.nextLong());
     LOG.debug("Using the random number as the secret for cookie generation " + secret);
     this.signer = new CookieSigner(secret.getBytes());
     this.cookieMaxAge =
         (int)
             hiveConf.getTimeVar(
                 ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE, TimeUnit.SECONDS);
     this.cookieDomain = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN);
     this.cookiePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH);
     // always send secure cookies for SSL mode
     this.isCookieSecure = hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL);
     this.isHttpOnlyCookie =
         hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY);
   }
 }
 /**
   * Gets the temporary directory of the given job.
   *
   * @param conf the job configuration
   * @param isLocal true to resolve the local temporary directory
   * @return the temporary directory path for the job
  */
 public static String getJobTmpDir(Configuration conf, boolean isLocal) {
   String fsName = HiveConf.getVar(conf, ConfVars.HADOOPFS);
   if (fsName.endsWith("/")) {
     fsName = fsName.substring(0, fsName.length() - 1);
   }
   return fsName
       + HiveConf.getVar(conf, (isLocal ? ConfVars.LOCALSCRATCHDIR : ConfVars.SCRATCHDIR), "");
 }
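A quick usage sketch (host and directory values are illustrative):

// With fs.default.name = hdfs://nn:8020 and hive.exec.scratchdir = /tmp/hive,
// this resolves to "hdfs://nn:8020/tmp/hive":
Configuration conf = new Configuration();
String jobTmpDir = getJobTmpDir(conf, /* isLocal */ false);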
 // Perform kerberos login using the hadoop shim API if the configuration is available
 public static void loginFromKeytab(HiveConf hiveConf) throws IOException {
   String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL);
   String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
   if (principal.isEmpty() || keyTabFile.isEmpty()) {
     throw new IOException("HiveServer2 Kerberos principal or keytab is not correctly configured");
   } else {
     ShimLoader.getHadoopShims().loginUserFromKeytab(principal, keyTabFile);
   }
 }
 // Perform SPNEGO login using the hadoop shim API if the configuration is available
 public static UserGroupInformation loginFromSpnegoKeytabAndReturnUGI(HiveConf hiveConf)
     throws IOException {
   String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL);
   String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB);
   if (principal.isEmpty() || keyTabFile.isEmpty()) {
     throw new IOException("HiveServer2 SPNEGO principal or keytab is not correctly configured");
   } else {
     return ShimLoader.getHadoopShims().loginUserFromKeytabAndReturnUGI(principal, keyTabFile);
   }
 }
Example #9
  /**
   * Create a Context with a given executionId. ExecutionId, together with user name and conf, will
   * determine the temporary directory locations.
   */
  public Context(Configuration conf, String executionId) {
    this.conf = conf;
    this.executionId = executionId;

     // local & non-local tmp locations are configurable; however, they are the same across
     // all external file systems
    nonLocalScratchPath =
        new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR), executionId);
    localScratchDir =
        new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), executionId)
            .toUri()
            .getPath();
  }
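A hedged usage sketch; in practice the executionId comes from a generator on the driver side, so the literal id below is only illustrative:

Configuration conf = new HiveConf();
Context ctx = new Context(conf, "hive_20240101000000_example"); // illustrative id
// scratch locations now resolve under <hive.exec.scratchdir>/<executionId>
// and <hive.exec.local.scratchdir>/<executionId>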
Example #10
 /*
  * Helper to determine what java options to use for the containers
   * Falls back to MapReduce's map java opts if no Tez-specific options
   * are set
  */
 private String getContainerJavaOpts(Configuration conf) {
   String javaOpts = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZJAVAOPTS);
   if (javaOpts != null && !javaOpts.isEmpty()) {
     String logLevel = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZLOGLEVEL);
     List<String> logProps = Lists.newArrayList();
     MRHelpers.addLog4jSystemProperties(logLevel, logProps);
     StringBuilder sb = new StringBuilder();
     for (String str : logProps) {
       sb.append(str).append(" ");
     }
     return javaOpts + " " + sb.toString();
   }
   return MRHelpers.getMapJavaOpts(conf);
 }
  private void initOperationLogRootDir() {
    operationLogRootDir =
        new File(hiveConf.getVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION));
    isOperationLogEnabled = true;

    if (operationLogRootDir.exists() && !operationLogRootDir.isDirectory()) {
      LOG.warn(
          "The operation log root directory exists, but it is not a directory: "
              + operationLogRootDir.getAbsolutePath());
      isOperationLogEnabled = false;
    }

    if (!operationLogRootDir.exists()) {
      if (!operationLogRootDir.mkdirs()) {
        LOG.warn(
            "Unable to create operation log root directory: "
                + operationLogRootDir.getAbsolutePath());
        isOperationLogEnabled = false;
      }
    }

    if (isOperationLogEnabled) {
      LOG.info("Operation log root directory is created: " + operationLogRootDir.getAbsolutePath());
      try {
        FileUtils.forceDeleteOnExit(operationLogRootDir);
      } catch (IOException e) {
        LOG.warn(
            "Failed to schedule cleanup HS2 operation logging root dir: "
                + operationLogRootDir.getAbsolutePath(),
            e);
      }
    }
  }
 public Map<String, String> getSaslProperties() {
   Map<String, String> saslProps = new HashMap<String, String>();
   SaslQOP saslQOP = SaslQOP.fromString(conf.getVar(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP));
   saslProps.put(Sasl.QOP, saslQOP.toString());
   saslProps.put(Sasl.SERVER_AUTH, "true");
   return saslProps;
 }
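The returned map plugs straight into JDK SASL negotiation via the javax.security.sasl keys; for example, with hive.server2.thrift.sasl.qop=auth-conf a caller sees:

Map<String, String> saslProps = authFactory.getSaslProperties();
// javax.security.sasl.qop                   -> "auth-conf"
// javax.security.sasl.server.authentication -> "true"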
Example #13
  private boolean inputFileChanged() {
    String currentInputFile = HiveConf.getVar(jc, HiveConf.ConfVars.HADOOPMAPFILENAME);
    return this.lastInputFile == null || !this.lastInputFile.equals(currentInputFile);
  }
Example #14
 /**
   * Fatal errors are those that cannot be recovered from by retries; they are application
   * dependent. One example of a fatal error is a small table in a map-side join that is too
   * large to be feasibly handled by one mapper: the job should fail and the user should be
   * warned to use a regular join instead. Fatal errors are indicated by counters that are set
   * at execution time. If the counter is non-zero, a fatal error occurred, and the value of
   * the counter indicates the error type.
  *
  * @return true if fatal errors happened during job execution, false otherwise.
  */
 @Override
 public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
   Counters.Counter cntr =
       ctrs.findCounter(
           HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP), Operator.HIVECOUNTERFATAL);
   return cntr != null && cntr.getValue() > 0;
 }
Example #15
  /**
   * Localizes files, archives and jars the user has instructed us to provide on the cluster as
   * resources for execution.
   *
    * @param hdfsDirPathStr destination directory on HDFS for the localized resources
    * @param conf configuration listing the added files, jars and archives
   * @return List<LocalResource> local resources to add to execution
   * @throws IOException when hdfs operation fails
   * @throws LoginException when getDefaultDestDir fails with the same exception
   */
  public List<LocalResource> localizeTempFilesFromConf(String hdfsDirPathStr, Configuration conf)
      throws IOException, LoginException {
    List<LocalResource> tmpResources = new ArrayList<LocalResource>();

    String addedFiles = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE);
    if (StringUtils.isNotBlank(addedFiles)) {
      HiveConf.setVar(conf, ConfVars.HIVEADDEDFILES, addedFiles);
    }
    String addedJars = Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR);
    if (StringUtils.isNotBlank(addedJars)) {
      HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars);
    }
    String addedArchives = Utilities.getResourceFiles(conf, SessionState.ResourceType.ARCHIVE);
    if (StringUtils.isNotBlank(addedArchives)) {
      HiveConf.setVar(conf, ConfVars.HIVEADDEDARCHIVES, addedArchives);
    }

    String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);

    // need to localize the additional jars and files
    // we need the directory on hdfs to which we shall put all these files
    String allFiles = auxJars + "," + addedJars + "," + addedFiles + "," + addedArchives;
    addTempFiles(conf, tmpResources, hdfsDirPathStr, allFiles.split(","));
    return tmpResources;
  }
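A sketch of a typical call site; the dagUtils receiver and the staging-dir variable are assumptions about the enclosing Tez utility class:

// Hypothetical call site:
List<LocalResource> resources =
    dagUtils.localizeTempFilesFromConf(hdfsScratchDirURIString, jobConf);
// the returned resources are then attached to the Tez DAG's vertices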
Example #16
  @Override
  public synchronized void init(HiveConf hiveConf) {
    this.hiveConf = hiveConf;
    sessionManager = new SessionManager(hiveServer2);
    defaultFetchRows = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_RESULTSET_DEFAULT_FETCH_SIZE);
    addService(sessionManager);
    //  If the hadoop cluster is secure, do a kerberos login for the service from the keytab
    if (UserGroupInformation.isSecurityEnabled()) {
      try {
        HiveAuthFactory.loginFromKeytab(hiveConf);
        this.serviceUGI = Utils.getUGI();
      } catch (IOException | LoginException e) {
        throw new ServiceException("Unable to login to kerberos with given principal/keytab", e);
      }

      // Also try creating a UGI object for the SPNego principal
      String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL);
      String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB);
      if (principal.isEmpty() || keyTabFile.isEmpty()) {
        LOG.info(
            "SPNego httpUGI not created, spNegoPrincipal: "
                + principal
                + ", ketabFile: "
                + keyTabFile);
      } else {
        try {
          this.httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf);
          LOG.info("SPNego httpUGI successfully created.");
        } catch (IOException e) {
          LOG.warn("SPNego httpUGI creation failed: ", e);
        }
      }
    }
    // creates connection to HMS and thus *must* occur after kerberos login above
    try {
      applyAuthorizationConfigPolicy(hiveConf);
    } catch (Exception e) {
      throw new RuntimeException(
          "Error applying authorization policy on hive configuration: " + e.getMessage(), e);
    }
    setupBlockedUdfs();
    super.init(hiveConf);
  }
Example #17
  /**
   * set current session to existing session object if a thread is running multiple sessions - it
   * must call this method with the new session object when switching from one session to another.
   */
  public static SessionState start(SessionState startSs) {
    setCurrentSessionState(startSs);

    if (startSs.hiveHist == null) {
      if (startSs.getConf().getBoolVar(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED)) {
        startSs.hiveHist = new HiveHistoryImpl(startSs);
      } else {
        // Hive history is disabled, create a no-op proxy
        startSs.hiveHist = HiveHistoryProxyHandler.getNoOpHiveHistoryProxy();
      }
    }

    // Get the following out of the way when you start the session; these take a
    // while and should be done at startup.
    try {
      // Hive object instance should be created with a copy of the conf object. If the conf is
      // shared with SessionState, other parts of the code might update the config, but
      // Hive.get(HiveConf) would not recognize the case when it needs refreshing
      Hive.get(new HiveConf(startSs.conf)).getMSC();
      UserGroupInformation sessionUGI = Utils.getUGI();
      FileSystem.get(startSs.conf);

      // Create scratch dirs for this session
      startSs.createSessionDirs(sessionUGI.getShortUserName());

      // Set temp file containing results to be sent to HiveClient
      if (startSs.getTmpOutputFile() == null) {
        try {
          startSs.setTmpOutputFile(createTempFile(startSs.getConf()));
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }

    } catch (Exception e) {
      // Catch-all due to some exec time dependencies on session state
      // that would otherwise cause a ClassNotFoundException
      throw new RuntimeException(e);
    }

    if (HiveConf.getVar(startSs.getConf(), HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")
        && !startSs.isHiveServerQuery) {
      try {
        if (startSs.tezSessionState == null) {
          startSs.tezSessionState = new TezSessionState(startSs.getSessionId());
        }
        if (!startSs.tezSessionState.isOpen()) {
          startSs.tezSessionState.open(startSs.conf); // should use conf on session start-up
        }
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    } else {
      LOG.info("No Tez session required at this point. hive.execution.engine=mr.");
    }
    return startSs;
  }
Example #18
 public Warehouse(Configuration conf) throws MetaException {
   this.conf = conf;
   whRootString = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE);
   if (StringUtils.isBlank(whRootString)) {
      throw new MetaException(
          HiveConf.ConfVars.METASTOREWAREHOUSE.varname + " is not set or is blank in the config");
   }
   multiHdfsInfo = new MultiHdfsInfo(conf);
 }
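Usage sketch; getWhRoot() is assumed to be the companion accessor that returns the resolved warehouse root as a Path:

Warehouse wh = new Warehouse(conf); // throws MetaException if the warehouse dir is blank
Path whRoot = wh.getWhRoot();       // assumed accessor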
Example #19
  /**
    * @param conf configuration used to resolve the local scratch dir and session id
    * @return per-session temp file
    * @throws IOException
   */
  private static File createTempFile(HiveConf conf) throws IOException {
    String lScratchDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR);

    File tmpDir = new File(lScratchDir);
    String sessionID = conf.getVar(HiveConf.ConfVars.HIVESESSIONID);
    if (!tmpDir.exists()) {
      if (!tmpDir.mkdirs()) {
        // Do another exists() check to handle a possible race condition:
        // another thread might have created the dir, and if that is why
        // mkdirs() returned false, that is fine
        if (!tmpDir.exists()) {
          throw new RuntimeException("Unable to create log directory " + lScratchDir);
        }
      }
    }
    File tmpFile = File.createTempFile(sessionID, ".pipeout", tmpDir);
    tmpFile.deleteOnExit();
    return tmpFile;
  }
 @VisibleForTesting
 protected static AccessURI parseURI(String uri, boolean isLocal) throws SemanticException {
   try {
     HiveConf conf = SessionState.get().getConf();
     String warehouseDir = conf.getVar(ConfVars.METASTOREWAREHOUSE);
     return new AccessURI(PathUtils.parseURI(warehouseDir, uri, isLocal));
   } catch (Exception e) {
     throw new SemanticException("Error parsing URI " + uri + ": " + e.getMessage(), e);
   }
 }
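A hedged example of how this validator is exercised; the warehouse-relative path is illustrative:

// e.g. validating a non-local URI against the warehouse dir:
AccessURI accessURI = parseURI("/user/hive/warehouse/t1", /* isLocal */ false);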
Example #21
 public void init() throws Exception {
   testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
   String execEngine = conf.get("hive.execution.engine");
   conf.set("hive.execution.engine", "mr");
   SessionState.start(conf);
   conf.set("hive.execution.engine", execEngine);
   db = Hive.get(conf);
   pd = new ParseDriver();
   sem = new SemanticAnalyzer(conf);
 }
Example #22
    public static void convertPathsFromWindowsToHdfs(HiveConf conf) {
      // The following local paths are used as HDFS paths in unit tests. This works well
      // on Unix, where local and HDFS path notation is more or less the same. On Windows,
      // however, the drive-letter separator ':' and backslash '\' are invalid characters
      // in HDFS paths, so we need to convert these local paths to HDFS paths before using
      // them in unit tests.

      String orgWarehouseDir = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
      conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, getHdfsUriString(orgWarehouseDir));

      String orgTestTempDir = System.getProperty("test.tmp.dir");
      System.setProperty("test.tmp.dir", getHdfsUriString(orgTestTempDir));

      String orgTestWarehouseDir = System.getProperty("test.warehouse.dir");
      System.setProperty("test.warehouse.dir", getHdfsUriString(orgTestWarehouseDir));

      String orgScratchDir = conf.getVar(HiveConf.ConfVars.SCRATCHDIR);
      conf.setVar(HiveConf.ConfVars.SCRATCHDIR, getHdfsUriString(orgScratchDir));
    }
Example #23
    /**
     * Separate from constructor, because initialize() may need to be called in a separate thread.
     */
    synchronized void initialize() {
      assertState(QueryState.CREATED);
      this.hiveConf = new HiveConf(Driver.class);

      // Update configuration with user/group info.
      if (query.hadoop_user == null) {
        throw new RuntimeException("User must be specified.");
      }

      // Update scratch dir (to have one per user)
      File scratchDir = new File("/tmp/hive-beeswax-" + query.hadoop_user);
      hiveConf.set(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.getPath());
      // Create the temporary directory if necessary.
      // If mapred.job.tracker is set to local, this is used by MapRedTask.
      if (!scratchDir.isDirectory()) {
        if (scratchDir.exists() || !scratchDir.mkdirs()) {
          LOG.warn("Could not create tmp dir:" + scratchDir);
        }
      }

      driver = new Driver(hiveConf);
      ClassLoader loader = hiveConf.getClassLoader();
      String auxJars = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVEAUXJARS);
      if (StringUtils.isNotBlank(auxJars)) {
        try {
          loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
        } catch (Exception e) {
          LOG.error("Failed to add jars to class loader: " + auxJars, e);
        }
      }
      hiveConf.setClassLoader(loader);
      Thread.currentThread().setContextClassLoader(loader);
      SessionState.start(hiveConf); // this is thread-local
      this.sessionState = SessionState.get();

      // If this work has a LogContext, associate the children output to the logContext
      OutputStream lcOutStream = null;
      if (this.logContext != null) lcOutStream = this.logContext.getOutputStream();

      // A copy of everything goes to the LogContext.
      // In addition, stderr goes to errStream for error reporting.
      // Note that child output is explicitly tee'd to System.{out,err},
      // otherwise it'll be swallowed by outStream.
      this.sessionState.out = new PrintStream(new TeeOutputStream(lcOutStream, this.outStream));
      this.sessionState.err = new PrintStream(new TeeOutputStream(lcOutStream, this.errStream));
      this.sessionState.childOut =
          new PrintStream(new TeeOutputStream(System.out, sessionState.out));
      this.sessionState.childErr =
          new PrintStream(new TeeOutputStream(System.err, sessionState.err));

      this.state = QueryState.INITIALIZED;
    }
Example #24
 public SessionState(HiveConf conf, String userName) {
   this.conf = conf;
   this.userName = userName;
   isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT);
   ls = new LineageState();
   // Must be deterministic order map for consistent q-test output across Java versions
   overriddenConfigurations = new LinkedHashMap<String, String>();
   overriddenConfigurations.putAll(HiveConf.getConfSystemProperties());
    // if there isn't already a session id, go ahead and create it.
   if (StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HIVESESSIONID))) {
     conf.setVar(HiveConf.ConfVars.HIVESESSIONID, makeSessionId());
   }
   parentLoader = JavaUtils.getClassLoader();
 }
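This constructor pairs with SessionState.start(...) shown earlier; a minimal sketch:

HiveConf conf = new HiveConf();
SessionState ss = new SessionState(conf, "alice"); // user name is illustrative
SessionState.start(ss); // makes the session current for this thread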
Example #25
 /**
   * @param resetPerfLogger whether to discard any cached instance and create a new one
   * @return an instance of the class configured in hive.exec.perf.logger; if that class
   *     cannot be loaded, an instance of the base PerfLogger class is returned instead
  */
 public PerfLogger getPerfLogger(boolean resetPerfLogger) {
   if ((perfLogger == null) || resetPerfLogger) {
     try {
       perfLogger =
           (PerfLogger)
               ReflectionUtils.newInstance(
                   conf.getClassByName(conf.getVar(ConfVars.HIVE_PERF_LOGGER)), conf);
     } catch (ClassNotFoundException e) {
       LOG.error("Performance Logger Class not found:" + e.getMessage());
       perfLogger = new PerfLogger();
     }
   }
   return perfLogger;
 }
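Usage sketch; the PerfLogBegin/PerfLogEnd pairing and the caller/method strings are assumptions about the PerfLogger API:

PerfLogger perfLogger = getPerfLogger(false);
perfLogger.PerfLogBegin("Driver", "compile"); // assumed begin/end API
// ... timed work ...
perfLogger.PerfLogEnd("Driver", "compile");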
Example #26
 @Override
 public synchronized void init(HiveConf hiveConf) {
   if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
     initOperationLogCapture(
         hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL));
   } else {
     LOG.debug("Operation level logging is turned off");
   }
   if (hiveConf.isWebUiQueryInfoCacheEnabled()) {
     historicSqlOperations =
         new SQLOperationDisplayCache(
             hiveConf.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES));
   }
   super.init(hiveConf);
 }
Example #27
  /** Setup authentication and authorization plugins for this session. */
  private void setupAuth() {

    if (authenticator != null) {
      // auth has been initialized
      return;
    }

    try {
      authenticator =
          HiveUtils.getAuthenticator(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER);
      authenticator.setSessionState(this);

      String clsStr = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER);
      authorizer = HiveUtils.getAuthorizeProviderManager(conf, clsStr, authenticator, true);

      if (authorizer == null) {
        // if it was null, the new authorization plugin must be specified in
        // config
        HiveAuthorizerFactory authorizerFactory =
            HiveUtils.getAuthorizerFactory(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER);

        HiveAuthzSessionContext.Builder authzContextBuilder = new HiveAuthzSessionContext.Builder();
        authzContextBuilder.setClientType(
            isHiveServerQuery() ? CLIENT_TYPE.HIVESERVER2 : CLIENT_TYPE.HIVECLI);
        authzContextBuilder.setSessionString(getSessionId());

        authorizerV2 =
            authorizerFactory.createHiveAuthorizer(
                new HiveMetastoreClientFactoryImpl(),
                conf,
                authenticator,
                authzContextBuilder.build());

        authorizerV2.applyAuthorizationConfigPolicy(conf);
      }
      // create the create table grants with new config
      createTableGrants = CreateTableAutomaticGrant.create(conf);

    } catch (HiveException e) {
      throw new RuntimeException(e);
    }

    if (LOG.isDebugEnabled()) {
      Object authorizationClass = getActiveAuthorizer();
      LOG.debug("Session is using authorization class " + authorizationClass.getClass());
    }
  }
Example #28
  /*
   * Creates the configuration object necessary to run a specific vertex from
   * map work. This includes input formats, input processor, etc.
   */
  private JobConf initializeVertexConf(JobConf baseConf, MapWork mapWork) {
    JobConf conf = new JobConf(baseConf);

    if (mapWork.getNumMapTasks() != null) {
      conf.setInt(MRJobConfig.NUM_MAPS, mapWork.getNumMapTasks().intValue());
    }

    if (mapWork.getMaxSplitSize() != null) {
      HiveConf.setLongVar(
          conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, mapWork.getMaxSplitSize().longValue());
    }

    if (mapWork.getMinSplitSize() != null) {
      HiveConf.setLongVar(
          conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, mapWork.getMinSplitSize().longValue());
    }

    if (mapWork.getMinSplitSizePerNode() != null) {
      HiveConf.setLongVar(
          conf,
          HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE,
          mapWork.getMinSplitSizePerNode().longValue());
    }

    if (mapWork.getMinSplitSizePerRack() != null) {
      HiveConf.setLongVar(
          conf,
          HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK,
          mapWork.getMinSplitSizePerRack().longValue());
    }

    Utilities.setInputAttributes(conf, mapWork);

    String inpFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT);
     if (StringUtils.isBlank(inpFormat)) {
       inpFormat = ShimLoader.getHadoopShims().getInputFormatClassName();
     }

    if (mapWork.isUseBucketizedHiveInputFormat()) {
      inpFormat = BucketizedHiveInputFormat.class.getName();
    }

    conf.set("mapred.mapper.class", ExecMapper.class.getName());
    conf.set("mapred.input.format.class", inpFormat);

    return conf;
  }
Example #29
  /** Set hive input format, and input format file if necessary. */
  protected void setInputAttributes(Configuration conf) {
    MapWork mWork = work.getMapWork();
    if (mWork.getInputformat() != null) {
      HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat());
    }
    if (mWork.getIndexIntermediateFile() != null) {
      conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile());
      conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile());
    }

    // Intentionally overwrites anything the user may have put here
    conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted());

    if (HiveConf.getVar(conf, ConfVars.HIVE_CURRENT_DATABASE, null) == null) {
      HiveConf.setVar(conf, ConfVars.HIVE_CURRENT_DATABASE, getCurrentDB());
    }
  }
Example #30
   // Reloads the jars under the path specified in the hive.reloadable.aux.jars.path property
  public void reloadAuxJars() throws IOException {
    final Set<String> reloadedAuxJars = new HashSet<String>();

    final String renewableJarPath = conf.getVar(ConfVars.HIVERELOADABLEJARS);
    // do nothing if this property is not specified or empty
    if (renewableJarPath == null || renewableJarPath.isEmpty()) {
      return;
    }

    Set<String> jarPaths = Utilities.getJarFilesByPath(renewableJarPath);

    // load jars under the hive.reloadable.aux.jars.path
    if (!jarPaths.isEmpty()) {
      reloadedAuxJars.addAll(jarPaths);
    }

    // remove the previous renewable jars
    try {
      if (preReloadableAuxJars != null && !preReloadableAuxJars.isEmpty()) {
        Utilities.removeFromClassPath(preReloadableAuxJars.toArray(new String[0]));
      }
    } catch (Exception e) {
      String msg = "Fail to remove the reloaded jars loaded last time: " + e;
      throw new IOException(msg, e);
    }

    try {
      if (!reloadedAuxJars.isEmpty()) {
        URLClassLoader currentCLoader =
            (URLClassLoader) SessionState.get().getConf().getClassLoader();
        currentCLoader =
            (URLClassLoader)
                Utilities.addToClassPath(currentCLoader, reloadedAuxJars.toArray(new String[0]));
        conf.setClassLoader(currentCLoader);
        Thread.currentThread().setContextClassLoader(currentCLoader);
      }
      preReloadableAuxJars.clear();
      preReloadableAuxJars.addAll(reloadedAuxJars);
    } catch (Exception e) {
      String msg =
          "Failed to add jars from the path specified in the hive.reloadable.aux.jars.path property: "
              + e;
      throw new IOException(msg, e);
    }
  }
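A hedged end-to-end sketch: point hive.reloadable.aux.jars.path at a directory (the path below is hypothetical), drop new jars into it, and trigger a reload:

conf.setVar(HiveConf.ConfVars.HIVERELOADABLEJARS, "/opt/hive/auxjars"); // hypothetical dir
SessionState.get().reloadAuxJars(); // swaps the previously reloaded jars for the current set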