Code Example #1
    /**
     * Executes a query.
     *
     * @param cmd HiveQL query to execute
     */
    public void execute(String cmd) throws HiveServerException, TException {
      HiveServerHandler.LOG.info("Running the query: " + cmd);
      SessionState ss = SessionState.get();

      // Split off the first token; it selects the CommandProcessor
      // (the Driver for HiveQL, or a dedicated processor for commands such as SET).
      String cmd_trimmed = cmd.trim();
      String[] tokens = cmd_trimmed.split("\\s");
      String cmd_1 = cmd_trimmed.substring(tokens[0].length()).trim();

      int ret = 0;
      try {
        CommandProcessor proc = CommandProcessorFactory.get(tokens[0]);
        if (proc != null) {
          if (proc instanceof Driver) {
            // HiveQL statements go to the Driver, which receives the full command string.
            isHiveQuery = true;
            ret = driver.run(cmd);
          } else {
            // Other commands (e.g. SET, DFS) go to their processor with the leading keyword stripped.
            isHiveQuery = false;
            ret = proc.run(cmd_1);
          }
        }
      } catch (Exception e) {
        throw new HiveServerException("Error running query: " + e.toString());
      }

      if (ret != 0) {
        throw new HiveServerException("Query returned non-zero code: " + ret);
      }
    }
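
For reference, a client normally reaches execute() through the generated Thrift stub rather than by calling the handler directly. The sketch below is a minimal, hypothetical example of that call path; it assumes the HiveServer1-era org.apache.hadoop.hive.service.ThriftHive.Client stub (execute/fetchAll), a server already listening on localhost:10000 (see Code Example #4), and an example table named src.

    import java.util.List;

    import org.apache.hadoop.hive.service.ThriftHive;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class SimpleHiveClient {
      public static void main(String[] args) throws Exception {
        // Open a plain socket transport to the server started in Code Example #4.
        TTransport transport = new TSocket("localhost", 10000);
        transport.open();

        // The generated stub forwards this call to HiveServerHandler.execute() on the server.
        ThriftHive.Client client = new ThriftHive.Client(new TBinaryProtocol(transport));
        client.execute("SELECT * FROM src LIMIT 10");

        // Each result row comes back as a single delimited string.
        List<String> rows = client.fetchAll();
        for (String row : rows) {
          System.out.println(row);
        }
        transport.close();
      }
    }

The same stub also exposes fetchOne()/fetchN() for incremental reads when the result set is too large to pull in one call.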
Code Example #2
    /** Returns status information about the Map-Reduce cluster. */
    public HiveClusterStatus getClusterStatus() throws HiveServerException, TException {
      HiveClusterStatus hcs;
      try {
        ClusterStatus cs = driver.getClusterStatus();
        JobTracker.State jbs = cs.getJobTrackerState();

        // Convert the ClusterStatus to its Thrift equivalent: HiveClusterStatus
        int state;
        switch (jbs) {
          case INITIALIZING:
            state = JobTrackerState.INITIALIZING;
            break;
          case RUNNING:
            state = JobTrackerState.RUNNING;
            break;
          default:
            String errorMsg = "Unrecognized JobTracker state: " + jbs.toString();
            throw new Exception(errorMsg);
        }

        hcs =
            new HiveClusterStatus(
                cs.getTaskTrackers(),
                cs.getMapTasks(),
                cs.getReduceTasks(),
                cs.getMaxMapTasks(),
                cs.getMaxReduceTasks(),
                state);
      } catch (Exception e) {
        LOG.error(e.toString());
        e.printStackTrace();
        throw new HiveServerException("Unable to get cluster status: " + e.toString());
      }
      return hcs;
    }
Code Example #3
    /** Returns the Thrift schema of the most recent query result. */
    public Schema getThriftSchema() throws HiveServerException, TException {
      if (!isHiveQuery) {
        // Return an empty schema if the last command was not a Hive query
        return new Schema();
      }

      try {
        Schema schema = driver.getThriftSchema();
        if (schema == null) {
          schema = new Schema();
        }
        LOG.info("Returning schema: " + schema);
        return schema;
      } catch (Exception e) {
        LOG.error(e.toString());
        e.printStackTrace();
        throw new HiveServerException("Unable to get schema: " + e.toString());
      }
    }
Code Example #4
    /** Starts a standalone Thrift Hive server listening on the given port (default 10000). */
    public static void main(String[] args) {
      try {
        // The listening port can be overridden by the first command-line argument.
        int port = 10000;
        if (args.length >= 1) {
          port = Integer.parseInt(args[0]);
        }
        TServerTransport serverTransport = new TServerSocket(port);
        ThriftHiveProcessorFactory hfactory = new ThriftHiveProcessorFactory(null);
        TThreadPoolServer.Options options = new TThreadPoolServer.Options();
        // A thread-pool server: each client connection is served by its own worker thread.
        TServer server =
            new TThreadPoolServer(
                hfactory,
                serverTransport,
                new TTransportFactory(),
                new TTransportFactory(),
                new TBinaryProtocol.Factory(),
                new TBinaryProtocol.Factory(),
                options);
        HiveServerHandler.LOG.info("Starting hive server on port " + port);
        server.serve();
      } catch (Exception x) {
        x.printStackTrace();
      }
    }
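
The Options object above is passed in with its defaults. Assuming a libthrift release whose TThreadPoolServer.Options still exposes the public minWorkerThreads / maxWorkerThreads fields (an assumption about the Thrift version, not something shown in this code), the worker pool could be bounded before constructing the server, roughly like this:

      // Hypothetical tuning; assumes the old libthrift Options fields below exist in this Thrift version.
      TThreadPoolServer.Options options = new TThreadPoolServer.Options();
      options.minWorkerThreads = 10;   // worker threads kept ready for new connections (assumed field)
      options.maxWorkerThreads = 100;  // cap on concurrently served client connections (assumed field)

Bounding the maximum is mainly a protection against unbounded thread creation, since TThreadPoolServer dedicates one worker thread to each open client connection.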