Example #1
  @Override
  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
    validateDefaultFetchOrientation(orientation);
    assertState(OperationState.FINISHED);

    RowSet rowSet = RowSetFactory.create(resultSchema, getProtocolVersion());

    try {
      /* If the client is requesting fetch-from-start and it's not the first time
       * reading from this operation, reset the fetch position to the beginning.
       */
      if (orientation.equals(FetchOrientation.FETCH_FIRST) && fetchStarted) {
        driver.resetFetch();
      }
      fetchStarted = true;
      driver.setMaxRows((int) maxRows);
      if (driver.getResults(convey)) {
        return decode(convey, rowSet);
      }
      return rowSet;
    } catch (Exception e) {
      throw new HiveSQLException(e);
    } finally {
      convey.clear();
    }
  }
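A minimal client-side sketch of how this method is usually driven. The `operation` handle, the page size, and the consumption loop are assumptions for illustration, not part of the Hive source above; it also assumes RowSet exposes numRows().

  // Hypothetical usage sketch: drain the operation page by page, then rewind.
  RowSet page = operation.getNextRowSet(FetchOrientation.FETCH_NEXT, 1000);
  while (page.numRows() > 0) {
    // ... consume the page ...
    page = operation.getNextRowSet(FetchOrientation.FETCH_NEXT, 1000);
  }
  // fetchStarted is now true, so FETCH_FIRST makes the driver reset its fetch
  // position before the RowSet is refilled from the beginning.
  RowSet firstPage = operation.getNextRowSet(FetchOrientation.FETCH_FIRST, 1000);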
Example #2
    public synchronized QueryExplanation explain() throws BeeswaxException {
      assertState(QueryState.INITIALIZED);
      // Manipulating the query string in place like this makes errors harder to find.
      query.query = "EXPLAIN " + query.query;
      checkedCompile();

      int ret;
      if (0 != (ret = driver.execute())) {
        throwException(new RuntimeException("Failed to execute: EXPLAIN " + ret));
      }
      StringBuilder sb = new StringBuilder();
      ArrayList<String> v = new ArrayList<String>();
      try {
        while (driver.getResults(v)) {
          for (String s : v) {
            sb.append(s);
            sb.append("\n");
          }
          v.clear();
        }
      } catch (IOException e) {
        throwException(new RuntimeException(e));
      } finally {
        // Don't let folks re-use the state object.
        state = QueryState.FINISHED;
      }
      return new QueryExplanation(sb.toString());
    }
Example #3
  @Test
  public void testExternalTable() throws Exception {
    String tableName = "externaltable";

    String myLocation = warehouseDir + "/myfolder";
    FileSystem fs = FileSystem.get(new URI(myLocation), conf);
    fs.mkdirs(new Path(myLocation));
    setPermission(myLocation);

    CommandProcessorResponse ret =
        driver.run(
            "CREATE TABLE "
                + tableName
                + " (key string, value string) LOCATION '"
                + myLocation
                + "'");
    Assert.assertEquals(0, ret.getResponseCode());

    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    Assert.assertTrue(listStatus(myLocation).size() > 0);
    for (String child : listStatus(myLocation)) {
      verifyPermission(child);
    }
  }
Example #4
  @Test
  public void testInsertDualDynamicPartitions() throws Exception {
    String tableName = "dualdynamicpart";

    CommandProcessorResponse ret =
        driver.run(
            "CREATE TABLE "
                + tableName
                + " (key string, value string) partitioned by (part1 string, part2 string)");
    Assert.assertEquals(0, ret.getResponseCode());
    assertExistence(warehouseDir + "/" + tableName);

    // Insert into test, with permission set 0.
    setPermission(warehouseDir + "/" + tableName, 0);
    ret =
        driver.run(
            "insert into table "
                + tableName
                + " partition (part1,part2) select key,value,part1,part2 from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyDualPartitionTable(warehouseDir + "/" + tableName, 0);

    // Insert overwrite test, with permission set 1.
    setPermission(warehouseDir + "/" + tableName, 1);
    ret =
        driver.run(
            "insert overwrite table "
                + tableName
                + " partition (part1,part2) select key,value,part1,part2 from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyDualPartitionTable(warehouseDir + "/" + tableName, 1);
  }
Example #5
 private void runQuery(HiveConf sqlOperationConf) throws HiveSQLException {
   try {
     // In Hive server mode we cannot retry in the FetchTask case, because fetch
     // queries are issued after execute() has returned; for now the retry limit
     // is effectively disabled.
     driver.setTryCount(Integer.MAX_VALUE);
     response = driver.run();
     if (0 != response.getResponseCode()) {
       throw toSQLException("Error while processing statement", response);
     }
   } catch (HiveSQLException e) {
     // If the operation was cancelled by another thread,
     // Driver#run will return a non-zero response code.
     // We simply return if the operation state is CANCELED;
     // otherwise we rethrow the exception.
     if (getStatus().getState() == OperationState.CANCELED) {
       return;
     } else {
       setState(OperationState.ERROR);
       throw e;
     }
   } catch (Throwable e) {
     setState(OperationState.ERROR);
     throw new HiveSQLException("Error running query: " + e.toString(), e);
   }
   setState(OperationState.FINISHED);
 }
Example #6
    private void materializeResults(Results r, boolean startOver) throws IOException {
      if (driver.getPlan().getFetchTask() == null) {
        // This query is never going to return anything.
        r.has_more = false;
        r.setData(Collections.<String>emptyList());
        r.setColumns(Collections.<String>emptyList());
        return;
      }

      if (startOver) {
        // This is totally inappropriately reaching into internals.
        driver.getPlan().getFetchTask().initialize(hiveConf, driver.getPlan(), null);
        startRow = 0;
      }

      ArrayList<String> v = new ArrayList<String>();
      r.setData(v);
      r.has_more = driver.getResults(v);
      r.start_row = startRow;
      startRow += v.size();

      r.setColumns(new ArrayList<String>());
      try {
        for (FieldSchema f : driver.getSchema().getFieldSchemas()) {
          r.addToColumns(f.getName());
        }
      } catch (Exception e) {
        // An empty partitioned table may not have a table description.
        LOG.error("Error getting column names of results.", e);
      }
    }
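A hedged sketch of how a caller in the same class might page through results with this method; the do/while loop and row handling are assumptions for illustration, with Results being the Beeswax Thrift struct used above.

      // Hypothetical caller sketch: start over once, then keep fetching until
      // has_more goes false; startRow advances by the size of each batch.
      Results r = new Results();
      boolean first = true;
      do {
        materializeResults(r, first);   // the first call re-initializes the FetchTask
        first = false;
        for (String row : r.getData()) {
          // ... consume one serialized result row ...
        }
      } while (r.has_more);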
Example #7
  @Test
  public void testInsertNonPartTable() throws Exception {
    // Case 1: a non-partitioned table.
    String tableName = "nonpart";

    CommandProcessorResponse ret =
        driver.run("CREATE TABLE " + tableName + " (key string, value string)");
    Assert.assertEquals(0, ret.getResponseCode());

    String tableLoc = warehouseDir + "/" + tableName;
    assertExistence(warehouseDir + "/" + tableName);

    // case1A: insert into non-partitioned table.
    setPermission(warehouseDir + "/" + tableName);
    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyPermission(warehouseDir + "/" + tableName);
    Assert.assertTrue(listStatus(tableLoc).size() > 0);
    for (String child : listStatus(tableLoc)) {
      verifyPermission(child);
    }

    // case1B: insert overwrite non-partitioned table.
    setPermission(warehouseDir + "/" + tableName, 1);
    ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyPermission(warehouseDir + "/" + tableName, 1);
    Assert.assertTrue(listStatus(tableLoc).size() > 0);
    for (String child : listStatus(tableLoc)) {
      verifyPermission(child, 1);
    }
  }
Example #8
  @Test
  public void testCtas() throws Exception {
    String testDb = "ctasdb";
    String tableName = "createtable";
    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + testDb + ".db");
    setPermission(warehouseDir + "/" + testDb + ".db");
    verifyPermission(warehouseDir + "/" + testDb + ".db");

    ret = driver.run("USE " + testDb);
    Assert.assertEquals(0, ret.getResponseCode());

    ret = driver.run("create table " + tableName + " as select key,value from default.mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);

    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
      verifyPermission(child);
    }

    ret = driver.run("USE default");
    Assert.assertEquals(0, ret.getResponseCode());
  }
Example #9
  public static CommandProcessor get(String cmd, HiveConf conf) {
    String cmdl = cmd.toLowerCase();

    if ("set".equals(cmdl)) {
      return new SetProcessor();
    } else if ("dfs".equals(cmdl)) {
      SessionState ss = SessionState.get();
      return new DfsProcessor(ss.getConf());
    } else if ("add".equals(cmdl)) {
      return new AddResourceProcessor();
    } else if ("delete".equals(cmdl)) {
      return new DeleteResourceProcessor();
    } else if (!isBlank(cmd)) {
      if (conf == null) {
        return new Driver();
      }

      Driver drv = mapDrivers.get(conf);
      if (drv == null) {
        drv = new Driver();
        mapDrivers.put(conf, drv);
      }
      drv.init();
      return drv;
    }

    return null;
  }
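A hypothetical dispatch sketch (not from the Hive source) showing how callers such as Example #13 feed this factory the first token of a command line; the input string and `conf` are assumptions.

  // Hypothetical usage: route the command keyword to the right processor.
  String line = "set hive.exec.parallel=true";   // illustrative input
  String[] tokens = line.trim().split("\\s+");
  CommandProcessor proc = CommandProcessorFactory.get(tokens[0], conf);
  // "set" -> SetProcessor; "dfs" -> DfsProcessor; "add"/"delete" -> resource
  // processors; any other non-blank command -> a Driver cached per HiveConf.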
Example #10
  public static void clean(HiveConf conf) {
    Driver drv = mapDrivers.get(conf);
    if (drv != null) {
      drv.destroy();
    }

    mapDrivers.remove(conf);
  }
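Since get() caches one Driver per HiveConf in mapDrivers, clean() is its teardown counterpart. A hedged lifecycle sketch; the statement text and the try/finally placement are assumptions for illustration:

  // Hypothetical lifecycle sketch: acquire the cached Driver, run, then evict it.
  CommandProcessor proc = CommandProcessorFactory.get("select count(*) from src", conf);
  try {
    proc.run("select count(*) from src");
  } finally {
    CommandProcessorFactory.clean(conf);   // destroys and removes the cached Driver
  }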
Example #11
  @Override
  public void close() throws HiveSQLException {
    setState(OperationState.CLOSED);
    if (driver != null) {
      driver.close();
      driver.destroy();
    }

    SessionState session = SessionState.get();
    if (session.getTmpOutputFile() != null) {
      session.getTmpOutputFile().delete();
    }
  }
Example #12
  @Test
  public void testInsertStaticDualPartition() throws Exception {
    String tableName = "dualstaticpart";
    CommandProcessorResponse ret =
        driver.run(
            "CREATE TABLE "
                + tableName
                + " (key string, value string) partitioned by (part1 string, part2 string)");
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + tableName);
    setPermission(warehouseDir + "/" + tableName);

    // insert into test
    ret =
        driver.run(
            "insert into table "
                + tableName
                + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyPermission(warehouseDir + "/" + tableName);
    verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1");

    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
      verifyPermission(child);
    }

    // insert overwrite test
    setPermission(warehouseDir + "/" + tableName, 1);
    ret =
        driver.run(
            "insert overwrite table "
                + tableName
                + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyPermission(warehouseDir + "/" + tableName, 1);
    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);

    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
      verifyPermission(child, 1);
    }
  }
Example #13
    private void checkedCompile() throws BeeswaxException {
      // Run through configuration commands
      for (String cmd : query.configuration) {
        // This is pretty whacky; SET and ADD get treated differently
        // than CREATE TEMPORARY FUNCTION...  The trimming logic
        // here is borrowed from CliDriver; oy.
        String cmd_trimmed = cmd.trim();
        String[] tokens = cmd_trimmed.split("\\s+");
        String cmd1 = cmd_trimmed.substring(tokens[0].length()).trim();
        CommandProcessor p = CommandProcessorFactory.get(tokens[0]);
        int res;
        if (p instanceof Driver) {
          res = p.run(cmd).getResponseCode();
        } else {
          res = p.run(cmd1).getResponseCode();
        }
        if (res != 0) {
          throwException(new RuntimeException(getErrorStreamAsString()));
        }
      }

      // Note that driver.compile() talks to HDFS, so it's
      // not as quick as one might think.
      int compileRes = driver.compile(query.query);
      if (compileRes != 0) {
        throwException(
            new BeeswaxException(getErrorStreamAsString(), this.logContext.getName(), this.handle));
      }
    }
Example #14
    /**
     * Executes a query.
     *
     * @param cmd HiveQL query to execute
     */
    public void execute(String cmd) throws HiveServerException, TException {
      HiveServerHandler.LOG.info("Running the query: " + cmd);
      SessionState ss = SessionState.get();

      String cmd_trimmed = cmd.trim();
      String[] tokens = cmd_trimmed.split("\\s");
      String cmd_1 = cmd_trimmed.substring(tokens[0].length()).trim();

      int ret = 0;
      try {
        CommandProcessor proc = CommandProcessorFactory.get(tokens[0]);
        if (proc != null) {
          if (proc instanceof Driver) {
            isHiveQuery = true;
            ret = driver.run(cmd);
          } else {
            isHiveQuery = false;
            ret = proc.run(cmd_1);
          }
        }
      } catch (Exception e) {
        throw new HiveServerException("Error running query: " + e.toString());
      }

      if (ret != 0) {
        throw new HiveServerException("Query returned non-zero code: " + ret);
      }
    }
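The trim/split/substring sequence above peels the command keyword off its arguments; a small worked sketch with an illustrative input value:

      // Worked example of the tokenization above (the input value is illustrative):
      String cmd = "add jar /tmp/my-udfs.jar";
      String cmd_trimmed = cmd.trim();
      String[] tokens = cmd_trimmed.split("\\s");
      String cmd_1 = cmd_trimmed.substring(tokens[0].length()).trim();
      // tokens[0] == "add"                  -> selects AddResourceProcessor
      // cmd_1     == "jar /tmp/my-udfs.jar" -> what the processor receives
      // A full HiveQL statement instead reaches the Driver as the original cmd.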
Example #15
    /** Return the status information about the Map-Reduce cluster */
    public HiveClusterStatus getClusterStatus() throws HiveServerException, TException {
      HiveClusterStatus hcs;
      try {
        ClusterStatus cs = driver.getClusterStatus();
        JobTracker.State jbs = cs.getJobTrackerState();

        // Convert the ClusterStatus to its Thrift equivalent: HiveClusterStatus
        int state;
        switch (jbs) {
          case INITIALIZING:
            state = JobTrackerState.INITIALIZING;
            break;
          case RUNNING:
            state = JobTrackerState.RUNNING;
            break;
          default:
            String errorMsg = "Unrecognized JobTracker state: " + jbs.toString();
            throw new Exception(errorMsg);
        }

        hcs =
            new HiveClusterStatus(
                cs.getTaskTrackers(),
                cs.getMapTasks(),
                cs.getReduceTasks(),
                cs.getMaxMapTasks(),
                cs.getMaxReduceTasks(),
                state);
      } catch (Exception e) {
        LOG.error("Unable to get cluster status.", e);
        throw new HiveServerException("Unable to get cluster status: " + e.toString());
      }
      return hcs;
    }
Example #16
    /**
     * Fetches numRows rows.
     *
     * @param numRows Number of rows to fetch.
     * @return A list of rows. The size of the list is numRows if there are at least numRows rows
     *     available to return. The size is smaller than numRows if there aren't enough rows. The
     *     list will be empty if there are no more rows to fetch or numRows == 0.
     * @throws HiveServerException Invalid value for numRows (numRows < 0)
     */
    public List<String> fetchN(int numRows) throws HiveServerException, TException {
      if (numRows < 0) {
        throw new HiveServerException("Invalid argument for number of rows: " + numRows);
      }
      if (!isHiveQuery) {
        // Return no results if the last command was not a Hive query.
        return new Vector<String>();
      }

      Vector<String> result = new Vector<String>();
      driver.setMaxRows(numRows);
      try {
        driver.getResults(result);
      } catch (IOException e) {
        throw new HiveServerException(e.getMessage());
      }
      return result;
    }
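A hedged client-side paging loop over fetchN; the `client` handle and the batch size of 100 are assumptions. Per the contract above, an empty list terminates the loop:

    // Hypothetical paging loop: fetch 100 rows at a time until exhausted.
    List<String> batch = client.fetchN(100);
    while (!batch.isEmpty()) {
      for (String row : batch) {
        System.out.println(row);    // each entry is one serialized result row
      }
      batch = client.fetchN(100);   // an empty list means no rows remain
    }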
Example #17
  @Test
  public void testMethodCounts() throws Exception {
    driver.run("show databases");
    String json = metrics.dumpJson();

    // One call made during init, one made here.
    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.TIMER, "api_get_all_databases", 2);
  }
Example #18
 @Override
 public TableSchema getResultSetSchema() throws HiveSQLException {
   assertState(OperationState.FINISHED);
   if (resultSchema == null) {
     resultSchema = new TableSchema(driver.getSchema());
   }
   return resultSchema;
 }
Example #19
  @Override
  public void run() throws HiveSQLException {
    setState(OperationState.RUNNING);

    try {
      driver = new Driver(getParentSession().getHiveConf());
      // In Hive server mode we cannot retry in the FetchTask case, because fetch
      // queries are issued after execute() has returned; for now the retry limit
      // is effectively disabled.
      driver.setTryCount(Integer.MAX_VALUE);

      String subStatement =
          new VariableSubstitution().substitute(getParentSession().getHiveConf(), statement);

      response = driver.run(subStatement);
      if (0 != response.getResponseCode()) {
        throw new HiveSQLException(
            "Error while processing statement: " + response.getErrorMessage(),
            response.getSQLState(),
            response.getResponseCode());
      }

      mResultSchema = driver.getSchema();
      if (mResultSchema != null && mResultSchema.isSetFieldSchemas()) {
        resultSchema = new TableSchema(mResultSchema);
        setHasResultSet(true);
      } else {
        setHasResultSet(false);
      }
    } catch (HiveSQLException e) {
      setState(OperationState.ERROR);
      throw e;
    } catch (Exception e) {
      setState(OperationState.ERROR);
      throw new HiveSQLException("Error running query: " + e.toString());
    }
    setState(OperationState.FINISHED);
  }
Example #20
  private void cleanup(OperationState state) throws HiveSQLException {
    setState(state);
    if (shouldRunAsync()) {
      Future<?> backgroundHandle = getBackgroundHandle();
      if (backgroundHandle != null) {
        backgroundHandle.cancel(true);
      }
    }
    if (driver != null) {
      driver.close();
      driver.destroy();
    }
    driver = null;

    SessionState ss = SessionState.get();
    ss.deleteTmpOutputFile();
    ss.deleteTmpErrOutputFile();
  }
Example #21
    /**
     * Fetches the next row in a query result set.
     *
     * @return the next row in a query result set, or an empty string if there are no
     *     more rows to fetch (Thrift cannot transport a null here).
     */
    public String fetchOne() throws HiveServerException, TException {
      if (!isHiveQuery) {
        // Return no results if the last command was not a Hive query.
        return "";
      }

      Vector<String> result = new Vector<String>();
      driver.setMaxRows(1);
      try {
        if (driver.getResults(result)) {
          return result.get(0);
        }
        // TODO: Cannot return null here because thrift cannot handle nulls
        // TODO: Returning empty string for now. Need to figure out how to
        // TODO: return null in some other way
        return "";
      } catch (IOException e) {
        throw new HiveServerException(e.getMessage());
      }
    }
Example #22
  @Test
  public void testCreateDb() throws Exception {
    // see if db inherits permission from warehouse directory.
    String testDb = "mydb";
    String tableName = "createtable";

    setPermission(warehouseDir.toString());
    verifyPermission(warehouseDir.toString());

    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + testDb + ".db");
    verifyPermission(warehouseDir + "/" + testDb + ".db");

    ret = driver.run("USE " + testDb);
    Assert.assertEquals(0, ret.getResponseCode());

    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);

    ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);

    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
      verifyPermission(child);
    }

    ret = driver.run("USE default");
    Assert.assertEquals(0, ret.getResponseCode());

    // cleanup after the test.
    fs.delete(warehouseDir, true);
    fs.mkdirs(warehouseDir);
    Assert.assertEquals(listStatus(warehouseDir.toString()).size(), 0);
    setupDataTable();
  }
Example #23
  @Test
  public void testAlterPartition() throws Exception {
    String tableName = "alterpart";
    CommandProcessorResponse ret =
        driver.run(
            "CREATE TABLE "
                + tableName
                + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + tableName);
    setPermission(warehouseDir + "/" + tableName);

    ret =
        driver.run(
            "insert into table "
                + tableName
                + " partition(part1='1',part2='1',part3='1') select key,value from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + tableName);
    setPermission(warehouseDir + "/" + tableName, 1);

    // alter partition
    ret =
        driver.run(
            "alter table "
                + tableName
                + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1);
    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1);
    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1);

    Assert.assertTrue(
        listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0);
    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
      verifyPermission(child, 1);
    }
  }
Example #24
  /**
   * Tests that the table's permission does not change after truncation.
   *
   * @throws Exception
   */
  @Test
  public void testTruncateTable() throws Exception {
    String tableName = "truncatetable";
    String partition = warehouseDir + "/" + tableName + "/part1=1";

    CommandProcessorResponse ret =
        driver.run(
            "CREATE TABLE " + tableName + " (key STRING, value STRING) PARTITIONED BY (part1 INT)");
    Assert.assertEquals(0, ret.getResponseCode());

    setPermission(warehouseDir + "/" + tableName);

    ret =
        driver.run(
            "insert into table "
                + tableName
                + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + tableName);

    verifyPermission(warehouseDir + "/" + tableName);
    verifyPermission(partition);

    ret = driver.run("TRUNCATE TABLE " + tableName);
    Assert.assertEquals(0, ret.getResponseCode());

    ret =
        driver.run(
            "insert into table "
                + tableName
                + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
    Assert.assertEquals(0, ret.getResponseCode());

    verifyPermission(warehouseDir + "/" + tableName);

    assertExistence(partition);
    verifyPermission(partition);
  }
Example #25
  /**
   * Compile the query and extract metadata.
   *
   * @param sqlOperationConf
   * @throws HiveSQLException
   */
  public void prepare(HiveConf sqlOperationConf) throws HiveSQLException {
    setState(OperationState.RUNNING);

    try {
      driver = new Driver(sqlOperationConf, getParentSession().getUserName());

      // set the operation handle information in Driver, so that thrift API users
      // can use the operation handle they receive, to lookup query information in
      // Yarn ATS
      String guid64 =
          Base64.encodeBase64URLSafeString(
                  getHandle().getHandleIdentifier().toTHandleIdentifier().getGuid())
              .trim();
      driver.setOperationId(guid64);

      // In Hive server mode we cannot retry in the FetchTask case, because fetch
      // queries are issued after execute() has returned; for now the retry limit
      // is effectively disabled.
      driver.setTryCount(Integer.MAX_VALUE);

      response = driver.compileAndRespond(statement);
      if (0 != response.getResponseCode()) {
        throw toSQLException("Error while compiling statement", response);
      }

      mResultSchema = driver.getSchema();

      // hasResultSet should be true only if the query has a FetchTask
      // "explain" is an exception for now
      if (driver.getPlan().getFetchTask() != null) {
        // Schema has to be set
        if (mResultSchema == null || !mResultSchema.isSetFieldSchemas()) {
          throw new HiveSQLException(
              "Error compiling query: Schema and FieldSchema "
                  + "should be set when query plan has a FetchTask");
        }
        resultSchema = new TableSchema(mResultSchema);
        setHasResultSet(true);
      } else {
        setHasResultSet(false);
      }
      // Set hasResultSet true if the plan has ExplainTask
      // TODO explain should use a FetchTask for reading
      for (Task<? extends Serializable> task : driver.getPlan().getRootTasks()) {
        if (task.getClass() == ExplainTask.class) {
          resultSchema = new TableSchema(mResultSchema);
          setHasResultSet(true);
          break;
        }
      }
    } catch (HiveSQLException e) {
      setState(OperationState.ERROR);
      throw e;
    } catch (Throwable e) {
      setState(OperationState.ERROR);
      throw new HiveSQLException("Error running query: " + e.toString(), e);
    }
  }
Example #26
  private static void setupDataTable() throws Exception {
    CommandProcessorResponse ret = driver.run("DROP TABLE IF EXISTS mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    ret =
        driver.run(
            "CREATE TABLE mysrc (key STRING, value STRING) PARTITIONED BY (part1 string, part2 string) STORED AS TEXTFILE");
    Assert.assertEquals(0, ret.getResponseCode());

    ret =
        driver.run(
            "LOAD DATA LOCAL INPATH '"
                + dataFilePath
                + "' INTO TABLE mysrc PARTITION (part1='1',part2='1')");
    Assert.assertEquals(0, ret.getResponseCode());

    ret =
        driver.run(
            "LOAD DATA LOCAL INPATH '"
                + dataFilePath
                + "' INTO TABLE mysrc PARTITION (part1='2',part2='2')");
    Assert.assertEquals(0, ret.getResponseCode());
  }
Example #27
  @Override
  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
    assertState(OperationState.FINISHED);
    ArrayList<String> rows = new ArrayList<String>();
    driver.setMaxRows((int) maxRows);

    try {
      driver.getResults(rows);

      getSerDe();
      StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector();
      List<? extends StructField> fieldRefs = soi.getAllStructFieldRefs();
      RowSet rowSet = new RowSet();

      Object[] deserializedFields = new Object[fieldRefs.size()];
      Object rowObj;
      ObjectInspector fieldOI;

      for (String rowString : rows) {
        rowObj = serde.deserialize(new BytesWritable(rowString.getBytes()));
        for (int i = 0; i < fieldRefs.size(); i++) {
          StructField fieldRef = fieldRefs.get(i);
          fieldOI = fieldRef.getFieldObjectInspector();
          deserializedFields[i] =
              convertLazyToJava(soi.getStructFieldData(rowObj, fieldRef), fieldOI);
        }
        rowSet.addRow(resultSchema, deserializedFields);
      }
      return rowSet;
    } catch (Exception e) {
      throw new HiveSQLException(e);
    }
  }
Example #28
  @Test
  public void testInsertSingleDynamicPartition() throws Exception {
    String tableName = "singledynamicpart";

    CommandProcessorResponse ret =
        driver.run(
            "CREATE TABLE "
                + tableName
                + " (key string, value string) partitioned by (part1 string)");
    Assert.assertEquals(0, ret.getResponseCode());
    String tableLoc = warehouseDir + "/" + tableName;
    assertExistence(tableLoc);

    // Insert into test, with permission set 0.
    setPermission(tableLoc, 0);
    ret =
        driver.run(
            "insert into table "
                + tableName
                + " partition (part1) select key,value,part1 from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());
    verifySinglePartition(tableLoc, 0);

    // Insert overwrite test, with permission set 1.
    setPermission(tableLoc, 1);
    ret =
        driver.run(
            "insert overwrite table "
                + tableName
                + " partition (part1) select key,value,part1 from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());
    verifySinglePartition(tableLoc, 1);

    // Delete and re-insert using insert overwrite. There are different code paths
    // for insert vs. insert overwrite on new tables.
    ret = driver.run("DROP TABLE " + tableName);
    Assert.assertEquals(0, ret.getResponseCode());
    ret =
        driver.run(
            "CREATE TABLE "
                + tableName
                + " (key string, value string) partitioned by (part1 string)");
    Assert.assertEquals(0, ret.getResponseCode());

    assertExistence(warehouseDir + "/" + tableName);
    setPermission(warehouseDir + "/" + tableName);

    ret =
        driver.run(
            "insert overwrite table "
                + tableName
                + " partition (part1) select key,value,part1 from mysrc");
    Assert.assertEquals(0, ret.getResponseCode());

    verifySinglePartition(tableLoc, 0);
  }
Example #29
    /** Get the FetchWork. Only SELECTs have one. */
    private synchronized FetchWork getFetchWork() {
      QueryPlan plan = driver.getPlan();
      FetchTask fetchTask = null;
      if (plan != null) {
        fetchTask = plan.getFetchTask();
        if (fetchTask != null) {
          fetchTask.initialize(hiveConf, plan, null);
        }
      }

      if (fetchTask == null) {
        return null;
      }

      FetchWork work = fetchTask.getWork();
      return work;
    }
Example #30
    /**
     * Fetches all the rows in a result set.
     *
     * @return All the rows in the result set of a query executed using the execute method.
     *     <p>TODO: Currently the server buffers all the rows before returning them to the client.
     *     Decide whether the buffering should be done in the client.
     */
    public List<String> fetchAll() throws HiveServerException, TException {
      if (!isHiveQuery) {
        // Return no results if the last command was not a Hive query.
        return new Vector<String>();
      }

      Vector<String> rows = new Vector<String>();
      Vector<String> result = new Vector<String>();
      try {
        while (driver.getResults(result)) {
          rows.addAll(result);
          result.clear();
        }
      } catch (IOException e) {
        throw new HiveServerException(e.getMessage());
      }
      return rows;
    }