Example #1
    /**
     * Wraps the original location in a HAR (Hadoop Archive) URI, translating any
     * URISyntaxException into a HiveException.
     */
    public URI getHarUri(URI original, HadoopShims shim) throws HiveException {
      try {
        return shim.getHarUri(original, base, originalBase);
      } catch (URISyntaxException e) {
        throw new HiveException("Couldn't create har URI for location", e);
      }
    }
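
To make the shim call concrete, here is a minimal, self-contained sketch of the kind of
rewriting shim.getHarUri performs: it relativizes the original location against
originalBase and re-roots it under the archive base as a har: URI. The helper below is a
hypothetical stand-in for illustration, not the actual shim implementation.

import java.net.URI;
import java.net.URISyntaxException;

public class HarUriSketch {
  // Hypothetical stand-in for shim.getHarUri(original, base, originalBase).
  static URI toHarUri(URI original, URI base, URI originalBase) throws URISyntaxException {
    // Strip the originalBase prefix, keeping only the partition-relative path.
    URI relative = originalBase.relativize(original);
    // A har: URI embeds the underlying filesystem in its authority,
    // e.g. har://hdfs-nn:8020/warehouse/tbl/data.har/part=1
    return new URI("har", base.getAuthority(),
        base.getPath() + "/" + relative.getPath(), null, null);
  }

  public static void main(String[] args) throws URISyntaxException {
    URI originalBase = URI.create("hdfs://nn:8020/warehouse/tbl");
    URI base = URI.create("har://hdfs-nn:8020/warehouse/tbl/data.har");
    URI original = URI.create("hdfs://nn:8020/warehouse/tbl/part=1");
    // Prints: har://hdfs-nn:8020/warehouse/tbl/data.har/part=1
    System.out.println(toHarUri(original, base, originalBase));
  }
}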
Example #2
  public HiveTestUtil(
      String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer)
      throws Exception {
    this.outDir = outDir;
    this.logDir = logDir;
    if (confDir != null && !confDir.isEmpty()) {
      HiveConf.setHiveSiteLocation(
          new URL("file://" + new File(confDir).toURI().getPath() + "/hive-site.xml"));
      LOG.info("Setting hive-site: " + HiveConf.getHiveSiteLocation());
    }
    conf = new HiveConf();
    String tmpBaseDir = System.getProperty("test.tmp.dir");
    if (tmpBaseDir == null || tmpBaseDir.isEmpty()) {
      tmpBaseDir = System.getProperty("java.io.tmpdir");
    }
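    // Point the metastore at an embedded Derby database under the tmp dir; setting the
    // system property as well lets any HiveConf created later pick up the same URL.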
    String metaStoreURL =
        "jdbc:derby:" + tmpBaseDir + File.separator + "metastore_dbtest;" + "create=true";
    conf.set(ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
    System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);

    // Redirect Derby's error log into the test tmp directory
    File derbyLogFile = new File(tmpBaseDir + "/derby.log");
    derbyLogFile.createNewFile();
    System.setProperty("derby.stream.error.file", derbyLogFile.getPath());

    this.hadoopVer = getHadoopMainVersion(hadoopVer);
    qMap = new TreeMap<>();
    qSkipSet = new HashSet<>();
    qSortSet = new HashSet<>();
    qSortQuerySet = new HashSet<>();
    qHashQuerySet = new HashSet<>();
    qSortNHashQuerySet = new HashSet<>();
    qJavaVersionSpecificOutput = new HashSet<>();
    this.clusterType = clusterType;

    // Use a random UUID so each test run gets its own dfs cluster data directory
    System.setProperty("test.build.data", "target/test-data/hive-" + UUID.randomUUID().toString());

    HadoopShims shims = ShimLoader.getHadoopShims();
    int numberOfDataNodes = 4;

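    // Bring up a mini DFS cluster and, for the mr/tez cluster types, a matching
    // mini MR or Tez cluster bound to its filesystem URI.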
    if (clusterType != MiniClusterType.none) {
      dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
      FileSystem fs = dfs.getFileSystem();
      String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
      if (clusterType == MiniClusterType.tez) {
        mr = shims.getMiniTezCluster(conf, 4, uriString, 1);
      } else {
        mr = shims.getMiniMrCluster(conf, 4, uriString, 1);
      }
    }

    initConf();

    // Default to ./data/files when test.data.files is not set
    String dataDir = conf.get("test.data.files");
    if (dataDir == null) {
      dataDir = new File(".").getAbsolutePath() + "/data/files";
    }

    testFiles = dataDir;

    // Default to ./data/scripts when test.data.scripts is not set
    String scriptsDir = conf.get("test.data.scripts");
    if (scriptsDir == null) {
      scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
    }
    if (!initScript.isEmpty()) {
      this.initScript = scriptsDir + "/" + initScript;
    }
    if (!cleanupScript.isEmpty()) {
      this.cleanupScript = scriptsDir + "/" + cleanupScript;
    }

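    // Allow regenerating expected output files via -Dtest.output.overwrite=true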
    overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));

    setup = new HiveTestSetup();
    setup.preTest(conf);
    init();
  }
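
A typical instantiation might look like the sketch below. All paths and the Hadoop
version string are hypothetical placeholders, and MiniClusterType is assumed to be the
enum referenced by the constructor, with none meaning no mini cluster is started.

public class HiveTestUtilDemo {
  public static void main(String[] args) throws Exception {
    HiveTestUtil util = new HiveTestUtil(
        "target/qfile-results",   // outDir: where query results are written
        "target/qfile-logs",      // logDir: per-test log output
        MiniClusterType.none,     // run without a mini DFS/MR/Tez cluster
        null,                     // confDir: null/empty keeps the default hive-site.xml
        "2.7.3");                 // hadoopVer: hypothetical Hadoop version string
  }
}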