Example #1
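  /**
   * Boots a Spark-on-YARN mini cluster for tests: points fs.defaultFS at the
   * supplied NameNode, switches the ResourceManager to the FairScheduler,
   * disables resource monitoring, configures impersonation, then starts the
   * cluster and keeps its effective configuration.
   */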
  public MiniSparkShim(Configuration conf, int numberOfTaskTrackers, String nameNode, int numDir)
      throws IOException {
    mr = new MiniSparkOnYARNCluster("sparkOnYarn");
    conf.set("fs.defaultFS", nameNode);
    conf.set(
        "yarn.resourcemanager.scheduler.class",
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler");
    // disable resource monitoring, although it should be off by default
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
    configureImpersonation(conf);
    mr.init(conf);
    mr.start();
    this.conf = mr.getConfig();
  }
Example #2
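    /**
     * Copies every property of the mini cluster's configuration into the supplied
     * conf, then creates the HDFS directories recorded as hive.jar.directory and
     * hive.user.install.directory.
     */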
    @Override
    public void setupConfiguration(Configuration conf) {
      Configuration config = mr.getConfig();
      for (Map.Entry<String, String> pair : config) {
        conf.set(pair.getKey(), pair.getValue());
      }

      Path jarPath = new Path("hdfs:///user/hive");
      Path hdfsPath = new Path("hdfs:///user/");
      try {
        FileSystem fs = cluster.getFileSystem();
        jarPath = fs.makeQualified(jarPath);
        conf.set("hive.jar.directory", jarPath.toString());
        fs.mkdirs(jarPath);
        hdfsPath = fs.makeQualified(hdfsPath);
        conf.set("hive.user.install.directory", hdfsPath.toString());
        fs.mkdirs(hdfsPath);
      } catch (Exception e) {
        // Best effort: a failure here is only printed, not rethrown.
        e.printStackTrace();
      }
    }
Example #3
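  /** Stops the Spark-on-YARN mini cluster. */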
  @Override
  public void shutdown() throws IOException {
    mr.stop();
  }