Code example #1
File: HW2_Q4.java Project: pscuderi/academic-samples
  public static void main(String[] args) throws Exception {
    String inputDirectory = "/home/cs246/Desktop/HW2/input";
    String outputDirectory = "/home/cs246/Desktop/HW2/output";
    String centroidDirectory = "/home/cs246/Desktop/HW2/config";

    int iterations = 20;

    for (int i = 1; i <= iterations; i++) {
      Configuration conf = new Configuration();

      String cFile = centroidDirectory + "/c" + i + ".txt";
      String nextCFile = centroidDirectory + "/c" + (i + 1) + ".txt";
      conf.set("CFILE", cFile);
      conf.set("NEXTCFILE", nextCFile);

      Job job = new Job(conf, "HW2_Q4." + i);
      job.setJarByClass(HW2_Q4.class);
      job.setOutputKeyClass(IntWritable.class);
      job.setOutputValueClass(Text.class);
      job.setMapperClass(Map1.class);
      job.setReducerClass(Reduce1.class);
      job.setInputFormatClass(TextInputFormat.class);
      job.setOutputFormatClass(TextOutputFormat.class);

      FileInputFormat.addInputPath(job, new Path(inputDirectory));
      FileOutputFormat.setOutputPath(job, new Path(outputDirectory + "/output" + i));

      job.waitForCompletion(true);
    }
  }
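
The driver above hands the current and next centroid file paths to its tasks through the job Configuration ("CFILE" and "NEXTCFILE"). A minimal sketch, hypothetical and not part of the original project, of how the Map1 class referenced above might read them back:

  public static class Map1 extends Mapper<LongWritable, Text, IntWritable, Text> {
    private String cFile;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      // Read the centroid file path the driver stored under "CFILE".
      cFile = context.getConfiguration().get("CFILE");
      // Hypothetical: load the centroids from cFile (e.g. via FileSystem) before map() runs.
    }
  }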
Code example #2
  /** Job configuration. */
  public static Job configureJob(Configuration conf, String[] args) throws IOException {
    String tableName = args[0];
    String columnFamily = args[1];
    String outputPath = args[2];
    String rowKeyType = args[3];
    conf.set("row.key.type", rowKeyType);
    conf.set("table.name", tableName);
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes(columnFamily));
    scan.setBatch(ConstantsTruthy.TRUTHY_TABLE_SCAN_BATCH);

    conf.set("mapred.map.tasks.speculative.execution", "false");
    conf.set("mapred.reduce.tasks.speculative.execution", "false");
    Job job =
        Job.getInstance(
            conf, "Count the column count and indexRecordSize for each row in " + tableName);
    job.setJarByClass(TruthyIndexFeatureCounter.class);
    TableMapReduceUtil.initTableMapperJob(
        tableName, scan, TfcMapper.class, Text.class, Text.class, job, true);
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    FileOutputFormat.setOutputPath(job, new Path(outputPath));
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
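
A hypothetical call site for configureJob above, assuming the four-argument layout it reads (table name, column family, output path, row key type); the argument values are placeholders:

  public static void main(String[] args) throws Exception {
    // HBaseConfiguration.create() builds an HBase-aware Configuration.
    Configuration conf = HBaseConfiguration.create();
    Job job = configureJob(conf, new String[] {"truthyTable", "d", "/tmp/feature-counts", "string"});
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }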
Code example #3
File: TestHiveUtility.java Project: hanzheng/hiho
  @Test
  public void testGetLoadQuery() throws HIHOException {
    ColumnInfo intColumn = new ColumnInfo(0, Types.INTEGER, "intColumn");
    ColumnInfo stringColumn = new ColumnInfo(1, Types.VARCHAR, "stringColumn");
    ArrayList<ColumnInfo> columns = new ArrayList<ColumnInfo>();
    columns.add(intColumn);
    columns.add(stringColumn);
    // HiveUtility.tableName = "employee";
    GenericDBWritable writable = new GenericDBWritable(columns, null);
    Configuration config = new Configuration();
    // String partitionBy = "country:string";
    String partitionBy1 = "country:string:us";
    config.set(HIHOConf.INPUT_OUTPUT_PATH, "/user/nube/tableForHiho");
    config.set(HIHOConf.HIVE_TABLE_NAME, "employee");
    config.set(HIHOConf.HIVE_PARTITION_BY, "country:string:us");
    assertEquals(
        "LOAD DATA INPATH '/user/nube/tableForHiho' OVERWRITE INTO TABLE `employee` PARTITION ( country='us')",
        HiveUtility.getLoadQuery(
            config, config.get(HIHOConf.INPUT_OUTPUT_PATH), writable, partitionBy1));

    Configuration config1 = new Configuration();
    String partitionBy = "country:string";
    // String partitionBy1 = "country:string:us";
    config1.set(HIHOConf.INPUT_OUTPUT_PATH, "/user/nube/tableForHiho");
    config1.set(HIHOConf.HIVE_TABLE_NAME, "employee");
    // config1.set(HIHOConf.HIVE_PARTITION_BY, "country:string:us");
    assertEquals(
        "LOAD DATA INPATH '/user/nube/tableForHiho' OVERWRITE INTO TABLE `employee`",
        HiveUtility.getLoadQuery(config1, config1.get(HIHOConf.INPUT_OUTPUT_PATH), writable));
  }
Code example #4
File: FTPFileSystem.java Project: shahidminhas/abc
  public void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);
    // get host information from uri (overrides info in conf)
    String host = uri.getHost();
    host = (host == null) ? conf.get("fs.ftp.host", null) : host;
    if (host == null) {
      throw new IOException("Invalid host specified");
    }
    conf.set("fs.ftp.host", host);

    // get port information from uri, (overrides info in conf)
    int port = uri.getPort();
    port = (port == -1) ? FTP.DEFAULT_PORT : port;
    conf.setInt("fs.ftp.host.port", port);

    // get user/password information from URI (overrides info in conf)
    String userAndPassword = uri.getUserInfo();
    if (userAndPassword == null) {
      // String concatenation never yields null, so check the individual values
      // before falling back to the configuration.
      String user = conf.get("fs.ftp.user." + host, null);
      String password = conf.get("fs.ftp.password." + host, null);
      if (user == null) {
        throw new IOException("Invalid user/password specified");
      }
      userAndPassword = (password == null) ? user : user + ":" + password;
    }
    String[] userPasswdInfo = userAndPassword.split(":");
    conf.set("fs.ftp.user." + host, userPasswdInfo[0]);
    if (userPasswdInfo.length > 1) {
      conf.set("fs.ftp.password." + host, userPasswdInfo[1]);
    }
    setConf(conf);
    this.uri = uri;
  }
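
For illustration, a hypothetical client-side configuration that initialize() above would fall back to when the FTP URI carries no credentials; the host name and credentials are placeholders:

    Configuration conf = new Configuration();
    conf.set("fs.ftp.host", "ftp.example.com");
    conf.setInt("fs.ftp.host.port", 21);
    conf.set("fs.ftp.user.ftp.example.com", "anonymous");
    conf.set("fs.ftp.password.ftp.example.com", "secret");
    // FileSystem.get() routes ftp:// URIs to FTPFileSystem, which calls initialize() with this conf.
    FileSystem fs = FileSystem.get(URI.create("ftp://ftp.example.com/"), conf);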
Code example #5
 @Before
 public void setup() throws Exception {
   FileContext files = FileContext.getLocalFSFileContext();
   Path workSpacePath = new Path(workSpace.getAbsolutePath());
   files.mkdir(workSpacePath, null, true);
   FileUtil.chmod(workSpace.getAbsolutePath(), "777");
   File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
   files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"), false);
   File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
   files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"), false);
   String exec_path = System.getProperty("container-executor.path");
   if (exec_path != null && !exec_path.isEmpty()) {
     Configuration conf = new Configuration(false);
     LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH + "=" + exec_path);
     conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
     exec = new LinuxContainerExecutor();
     exec.setConf(conf);
     conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
     conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
     dirsHandler = new LocalDirsHandlerService();
     dirsHandler.init(conf);
   }
   appSubmitter = System.getProperty("application.submitter");
   if (appSubmitter == null || appSubmitter.isEmpty()) {
     appSubmitter = "nobody";
   }
 }
Code example #6
  protected void setupCluster(boolean simulated, long minFileSize, String[] racks, String[] hosts)
      throws IOException {
    conf = new Configuration();
    localFileSys = FileSystem.getLocal(conf);
    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
    conf.set("dfs.replication.pending.timeout.sec", "2");
    conf.setLong("dfs.block.size", 1L);
    conf.set(
        "dfs.block.replicator.classname",
        "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");
    conf.setLong("hdfs.raid.min.filesize", minFileSize);
    Utils.loadTestCodecs(conf, 5, 5, 1, 3, "/raid", "/raidrs", false, true);
    conf.setInt("io.bytes.per.checksum", 1);
    excludeFile = new Path(TEST_DIR, "exclude" + System.currentTimeMillis());
    cleanFile(excludeFile);
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
    writeConfigFile(excludeFile, null);

    if (!simulated) {
      cluster = new MiniDFSCluster(conf, hosts.length, true, racks, hosts);
    } else {
      long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
      cluster = new MiniDFSCluster(0, conf, hosts.length, true, true, null, racks, capacities);
    }
    cluster.waitActive();
    namesystem = cluster.getNameNode().getNamesystem();
    Assert.assertTrue(
        "BlockPlacementPolicy type is not correct.",
        namesystem.replicator instanceof BlockPlacementPolicyRaid);
    policy = (BlockPlacementPolicyRaid) namesystem.replicator;
    fs = cluster.getFileSystem();
    dfs = (DistributedFileSystem) fs;
    TestDirectoryRaidDfs.setupStripeStore(conf, fs);
  }
Code example #7
  /**
   * Helper API to support the YARN and MapReduce format for specifying users and groups: a
   * comma-separated list of users and a comma-separated list of groups, separated from each other
   * by whitespace, e.g. "user1,user2 group1,group2". If the value specified is "*", all users are
   * allowed to do the operation.
   *
   * @param viewACLsStr
   * @param modifyACLsStr
   */
  public DAGAccessControls(String viewACLsStr, String modifyACLsStr) {
    final Configuration conf = new Configuration(false);
    conf.set(TezConstants.TEZ_DAG_VIEW_ACLS, (viewACLsStr != null ? viewACLsStr : ""));
    conf.set(TezConstants.TEZ_DAG_MODIFY_ACLS, (modifyACLsStr != null ? modifyACLsStr : ""));
    ACLConfigurationParser parser = new ACLConfigurationParser(conf, true);

    this.usersWithViewACLs = new HashSet<String>();
    this.usersWithModifyACLs = new HashSet<String>();
    this.groupsWithViewACLs = new HashSet<String>();
    this.groupsWithModifyACLs = new HashSet<String>();

    Map<ACLType, Set<String>> allowedUsers = parser.getAllowedUsers();
    Map<ACLType, Set<String>> allowedGroups = parser.getAllowedGroups();

    if (allowedUsers.containsKey(ACLType.DAG_VIEW_ACL)) {
      this.usersWithViewACLs.addAll(allowedUsers.get(ACLType.DAG_VIEW_ACL));
    }
    if (allowedUsers.containsKey(ACLType.DAG_MODIFY_ACL)) {
      this.usersWithModifyACLs.addAll(allowedUsers.get(ACLType.DAG_MODIFY_ACL));
    }
    if (allowedGroups.containsKey(ACLType.DAG_VIEW_ACL)) {
      this.groupsWithViewACLs.addAll(allowedGroups.get(ACLType.DAG_VIEW_ACL));
    }
    if (allowedGroups.containsKey(ACLType.DAG_MODIFY_ACL)) {
      this.groupsWithModifyACLs.addAll(allowedGroups.get(ACLType.DAG_MODIFY_ACL));
    }
  }
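
A hypothetical instantiation matching the format described in the javadoc above (comma-separated users, then whitespace, then comma-separated groups; "*" means all users):

    // user1, user2 and members of group1/group2 may view the DAG; any user may modify it.
    DAGAccessControls dagAcls = new DAGAccessControls("user1,user2 group1,group2", "*");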
Code example #8
  @Test
  public void test() throws Exception {
    createTable(TABLE_NAME, getBasicSchema(), getBasicCreateTableOptions());

    KuduTableOutputFormat output = new KuduTableOutputFormat();
    Configuration conf = new Configuration();
    conf.set(KuduTableOutputFormat.MASTER_ADDRESSES_KEY, getMasterAddresses());
    conf.set(KuduTableOutputFormat.OUTPUT_TABLE_KEY, TABLE_NAME);
    output.setConf(conf);

    String multitonKey = conf.get(KuduTableOutputFormat.MULTITON_KEY);
    KuduTable table = KuduTableOutputFormat.getKuduTable(multitonKey);
    assertNotNull(table);

    Insert insert = table.newInsert();
    PartialRow row = insert.getRow();
    row.addInt(0, 1);
    row.addInt(1, 2);
    row.addInt(2, 3);
    row.addString(3, "a string");
    row.addBoolean(4, true);

    RecordWriter<NullWritable, Operation> rw = output.getRecordWriter(null);
    rw.write(NullWritable.get(), insert);
    rw.close(null);
    AsyncKuduScanner.AsyncKuduScannerBuilder builder = client.newScannerBuilder(table);
    assertEquals(1, countRowsInScan(builder.build()));
  }
Code example #9
  /** Runs this tool. */
  public int run(String[] args) throws Exception {
    if (args.length != 2) {
      printUsage();
      return -1;
    }

    Configuration conf = getConf();
    FileSystem fs = FileSystem.get(conf);

    String indexPath = args[0];

    Path p = new Path(indexPath);
    if (!fs.exists(p)) {
      LOG.warn("Index path doesn't exist...");
      return -1;
    }

    int numReducers = Integer.parseInt(args[1]);

    LOG.info("Tool name: " + BuildPositionalIndexIP.class.getCanonicalName());
    LOG.info(" - Index path: " + indexPath);

    conf.set(Constants.IndexPath, indexPath);
    conf.setInt(Constants.NumReduceTasks, numReducers);
    conf.set(
        Constants.PostingsListsType,
        ivory.core.data.index.PostingsListDocSortedNonPositional.class.getCanonicalName());

    new BuildIPInvertedIndexDocSorted(conf).run();
    new BuildIntPostingsForwardIndex(conf).run();

    return 0;
  }
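
A hypothetical driver for the tool above, assuming it follows the usual Configured/Tool pattern implied by run(String[]) and getConf():

  public static void main(String[] args) throws Exception {
    // args: <indexPath> <numReducers>
    int exitCode = ToolRunner.run(new Configuration(), new BuildPositionalIndexIP(), args);
    System.exit(exitCode);
  }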
Code example #10
File: TestCoordSubmitCommand.java Project: rvs/oozie
  /**
   * Basic test
   *
   * @throws Exception
   */
  public void testBasicSubmit() throws Exception {
    Configuration conf = new XConfiguration();
    String appPath = getTestCaseDir();
    String appXml =
        "<coordinator-app name=\"NAME\" frequency=\"${coord:days(1)}\" start=\"2009-02-01T01:00Z\" end=\"2009-02-03T23:59Z\" timezone=\"UTC\" "
            + "xmlns=\"uri:oozie:coordinator:0.1\"> <controls> <concurrency>2</concurrency> "
            + "<execution>LIFO</execution> </controls> <datasets> "
            + "<dataset name=\"a\" frequency=\"${coord:days(7)}\" initial-instance=\"2009-02-01T01:00Z\" "
            + "timezone=\"UTC\"> <uri-template>file:///tmp/coord/workflows/${YEAR}/${DAY}</uri-template> </dataset> "
            + "<dataset name=\"local_a\" frequency=\"${coord:days(7)}\" initial-instance=\"2009-02-01T01:00Z\" "
            + "timezone=\"UTC\"> <uri-template>file:///tmp/coord/workflows/${YEAR}/${DAY}</uri-template> </dataset> "
            + "</datasets> <input-events> "
            + "<data-in name=\"A\" dataset=\"a\"> <instance>${coord:latest(0)}</instance> </data-in>  "
            + "</input-events> "
            + "<output-events> <data-out name=\"LOCAL_A\" dataset=\"local_a\"> "
            + "<instance>${coord:current(-1)}</instance> </data-out> </output-events> <action> <workflow> <app-path>hdfs:///tmp/workflows/</app-path> "
            + "<configuration> <property> <name>inputA</name> <value>${coord:dataIn('A')}</value> </property> "
            + "<property> <name>inputB</name> <value>${coord:dataOut('LOCAL_A')}</value> "
            + "</property></configuration> </workflow> </action> </coordinator-app>";
    writeToFile(appXml, appPath);
    conf.set(OozieClient.COORDINATOR_APP_PATH, appPath);
    conf.set(OozieClient.USER_NAME, getTestUser());
    conf.set(OozieClient.GROUP_NAME, "other");
    CoordSubmitCommand sc = new CoordSubmitCommand(conf, "UNIT_TESTING");
    String jobId = sc.call();

    assertEquals(jobId.substring(jobId.length() - 2), "-C");
    CoordinatorJobBean job = checkCoordJobs(jobId);
    if (job != null) {
      assertEquals(
          job.getTimeout(),
          Services.get().getConf().getInt("oozie.service.coord.normal.default.timeout", -2));
    }
  }
Code example #11
  public static void main(String[] args)
      throws IOException, InterruptedException, ClassNotFoundException {
    Configuration conf = new Configuration();
    conf.set("I", args[3]); // Num of Row (=Columns)
    conf.set("IB", args[4]); // RowBlock Size of Matrix

    Job job = new Job(conf, "CalculateCC");

    job.setJarByClass(CorrelationCoefficient.class);

    job.setReducerClass(Reduce.class);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(DoubleWritable.class);

    // Use a different input file for each Mapper.
    MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, MapAll.class);
    MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, MapDiag.class);
    FileOutputFormat.setOutputPath(job, new Path(args[2]));

    boolean success = job.waitForCompletion(true);
    System.out.println(success);
  }
Code example #12
File: TestCoordSubmitCommand.java Project: rvs/oozie
 /**
  * Tests schema error. Negative test case.
  *
  * @throws Exception
  */
 public void testSchemaError() throws Exception {
   Configuration conf = new XConfiguration();
   String appPath = getTestCaseDir();
   String appXml =
       "<coordinator-app name=\"NAME\" frequencyERROR=\"10\" start=\"2009-02-01T01:00Z\" end=\"2009-02-03T23:59Z\" timezone=\"UTC\" "
           + "xmlns=\"uri:oozie:coordinator:0.1\"> <controls> <timeout>10</timeout> <concurrency>2</concurrency> "
           + "<execution>LIFO</execution> </controls> <datasets> "
           + "<dataset name=\"a\" frequency=\"60\" initial-instance=\"2009-02-01T01:00Z\" "
           + "timezone=\"UTC\"> <uri-template>file:///tmp/coord/workflows/${YEAR}/${DAY}</uri-template> </dataset> "
           + "<dataset name=\"local_a\" frequency=\"120\" initial-instance=\"2009-02-01T01:00Z\" "
           + "timezone=\"UTC\"> <uri-template>file:///tmp/coord/workflows/${YEAR}/${DAY}</uri-template> </dataset> "
           + "</datasets> <input-events> "
           + "<data-in name=\"A\" dataset=\"a\"> <instance>${coord:latest(0)}</instance> </data-in>  "
           + "</input-events> "
           + "<output-events> <data-out name=\"LOCAL_A\" dataset=\"local_a\"> "
           + "<instance>${coord:current(-1)}</instance> </data-out> </output-events> <action> <workflow> <app-path>hdfs:///tmp/workflows/</app-path> "
           + "<configuration> <property> <name>inputA</name> <value>${coord:dataIn('A')}</value> </property> "
           + "<property> <name>inputB</name> <value>${coord:dataOut('LOCAL_A')}</value> "
           + "</property></configuration> </workflow> </action> </coordinator-app>";
   writeToFile(appXml, appPath);
   conf.set(OozieClient.COORDINATOR_APP_PATH, appPath);
   conf.set(OozieClient.USER_NAME, getTestUser());
   conf.set(OozieClient.GROUP_NAME, "other");
   CoordSubmitCommand sc = new CoordSubmitCommand(conf, "UNIT_TESTING");
   String jobId = null;
   try {
     sc.call();
     fail("Exception expected if schema has errors!");
   } catch (CommandException e) {
     // should come here for schema errors
   }
 }
Code example #13
  /**
   * Tests backward compatibility. Configuration can be set either with the old param dfs.umask,
   * which takes a decimal umask, or with dfs.umaskmode, which takes a symbolic or octal umask.
   */
  public void testBackwardCompatibility() {
    // Test 1 - old configuration key with decimal
    // umask value should be handled when set using
    // FsPermission.setUMask() API
    FsPermission perm = new FsPermission((short) 18);
    Configuration conf = new Configuration();
    FsPermission.setUMask(conf, perm);
    assertEquals(18, FsPermission.getUMask(conf).toShort());

    // Test 2 - old configuration key set with decimal
    // umask value should be handled
    perm = new FsPermission((short) 18);
    conf = new Configuration();
    conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
    assertEquals(18, FsPermission.getUMask(conf).toShort());

    // Test 3 - old configuration key overrides the new one
    conf = new Configuration();
    conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
    conf.set(FsPermission.UMASK_LABEL, "000");
    assertEquals(18, FsPermission.getUMask(conf).toShort());

    // Test 4 - new configuration key is handled (octal 022 equals decimal 18)
    conf = new Configuration();
    conf.set(FsPermission.UMASK_LABEL, "022");
    assertEquals(18, FsPermission.getUMask(conf).toShort());
  }
Code example #14
 @Test(timeout = 60000)
 public void testExceptionDuringInitialization() throws Exception {
   Configuration conf = TEST_UTIL.getConfiguration();
   conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
   conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
   conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
   TEST_UTIL.startMiniCluster(2);
   try {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     // Trigger one regionserver to fail as if it came up with a coprocessor
     // that fails during initialization
     final HRegionServer regionServer = cluster.getRegionServer(0);
     conf.set(
         CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         FailedInitializationObserver.class.getName());
     regionServer
         .getRegionServerCoprocessorHost()
         .loadSystemCoprocessors(conf, CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
     TEST_UTIL.waitFor(
         10000,
         1000,
         new Predicate<Exception>() {
           @Override
           public boolean evaluate() throws Exception {
             return regionServer.isAborted();
           }
         });
   } finally {
     TEST_UTIL.shutdownMiniCluster();
   }
 }
Code example #15
  @Override
  public boolean initializeReader(Properties configuration) {
    try {
      hdfsConfiguration = new org.apache.hadoop.conf.Configuration(false);
      hdfsConfiguration.set("fs.defaultFS", configuration.getProperty("hdfs.url"));
      hdfsConfiguration.set(
          "fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
      hdfsConfiguration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());

      hdfsConfiguration.set(
          "hdfs.url",
          configuration.getProperty("hdfs.url")); // "hdfs://snf-618466.vm.okeanos.grnet.gr:8020");
      // hdfsConfiguration.set("prefix", "/user/vagvaz/");

      hdfsConfiguration.set("prefix", configuration.getProperty("prefix"));
      // fileSystem = FileSystem.get(hdfsConfiguration);
      fileSystem =
          FileSystem.get(
              getDefaultUri(hdfsConfiguration),
              hdfsConfiguration,
              configuration.getProperty("hdfs.user"));

      if (!fileSystem.exists(basePath)) {
        log.info("Creating base path on HDFS " + configuration.getProperty("hdfs.url"));
        fileSystem.mkdirs(basePath);
      }

    } catch (Exception e) {
      log.error(
          "Could not create HDFS remote FileSystem using \n" + configuration.toString() + "\n");
      return false;
    }
    return true;
  }
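
A hypothetical Properties object that would satisfy the keys read by initializeReader() above; the host name, path, and user are placeholders:

    Properties props = new Properties();
    props.setProperty("hdfs.url", "hdfs://namenode.example.com:8020");
    props.setProperty("prefix", "/user/example/");
    props.setProperty("hdfs.user", "example");
    // Assumes 'reader' is an instance of the class that defines initializeReader().
    boolean ready = reader.initializeReader(props);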
Code example #16
  @Test
  public void testWithConfig() {
    Configuration conf = new Configuration();
    conf.set("B", "2b");
    conf.set("C", "33");
    conf.set("D", "4");

    CompoundConfiguration compoundConf = new CompoundConfiguration().add(baseConf).add(conf);
    assertEquals("1", compoundConf.get("A"));
    assertEquals("2b", compoundConf.get("B"));
    assertEquals(33, compoundConf.getInt("C", 0));
    assertEquals("4", compoundConf.get("D"));
    assertEquals(4, compoundConf.getInt("D", 0));
    assertNull(compoundConf.get("E"));
    assertEquals(6, compoundConf.getInt("F", 6));

    int cnt = 0;
    for (Map.Entry<String, String> entry : compoundConf) {
      cnt++;
      if (entry.getKey().equals("B")) assertEquals("2b", entry.getValue());
      else if (entry.getKey().equals("G")) assertEquals(null, entry.getValue());
    }
    // verify that entries from ImmutableConfigMap's are merged in the iterator's view
    assertEquals(baseConfSize + 1, cnt);
  }
Code example #17
File: HBaseSpanReceiver.java Project: hicha/htrace
  @Override
  public void configure(HTraceConfiguration conf) {
    this.conf = conf;
    this.hconf = HBaseConfiguration.create();
    this.table = Bytes.toBytes(conf.get(TABLE_KEY, DEFAULT_TABLE));
    this.cf = Bytes.toBytes(conf.get(COLUMNFAMILY_KEY, DEFAULT_COLUMNFAMILY));
    this.maxSpanBatchSize = conf.getInt(MAX_SPAN_BATCH_SIZE_KEY, DEFAULT_MAX_SPAN_BATCH_SIZE);
    String quorum = conf.get(COLLECTOR_QUORUM_KEY, DEFAULT_COLLECTOR_QUORUM);
    hconf.set(HConstants.ZOOKEEPER_QUORUM, quorum);
    String znodeParent = conf.get(ZOOKEEPER_ZNODE_PARENT_KEY, DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    hconf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, znodeParent);
    int clientPort = conf.getInt(ZOOKEEPER_CLIENT_PORT_KEY, DEFAULT_ZOOKEEPER_CLIENT_PORT);
    hconf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, clientPort);

    // If there are already threads running, tear them down.
    if (this.service != null) {
      this.service.shutdownNow();
      this.service = null;
    }
    int numThreads = conf.getInt(NUM_THREADS_KEY, DEFAULT_NUM_THREADS);
    this.service = Executors.newFixedThreadPool(numThreads, tf);
    for (int i = 0; i < numThreads; i++) {
      this.service.submit(new WriteSpanRunnable());
    }
  }
Code example #18
  public void testStubInput() throws Exception {
    String queueName = "default";

    Configuration configuration = new Configuration(false);
    configuration.set(AppEngineJobContext.CONTROLLER_QUEUE_KEY, queueName);
    configuration.set(AppEngineJobContext.WORKER_QUEUE_KEY, queueName);
    configuration.set(AppEngineJobContext.MAPPER_SHARD_COUNT_KEY, "2");
    configuration.set(AppEngineJobContext.MAPPER_INPUT_PROCESSING_RATE_KEY, "1000");
    configuration.setClass("mapreduce.inputformat.class", StubInputFormat.class, InputFormat.class);
    configuration.setClass("mapreduce.map.class", StubMapper.class, AppEngineMapper.class);

    AppEngineMapreduce.INSTANCE.start(configuration, "test", "/mapreduce/");

    executeTasksUntilEmpty(queueName);

    assertTrue(StubMapper.cleanupCalled);
    assertTrue(StubMapper.setupCalled);
    assertTrue(StubMapper.taskCleanupCalled);
    assertTrue(StubMapper.taskSetupCalled);

    Collection<IntWritable> expectedKeys = new HashSet<IntWritable>();
    expectedKeys.addAll(StubInputSplit.KEYS);
    expectedKeys.addAll(StubInputSplit.KEYS);
    assertEquals(expectedKeys, new HashSet<IntWritable>(StubMapper.invocationKeys));
  }
Code example #19
  /**
   * Merge the DAG ACLs with the AM ACLs in the configuration object. The config object will
   * contain the updated ACLs.
   *
   * @param conf The AM config.
   */
  @Private
  public synchronized void mergeIntoAmAcls(Configuration conf) {
    ACLConfigurationParser parser = new ACLConfigurationParser(conf, false);
    parser.addAllowedGroups(
        ImmutableMap.of(
            ACLType.AM_VIEW_ACL, groupsWithViewACLs, ACLType.AM_MODIFY_ACL, groupsWithModifyACLs));
    parser.addAllowedUsers(
        ImmutableMap.of(
            ACLType.AM_VIEW_ACL, usersWithViewACLs, ACLType.AM_MODIFY_ACL, usersWithModifyACLs));

    Set<String> viewUsers = parser.getAllowedUsers().get(ACLType.AM_VIEW_ACL);
    Set<String> viewGroups = parser.getAllowedGroups().get(ACLType.AM_VIEW_ACL);
    if (viewUsers.contains(ACLManager.WILDCARD_ACL_VALUE)) {
      conf.set(TezConfiguration.TEZ_AM_VIEW_ACLS, ACLManager.WILDCARD_ACL_VALUE);
    } else {
      String userList = ACLManager.toCommaSeparatedString(viewUsers);
      String groupList = ACLManager.toCommaSeparatedString(viewGroups);
      conf.set(TezConfiguration.TEZ_AM_VIEW_ACLS, userList + " " + groupList);
    }

    Set<String> modifyUsers = parser.getAllowedUsers().get(ACLType.AM_MODIFY_ACL);
    Set<String> modifyGroups = parser.getAllowedGroups().get(ACLType.AM_MODIFY_ACL);
    if (modifyUsers.contains(ACLManager.WILDCARD_ACL_VALUE)) {
      conf.set(TezConfiguration.TEZ_AM_MODIFY_ACLS, ACLManager.WILDCARD_ACL_VALUE);
    } else {
      String userList = ACLManager.toCommaSeparatedString(modifyUsers);
      String groupList = ACLManager.toCommaSeparatedString(modifyGroups);
      conf.set(TezConfiguration.TEZ_AM_MODIFY_ACLS, userList + " " + groupList);
    }
  }
Code example #20
 private static final void setParameter(
     final Configuration config,
     final Class<?> scope,
     final Object val,
     final ParameterEnum configItem) {
   if (val != null) {
     if (val instanceof Long) {
       config.setLong(
           GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()), ((Long) val));
     } else if (val instanceof Double) {
       config.setDouble(
           GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()), ((Double) val));
     } else if (val instanceof Boolean) {
       config.setBoolean(
           GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()), ((Boolean) val));
     } else if (val instanceof Integer) {
       config.setInt(
           GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()), ((Integer) val));
     } else if (val instanceof Class) {
       config.setClass(
           GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
           ((Class) val),
           ((Class) val));
     } else if (val instanceof byte[]) {
       config.set(
           GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
           ByteArrayUtils.byteArrayToString((byte[]) val));
     } else {
       config.set(GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()), val.toString());
     }
   }
 }
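
A hypothetical call to setParameter above; the scope class and ParameterEnum constant are placeholders standing in for real configuration items:

    // Takes the Double branch above and stores the value under the key derived
    // from the scope class and the enum constant.
    setParameter(config, MyJobRunner.class, Double.valueOf(0.75), MyParams.SAMPLE_PROBABILITY);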
Code example #21
  public ConnectionlessQueryServicesImpl(
      QueryServices services, ConnectionInfo connInfo, Properties info) {
    super(services);
    userName = connInfo.getPrincipal();
    metaData = newEmptyMetaData();

    // Use KeyValueBuilder that builds real KeyValues, as our test utils require this
    this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
    Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
    for (Entry<String, String> entry : services.getProps()) {
      config.set(entry.getKey(), entry.getValue());
    }
    if (info != null) {
      for (Object key : info.keySet()) {
        config.set((String) key, info.getProperty((String) key));
      }
    }
    for (Entry<String, String> entry : connInfo.asProps()) {
      config.set(entry.getKey(), entry.getValue());
    }

    // Without making a copy of the configuration we cons up, we lose some of our properties
    // on the server side during testing.
    config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(config);
    TransactionManager txnManager = new TransactionManager(config);
    this.txSystemClient = new InMemoryTxSystemClient(txnManager);
  }
Code example #22
 @Test
 public void testExtractorMapperCSV() throws Exception {
   FeatureExtractorMapper mapper = new FeatureExtractorMapper();
   Configuration conf = getConfiguration();
   conf.set("vector.implementation.class.name", "org.apache.mahout.math.RandomAccessSparseVector");
   conf.set(FeatureExtractorKeySet.FEATURE_NAMES, RAW_CSV[0]);
   conf.set(FeatureExtractorKeySet.SELECTED_DEPENDENT, DEPENDENT);
   conf.set(FeatureExtractorKeySet.SELECTED_INDEPENDENT, INDEPENDENT);
   conf.set(FeatureExtractorKeySet.SELECTED_INTERACTION, INTERACTION);
   conf.set(FeatureExtractorKeySet.SEPARATOR, SEP_CSV);
   DummyRecordWriter<Text, VectorWritable> writer = new DummyRecordWriter<Text, VectorWritable>();
   Mapper<LongWritable, Text, Text, VectorWritable>.Context context =
       DummyRecordWriter.build(mapper, conf, writer);
   mapper.setup(context);
   for (int i = 0; i < RAW_CSV.length; ++i) {
     mapper.map(new LongWritable(i), new Text(RAW_CSV[i]), context);
   }
   assertEquals("Number of map results", 1, writer.getData().size());
   assertEquals("Number of map results", 1, writer.getData().size());
   for (int i = 0; i < writer.getValue(new Text("5")).size(); ++i) {
     assertEquals(
         "Features: ",
         getFormatedOutput(writer.getValue(new Text("5")).get(i)),
         getFormatedOutput(RAW_DATA[i]));
   }
 }
Code example #23
  @Test
  public void testLocalUser() throws Exception {
    try {
      // nonsecure default
      Configuration conf = new YarnConfiguration();
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
      UserGroupInformation.setConfiguration(conf);
      LinuxContainerExecutor lce = new LinuxContainerExecutor();
      lce.setConf(conf);
      Assert.assertEquals(
          YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER, lce.getRunAsUser("foo"));

      // nonsecure custom setting
      conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "bar");
      lce = new LinuxContainerExecutor();
      lce.setConf(conf);
      Assert.assertEquals("bar", lce.getRunAsUser("foo"));

      // secure
      conf = new YarnConfiguration();
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
      UserGroupInformation.setConfiguration(conf);
      lce = new LinuxContainerExecutor();
      lce.setConf(conf);
      Assert.assertEquals("foo", lce.getRunAsUser("foo"));
    } finally {
      Configuration conf = new YarnConfiguration();
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
      UserGroupInformation.setConfiguration(conf);
    }
  }
Code example #24
  @Test
  public void testNodeRegistrationVersionLessThanRM() throws Exception {
    writeToHostsFile("host2");
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
    conf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION, "EqualToRM");
    rm = new MockRM(conf);
    rm.start();
    String nmVersion = "1.9.9";

    ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService();
    RegisterNodeManagerRequest req = Records.newRecord(RegisterNodeManagerRequest.class);
    NodeId nodeId = NodeId.newInstance("host2", 1234);
    Resource capability = BuilderUtils.newResource(1024, 1);
    req.setResource(capability);
    req.setNodeId(nodeId);
    req.setHttpPort(1234);
    req.setNMVersion(nmVersion);
    // trying to register an invalid node.
    RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req);
    Assert.assertEquals(NodeAction.SHUTDOWN, response.getNodeAction());
    Assert.assertTrue(
        "Diagnostic message did not contain: 'Disallowed NodeManager "
            + "Version "
            + nmVersion
            + ", is less than the minimum version'",
        response
            .getDiagnosticsMessage()
            .contains(
                "Disallowed NodeManager Version "
                    + nmVersion
                    + ", is less than the minimum version "));
  }
Code example #25
  /**
   * Create a map-only Hadoop Job out of the passed in parameters.  Does not set the
   * Job name.
   *
   * @see #getCustomJobName(String, org.apache.hadoop.mapreduce.JobContext, Class, Class)
   */
  @SuppressWarnings("rawtypes")
  public static Job prepareJob(
      Path inputPath,
      Path outputPath,
      Class<? extends InputFormat> inputFormat,
      Class<? extends Mapper> mapper,
      Class<? extends Writable> mapperKey,
      Class<? extends Writable> mapperValue,
      Class<? extends OutputFormat> outputFormat,
      Configuration conf)
      throws IOException {

    // Job job = new Job(new Configuration(conf));
    Job job = Job.getInstance(conf);
    Configuration jobConf = job.getConfiguration();

    if (mapper.equals(Mapper.class)) {
      throw new IllegalStateException(
          "Can't figure out the user class jar file from mapper/reducer");
    }
    job.setJarByClass(mapper);

    job.setInputFormatClass(inputFormat);
    jobConf.set("mapred.input.dir", inputPath.toString());

    job.setMapperClass(mapper);
    job.setMapOutputKeyClass(mapperKey);
    job.setMapOutputValueClass(mapperValue);
    job.setOutputKeyClass(mapperKey);
    job.setOutputValueClass(mapperValue);
    jobConf.setBoolean("mapred.compress.map.output", true);
    job.setNumReduceTasks(0);

    job.setOutputFormatClass(outputFormat);
    jobConf.set("mapred.output.dir", outputPath.toString());

    return job;
  }
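
A hypothetical call to prepareJob above, wiring a SequenceFile-in, SequenceFile-out map-only pass; MyMapper stands in for a real Mapper subclass:

    Job job = prepareJob(
        new Path("/input"),
        new Path("/output"),
        SequenceFileInputFormat.class,
        MyMapper.class,
        Text.class,
        IntWritable.class,
        SequenceFileOutputFormat.class,
        new Configuration());
    job.setJobName("example map-only pass");
    boolean succeeded = job.waitForCompletion(true);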
Code example #26
File: WordCount.java Project: paohaijiao/mapreduce
 public static void printEnv(Job job) {
   Configuration conf = job.getConfiguration();
   conf.set("mapreduce.framework.name", "yarn");
   conf.set("yarn.resourcemanager.hostname", "yun12-01");
   System.out.println("###########################################");
   System.out.println("fs.defaultFS:" + conf.get("fs.defaultFS"));
   System.out.println("mapred.job.tracker:" + conf.get("mapred.job.tracker"));
   System.out.println("mapreduce.framework.name" + ":" + conf.get("mapreduce.framework.name"));
   System.out.println(
       "yarn.nodemanager.aux-services" + ":" + conf.get("yarn.nodemanager.aux-services"));
   System.out.println(
       "yarn.resourcemanager.address" + ":" + conf.get("yarn.resourcemanager.address"));
   System.out.println(
       "yarn.resourcemanager.scheduler.address"
           + ":"
           + conf.get("yarn.resourcemanager.scheduler.address"));
   System.out.println(
       "yarn.resourcemanager.resource-tracker.address"
           + ":"
           + conf.get("yarn.resourcemanager.resource-tracker.address"));
   System.out.println("yarn.application.classpath" + ":" + conf.get("yarn.application.classpath"));
   System.out.println("zkhost:" + conf.get("zkhost"));
   System.out.println("namespace:" + conf.get("namespace"));
   System.out.println("project:" + conf.get("project"));
   System.out.println("collection:" + conf.get("collection"));
   System.out.println("shard:" + conf.get("shard"));
   System.out.println("###########################################");
 }
Code example #27
  /**
   * Constructor.
   *
   * @param conf Configuration to use. Post construction has the master's address.
   * @param noMasters Count of masters to start.
   * @param noRegionServers Count of regionservers to start.
   * @param masterClass
   * @param regionServerClass
   * @throws IOException
   */
  @SuppressWarnings("unchecked")
  public LocalHBaseCluster(
      final Configuration conf,
      final int noMasters,
      final int noRegionServers,
      final Class<? extends HMaster> masterClass,
      final Class<? extends HRegionServer> regionServerClass)
      throws IOException {
    this.conf = conf;
    // Always have masters and regionservers come up on port '0' so we don't
    // clash over default ports.
    conf.set(HConstants.MASTER_PORT, "0");
    conf.set(HConstants.REGIONSERVER_PORT, "0");
    conf.set(HConstants.REGIONSERVER_INFO_PORT, "0");

    this.masterClass =
        (Class<? extends HMaster>) conf.getClass(HConstants.MASTER_IMPL, masterClass);
    // Start the HMasters.
    for (int i = 0; i < noMasters; i++) {
      addMaster(new Configuration(conf), i);
    }
    // Start the HRegionServers.
    this.regionServerClass =
        (Class<? extends HRegionServer>)
            conf.getClass(HConstants.REGION_SERVER_IMPL, regionServerClass);

    for (int i = 0; i < noRegionServers; i++) {
      addRegionServer(new Configuration(conf), i);
    }
  }
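
A hypothetical use of the constructor above, starting one master and two regionservers with the default implementation classes:

    LocalHBaseCluster cluster =
        new LocalHBaseCluster(HBaseConfiguration.create(), 1, 2, HMaster.class, HRegionServer.class);
    cluster.startup();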
Code example #28
  public static void main(String[] args) throws Exception {

    Configuration conf = new Configuration();
    conf.set("mapred.job.tracker", "localhost:8021");
    conf.set("fs.default.name", "hdfs://localhost:8020");

    // String[] ars = new String[]{"/user/zhwang1988/input01/sample.txt",
    // "/user/zhwang1988/newout01"};
    // String[] otherArgs = new GenericOptionsParser(conf, ars).getRemainingArgs();
    //  if (otherArgs.length != 2) {
    //  System.err.println("Usage: wordcount <in> <out>");
    // System.exit(2);
    // }
    Job job = new Job(conf, "Max temperature");

    File jarFile = EJob.createTempJar("bin");
    EJob.addClasspath("/Users/zhwang1988/desktop/hadoop-1.2.1/conf");
    ClassLoader classLoader = EJob.getClassLoader();
    Thread.currentThread().setContextClassLoader(classLoader);
    job.setJarByClass(WordCount.class);
    ((JobConf) job.getConfiguration()).setJar(jarFile.toString());

    job.setNumReduceTasks(2);
    //

    job.setMapperClass(MaxTemperatureMapper.class);
    job.setReducerClass(MapTemperatureReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(
        job, new Path("hdfs://localhost:8020/user/zhwang1988/input01/1901"));
    FileOutputFormat.setOutputPath(job, new Path("hdfs://localhost:8020/user/zhwang1988/output02"));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
Code example #29
  @Test
  public void testRefreshSuperUserGroupsWithFileSystemBasedConfigurationProvider()
      throws IOException, YarnException {
    configuration.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

    // upload default configurations
    uploadDefaultConfiguration();

    try {
      rm = new MockRM(configuration);
      rm.init(configuration);
      rm.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }

    Configuration coreConf = new Configuration(false);
    coreConf.set("hadoop.proxyuser.test.groups", "test_groups");
    coreConf.set("hadoop.proxyuser.test.hosts", "test_hosts");
    uploadConfiguration(coreConf, "core-site.xml");

    rm.adminService.refreshSuperUserGroupsConfiguration(
        RefreshSuperUserGroupsConfigurationRequest.newInstance());
    Assert.assertTrue(ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").size() == 1);
    Assert.assertTrue(
        ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups"));

    Assert.assertTrue(ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").size() == 1);
    Assert.assertTrue(
        ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts"));
  }
Code example #30
  @BeforeClass
  public static void setup() throws Exception {
    String testDir = System.getProperty("test.data.dir", "./");
    testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();

    warehousedir = new Path(workDir + "/warehouse");

    // Run hive metastore server
    t = new Thread(new RunMS());
    t.start();

    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    Configuration conf = new Configuration(true);
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    fs.mkdirs(warehousedir);

    initializeSetup();
  }