Example #1
  static {
    try {
      // call newInstance() instead of using a shared instance from a cache
      // to avoid accidentally having it closed by someone else
      FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(CONF), CONF);
      if (!(fs instanceof DistributedFileSystem)) {
        String error =
            "Cannot connect to HDFS. "
                + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY
                + "("
                + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)
                + ")"
                + " might be set incorrectly";
        throw new RuntimeException(error);
      }
      DFS = (DistributedFileSystem) fs;
    } catch (IOException e) {
      throw new RuntimeException("couldn't retrieve FileSystem:\n" + e.getMessage(), e);
    }

    SUPPORTS_VOLUME_ID =
        CONF.getBoolean(
            DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
            DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
  }
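The comment above distinguishes a cached, shared FileSystem handle from a private one. A minimal sketch of that distinction, assuming a reachable default filesystem (the method name and path are illustrative):

  static void sketchSharedVsOwnedHandles(Configuration conf) throws IOException {
    // FileSystem.get() returns a shared, cache-managed instance for the same
    // URI/Configuration, so closing it would affect every other caller of get().
    FileSystem shared = FileSystem.get(conf);
    // FileSystem.newInstance() returns a private instance that the caller owns.
    FileSystem owned = FileSystem.newInstance(conf);
    try {
      owned.exists(new Path("/tmp")); // illustrative call on the private handle
    } finally {
      owned.close(); // safe: does not invalidate 'shared'
    }
  }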
Example #2
  @Test
  public void testMRMaxLine() throws Exception {
    final int MAXPOS = 1024 * 1024;
    final int MAXLINE = 10 * 1024;
    final int BUF = 64 * 1024;
    final InputStream infNull =
        new InputStream() {
          int position = 0;
          final int MAXPOSBUF = 1024 * 1024 + BUF; // max LRR pos + LineReader buf

          @Override
          public int read() {
            ++position;
            return 0;
          }

          @Override
          public int read(byte[] b) {
            assertTrue("Read too many bytes from the stream", position < MAXPOSBUF);
            Arrays.fill(b, (byte) 0);
            position += b.length;
            return b.length;
          }
        };
    final LongWritable key = new LongWritable();
    final Text val = new Text();
    LOG.info("Reading a line from /dev/null");
    final Configuration conf = new Configuration(false);
    conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH, MAXLINE);
    conf.setInt("io.file.buffer.size", BUF); // used by LRR
    final LineRecordReader lrr = new LineRecordReader(infNull, 0, MAXPOS, conf);
    assertFalse("Read a line from null", lrr.next(key, val));
  }
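The test drives LineRecordReader through the same LineRecordReader.MAX_LINE_LENGTH knob a real job can set to guard against pathologically long lines; a minimal sketch of a job-side configuration (the 10 MB cap is illustrative):

  Configuration conf = new Configuration();
  // Lines longer than this cap are skipped by LineRecordReader instead of being
  // returned as records, which bounds per-record memory use.
  conf.setInt(
      org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH, 10 * 1024 * 1024);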
Example #3
  public static void main(String[] args) throws Exception {
    final Configuration configuration = HBaseConfiguration.create();
    configuration.addResource("grade.xml");
    String tables = configuration.get("hbase.cdn.tables");
    if (Strings.isNullOrEmpty(tables)) {
      return;
    }
    List<String> list = Lists.newArrayList(Splitter.on(",").split(tables));
    List<String> results =
        Lists.transform(
            list,
            new Function<String, String>() {
              @Override
              public String apply(@Nullable java.lang.String input) {
                return String.format(
                    configuration.get("hdfs.directory.base.db"), new Date(), input);
              }
            });

    String[] arrays =
        new String[] {
          Joiner.on(",").join(results),
          String.format(configuration.get("hdfs.directory.num.middle"), new Date()),
          String.format(configuration.get("hdfs.directory.num.result"), new Date())
        };
    AbstractJob job = new TopNJob();
    //        job.setStart(true);
    int i = ToolRunner.run(configuration, job, arrays);
    System.exit(i);
  }
Example #4
 /**
  * Add a {@link Path} to the list of inputs for the map-reduce job.
  *
  * @param job The {@link Job} to modify
  * @param path {@link Path} to be added to the list of inputs for the map-reduce job.
  */
 public static void addInputPath(Job job, Path path) throws IOException {
   Configuration conf = job.getConfiguration();
   path = path.getFileSystem(conf).makeQualified(path);
   String dirStr = StringUtils.escapeString(path.toString());
   String dirs = conf.get(INPUT_DIR);
   conf.set(INPUT_DIR, dirs == null ? dirStr : dirs + "," + dirStr);
 }
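Assuming this is the addInputPath of the new-API FileInputFormat (the INPUT_DIR handling above matches it), call sites simply pass the Job and each Path; the directories below are illustrative:

  Job job = Job.getInstance(new Configuration(), "word count");
  FileInputFormat.addInputPath(job, new Path("/data/logs/2015-01-01"));
  FileInputFormat.addInputPath(job, new Path("/data/logs/2015-01-02"));
  // INPUT_DIR now holds both directories as a comma-separated, escaped list.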
Example #5
 @Test
 public void testNonDefaultFS() throws IOException {
   FileSystem fs = cluster.getFileSystem();
   Configuration conf = fs.getConf();
   conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
   TestTrash.trashNonDefaultFS(conf);
 }
Example #6
 private static void dumpConfig(Configuration conf, StringBuilder sb) {
   Iterator<Map.Entry<String, String>> configIter = conf.iterator();
   List<Map.Entry<String, String>> configVals = new ArrayList<>();
   while (configIter.hasNext()) {
     configVals.add(configIter.next());
   }
   Collections.sort(
       configVals,
       new Comparator<Map.Entry<String, String>>() {
         @Override
         public int compare(Map.Entry<String, String> ent, Map.Entry<String, String> ent2) {
           return ent.getKey().compareTo(ent2.getKey());
         }
       });
   for (Map.Entry<String, String> entry : configVals) {
     // use get() to make sure variable substitution works
     if (entry.getKey().toLowerCase().contains("path")) {
       StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator);
       sb.append(entry.getKey()).append("=\n");
       while (st.hasMoreTokens()) {
         sb.append("    ").append(st.nextToken()).append(File.pathSeparator).append('\n');
       }
     } else {
       sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n');
     }
   }
 }
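The "use get() to make sure variable substitution works" comment relies on the fact that iterating a Configuration yields the raw stored values, while get() expands ${...} references; a small sketch (keys and values are illustrative):

  Configuration conf = new Configuration(false);
  conf.set("base.dir", "/srv/app");
  conf.set("log.dir", "${base.dir}/logs");
  // The iterator would report the raw value "${base.dir}/logs";
  // get() performs the substitution:
  String expanded = conf.get("log.dir"); // -> "/srv/app/logs"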
Example #7
 private static Map<String, Set<Path>> getPermMap(Configuration conf) {
   String permLoc = conf.get("hdfsproxy.user.permissions.file.location", "user-permissions.xml");
   if (conf.getResource(permLoc) == null) {
     LOG.warn("HdfsProxy user permissions file not found");
     return null;
   }
   Configuration permConf = new Configuration(false);
   permConf.addResource(permLoc);
   Map<String, Set<Path>> map = new HashMap<String, Set<Path>>();
   for (Map.Entry<String, String> e : permConf) {
     String k = e.getKey();
     String v = e.getValue();
     if (k != null && k.length() != 0 && v != null && v.length() != 0) {
       Set<Path> pathSet = new HashSet<Path>();
       String[] paths = v.split(",\\s*");
       for (String p : paths) {
         if (p.length() != 0) {
           pathSet.add(new Path(p));
         }
       }
       map.put(k, pathSet);
     }
   }
   return map;
 }
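Because the permissions file is loaded as an ordinary Hadoop Configuration resource, each property name is a user and each value a comma-separated list of allowed paths. A hypothetical equivalent built in code (user names and paths are made up):

  Configuration permConf = new Configuration(false);
  permConf.set("alice", "/user/alice, /data/shared");
  permConf.set("bob", "/user/bob");
  // getPermMap(...) over such a resource would yield:
  //   alice -> {/user/alice, /data/shared}
  //   bob   -> {/user/bob}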
Example #8
 /**
  * Remove the @Ignore to try out the timeout and retry settings
  *
  * @throws IOException
  */
 @Ignore
 @Test
 public void testTimeoutAndRetries() throws IOException {
   Configuration localConfig = HBaseConfiguration.create(this.conf);
   // This override mocks up our exists/get call to throw a RegionServerStoppedException.
   localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName());
   HTable table = new HTable(localConfig, TableName.META_TABLE_NAME);
   Throwable t = null;
   LOG.info("Start");
   try {
     // An exists call turns into a get w/ a flag.
     table.exists(new Get(Bytes.toBytes("abc")));
   } catch (SocketTimeoutException e) {
     // I expect this exception.
     LOG.info("Got expected exception", e);
     t = e;
   } catch (RetriesExhaustedException e) {
     // This is the old, unwanted behavior.  If we get here FAIL!!!
     fail();
   } finally {
     table.close();
   }
   LOG.info("Stop");
   assertTrue(t != null);
 }
Example #9
 /**
  * Test that operation timeout prevails over rpc default timeout and retries, etc.
  *
  * @throws IOException
  */
 @Test
 public void testRocTimeout() throws IOException {
   Configuration localConfig = HBaseConfiguration.create(this.conf);
   // This override mocks up our exists/get call to throw a RegionServerStoppedException.
   localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName());
   int pause = 10;
   localConfig.setInt("hbase.client.pause", pause);
   localConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
    // Set the operation timeout to be < the pause.  Expectation is that after the first pause, we
    // will fail out of the rpc because the rpc timeout will have been set to the operation timeout
    // and it has expired.  Otherwise, if this functionality is broken, all retries will be run --
    // all ten of them -- and we'll get a RetriesExhaustedException.
   localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1);
   HTable table = new HTable(localConfig, TableName.META_TABLE_NAME);
   Throwable t = null;
   try {
     // An exists call turns into a get w/ a flag.
     table.exists(new Get(Bytes.toBytes("abc")));
   } catch (SocketTimeoutException e) {
     // I expect this exception.
     LOG.info("Got expected exception", e);
     t = e;
   } catch (RetriesExhaustedException e) {
     // This is the old, unwanted behavior.  If we get here FAIL!!!
     fail();
   } finally {
     table.close();
   }
   assertTrue(t != null);
 }
Example #10
 @Override
 protected void setup(Reducer<Text, Text, Text, Text>.Context context)
     throws IOException, InterruptedException {
   Configuration conf = context.getConfiguration();
    a = Double.parseDouble(conf.get("noise_value_0")); // smoothing factor
    b = Double.parseDouble(conf.get("noise_value_1")); // smoothing factor
  }
Example #11
  @Test
  public void testConnectionPing() throws Exception {
    Configuration conf = new Configuration();
    int pingInterval = 50;
    conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
    conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval);
    final Server server =
        new RPC.Builder(conf)
            .setProtocol(TestProtocol.class)
            .setInstance(new TestImpl())
            .setBindAddress(ADDRESS)
            .setPort(0)
            .setNumHandlers(5)
            .setVerbose(true)
            .build();
    server.start();

    final TestProtocol proxy =
        RPC.getProxy(TestProtocol.class, TestProtocol.versionID, server.getListenerAddress(), conf);
    try {
      // this call will throw exception if server couldn't decode the ping
      proxy.sleep(pingInterval * 4);
    } finally {
      if (proxy != null) RPC.stopProxy(proxy);
    }
  }
Example #12
  @Test
  public void testRefreshSuperUserGroupsWithFileSystemBasedConfigurationProvider()
      throws IOException, YarnException {
    configuration.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

    // upload default configurations
    uploadDefaultConfiguration();

    try {
      rm = new MockRM(configuration);
      rm.init(configuration);
      rm.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }

    Configuration coreConf = new Configuration(false);
    coreConf.set("hadoop.proxyuser.test.groups", "test_groups");
    coreConf.set("hadoop.proxyuser.test.hosts", "test_hosts");
    uploadConfiguration(coreConf, "core-site.xml");

    rm.adminService.refreshSuperUserGroupsConfiguration(
        RefreshSuperUserGroupsConfigurationRequest.newInstance());
    Assert.assertTrue(ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").size() == 1);
    Assert.assertTrue(
        ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups"));

    Assert.assertTrue(ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").size() == 1);
    Assert.assertTrue(
        ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts"));
  }
Example #13
 @Test(timeout = 20000)
 public void testAppSubmissionWithInvalidDelegationToken() throws Exception {
   Configuration conf = new Configuration();
   conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
   UserGroupInformation.setConfiguration(conf);
   MockRM rm = new MockRM(conf);
   ByteBuffer tokens = ByteBuffer.wrap("BOGUS".getBytes());
   ContainerLaunchContext amContainer =
       ContainerLaunchContext.newInstance(
           new HashMap<String, LocalResource>(),
           new HashMap<String, String>(),
           new ArrayList<String>(),
           new HashMap<String, ByteBuffer>(),
           tokens,
           new HashMap<ApplicationAccessType, String>());
   ApplicationSubmissionContext appSubContext =
       ApplicationSubmissionContext.newInstance(
           ApplicationId.newInstance(1234121, 0),
           "BOGUS",
           "default",
           Priority.UNDEFINED,
           amContainer,
           false,
           true,
           1,
           Resource.newInstance(1024, 1),
           "BOGUS");
   SubmitApplicationRequest request = SubmitApplicationRequest.newInstance(appSubContext);
   try {
     rm.getClientRMService().submitApplication(request);
     fail("Error was excepted.");
   } catch (YarnException e) {
     Assert.assertTrue(e.getMessage().contains("Bad header found in token storage"));
   }
 }
Example #14
  /** Test that all open files are closed when client dies abnormally. */
  public void testDFSClientDeath() throws IOException {
    Configuration conf = new Configuration();
    System.out.println("Testing adbornal client death.");
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSClient dfsclient = dfs.dfs;
    try {

      // create a new file in home directory. Do not close it.
      //
      Path file1 = new Path("/clienttest.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);
      System.out.println("Created file clienttest.dat");

      // write to file
      writeFile(stm);

      // close the dfsclient before closing the output stream.
      // This should close all existing files.
      dfsclient.close();

      // reopen file system and verify that file exists.
      assertTrue(
          file1 + " does not exist.",
          AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
    } finally {
      cluster.shutdown();
    }
  }
Example #15
    @Override
    public void setup(Reducer<IntWritable, Text, NullWritable, NullWritable>.Context context) {
      Configuration conf = context.getConfiguration();
      FileSystem fs;
      try {
        fs = FileSystem.get(conf);
      } catch (Exception e) {
        throw new RuntimeException("Error opening the FileSystem!");
      }

      RetrievalEnvironment env = null;
      try {
        env = new RetrievalEnvironment(conf.get(Constants.IndexPath), fs);
      } catch (IOException e) {
        throw new RuntimeException("Unable to create RetrievalEnvironment!");
      }

      collectionDocumentCount = env.readCollectionDocumentCount();

      try {
        out = fs.create(new Path(env.getTermDocVectorsForwardIndex()), true);
        out.writeInt(env.readDocnoOffset());
        out.writeInt(collectionDocumentCount);
      } catch (Exception e) {
        throw new RuntimeException("Error in creating files!");
      }
    }
Example #16
  public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    GenericOptionsParser gop = new GenericOptionsParser(conf, args);
    conf = gop.getConfiguration();

    Job job = new Job(conf, conf.get("job_name"));
    FileInputFormat.addInputPaths(job, conf.get("input_dir"));
    Path output = new Path(conf.get("output_dir"));
    FileOutputFormat.setOutputPath(job, output);
    output.getFileSystem(conf).delete(output, true);

    job.setJarByClass(BrowerLogFormatMR.class);
    job.setMapperClass(BrowerLogFormatMapper.class);
    job.setReducerClass(BrowerLogFormatReducer.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(Text.class);
    job.setNumReduceTasks(1);

    int code = job.waitForCompletion(true) ? 0 : 1;
    return code;
  }
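Since the driver reads job_name, input_dir and output_dir straight from the Configuration, those keys are typically supplied as generic -D options. A hedged sketch of an invocation, assuming BrowerLogFormatMR implements Tool (which its getConf()/run(String[]) shape suggests); the values are illustrative:

  String[] args = {
    "-D", "job_name=browser-log-format",
    "-D", "input_dir=/logs/raw/2015-06-01",
    "-D", "output_dir=/logs/formatted/2015-06-01"
  };
  // ToolRunner applies the -D options to the Configuration before calling run().
  int exitCode = ToolRunner.run(new Configuration(), new BrowerLogFormatMR(), args);
  System.exit(exitCode);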
Example #17
  @Override
  public void configure(Configuration conf, FileSystem fs) {
    // read stopwords from file (stopwords will be empty set if file does not exist or is empty)
    String stopwordsFile = conf.get(Constants.StopwordList);
    stopwords = readInput(fs, stopwordsFile);
    String stemmedStopwordsFile = conf.get(Constants.StemmedStopwordList);
    stemmedStopwords = readInput(fs, stemmedStopwordsFile);
    isStopwordRemoval = !stopwords.isEmpty();
    isStemming = conf.getBoolean(Constants.Stemming, true);

    VocabularyWritable vocab;
    try {
      vocab =
          (VocabularyWritable)
              HadoopAlign.loadVocab(new Path(conf.get(Constants.CollectionVocab)), fs);
      setVocab(vocab);
    } catch (Exception e) {
      LOG.warn("No vocabulary provided to tokenizer.");
      vocab = null;
    }

    LOG.warn(
        "Stemming is "
            + isStemming
            + "; Stopword removal is "
            + isStopwordRemoval
            + "; number of stopwords: "
            + stopwords.size()
            + "; stemmed: "
            + stemmedStopwords.size());
  }
Example #18
  @Override
  public synchronized void serviceInit(Configuration conf) {
    this.maxTaskFailuresPerNode =
        conf.getInt(
            TezConfiguration.TEZ_MAX_TASK_FAILURES_PER_NODE,
            TezConfiguration.TEZ_MAX_TASK_FAILURES_PER_NODE_DEFAULT);
    this.nodeBlacklistingEnabled =
        conf.getBoolean(
            TezConfiguration.TEZ_NODE_BLACKLISTING_ENABLED,
            TezConfiguration.TEZ_NODE_BLACKLISTING_ENABLED_DEFAULT);
    this.blacklistDisablePercent =
        conf.getInt(
            TezConfiguration.TEZ_NODE_BLACKLISTING_IGNORE_THRESHOLD,
            TezConfiguration.TEZ_NODE_BLACKLISTING_IGNORE_THRESHOLD_DEFAULT);

    LOG.info(
        "blacklistDisablePercent is "
            + blacklistDisablePercent
            + ", blacklistingEnabled: "
            + nodeBlacklistingEnabled
            + ", maxTaskFailuresPerNode: "
            + maxTaskFailuresPerNode);

    if (blacklistDisablePercent < -1 || blacklistDisablePercent > 100) {
      throw new TezUncheckedException(
          "Invalid blacklistDisablePercent: "
              + blacklistDisablePercent
              + ". Should be an integer between 0 and 100 or -1 to disabled");
    }
  }
Example #19
 private static Map<String, Set<BigInteger>> getCertsMap(Configuration conf) {
   String certsLoc = conf.get("hdfsproxy.user.certs.file.location", "user-certs.xml");
   if (conf.getResource(certsLoc) == null) {
     LOG.warn("HdfsProxy user certs file not found");
     return null;
   }
   Configuration certsConf = new Configuration(false);
   certsConf.addResource(certsLoc);
   Map<String, Set<BigInteger>> map = new HashMap<String, Set<BigInteger>>();
   for (Map.Entry<String, String> e : certsConf) {
     String k = e.getKey();
     String v = e.getValue().trim();
     if (k != null && k.length() != 0 && v != null && v.length() != 0) {
       Set<BigInteger> numSet = new HashSet<BigInteger>();
       String[] serialnumbers = v.split("\\s*,\\s*");
       for (String num : serialnumbers) {
         if (num.length() != 0) {
           numSet.add(new BigInteger(num, 16));
         }
       }
       map.put(k, numSet);
     }
   }
   return map;
 }
Example #20
  private LogisticRegressionModel instantiateSparkModel() {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", topologyConfig.getProperty("hdfs.url"));

    double[] sparkModelInfo = null;

    try {
      sparkModelInfo =
          getSparkModelInfoFromHDFS(
              new Path(topologyConfig.getProperty("hdfs.url") + "/tmp/sparkML_weights"), conf);
    } catch (Exception e) {
      LOG.error("Couldn't instantiate Spark model in prediction bolt: " + e.getMessage());
      e.printStackTrace();

      throw new RuntimeException(e);
    }

    // all numbers besides the last value are the weights
    double[] weights = Arrays.copyOfRange(sparkModelInfo, 0, sparkModelInfo.length - 1);

    // the last number in the array is the intercept
    double intercept = sparkModelInfo[sparkModelInfo.length - 1];

    org.apache.spark.mllib.linalg.Vector weightsV = (Vectors.dense(weights));
    return new LogisticRegressionModel(weightsV, intercept);
  }
Example #21
 /**
  * Get a PathFilter instance of the filter set for the input paths.
  *
  * @return the PathFilter instance set for the job, NULL if none has been set.
  */
 public static PathFilter getInputPathFilter(JobContext context) {
   Configuration conf = context.getConfiguration();
   Class<?> filterClass = conf.getClass(PATHFILTER_CLASS, null, PathFilter.class);
   return (filterClass != null)
       ? (PathFilter) ReflectionUtils.newInstance(filterClass, conf)
       : null;
 }
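The PATHFILTER_CLASS key read here is the one FileInputFormat.setInputPathFilter writes, so registering a filter looks like the sketch below (HiddenFileFilter is an illustrative filter, not part of the API):

  public static class HiddenFileFilter implements PathFilter {
    @Override
    public boolean accept(Path path) {
      String name = path.getName();
      // Skip typical hidden/system files such as _SUCCESS and .crc files.
      return !name.startsWith("_") && !name.startsWith(".");
    }
  }

  // Registration; getInputPathFilter(context) will later return an instance of it.
  Job job = Job.getInstance(new Configuration(), "filtered input");
  FileInputFormat.setInputPathFilter(job, HiddenFileFilter.class);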
Example #22
  @VisibleForTesting
  void initConfig() throws IOException {

    this.cgroupPrefix =
        conf.get(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn");
    this.cgroupMount = conf.getBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
    this.cgroupMountPath = conf.get(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);

    this.deleteCgroupTimeout =
        conf.getLong(
            YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT,
            YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT);
    // remove extra /'s at end or start of cgroupPrefix
    if (cgroupPrefix.charAt(0) == '/') {
      cgroupPrefix = cgroupPrefix.substring(1);
    }

    this.strictResourceUsageMode =
        conf.getBoolean(
            YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE,
            YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE);

    int len = cgroupPrefix.length();
    if (cgroupPrefix.charAt(len - 1) == '/') {
      cgroupPrefix = cgroupPrefix.substring(0, len - 1);
    }
  }
Example #23
  @Test
  public void testJSONBulkImporter() throws Exception {
    // Prepare input file:
    File inputFile = File.createTempFile("TestJSONImportInput", ".txt", getLocalTempDir());
    TestingResources.writeTextFile(
        inputFile, TestingResources.get(BulkImporterTestUtils.JSON_IMPORT_DATA));

    Configuration conf = getConf();
    conf.set(
        DescribedInputTextBulkImporter.CONF_FILE,
        BulkImporterTestUtils.localResource(BulkImporterTestUtils.FOO_IMPORT_DESCRIPTOR));

    // Run the bulk-import:
    final KijiMapReduceJob job =
        KijiBulkImportJobBuilder.create()
            .withConf(conf)
            .withBulkImporter(JSONBulkImporter.class)
            .withInput(MapReduceJobInputs.newTextMapReduceJobInput(new Path(inputFile.toString())))
            .withOutput(new DirectKijiTableMapReduceJobOutput(mTable.getURI()))
            .build();
    assertTrue(job.run());

    final Counters counters = job.getHadoopJob().getCounters();
    assertEquals(
        3, counters.findCounter(JobHistoryCounters.BULKIMPORTER_RECORDS_PROCESSED).getValue());
    assertEquals(
        1, counters.findCounter(JobHistoryCounters.BULKIMPORTER_RECORDS_INCOMPLETE).getValue());
    assertEquals(
        0, counters.findCounter(JobHistoryCounters.BULKIMPORTER_RECORDS_REJECTED).getValue());

    // Validate output:
    final KijiRowScanner scanner = mReader.getScanner(KijiDataRequest.create("info"));
    BulkImporterTestUtils.validateImportedRows(scanner, false);
    scanner.close();
  }
Example #24
  @Test
  public void testPopulateConfiguration() {
    String[] args =
        new String[] {
          "-inputPath",
          "input",
          "-url",
          "jdbc:mysql://localhost:3306/hiho",
          "-userName",
          "root",
          "-password",
          "newpwd",
          "-querySuffix",
          "mrTest fields terminated by ','"
        };
    ExportToMySQLDB exportToMySQLDB = new ExportToMySQLDB();

    Configuration conf = new Configuration();
    exportToMySQLDB.populateConfiguration(args, conf);

    assertEquals("jdbc:mysql://localhost:3306/hiho", conf.get(DBConfiguration.URL_PROPERTY));
    assertEquals("root", conf.get(DBConfiguration.USERNAME_PROPERTY));
    assertEquals("newpwd", conf.get(DBConfiguration.PASSWORD_PROPERTY));
    assertEquals("mrTest fields terminated by ','", conf.get(HIHOConf.LOAD_QUERY_SUFFIX));
  }
Example #25
  @Override
  protected void configureJob(Job job) throws IOException {
    Configuration conf = job.getConfiguration();

    job.setJarByClass(PartialBuilder.class);

    FileInputFormat.setInputPaths(job, getDataPath());
    FileOutputFormat.setOutputPath(job, getOutputPath(conf));

    job.setOutputKeyClass(TreeID.class);
    job.setOutputValueClass(MapredOutput.class);

    job.setMapperClass(Step1Mapper.class);
    job.setNumReduceTasks(0); // no reducers

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    // For this implementation to work, mapred.map.tasks needs to be set to the actual
    // number of mappers Hadoop will use:
    TextInputFormat inputFormat = new TextInputFormat();
    List<?> splits = inputFormat.getSplits(job);
    if (splits == null || splits.isEmpty()) {
      log.warn("Unable to compute number of splits?");
    } else {
      int numSplits = splits.size();
      log.info("Setting mapred.map.tasks = {}", numSplits);
      conf.setInt("mapred.map.tasks", numSplits);
    }
  }
Example #26
  @Override
  protected synchronized void startInternal() throws Exception {
    // create filesystem only now, as part of service-start. By this time, RM is
    // authenticated with kerberos so we are good to create a file-system
    // handle.
    fsConf = new Configuration(getConfig());
    fsConf.setBoolean("dfs.client.retry.policy.enabled", true);
    String retryPolicy =
        fsConf.get(
            YarnConfiguration.FS_RM_STATE_STORE_RETRY_POLICY_SPEC,
            YarnConfiguration.DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC);
    fsConf.set("dfs.client.retry.policy.spec", retryPolicy);

    String scheme = fsWorkingPath.toUri().getScheme();
    if (scheme == null) {
      scheme = FileSystem.getDefaultUri(fsConf).getScheme();
    }
    if (scheme != null) {
      String disableCacheName = String.format("fs.%s.impl.disable.cache", scheme);
      fsConf.setBoolean(disableCacheName, true);
    }

    fs = fsWorkingPath.getFileSystem(fsConf);
    mkdirsWithRetries(rmDTSecretManagerRoot);
    mkdirsWithRetries(rmAppRoot);
    mkdirsWithRetries(amrmTokenSecretManagerRoot);
    mkdirsWithRetries(reservationRoot);
  }
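The String.format above builds the per-scheme cache switch honored by Hadoop's FileSystem cache; a minimal illustration of the resulting key for an hdfs:// working path (the scheme is hard-coded here only for the sketch):

  Configuration fsConf = new Configuration();
  String scheme = "hdfs"; // derived from fsWorkingPath.toUri().getScheme() in the code above
  fsConf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
  // With the cache disabled, Path.getFileSystem(fsConf) returns a fresh FileSystem
  // instance for that scheme, so the retry settings applied above stay private to it.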
Example #27
 @Override
 public int run(String[] strings) throws Exception {
   Configuration configuration = getConf();
   configuration.setLong("mapred.min.split.size", 512 * 1024 * 1024L);
   Job numJob = new Job(configuration, "calculate film program seed num job ");
   Path[] paths = getPaths(strings[0].split(","));
   HadoopUtils.deleteIfExist(strings[1]);
   MapReduceUtils.initMapperJob(
       NumCountMapper.class, Text.class, Text.class, this.getClass(), numJob, paths);
   // TableMapReduceUtil.initTableReducerJob(strings[1], NumCountReducer.class, numJob);
   MapReduceUtils.initReducerJob(new Path(strings[1]), NumCountReducer.class, numJob);
   numJob.waitForCompletion(true);
   Job programeSets = new Job(configuration, "calculate program set num job");
   HadoopUtils.deleteIfExist(strings[2]);
   MapReduceUtils.initMapperJob(
       NumProgramSetsMapper.class,
       Text.class,
       Text.class,
       this.getClass(),
       programeSets,
       new Path(strings[1]));
   programeSets.setCombinerClass(NumProgramSetCombiner.class);
   MapReduceUtils.initReducerJob(new Path(strings[2]), NumProgramSetsReducer.class, programeSets);
   return programeSets.waitForCompletion(true) ? 0 : 1;
   //        return 0;
 }
Example #28
  /** v1 service implementation to submit a job, either workflow or coordinator */
  @Override
  protected JSONObject submitJob(HttpServletRequest request, Configuration conf)
      throws XServletException, IOException {
    JSONObject json = null;

    String jobType = request.getParameter(RestConstants.JOBTYPE_PARAM);

    if (jobType == null) {
      String wfPath = conf.get(OozieClient.APP_PATH);
      String coordPath = conf.get(OozieClient.COORDINATOR_APP_PATH);
      String bundlePath = conf.get(OozieClient.BUNDLE_APP_PATH);

      ServletUtilities.ValidateAppPath(wfPath, coordPath, bundlePath);

      if (wfPath != null) {
        json = submitWorkflowJob(request, conf);
      } else if (coordPath != null) {
        json = submitCoordinatorJob(request, conf);
      } else {
        json = submitBundleJob(request, conf);
      }
    } else { // This is an HTTP submission job
      if (jobType.equals("pig") || jobType.equals("mapreduce")) {
        json = submitHttpJob(request, conf, jobType);
      } else {
        throw new XServletException(
            HttpServletResponse.SC_BAD_REQUEST,
            ErrorCode.E0303,
            RestConstants.JOBTYPE_PARAM,
            jobType);
      }
    }
    return json;
  }
Example #29
  static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
      DatanodeID datanodeid,
      Configuration conf,
      int socketTimeout,
      boolean connectToDnViaHostname,
      LocatedBlock locatedBlock)
      throws IOException {
    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
    InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
    }

    // Since we're creating a new UserGroupInformation here, we know that no
    // future RPC proxies will be able to re-use the same connection. And
    // usages of this proxy tend to be one-off calls.
    //
    // This is a temporary fix: callers should really achieve this by using
    // RPC.stopProxy() on the resulting object, but this is currently not
    // working in trunk. See the discussion on HDFS-1965.
    Configuration confWithNoIpcIdle = new Configuration(conf);
    confWithNoIpcIdle.setInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

    UserGroupInformation ticket =
        UserGroupInformation.createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
    ticket.addToken(locatedBlock.getBlockToken());
    return createClientDatanodeProtocolProxy(
        addr, ticket, confWithNoIpcIdle, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
  }
Example #30
  @Test
  public void testGetTokensForViewFS() throws IOException, URISyntaxException {
    Configuration conf = new Configuration(jConf);
    FileSystem dfs = dfsCluster.getFileSystem();
    String serviceName = dfs.getCanonicalServiceName();

    Path p1 = new Path("/mount1");
    Path p2 = new Path("/mount2");
    p1 = dfs.makeQualified(p1);
    p2 = dfs.makeQualified(p2);

    conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
    conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
    Credentials credentials = new Credentials();
    Path lp1 = new Path("viewfs:///dir1");
    Path lp2 = new Path("viewfs:///dir2");
    Path[] paths = new Path[2];
    paths[0] = lp1;
    paths[1] = lp2;
    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);

    Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> tt : tns) {
      System.out.println("token=" + tt);
      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
          && tt.getService().equals(new Text(serviceName))) {
        found = true;
      }
      assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
    }
  }