/** Verify name-node port usage. */
  public void testNameNodePorts() throws Exception {
    NameNode nn = null;
    try {
      nn = startNameNode();

      // start another namenode on the same port
      Configuration conf2 = new Configuration(config);
      conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
      NameNode.format(conf2);
      boolean started = canStartNameNode(conf2);
      assertFalse(started); // should fail

      // start on a different main port, but still with the same http port
      FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");
      started = canStartNameNode(conf2);
      assertFalse(started); // should fail again: the http port is still in use

      // reset conf2 since NameNode modifies it
      FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");
      // different http port
      conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
      started = canStartNameNode(conf2);
      assertTrue(started); // should start now
    } finally {
      stopNameNode(nn);
    }
  }
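The canStartNameNode and stopNameNode helpers used above are not part of this excerpt; a minimal sketch of what they might look like, assuming a BindException is treated as "port already in use":

  // Hypothetical helpers for the test above; the real test class may differ.
  public boolean canStartNameNode(Configuration conf) throws IOException {
    NameNode nn2 = null;
    try {
      nn2 = NameNode.createNameNode(new String[] {}, conf);
    } catch (IOException e) {
      if (e instanceof java.net.BindException) {
        return false; // the requested port is already taken
      }
      throw e;
    } finally {
      stopNameNode(nn2);
    }
    return true;
  }

  public void stopNameNode(NameNode nn) {
    if (nn != null) {
      nn.stop();
    }
  }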
  private void mySetup(int stripeLength) throws Exception {
    if (System.getProperty("hadoop.log.dir") == null) {
      String base = new File(".").getAbsolutePath();
      System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
    }

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:" + MiniDFSCluster.getFreePort());
    conf.set("mapred.raid.http.address", "localhost:0");

    Utils.loadTestCodecs(
        conf, stripeLength, stripeLength, 1, 3, "/destraid", "/destraidrs", false, true);

    conf.setBoolean("dfs.permissions", false);
    // Make sure initial repl is smaller than NUM_DATANODES
    conf.setInt(RaidNode.RAID_PARITY_INITIAL_REPL_KEY, 1);

    dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfsCluster.waitActive();
    fileSys = dfsCluster.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    mr = new MiniMRCluster(4, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

    FileSystem.setDefaultUri(conf, namenode);
    conf.set("mapred.job.tracker", jobTrackerName);
    conf.set(RaidNode.RAID_CHECKSUM_STORE_CLASS_KEY, "org.apache.hadoop.raid.LocalChecksumStore");
    conf.setBoolean(RaidNode.RAID_CHECKSUM_STORE_REQUIRED_KEY, true);
    conf.set(LocalChecksumStore.LOCAL_CHECK_STORE_DIR_KEY, CHECKSUM_STORE_DIR);
    conf.set(RaidNode.RAID_STRIPE_STORE_CLASS_KEY, "org.apache.hadoop.raid.LocalStripeStore");
    conf.set(LocalStripeStore.LOCAL_STRIPE_STORE_DIR_KEY, STRIPE_STORE_DIR);
    ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
    cb.addPolicy("RaidTest1", "/user/dhruba/raidtest", 1, 1);
    cb.addPolicy("RaidTest2", "/user/dhruba/raidtestrs", 1, 1, "rs");
    cb.persist();
  }
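A setup like this is normally paired with a teardown; a minimal sketch, assuming only the mr and dfsCluster fields created above:

  private void myTearDown() throws Exception {
    if (mr != null) {
      mr.shutdown();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }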
Example #3
File: Warehouse.java  Project: cnhans/tdw
  public boolean deleteDirThrowExp(Path f, boolean recursive) throws MetaException {
    LOG.info("deleting  " + f);
    try {
      FileSystem fs = getFs(f);
      if (!fs.exists(f)) {
        return false;
      }

      Configuration dupConf = new Configuration(conf);
      FileSystem.setDefaultUri(dupConf, fs.getUri());

      Trash trashTmp = new Trash(dupConf);
      if (trashTmp.moveToTrash(f)) {
        LOG.info("Moved to trash: " + f);
        return true;
      }
      if (fs.delete(f, true)) {
        LOG.info("Deleted the diretory " + f);
        return true;
      }
      if (fs.exists(f)) {
        throw new MetaException("Unable to delete directory: " + f);
      }
    } catch (FileNotFoundException e) {
      return true;
    } catch (Exception e) {
      MetaStoreUtils.logAndThrowMetaException(e);
    }
    return false;
  }
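A hedged usage sketch for the method above; the Warehouse constructor call and the table path are illustrative assumptions:

  // Hypothetical caller of deleteDirThrowExp (Warehouse(conf) throws MetaException).
  Warehouse wh = new Warehouse(conf);
  Path tablePath = new Path("/user/hive/warehouse/some_table");
  if (!wh.deleteDirThrowExp(tablePath, true)) {
    LOG.warn("nothing was deleted at " + tablePath);
  }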
Example #4
 static {
   try {
     FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
     CONF.set("dfs.http.address", "0.0.0.0:0");
     NameNode.format(CONF);
     namenode = new NameNode(CONF);
   } catch (IOException e) {
     e.printStackTrace();
     throw (RuntimeException) new RuntimeException().initCause(e);
   }
   FSNamesystem fsNamesystem = namenode.getNamesystem();
   replicator = fsNamesystem.blockManager.replicator;
   cluster = fsNamesystem.clusterMap;
   // construct network topology
   for (int i = 0; i < NUM_OF_DATANODES; i++) {
     cluster.add(dataNodes[i]);
   }
   for (int i = 0; i < NUM_OF_DATANODES; i++) {
     dataNodes[i].updateHeartbeat(
         2 * FSConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
         0L,
         2 * FSConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
         0);
   }
 }
Example #5
  @Before
  public void setUpNameDirs() throws Exception {
    config = new Configuration();
    String baseDir = System.getProperty("test.build.data", "/tmp");

    hdfsDir = new File(baseDir, "dfs");
    if (hdfsDir.exists()) {
      FileUtil.fullyDelete(hdfsDir);
    }

    hdfsDir.mkdir();
    path1 = new File(hdfsDir, "name1");
    path2 = new File(hdfsDir, "name2");
    path3 = new File(hdfsDir, "name3");

    path1.mkdir();
    path2.mkdir();
    path3.mkdir();

    String nameDir = path1.getPath() + "," + path2.getPath();
    config.set("dfs.name.dir", nameDir);
    config.set("dfs.name.edits.dir", nameDir + "," + path3.getPath());
    config.set("fs.checkpoint.dir", new File(hdfsDir, "secondary").getPath());

    FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
    config.set("dfs.secondary.http.address", "0.0.0.0:0");
    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  }
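A matching cleanup usually accompanies this setup; a minimal sketch, assuming JUnit's @After and the hdfsDir field created above:

  @After
  public void cleanUpNameDirs() throws Exception {
    if (hdfsDir != null && hdfsDir.exists()) {
      FileUtil.fullyDelete(hdfsDir);
    }
  }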
 private Configuration getConf() throws IOException {
   Configuration conf = new Configuration();
   FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
   conf.set("dfs.namenode.http-address", "0.0.0.0:0");
   conf.set("dfs.namenode.secondary.http-address", "0.0.0.0:0");
   conf.setBoolean("dfs.permissions.enabled", false);
   return conf;
 }
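Binding every address to port 0 lets a test start a real NameNode without fixed-port conflicts; a hedged sketch of how such a configuration might be consumed (DFSTestUtil.formatNameNode is assumed to be available on the test classpath):

 // Hypothetical use of getConf(): format and start a NameNode on ephemeral ports.
 Configuration conf = getConf();
 DFSTestUtil.formatNameNode(conf);
 NameNode nn = new NameNode(conf);
 try {
   // ... exercise the NameNode ...
 } finally {
   nn.stop();
 }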
 @CliCommand(
     value = {PREFIX + "fs"},
     help = "Sets the Hadoop namenode")
 public void setFs(
     @CliOption(
             key = {"", "namenode"},
             mandatory = true,
             help = "namenode address - can be local|<namenode:port>")
         String namenode) {
   FileSystem.setDefaultUri(hadoopConfiguration, namenode);
 }
  @BeforeClass
  public static void setUpClass() throws Exception {
    conf = new Configuration();

    // create a fake FileSystem (MyFS) and associate it
    // with the "hdfs" scheme.
    URI uri = new URI(DelegationTokenRenewer.SCHEME + "://localhost:0");
    System.out.println("scheme is : " + uri.getScheme());
    conf.setClass("fs." + uri.getScheme() + ".impl", MyFS.class, DistributedFileSystem.class);
    FileSystem.setDefaultUri(conf, uri);
    LOG.info("filesystem uri = " + FileSystem.getDefaultUri(conf).toString());
  }
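MyFS itself is not shown in this excerpt; a minimal sketch of what such a stub might look like (the class body is an assumption, not the test's actual implementation):

  // Hypothetical stand-in registered for the "hdfs" scheme above: a
  // DistributedFileSystem subclass that skips any real NameNode connection.
  public static class MyFS extends DistributedFileSystem {
    public MyFS() {}

    @Override
    public void initialize(URI uri, Configuration conf) {
      // intentionally empty: no RPC connection is attempted
    }
  }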
  @Before
  public void setUp() throws Exception {
    config = new Configuration();
    config.setClass(
        "hadoop.security.group.mapping",
        TestRefreshUserMappings.MockUnixGroupsMapping.class,
        GroupMappingServiceProvider.class);
    config.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
    Groups.getUserToGroupsMappingService(config);

    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
    cluster = new MiniDFSCluster(0, config, 1, true, true, true, null, null, null, null);
    cluster.waitActive();
  }
Example #10
 static JobConf configureJobConf(
     JobConf conf,
     String namenode,
     int jobTrackerPort,
     int jobTrackerInfoPort,
     UserGroupInformation ugi) {
   JobConf result = new JobConf(conf);
   FileSystem.setDefaultUri(result, namenode);
   result.set("mapred.job.tracker", "localhost:" + jobTrackerPort);
   result.set("mapred.job.tracker.http.address", "127.0.0.1:" + jobTrackerInfoPort);
   // for debugging have all task output sent to the test output
   JobClient.setTaskOutputFilter(result, JobClient.TaskStatusFilter.ALL);
   return result;
 }
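A hedged usage sketch for the helper above; the cluster variables and the info-port value are illustrative assumptions:

 // Hypothetical caller: build a client-side JobConf against a mini cluster.
 String namenode = dfsCluster.getFileSystem().getUri().toString();
 int jtInfoPort = 50030; // illustrative; read the real port from the running JobTracker
 JobConf jobConf = configureJobConf(new JobConf(), namenode, mr.getJobTrackerPort(), jtInfoPort, null);
 JobClient client = new JobClient(jobConf);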
Example #11
 static JobConf configureJobConf(
     JobConf conf,
     String namenode,
     int jobTrackerPort,
     int jobTrackerInfoPort,
     UserGroupInformation ugi) {
   JobConf result = new JobConf(conf);
   FileSystem.setDefaultUri(result, namenode);
   result.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
   result.set(JTConfig.JT_IPC_ADDRESS, "localhost:" + jobTrackerPort);
   result.set(JTConfig.JT_HTTP_ADDRESS, "127.0.0.1:" + jobTrackerInfoPort);
   // for debugging have all task output sent to the test output
   JobClient.setTaskOutputFilter(result, JobClient.TaskStatusFilter.ALL);
   return result;
 }
  /** Start the name-node. */
  public NameNode startNameNode() throws IOException {
    String dataDir = System.getProperty("test.build.data");
    hdfsDir = new File(dataDir, "dfs");
    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
      throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
    }
    config = new Configuration();
    config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
    FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
    config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
    NameNode.format(config);

    String[] args = new String[] {};
    // NameNode will modify config with the ports it bound to
    return NameNode.createNameNode(args, config);
  }
Example #13
  public void mySetup(boolean inlineChecksum) throws Exception {
    conf = new Configuration();
    if (System.getProperty("hadoop.log.dir") == null) {
      String base = new File(".").getAbsolutePath();
      System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
    }

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists

    conf.setInt("fs.trash.interval", 1440);
    conf.setInt("dfs.block.size", BLOCK_SIZE);
    conf.setBoolean("dfs.use.inline.checksum", inlineChecksum);

    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

    FileSystem.setDefaultUri(conf, namenode);
  }
  @BeforeClass
  public static void setupCluster() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final String[] racks = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2", "/rack2"};
    storages = DFSTestUtil.createDatanodeStorageInfos(racks);
    dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
    DFSTestUtil.formatNameNode(conf);
    namenode = new NameNode(conf);
    int blockSize = 1024;

    dnrList = new ArrayList<DatanodeRegistration>();
    dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

    // Register DNs
    for (int i = 0; i < 6; i++) {
      DatanodeRegistration dnr =
          new DatanodeRegistration(
              dataNodes[i],
              new StorageInfo(NodeType.DATA_NODE),
              new ExportedBlockKeys(),
              VersionInfo.getVersion());
      dnrList.add(dnr);
      dnManager.registerDatanode(dnr);
      dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
          2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L,
          2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L);
      dataNodes[i].updateHeartbeat(
          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]), 0L, 0L, 0, 0, null);
    }
  }
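A matching @AfterClass typically stops the NameNode started above; a minimal sketch:

  // Hypothetical companion teardown for setupCluster().
  @AfterClass
  public static void tearDownCluster() {
    if (namenode != null) {
      namenode.stop();
    }
  }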
Example #15
  /**
   * Modify the configuration according to user-specified generic options
   *
   * @param conf Configuration to be modified
   * @param line User-specified generic options
   */
  private void processGeneralOptions(Configuration conf, CommandLine line) throws IOException {
    if (line.hasOption("fs")) {
      FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
    }

    if (line.hasOption("jt")) {
      conf.set("mapred.job.tracker", line.getOptionValue("jt"), "from -jt command line option");
    }
    if (line.hasOption("conf")) {
      String[] values = line.getOptionValues("conf");
      for (String value : values) {
        conf.addResource(new Path(value));
      }
    }
    if (line.hasOption("libjars")) {
      conf.set(
          "tmpjars",
          validateFiles(line.getOptionValue("libjars"), conf),
          "from -libjars command line option");
      // setting libjars in client classpath
      URL[] libjars = getLibJars(conf);
      if (libjars != null && libjars.length > 0) {
        conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
        Thread.currentThread()
            .setContextClassLoader(
                new URLClassLoader(libjars, Thread.currentThread().getContextClassLoader()));
      }
    }
    if (line.hasOption("files")) {
      conf.set(
          "tmpfiles",
          validateFiles(line.getOptionValue("files"), conf),
          "from -files command line option");
    }
    if (line.hasOption("archives")) {
      conf.set(
          "tmparchives",
          validateFiles(line.getOptionValue("archives"), conf),
          "from -archives command line option");
    }
    if (line.hasOption('D')) {
      String[] property = line.getOptionValues('D');
      for (String prop : property) {
        String[] keyval = prop.split("=", 2);
        if (keyval.length == 2) {
          conf.set(keyval[0], keyval[1], "from command line");
        }
      }
    }
    conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);

    // tokensFile
    if (line.hasOption("tokenCacheFile")) {
      String fileName = line.getOptionValue("tokenCacheFile");
      // check if the local file exists
      FileSystem localFs = FileSystem.getLocal(conf);
      Path p = new Path(fileName);
      if (!localFs.exists(p)) {
        throw new FileNotFoundException("File " + fileName + " does not exist.");
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("setting conf tokensFile: " + fileName);
      }
      conf.set(
          "mapreduce.job.credentials.json",
          localFs.makeQualified(p).toString(),
          "from -tokenCacheFile command line option");
    }
  }
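For reference, the usual command-line form of these switches maps onto plain Configuration calls; a hedged equivalence sketch with illustrative values:

  // What "-fs hdfs://nn:8020 -jt jt:8021 -D mapred.reduce.tasks=4" amounts to
  // after processGeneralOptions has run (host names and values are illustrative).
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://nn:8020");
  conf.set("mapred.job.tracker", "jt:8021");
  conf.set("mapred.reduce.tasks", "4");
  conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);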
Example #16
File: NameNode.java  Project: imace/hops
 protected void setRpcServerAddress(Configuration conf, InetSocketAddress rpcAddress) {
   FileSystem.setDefaultUri(conf, getUri(rpcAddress));
 }
Example #17
  /** Check that we can reach a NameNode or a JobTracker using a specific socket factory */
  public void testSocketFactory() throws IOException {
    // Create a standard mini-cluster
    Configuration sconf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(sconf, 1, true, null);
    final int nameNodePort = cluster.getNameNodePort();

    // Get a reference to its DFS directly
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem directDfs = (DistributedFileSystem) fs;

    // Get another reference via network using a specific socket factory
    Configuration cconf = new Configuration();
    FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%s/", nameNodePort + 10));
    cconf.set(
        "hadoop.rpc.socket.factory.class.default", "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set(
        "hadoop.rpc.socket.factory.class.ClientProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set(
        "hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");

    fs = FileSystem.get(cconf);
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    JobClient client = null;
    MiniMRCluster mr = null;
    try {
      // This will test RPC to the NameNode only.
      // could we test Client-DataNode connections?
      Path filePath = new Path("/dir");

      assertFalse(directDfs.exists(filePath));
      assertFalse(dfs.exists(filePath));

      directDfs.mkdirs(filePath);
      assertTrue(directDfs.exists(filePath));
      assertTrue(dfs.exists(filePath));

      // This will test RPC to a JobTracker
      mr = new MiniMRCluster(1, fs.getUri().toString(), 1);
      final int jobTrackerPort = mr.getJobTrackerPort();

      JobConf jconf = new JobConf(cconf);
      jconf.set("mapred.job.tracker", String.format("localhost:%d", jobTrackerPort + 10));
      client = new JobClient(jconf);

      JobStatus[] jobs = client.jobsToComplete();
      assertTrue(jobs.length == 0);

    } finally {
      try {
        if (client != null) client.close();
      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      try {
        if (dfs != null) dfs.close();

      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      try {
        if (directDfs != null) directDfs.close();

      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      try {
        if (cluster != null) cluster.shutdown();

      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      if (mr != null) {
        try {
          mr.shutdown();
        } catch (Exception ignored) {
          ignored.printStackTrace();
        }
      }
    }
  }
  /** Creates a MiniDFS instance with a raided file in it. */
  private void setUp(boolean doHar) throws IOException, ClassNotFoundException {

    final int timeBeforeHar;
    if (doHar) {
      timeBeforeHar = 0;
    } else {
      timeBeforeHar = -1;
    }

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // make all deletions not go through Trash
    conf.set("fs.shell.delete.classname", "org.apache.hadoop.hdfs.DFSClient");

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    // use local block fixer
    conf.set("raid.blockfix.classname", "org.apache.hadoop.raid.LocalBlockFixer");

    conf.set("raid.server.address", "localhost:0");
    conf.setInt("hdfs.raid.stripeLength", STRIPE_BLOCKS);
    conf.set("hdfs.raid.locations", RAID_DIR);

    conf.setInt("dfs.corruptfilesreturned.max", 500);

    conf.setBoolean("dfs.permissions", false);

    cluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    cluster.waitActive();
    dfs = (DistributedFileSystem) cluster.getFileSystem();
    String namenode = dfs.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);

    FileWriter fileWriter = new FileWriter(CONFIG_FILE);
    fileWriter.write("<?xml version=\"1.0\"?>\n");
    String str =
        "<configuration> "
            + "  <srcPath prefix=\""
            + DIR_PATH
            + "\"> "
            + "    <policy name = \"RaidTest1\"> "
            + "      <erasureCode>xor</erasureCode> "
            + "      <destPath> "
            + RAID_DIR
            + " </destPath> "
            + "      <property> "
            + "        <name>targetReplication</name> "
            + "        <value>1</value> "
            + "        <description>after RAIDing, decrease the replication "
            + "factor of a file to this value.</description> "
            + "      </property> "
            + "      <property> "
            + "        <name>metaReplication</name> "
            + "        <value>1</value> "
            + "        <description> replication factor of parity file</description> "
            + "      </property> "
            + "      <property> "
            + "        <name>modTimePeriod</name> "
            + "        <value>2000</value> "
            + "        <description>time (milliseconds) after a file is modified "
            + "to make it a candidate for RAIDing</description> "
            + "      </property> ";

    if (timeBeforeHar >= 0) {
      str +=
          "      <property> "
              + "        <name>time_before_har</name> "
              + "        <value>"
              + timeBeforeHar
              + "</value> "
              + "        <description> amount of time waited before har'ing parity "
              + "files</description> "
              + "     </property> ";
    }

    str += "    </policy>" + "  </srcPath>" + "</configuration>";

    fileWriter.write(str);
    fileWriter.close();

    createTestFile(FILE_PATH0);
    createTestFile(FILE_PATH1);

    Path[] filePaths = {FILE_PATH0, FILE_PATH1};
    raidTestFiles(RAID_PATH, filePaths, doHar);

    clientConf = new Configuration(raidConf);
    clientConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    clientConf.set("fs.raid.underlyingfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

    // prepare shell and arguments
    shell = new RaidShell(clientConf);
    args = new String[2];
    args[0] = "-fsck";
    args[1] = DIR_PATH;
  }