Example #1
  private static DatanodeInfo chooseDatanode(
      final NameNode namenode, final String path, final HttpOpParam.Op op, final long openOffset)
      throws IOException {
    if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      final HdfsFileStatus status = namenode.getFileInfo(path);
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN && (openOffset < 0L || openOffset >= len)) {
        throw new IOException(
            "Offset="
                + openOffset
                + " out of the range [0, "
                + len
                + "); "
                + op
                + ", path="
                + path);
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = namenode.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0));
        }
      }
    }

    return namenode.getNamesystem().getRandomDatanode();
  }
Example #2
 @Override // NamenodeProtocol
 public NamenodeCommand startCheckpoint(NamenodeRegistration registration) throws IOException {
   verifyRequest(registration);
   if (!nn.isRole(NamenodeRole.NAMENODE))
     throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
   return namesystem.startCheckpoint(registration, nn.setRegistration());
 }
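For context, a hedged sketch of the checkpointer side of this handshake; the matching endCheckpoint call appears in Example #30. The namenodeProxy and backupRegistration names are assumptions, not part of the original example.

 // Hedged sketch (assumed caller-side names, e.g. inside a BackupNode):
 NamenodeCommand cmd = namenodeProxy.startCheckpoint(backupRegistration);
 // ... download the image and edits, build the checkpoint ...
 namenodeProxy.endCheckpoint(backupRegistration, checkpointSignature);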
Example #3
  /**
   * Generate a dummy namenode proxy instance that utilizes our hacked {@link
   * LossyRetryInvocationHandler}. A proxy instance generated by this method will proactively drop
   * RPC responses. Currently this method only supports an HA setup; null will be returned if the
   * given configuration is not for HA.
   *
   * @param config the configuration containing the required IPC properties, client failover
   *     configurations, etc.
   * @param nameNodeUri the URI pointing either to a specific NameNode or to a logical nameservice.
   * @param xface the IPC interface which should be created
   * @param numResponseToDrop The number of responses to drop for each RPC call
   * @param fallbackToSimpleAuth set to true or false during calls to indicate if a secure client
   *     falls back to simple auth
   * @return an object containing both the proxy and the associated delegation token service it
   *     corresponds to. Returns null if the given configuration does not support HA.
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
      Configuration config,
      URI nameNodeUri,
      Class<T> xface,
      int numResponseToDrop,
      AtomicBoolean fallbackToSimpleAuth)
      throws IOException {
    Preconditions.checkArgument(numResponseToDrop > 0);
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(config, nameNodeUri, xface, true, fallbackToSimpleAuth);

    if (failoverProxyProvider != null) { // HA case
      int delay =
          config.getInt(
              HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
              HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
      int maxCap =
          config.getInt(
              HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
              HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
      int maxFailoverAttempts =
          config.getInt(
              HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
              HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
      int maxRetryAttempts =
          config.getInt(
              HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
              HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
      InvocationHandler dummyHandler =
          new LossyRetryInvocationHandler<T>(
              numResponseToDrop,
              failoverProxyProvider,
              RetryPolicies.failoverOnNetworkException(
                  RetryPolicies.TRY_ONCE_THEN_FAIL,
                  maxFailoverAttempts,
                  Math.max(numResponseToDrop + 1, maxRetryAttempts),
                  delay,
                  maxCap));

      T proxy =
          (T)
              Proxy.newProxyInstance(
                  failoverProxyProvider.getInterface().getClassLoader(),
                  new Class[] {xface},
                  dummyHandler);
      Text dtService;
      if (failoverProxyProvider.useLogicalURI()) {
        dtService =
            HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri, HdfsConstants.HDFS_URI_SCHEME);
      } else {
        dtService = SecurityUtil.buildTokenService(NameNode.getAddress(nameNodeUri));
      }
      return new ProxyAndInfo<T>(proxy, dtService, NameNode.getAddress(nameNodeUri));
    } else {
      LOG.warn("Currently creating proxy using LossyRetryInvocationHandler requires NN HA setup");
      return null;
    }
  }
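A minimal usage sketch, assuming this method lives in NameNodeProxies, that ClientProtocol is the target IPC interface, and that hdfs://mycluster is a logical HA nameservice (all three are illustrative, not from the original):

  Configuration conf = new HdfsConfiguration();
  URI nnUri = URI.create("hdfs://mycluster"); // hypothetical logical nameservice
  ProxyAndInfo<ClientProtocol> proxyInfo =
      NameNodeProxies.createProxyWithLossyRetryHandler(
          conf, nnUri, ClientProtocol.class, 2 /* numResponseToDrop */, null);
  if (proxyInfo != null) {
    ClientProtocol namenode = proxyInfo.getProxy();
    // Each RPC through this proxy has its first two responses dropped,
    // exercising the client's retry/failover path.
  }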
Example #4
 @Test
 public void testNameNode() throws IOException {
   Configuration conf = MyConf.getConfiguration();
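   // The name directory below is hard-coded for Windows; it presumably matches
   // the dfs.name.dir value supplied by MyConf.getConfiguration().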
   new File("D:\\home\\hdfs\\tmp\\dfs\\name").mkdirs();
   NameNode namenode = new NameNode(conf);
   System.out.println(namenode.getFsImageName().getName());
 }
Example #5
 static {
   try {
     FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
     CONF.set("dfs.http.address", "0.0.0.0:0");
     NameNode.format(CONF);
     namenode = new NameNode(CONF);
   } catch (IOException e) {
     e.printStackTrace();
     throw (RuntimeException) new RuntimeException().initCause(e);
   }
   FSNamesystem fsNamesystem = namenode.getNamesystem();
   replicator = fsNamesystem.blockManager.replicator;
   cluster = fsNamesystem.clusterMap;
   // construct network topology
   for (int i = 0; i < NUM_OF_DATANODES; i++) {
     cluster.add(dataNodes[i]);
   }
   for (int i = 0; i < NUM_OF_DATANODES; i++) {
     dataNodes[i].updateHeartbeat(
         2 * FSConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
         0L,
         2 * FSConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
         0);
   }
 }
Example #6
  /** Regression test for HDFS-2934. */
  @Test
  public void testSomeConfsNNSpecificSomeNSSpecific() {
    final HdfsConfiguration conf = new HdfsConfiguration();

    String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
    conf.set(key, "global-default");
    conf.set(key + ".ns1", "ns1-override");
    conf.set(key + ".ns1.nn1", "nn1-override");

    // A namenode in another nameservice should get the global default.
    Configuration newConf = new Configuration(conf);
    NameNode.initializeGenericKeys(newConf, "ns2", "nn1");
    assertEquals("global-default", newConf.get(key));

    // A namenode in another non-HA nameservice should get global default.
    newConf = new Configuration(conf);
    NameNode.initializeGenericKeys(newConf, "ns2", null);
    assertEquals("global-default", newConf.get(key));

    // A namenode in the same nameservice should get the ns setting
    newConf = new Configuration(conf);
    NameNode.initializeGenericKeys(newConf, "ns1", "nn2");
    assertEquals("ns1-override", newConf.get(key));

    // The nn with the nn-specific setting should get its own override
    newConf = new Configuration(conf);
    NameNode.initializeGenericKeys(newConf, "ns1", "nn1");
    assertEquals("nn1-override", newConf.get(key));
  }
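The suffixing convention this test exercises can be spelled out in a short sketch; it assumes DFSUtil.addKeySuffixes simply joins the parts with dots, which is what the assertions above rely on:

  String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
  String nsKey = key + ".ns1";     // per-nameservice override
  String nnKey = key + ".ns1.nn1"; // per-namenode override
  // NameNode.initializeGenericKeys(conf, nsId, nnId) copies the most specific
  // value present back onto the plain key, which is what the test asserts.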
Example #7
  /**
   * Verifies the token with the NameNode, if available, and creates a UserGroupInformation.
   *
   * <p>Code in this function is copied from JspHelper.getTokenUGI
   *
   * @param identifier Delegation token identifier
   * @param password Delegation token password
   * @param kind the kind of token
   * @param service the service for this token
   * @param servletContext Jetty servlet context which contains the NN address
   * @throws SecurityException Thrown when authentication fails
   */
  private static void verifyToken(
      byte[] identifier, byte[] password, Text kind, Text service, ServletContext servletContext) {
    try {
      Token<DelegationTokenIdentifier> token =
          new Token<DelegationTokenIdentifier>(identifier, password, kind, service);

      ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
      DataInputStream in = new DataInputStream(buf);
      DelegationTokenIdentifier id = new DelegationTokenIdentifier();
      id.readFields(in);

      final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(servletContext);
      if (nn != null) {
        nn.getNamesystem().verifyToken(id, token.getPassword());
      }

      UserGroupInformation userGroupInformation = id.getUser();
      userGroupInformation.addToken(token);
      LOG.debug(
          "user "
              + userGroupInformation.getUserName()
              + " ("
              + userGroupInformation.getShortUserName()
              + ") authenticated");

      // re-login if necessary
      userGroupInformation.checkTGTAndReloginFromKeytab();
    } catch (IOException e) {
      throw new SecurityException("Failed to verify delegation token " + e, e);
    }
  }
Example #8
  /**
   * Regression test for HDFS-2742. The issue in this bug was:
   *
   * - DN does a block report while the file is open. This BR contains the block in RBW state.
   * - Standby queues the RBW state in PendingDatanodeMessages.
   * - Standby processes edit logs during failover.
   *
   * Before this bug was fixed, the standby mistakenly applied the reported RBW state after the
   * block had been completed, causing the block to get marked corrupt. Instead, it should apply
   * the RBW message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
   */
  @Test
  public void testBlockReportsWhileFileBeingWritten() throws Exception {
    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
      AppendTestUtil.write(out, 0, 10);
      out.hflush();

      // Block report will include the RBW replica, but will be
      // queued on the StandbyNode.
      cluster.triggerBlockReports();

    } finally {
      IOUtils.closeStream(out);
    }

    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());

    DFSTestUtil.readFile(fs, TEST_FILE_PATH);
  }
Example #9
  private void checkForCorruptOpenFiles(FileStatus file, List<FileStatus> corruptFiles)
      throws IOException {
    String filePath = file.getPath().toUri().getPath();

    if (file.isDir()) {
      for (FileStatus fileStatus : nn.namesystem.dir.getListing(filePath)) {
        checkForCorruptOpenFiles(fileStatus, corruptFiles);
      }

    } else {
      LeaseManager.Lease lease = nn.getNamesystem().leaseManager.getLeaseByPath(filePath);
      // Condition:
      //  1. lease has expired hard limit
      //  2. the file is open for write
      //  3. the last block has 0 locations
      if (lease != null && lease.expiredHardLimit()) {
        LocatedBlocks blocks = nn.getNamesystem().getBlockLocations(filePath, 0, file.getLen());
        List<LocatedBlock> locatedBlockList = blocks.getLocatedBlocks();
        LocatedBlock lastBlock = locatedBlockList.get(locatedBlockList.size() - 1);

        if (blocks.isUnderConstruction() && lastBlock.getLocations().length == 0) {
          corruptFiles.add(file);
        }
      }
    }
  }
Example #10
 void register() throws IOException {
   // get versions from the namenode
   nsInfo = nameNode.versionRequest();
   dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
   DataNode.setNewStorageID(dnRegistration);
   // register datanode
   dnRegistration = nameNode.register(dnRegistration);
 }
Example #11
  static DatanodeInfo chooseDatanode(
      final NameNode namenode,
      final String path,
      final HttpOpParam.Op op,
      final long openOffset,
      final long blocksize,
      Configuration conf)
      throws IOException {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();

    if (op == PutOpParam.Op.CREATE) {
      // choose a datanode near to client
      final DatanodeDescriptor clientNode =
          bm.getDatanodeManager().getDatanodeByHost(getRemoteAddress());
      if (clientNode != null) {
        final DatanodeDescriptor[] datanodes =
            bm.getBlockPlacementPolicy().chooseTarget(path, 1, clientNode, null, blocksize);
        if (datanodes.length > 0) {
          return datanodes[0];
        }
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      // choose a datanode containing a replica
      final NamenodeProtocols np = namenode.getRpcServer();
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException(
              "Offset="
                  + openOffset
                  + " out of the range [0, "
                  + len
                  + "); "
                  + op
                  + ", path="
                  + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
        }
      }
    }

    return (DatanodeDescriptor)
        bm.getDatanodeManager().getNetworkTopology().chooseRandom(NodeBase.ROOT);
  }
Example #12
 private void doMetasave(NameNode nn2) {
   nn2.getNamesystem().writeLock();
   try {
     PrintWriter pw = new PrintWriter(System.err);
     nn2.getNamesystem().getBlockManager().metaSave(pw);
     pw.flush();
   } finally {
     nn2.getNamesystem().writeUnlock();
   }
 }
Example #13
  /**
   * Tests that the APPEND operation can handle token expiration when re-establishing the pipeline
   * is needed.
   */
  @Test
  public void testAppend() throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = getConf(numDataNodes);

    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
      cluster.waitActive();
      assertEquals(numDataNodes, cluster.getDataNodes().size());

      final NameNode nn = cluster.getNameNode();
      final BlockManager bm = nn.getNamesystem().getBlockManager();
      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

      // set a short token lifetime (1 second)
      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
      Path fileToAppend = new Path(FILE_TO_APPEND);
      FileSystem fs = cluster.getFileSystem();
      byte[] expected = generateBytes(FILE_SIZE);
      // write a one-byte file
      FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
      stm.write(expected, 0, 1);
      stm.close();
      // open the file again for append
      stm = fs.append(fileToAppend);
      int mid = expected.length - 1;
      stm.write(expected, 1, mid - 1);
      stm.hflush();

      /*
       * wait till token used in stm expires
       */
      Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
      while (!SecurityTestUtil.isBlockTokenExpired(token)) {
        try {
          Thread.sleep(10);
        } catch (InterruptedException ignored) {
        }
      }

      // remove a datanode to force re-establishing pipeline
      cluster.stopDataNode(0);
      // append the rest of the file
      stm.write(expected, mid, expected.length - mid);
      stm.close();
      // check if append is successful
      FSDataInputStream in5 = fs.open(fileToAppend);
      assertTrue(checkFile1(in5, expected));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Example #14
  /**
   * Another regression test for HDFS-2742. This tests the following sequence:
   *
   * - DN does a block report while the file is open. This BR contains the block in RBW state.
   * - The block report is delayed in reaching the standby.
   * - The file is closed.
   * - The standby processes the OP_ADD and OP_CLOSE operations before the RBW block report arrives.
   * - The standby should not mark the block as corrupt.
   */
  @Test
  public void testRBWReportArrivesAfterEdits() throws Exception {
    final CountDownLatch brFinished = new CountDownLatch(1);
    DelayAnswer delayer =
        new GenericTestUtils.DelayAnswer(LOG) {
          @Override
          protected Object passThrough(InvocationOnMock invocation) throws Throwable {
            try {
              return super.passThrough(invocation);
            } finally {
              // inform the test that our block report went through.
              brFinished.countDown();
            }
          }
        };

    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
      AppendTestUtil.write(out, 0, 10);
      out.hflush();

      DataNode dn = cluster.getDataNodes().get(0);
      DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.spyOnBposToNN(dn, nn2);

      Mockito.doAnswer(delayer)
          .when(spy)
          .blockReport(
              Mockito.<DatanodeRegistration>anyObject(),
              Mockito.anyString(),
              Mockito.<StorageBlockReport[]>anyObject());
      dn.scheduleAllBlockReport(0);
      delayer.waitForCall();

    } finally {
      IOUtils.closeStream(out);
    }

    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    delayer.proceed();
    brFinished.await();

    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());

    DFSTestUtil.readFile(fs, TEST_FILE_PATH);
  }
Example #15
  /**
   * Test that, when a block is re-opened for append, the related datanode messages are correctly
   * queued by the SBN because they have future states and genstamps.
   */
  @Test
  public void testQueueingWithAppend() throws Exception {
    int numQueued = 0;
    int numDN = cluster.getDataNodes().size();

    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
      AppendTestUtil.write(out, 0, 10);
      out.hflush();

      // Opening the file will report RBW replicas, but will be
      // queued on the StandbyNode.
      numQueued += numDN; // RBW messages
    } finally {
      IOUtils.closeStream(out);
      numQueued += numDN; // blockReceived messages
    }

    cluster.triggerBlockReports();
    numQueued += numDN;

    try {
      out = fs.append(TEST_FILE_PATH);
      AppendTestUtil.write(out, 10, 10);
      // RBW replicas once it's opened for append
      numQueued += numDN;

    } finally {
      IOUtils.closeStream(out);
      numQueued += numDN; // blockReceived
    }

    cluster.triggerBlockReports();
    numQueued += numDN;

    assertEquals(
        numQueued, cluster.getNameNode(1).getNamesystem().getPendingDataNodeMessageCount());

    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());

    AppendTestUtil.check(fs, TEST_FILE_PATH, 20);
  }
Example #16
  /** Test that the {@link PlacementMonitor.BlockAndDatanodeResolver} works correctly. */
  @Test
  public void testBlockAndDatanodeResolver() throws Exception {
    setupCluster();
    try {
      Path src = new Path("/dir/file");
      Path parity = new Path("/raid/dir/file");
      DFSTestUtil.createFile(fs, src, 20, (short) 2, 0L);
      DFSTestUtil.createFile(fs, parity, 11, (short) 2, 0L);
      DFSTestUtil.waitReplication(fs, src, (short) 2);
      DFSTestUtil.waitReplication(fs, parity, (short) 2);

      LocatedBlocks srcLbs, parityLbs;
      List<BlockInfo> srcInfos, parityInfos;
      srcLbs = namenode.getBlockLocations(src.toString(), 4, 10);
      srcInfos = placementMonitor.getBlockInfos(fs, src, 4, 10);
      parityLbs = namenode.getBlockLocations(parity.toString(), 3, 7);
      parityInfos = placementMonitor.getBlockInfos(fs, parity, 3, 7);

      Assert.assertEquals(10, srcLbs.getLocatedBlocks().size());
      Assert.assertEquals(7, parityLbs.getLocatedBlocks().size());
      Assert.assertEquals(10, srcInfos.size());
      Assert.assertEquals(7, parityInfos.size());

      BlockAndDatanodeResolver resolver = new BlockAndDatanodeResolver(src, fs, parity, fs);
      for (int i = 0; i < srcInfos.size(); ++i) {
        LocatedBlock lb = resolver.getLocatedBlock(srcInfos.get(i));
        Assert.assertEquals(srcLbs.get(i).getBlock(), lb.getBlock());
        for (String nodeName : srcInfos.get(i).getNames()) {
          DatanodeInfo node = resolver.getDatanodeInfo(nodeName);
          Assert.assertEquals(node.getName(), nodeName);
        }
      }
      for (int i = 0; i < parityInfos.size(); ++i) {
        LocatedBlock lb = resolver.getLocatedBlock(parityInfos.get(i));
        Assert.assertEquals(parityLbs.get(i).getBlock(), lb.getBlock());
        for (String nodeName : parityInfos.get(i).getNames()) {
          DatanodeInfo node = resolver.getDatanodeInfo(nodeName);
          Assert.assertEquals(node.getName(), nodeName);
        }
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
      if (placementMonitor != null) {
        placementMonitor.stop();
      }
    }
  }
Example #17
  /**
   * Check files on DFS, starting from the indicated path.
   *
   * @throws IOException
   */
  public void fsck() throws IOException {
    NameNode.getNameNodeMetrics().numFsckOperations.inc();
    InjectionHandler.processEvent(InjectionEvent.NAMENODE_FSCK_START);

    try {
      FileStatus[] files = nn.namesystem.dir.getListing(path);
      FsckResult res = new FsckResult();
      if (!this.showFiles && !this.showBlocks && !this.showLocations && !this.showRacks) {
        res.totalRacks = nn.getNetworkTopology().getNumOfRacks();
        res.totalDatanodes = nn.namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
      }
      res.setReplication((short) conf.getInt("dfs.replication", 3));
      if (files != null) {
        if (showCorruptFileBlocks && showOpenFiles) {
          listCorruptOpenFiles();

          return;
        }

        if (showCorruptFileBlocks) {
          listCorruptFileBlocks();
          return;
        }

        for (int i = 0; i < files.length; i++) {
          check(files[i], res);
        }
        out.println(res);
        // DFSck client scans for the string HEALTHY/CORRUPT to check the status
        // of file system and return appropriate code. Changing the output
        // string might break testcases.
        if (res.isHealthy()) {
          out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS);
        } else {
          out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS);
        }
      } else {
        out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS);
      }
    } catch (Throwable e) {
      String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
      LOG.warn(errMsg, e);
      out.println(e.getMessage());
      out.print("\n\n" + errMsg);
    } finally {
      out.close();
    }
  }
Example #18
  /**
   * When formatting a namenode, we must provide a clusterId.
   *
   * @param conf
   * @throws IOException
   */
  public static void formatNameNode(Configuration conf) throws IOException {
    String clusterId = StartupOption.FORMAT.getClusterId();
    if (clusterId == null || clusterId.isEmpty())
      StartupOption.FORMAT.setClusterId("testClusterID");

    NameNode.format(conf);
  }
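A hedged sketch of using the helper above in a test bootstrap; createNameNode is shown in Example #21 and Example #25, and the empty argument array is an assumption:

  Configuration conf = new HdfsConfiguration();
  formatNameNode(conf); // falls back to "testClusterID" when no clusterId is set
  NameNode nn = NameNode.createNameNode(new String[] {}, conf);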
Example #19
  /**
   * Test to ensure nameservice specific keys in the configuration are copied to generic keys when
   * the namenode starts.
   */
  @Test
  public void testConfModificationFederationAndHa() {
    final HdfsConfiguration conf = new HdfsConfiguration();
    String nsId = "ns1";
    String nnId = "nn1";

    conf.set(DFS_NAMESERVICES, nsId);
    conf.set(DFS_NAMESERVICE_ID, nsId);
    conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);

    // Set the nameservice specific keys with nameserviceId in the config key
    for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
      // Note: value is same as the key
      conf.set(DFSUtil.addKeySuffixes(key, nsId, nnId), key);
    }

    // Initialize generic keys from specific keys
    NameNode.initializeGenericKeys(conf, nsId, nnId);

    // Retrieve the keys without nameserviceId and ensure the generic keys are
    // set to the correct value
    for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
      assertEquals(key, conf.get(key));
    }
  }
Example #20
  /** Verify name-node port usage. */
  public void testNameNodePorts() throws Exception {
    NameNode nn = null;
    try {
      nn = startNameNode();

      // start another namenode on the same port
      Configuration conf2 = new Configuration(config);
      conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
      NameNode.format(conf2);
      boolean started = canStartNameNode(conf2);
      assertFalse(started); // should fail

      // start on a different main port
      FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");
      started = canStartNameNode(conf2);
      assertFalse(started); // should fail again

      // reset conf2 since NameNode modifies it
      FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");
      // different http port
      conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
      started = canStartNameNode(conf2);
      assertTrue(started); // should start now
    } finally {
      stopNameNode(nn);
    }
  }
Example #21
  /** Start the name-node. */
  public NameNode startNameNode() throws IOException {
    String dataDir = System.getProperty("test.build.data");
    hdfsDir = new File(dataDir, "dfs");
    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
      throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
    }
    config = new Configuration();
    config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
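    // NAME_NODE_HOST is assumed to end with ':', so appending "0" below selects an ephemeral port.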
    FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
    config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
    NameNode.format(config);

    String[] args = new String[] {};
    // NameNode will modify config with the ports it bound to
    return NameNode.createNameNode(args, config);
  }
Example #22
 /**
  * @return true if safemode is not running, or if safemode has already initialized the replication
  *     queues
  */
 public static boolean safeModeInitializedReplQueues(NameNode nn) {
   SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
   if (smi == null) {
     return true;
   }
   return smi.initializedReplQueues;
 }
Example #23
 /** @return the number of blocks marked safe by safemode, or -1 if safemode is not running. */
 public static int getSafeModeSafeBlocks(NameNode nn) throws IOException {
   SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
   if (smi == null) {
     return -1;
   }
   return smi.blockSafe();
 }
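A hedged sketch of combining the two helpers above in a test; the polling loop and a test method declared with throws Exception are assumptions:

  // Wait until safemode has initialized its replication queues (or is off),
  // then read how many blocks it has marked safe (-1 if safemode is not running).
  while (!safeModeInitializedReplQueues(nn)) {
    Thread.sleep(100);
  }
  int safeBlocks = getSafeModeSafeBlocks(nn);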
Example #24
  /** Initialize SecondaryNameNode. */
  private void initialize(Configuration conf) throws IOException {
    // initiate Java VM metrics
    JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

    // Create connection to the namenode.
    shouldRun = true;
    nameNodeAddr = NameNode.getAddress(conf);

    this.conf = conf;
    this.namenode =
        (NamenodeProtocol)
            RPC.waitForProxy(
                NamenodeProtocol.class, NamenodeProtocol.versionID, nameNodeAddr, conf);

    // initialize checkpoint directories
    fsName = getInfoServer();
    checkpointDirs = FSImage.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointImage = new CheckpointStorage(conf);
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

    // Initialize other scheduling parameters from the configuration
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

    // initialize the webserver for uploading files.
    String infoAddr =
        NetUtils.getServerAddress(
            conf,
            "dfs.secondary.info.bindAddress",
            "dfs.secondary.info.port",
            "dfs.secondary.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("name.system.image", checkpointImage);
    this.infoServer.setAttribute("name.conf", conf);
    infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
    infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = infoServer.getPort();
    conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.warn(
        "Checkpoint Period   :"
            + checkpointPeriod
            + " secs "
            + "("
            + checkpointPeriod / 60
            + " min)");
    LOG.warn(
        "Log Size Trigger    :"
            + checkpointSize
            + " bytes "
            + "("
            + checkpointSize / 1024
            + " KB)");
  }
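The two scheduling values read above come straight from configuration; a hedged sketch of tightening the checkpoint cadence (the values are illustrative):

  Configuration conf = new Configuration();
  conf.setLong("fs.checkpoint.period", 1800);          // checkpoint every 30 minutes
  conf.setLong("fs.checkpoint.size", 8 * 1024 * 1024); // or once edits reach 8 MB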
Example #25
  public static void main(String argv[]) throws Exception {
    if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
      System.exit(0);
    }

    try {
      StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
      NameNode namenode = createNameNode(argv, null);
      if (namenode != null) {
        namenode.join();
      }
    } catch (Throwable e) {
      LOG.fatal("Exception in namenode join", e);
      terminate(1, e);
    }
  }
Example #26
 public Lock getINodeLock(
     NameNode nameNode,
     TransactionLockTypes.INodeLockType lockType,
     TransactionLockTypes.INodeResolveType resolveType,
     boolean resolveLink,
     boolean ignoreLocalSubtreeLocks,
     String... paths) {
   return new INodeLock(
       lockType,
       resolveType,
       resolveLink,
       ignoreLocalSubtreeLocks,
       nameNode.getId(),
       nameNode.getActiveNameNodes().getActiveNodes(),
       paths);
 }
Example #27
 public Lock getRenameINodeLock(
     NameNode nameNode,
     TransactionLockTypes.INodeLockType lockType,
     TransactionLockTypes.INodeResolveType resolveType,
     boolean ignoreLocalSubtreeLocks,
     String src,
     String dst) {
   return new RenameINodeLock(
       lockType,
       resolveType,
       ignoreLocalSubtreeLocks,
       nameNode.getId(),
       nameNode.getActiveNameNodes().getActiveNodes(),
       src,
       dst);
 }
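A hedged sketch of calling the factory from Example #26; the lockFactory receiver and the enum constants are assumptions about the surrounding HopsFS-style code:

  Lock inodeLock =
      lockFactory.getINodeLock(
          nameNode,
          TransactionLockTypes.INodeLockType.WRITE,
          TransactionLockTypes.INodeResolveType.PATH,
          true /* resolveLink */,
          false /* ignoreLocalSubtreeLocks */,
          "/user/alice/data");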
Example #28
 @Override // NamenodeProtocol
 public NamenodeRegistration register(NamenodeRegistration registration) throws IOException {
   verifyVersion(registration.getVersion());
   NamenodeRegistration myRegistration = nn.setRegistration();
   namesystem.registerBackupNode(registration, myRegistration);
   return myRegistration;
 }
Example #29
  private void listCorruptOpenFiles() throws IOException {
    int matchedCorruptFilesCount = 0;
    // directory representation of path
    String pathdir = path.endsWith(Path.SEPARATOR) ? path : path + Path.SEPARATOR;
    FileStatus pathFileStatus = nn.getNamesystem().getFileInfo(pathdir);
    List<FileStatus> corruptFileStatusList = new ArrayList<FileStatus>();
    checkForCorruptOpenFiles(pathFileStatus, corruptFileStatusList);

    for (FileStatus fileStatus : corruptFileStatusList) {
      String currentPath = fileStatus.getPath().toString();
      if (currentPath.startsWith(pathdir) || currentPath.equals(path)) {
        matchedCorruptFilesCount++;

        // print the header before listing first item
        if (matchedCorruptFilesCount == 1) {
          out.println("Here are a few files that may be corrupted:");
          out.println("===========================================");
        }

        out.println(currentPath);
      }
    }

    out.println();
    out.println(buildSummaryResultForListCorruptFiles(matchedCorruptFilesCount, path));
  }
Example #30
 @Override // NamenodeProtocol
 public void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig)
     throws IOException {
   verifyRequest(registration);
   if (!nn.isRole(NamenodeRole.NAMENODE))
     throw new IOException("Only an ACTIVE node can invoke endCheckpoint.");
   namesystem.endCheckpoint(registration, sig);
 }