Example #1
  /**
   * Analyze and recover storage state for a specific block pool, after first
   * completing any datanode-level format/upgrade/rollback transitions.
   *
   * @param datanode DataNode
   * @param bpID Block pool Id
   * @param nsInfo Namespace info of namenode corresponding to the block pool
   * @param dataDirs Storage directories
   * @param startOpt startup option
   * @throws IOException on error
   */
  void recoverTransitionRead(
      DataNode datanode,
      String bpID,
      NamespaceInfo nsInfo,
      Collection<File> dataDirs,
      StartupOption startOpt)
      throws IOException {
    // First ensure datanode level format/snapshot/rollback is completed
    recoverTransitionRead(datanode, nsInfo, dataDirs, startOpt);

    // Create list of storage directories for the block pool
    Collection<File> bpDataDirs = new ArrayList<File>();
    for (File dnRoot : dataDirs) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpID, new File(dnRoot, STORAGE_DIR_CURRENT));
      bpDataDirs.add(bpRoot);
    }
    // Create the block pool storage directories if they do not already exist
    makeBlockPoolDataDir(bpDataDirs, null);
    BlockPoolSliceStorage bpStorage =
        new BlockPoolSliceStorage(
            nsInfo.getNamespaceID(), bpID, nsInfo.getCTime(), nsInfo.getClusterID());

    bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
    addBlockPoolStorage(bpID, bpStorage);
  }
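
For orientation, here is a minimal standalone sketch (an illustration under assumptions, not HDFS code) of the layout the loop above computes: BlockPoolSliceStorage.getBpRoot is taken here to simply nest the block pool id under <dnRoot>/current, so each block pool gets a root such as /data/dn1/current/<bpid>.

import java.io.File;

class BpLayoutSketch {
  // Hypothetical helper mirroring what getBpRoot is assumed to do here:
  // the block pool root lives directly under <dnRoot>/current.
  static File bpRoot(File dnRoot, String bpID) {
    return new File(new File(dnRoot, "current"), bpID);
  }

  public static void main(String[] args) {
    File dnRoot = new File("/data/dn1"); // hypothetical volume root
    System.out.println(bpRoot(dnRoot, "BP-1234-127.0.0.1-1"));
    // prints /data/dn1/current/BP-1234-127.0.0.1-1
  }
}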
Example #2
  /**
   * Upgrade -- Move current storage into a backup directory, and hardlink all its blocks into the
   * new current directory.
   *
   * <p>Upgrade from a pre-0.22 release to 0.22 or later, e.g. 0.19/0.20 => 0.22/0.23
   *
   * <ul>
   *   <li>If <SD>/previous exists then delete it
   *   <li>Rename <SD>/current to <SD>/previous.tmp
   *   <li>Create new <SD>/current/<bpid>/current directory
   *       <ul>
   *         <li>Hard links for block files are created from <SD>/previous.tmp to
   *             <SD>/current/<bpid>/current
   *         <li>Saves new version file in <SD>/current/<bpid>/current directory
   *       </ul>
   *   <li>Rename <SD>/previous.tmp to <SD>/previous
   * </ul>
   *
   * There should be only ONE namenode in the cluster for the first-time upgrade to 0.22
   * (a minimal sketch of the hard-link step appears after this method).
   *
   * @param sd storage directory
   * @param nsInfo namespace info of the namenode for this block pool
   * @throws IOException on error
   */
  void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
    // If the on-disk layout already supports federation, the datanode-level
    // storage needs no upgrade: just refresh the cluster id and layout version.
    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
      clusterID = nsInfo.getClusterID();
      layoutVersion = nsInfo.getLayoutVersion();
      writeProperties(sd);
      return;
    }

    LOG.info(
        "Upgrading storage directory "
            + sd.getRoot()
            + ".\n   old LV = "
            + this.getLayoutVersion()
            + "; old CTime = "
            + this.getCTime()
            + ".\n   new LV = "
            + nsInfo.getLayoutVersion()
            + "; new CTime = "
            + nsInfo.getCTime());

    File curDir = sd.getCurrentDir();
    File prevDir = sd.getPreviousDir();
    File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);

    assert curDir.exists() : "Data node current directory must exist.";
    // Cleanup directory "detach"
    cleanupDetachDir(new File(curDir, STORAGE_DIR_DETACHED));

    // 1. delete <SD>/previous dir before upgrading
    if (prevDir.exists()) deleteDir(prevDir);
    // get previous.tmp directory, <SD>/previous.tmp
    File tmpDir = sd.getPreviousTmp();
    assert !tmpDir.exists() : "Data node previous.tmp directory must not exist.";

    // 2. Rename <SD>/current to <SD>/previous.tmp
    rename(curDir, tmpDir);

    // 3. Format BP and hard link blocks from previous directory
    File curBpDir = BlockPoolSliceStorage.getBpRoot(nsInfo.getBlockPoolID(), curDir);
    BlockPoolSliceStorage bpStorage =
        new BlockPoolSliceStorage(
            nsInfo.getNamespaceID(),
            nsInfo.getBlockPoolID(),
            nsInfo.getCTime(),
            nsInfo.getClusterID());
    bpStorage.format(curDir, nsInfo);
    linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));

    // 4. Write version file under <SD>/current
    layoutVersion = HdfsConstants.LAYOUT_VERSION;
    clusterID = nsInfo.getClusterID();
    writeProperties(sd);

    // 5. Rename <SD>/previous.tmp to <SD>/previous
    rename(tmpDir, prevDir);
    LOG.info("Upgrade of " + sd.getRoot() + " is complete");
    addBlockPoolStorage(nsInfo.getBlockPoolID(), bpStorage);
  }
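
Step 3 of the upgrade above relies on hard links so that the new current/<bpid>/current layout and the previous.tmp snapshot share the same block data on disk, with no copying. Below is a minimal sketch of that idea using java.nio.file (an illustration only, not the actual linkAllBlocks, which also handles the blocksBeingWritten (bbw) directory and nested block directories).

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

class HardLinkSketch {
  // Hard-link every regular file in fromDir into toDir; both names then refer to
  // the same inode, so no block data is duplicated during the upgrade.
  static void linkBlocks(Path fromDir, Path toDir) throws IOException {
    Files.createDirectories(toDir);
    try (DirectoryStream<Path> files = Files.newDirectoryStream(fromDir)) {
      for (Path src : files) {
        if (Files.isRegularFile(src)) {
          Files.createLink(toDir.resolve(src.getFileName()), src);
        }
      }
    }
  }
}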
Example #3
  /** Test that DataStorage and BlockPoolSliceStorage remove the failed volume after failure. */
  @Test(timeout = 150000)
  public void testFailedVolumeBeingRemovedFromDataNode()
      throws InterruptedException, IOException, TimeoutException {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures which is currently not supported on Windows.
    assumeTrue(!Path.WINDOWS);

    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 2);

    // First volume of datanode 0; MiniDFSCluster lays out data dirs as data(2 * dnIndex + volIndex).
    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
    DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
    DataNode dn0 = cluster.getDataNodes().get(0);
    checkDiskErrorSync(dn0);

    // Verify dn0Vol1 has been completely removed from DN0.
    // 1. dn0Vol1 is removed from DataStorage.
    DataStorage storage = dn0.getStorage();
    assertEquals(1, storage.getNumStorageDirs());
    for (int i = 0; i < storage.getNumStorageDirs(); i++) {
      Storage.StorageDirectory sd = storage.getStorageDir(i);
      assertFalse(sd.getRoot().getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
    }
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    BlockPoolSliceStorage bpsStorage = storage.getBPStorage(bpid);
    assertEquals(1, bpsStorage.getNumStorageDirs());
    for (int i = 0; i < bpsStorage.getNumStorageDirs(); i++) {
      Storage.StorageDirectory sd = bpsStorage.getStorageDir(i);
      assertFalse(sd.getRoot().getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
    }

    // 2. dn0Vol1 is removed from FsDataset
    FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
    try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
      for (FsVolumeSpi volume : vols) {
        assertNotEquals(
            new File(volume.getBasePath()).getAbsoluteFile(), dn0Vol1.getAbsoluteFile());
      }
    }

    // 3. all blocks on dn0Vol1 have been removed.
    for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
      assertNotNull(replica.getVolume());
      assertNotEquals(
          new File(replica.getVolume().getBasePath()).getAbsoluteFile(), dn0Vol1.getAbsoluteFile());
    }

    // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.
    String[] dataDirStrs = dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
    assertEquals(1, dataDirStrs.length);
    assertFalse(dataDirStrs[0].contains(dn0Vol1.getAbsolutePath()));
  }
Example #4
  /*
   * Finalize the upgrade for a block pool.
   */
  void finalizeUpgrade(String bpID) throws IOException {
    // If a datanode-level snapshot (taken before the upgrade to federation)
    // still exists, finalize it; otherwise finalize the corresponding block pool.
    for (StorageDirectory sd : storageDirs) {
      File prevDir = sd.getPreviousDir();
      if (prevDir.exists()) {
        // datanode-level storage finalize
        doFinalize(sd);
      } else {
        // block pool storage finalize using the specific bpID
        BlockPoolSliceStorage bpStorage = bpStorageMap.get(bpID);
        bpStorage.doFinalize(sd.getCurrentDir());
      }
    }
  }
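
Conceptually, finalizing discards the rollback snapshot so the upgrade can no longer be undone. Below is a minimal sketch of that idea (an assumption about the general shape only, not the real doFinalize): rename <SD>/previous out of the way first, then delete it, so a crash part-way through never leaves a half-valid previous directory.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.stream.Stream;

class FinalizeSketch {
  // Discard the rollback snapshot: once <sdRoot>/previous is gone, doRollback
  // has nothing to restore and the upgrade becomes permanent.
  static void finalizeDir(Path sdRoot) throws IOException {
    Path prev = sdRoot.resolve("previous");
    if (!Files.exists(prev)) {
      return; // nothing to finalize
    }
    Path tmp = sdRoot.resolve("finalized.tmp"); // illustrative tmp name
    Files.move(prev, tmp, StandardCopyOption.ATOMIC_MOVE);
    // Recursively delete the renamed snapshot, children before parents.
    try (Stream<Path> walk = Files.walk(tmp)) {
      walk.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
    }
  }
}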
  /*
   * Roll back to old snapshot at the block pool level
   * If previous directory exists:
   * <ol>
   * <li>Rename <SD>/current/<bpid>/current to removed.tmp</li>
   * <li>Rename <SD>/current/<bpid>/previous to current</li>
   * <li>Remove removed.tmp</li>
   * </ol>
   *
   * Do nothing if previous directory does not exist.
   * @param bpSd Block pool storage directory at <SD>/current/<bpid>
   * @param nsInfo Namespace info of the namenode for this block pool
   */
  void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
    File prevDir = bpSd.getPreviousDir();
    // regular startup if previous dir does not exist
    if (!prevDir.exists()) return;
    // read attributes out of the VERSION file of previous directory
    BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
    prevInfo.readPreviousVersionProperties(bpSd);

    // We allow rollback to a state that is either consistent with
    // the namespace state or can be further upgraded to it.
    // In other words, we can only roll back when (storedLV >= software LV)
    // && (DN.previousCTime <= NN.ctime)
    if (!(prevInfo.getLayoutVersion() >= HdfsConstants.DATANODE_LAYOUT_VERSION
        && prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
      throw new InconsistentFSStateException(
          bpSd.getRoot(),
          "Cannot rollback to a newer state.\nDatanode previous state: LV = "
              + prevInfo.getLayoutVersion()
              + " CTime = "
              + prevInfo.getCTime()
              + " is newer than the namespace state: LV = "
              + HdfsConstants.DATANODE_LAYOUT_VERSION
              + " CTime = "
              + nsInfo.getCTime());
    }

    LOG.info(
        "Rolling back storage directory "
            + bpSd.getRoot()
            + ".\n   target LV = "
            + nsInfo.getLayoutVersion()
            + "; target CTime = "
            + nsInfo.getCTime());
    File tmpDir = bpSd.getRemovedTmp();
    assert !tmpDir.exists() : "removed.tmp directory must not exist.";
    // 1. rename current to tmp
    File curDir = bpSd.getCurrentDir();
    assert curDir.exists() : "Current directory must exist.";
    rename(curDir, tmpDir);

    // 2. rename previous to current
    rename(prevDir, curDir);

    // 3. delete removed.tmp dir
    deleteDir(tmpDir);
    LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
  }
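
For clarity, here is the rollback precondition enforced above, written out on its own (a sketch; recall that HDFS layout versions are negative and decrease as the layout evolves, so storedLV >= softwareLV means the stored snapshot is at the same or an older layout than the running software).

final class RollbackCheckSketch {
  // Mirrors the check in doRollback above: roll back only to a state that is either
  // consistent with the namespace state or can still be upgraded to it.
  static boolean canRollback(int storedLV, int softwareLV, long prevCTime, long nnCTime) {
    return storedLV >= softwareLV && prevCTime <= nnCTime;
  }
}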