Code Example #1
  @Test
  public void testGetCompanionBLocks() throws IOException {

    try {
      setupCluster(false, 1L, racks1, hosts1);
      String[] files = new String[] {"/dir/file1", "/dir/file2", "/dir/file3"};
      Codec codec = Codec.getCodec("rs");
      for (String file : files) {
        TestRaidDfs.createTestFile(fs, new Path(file), 3, 2, 8192L);
      }
      FileStatus stat = fs.getFileStatus(new Path("/dir"));
      RaidNode.doRaid(
          conf,
          stat,
          new Path(codec.parityDirectory),
          codec,
          new RaidNode.Statistics(),
          RaidUtils.NULL_PROGRESSABLE,
          false,
          1,
          1);

      Collection<LocatedBlock> companionBlocks;

      for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
          companionBlocks =
              getCompanionBlocks(
                  namesystem, policy, getBlocks(namesystem, files[i]).get(j).getBlock());
          Assert.assertEquals(8, companionBlocks.size());
        }
      }

      companionBlocks =
          getCompanionBlocks(namesystem, policy, getBlocks(namesystem, files[2]).get(0).getBlock());
      Assert.assertEquals(8, companionBlocks.size());

      companionBlocks =
          getCompanionBlocks(namesystem, policy, getBlocks(namesystem, files[2]).get(1).getBlock());
      Assert.assertEquals(4, companionBlocks.size());

      String parityFile = "/raidrs/dir";

      for (int i = 0; i < 3; i++) {
        companionBlocks =
            getCompanionBlocks(
                namesystem, policy, getBlocks(namesystem, parityFile).get(i).getBlock());
        Assert.assertEquals(8, companionBlocks.size());
      }

      for (int i = 3; i < 6; i++) {
        companionBlocks =
            getCompanionBlocks(
                namesystem, policy, getBlocks(namesystem, parityFile).get(i).getBlock());
        Assert.assertEquals(4, companionBlocks.size());
      }
    } finally {
      closeCluster();
    }
  }
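
The assertions above rely on two helpers that are not part of the listing, getBlocks and getCompanionBlocks. As a rough sketch of the first one, assuming it simply asks the namesystem for every located block of a file (the exact name and signature in the real test class may differ):

  // Hypothetical sketch of the getBlocks helper used above (not the original code):
  // return all located blocks of a file, so the test can pick individual blocks.
  private static List<LocatedBlock> getBlocks(FSNamesystem namesystem, String file)
      throws IOException {
    // Offset 0 and a very large length cover the whole file.
    return namesystem.getBlockLocations(file, 0L, Long.MAX_VALUE).getLocatedBlocks();
  }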
Code Example #2
  @After
  public void tearDown() throws Exception {
    if (rnode != null) {
      rnode.stop();
      rnode.join();
      rnode = null;
    }

    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }

    dfs = null;

    LOG.info("Test cluster shut down");
  }
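
The tests also call setupCluster and closeCluster, which are not shown in these listings. A minimal sketch of what such a pair could look like; the name and meaning of the second parameter, and the codec/placement-policy configuration, are assumptions rather than the original helpers:

  // Hypothetical sketch of the cluster helpers (assumptions, not the original code).
  // minFileSize is only a guess at what the second argument of setupCluster means.
  private void setupCluster(boolean simulated, long minFileSize,
                            String[] racks, String[] hosts) throws IOException {
    conf = new Configuration();
    if (simulated) {
      // Simulated storage: datanodes fake their block data, which keeps the test fast.
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    // One datanode per host, pinned to the given racks so placement is deterministic.
    cluster = new MiniDFSCluster(conf, hosts.length, true, racks, hosts);
    cluster.waitActive();
    fs = cluster.getFileSystem();
    namesystem = cluster.getNameNode().namesystem;
  }

  private void closeCluster() {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }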
Code Example #3
 /*
  * This test starts datanodes in simulated mode and keeps running
  * chooseReplicaToDelete multiple times to measure the average processing
  * time and the number of allocated objects.
  */
 @Test
 public void testDirXORChooseReplicasToDeletePerformance() throws Exception {
   try {
     setupCluster(true, 1L, racks1, hosts1);
     // create test files
     int numFiles = 1000;
     long blockSize = 1024L;
     String parentDir = "/dir/";
     for (int i = 0; i < numFiles; i++) {
       String file = parentDir + "file" + i;
       TestRaidDfs.createTestFile(fs, new Path(file), 3, 1, blockSize);
     }
     LOG.info("Created " + numFiles + " files");
     Codec code = Codec.getCodec("xor");
     FSNamesystem fsNameSys = cluster.getNameNode().namesystem;
     for (DatanodeDescriptor dd : fsNameSys.datanodeMap.values()) {
       LOG.info(dd);
     }
     // create fake parity file
     long numStripes = RaidNode.numStripes(numFiles, code.stripeLength);
     TestRaidDfs.createTestFile(
         fs,
         new Path(code.parityDirectory, "dir"),
         3,
         (int) numStripes * code.parityLength,
         blockSize);
     long startTime = System.currentTimeMillis();
     long total = 0L;
     fsNameSys.readLock();
     for (BlocksMap.BlockInfo bi : fsNameSys.blocksMap.getBlocks()) {
       fsNameSys.replicator.chooseReplicaToDelete(
           bi.getINode(),
           bi,
           (short) 3,
           fsNameSys.datanodeMap.values(),
           new ArrayList<DatanodeDescriptor>());
       total++;
     }
     fsNameSys.readUnlock();
     LOG.info(
         "Average chooseReplicaToDelete time: "
             + ((double) (System.currentTimeMillis() - startTime) / total));
   } finally {
     closeCluster();
   }
 }
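
The size of the fake parity file above depends on RaidNode.numStripes. Assuming it is the usual ceiling division of the number of source blocks by the codec's stripe length, the arithmetic looks like this (a sketch, not the actual implementation):

  // Assumed behaviour of RaidNode.numStripes (sketch): how many stripes are needed
  // to cover numBlocks source blocks when each stripe holds stripeLength blocks.
  static long numStripes(long numBlocks, int stripeLength) {
    return (numBlocks + stripeLength - 1) / stripeLength;
  }

For example, with 1000 single-block files and a stripe length of 10, the parity file would be created with numStripes(1000, 10) * parityLength = 100 blocks for XOR, whose parityLength is 1; the actual stripe length comes from the codec configuration set up in setupCluster.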
Code Example #4
  /*
   * This test creates a directory with 3 files and its fake parity file.
   * We decommission all nodes in rack2 to make sure all data are stored
   * on rack1 machines.
   * Then we bring the rack2 machines back to the normal state and create,
   * in that directory, a non-raided file that is too small to be raided,
   * with 4 replicas (1 in rack1 and 3 in rack2).
   * Then we reduce the replication to 3 to trigger chooseReplicaToDelete.
   * We verify that the remaining replicas have 1 in rack1 and 2 in rack2.
   */
  @Test
  public void testChooseReplicasToDeleteForSmallFile() throws Exception {
    try {
      setupCluster(false, 512L, racks2, hosts2);
      // create test files
      int numFiles = 4;
      long blockSize = 1024L;
      String parentDir = "/dir/";
      DFSClient client = getDfsClient(cluster.getNameNode(), conf);
      DatanodeInfo[] infos = client.datanodeReport(DatanodeReportType.LIVE);
      ArrayList<String> rack2nodes = new ArrayList<String>();
      ArrayList<DatanodeInfo> rack2di = new ArrayList<DatanodeInfo>();
      for (DatanodeInfo di : infos) {
        if (di.getHostName().contains("rack2")) {
          rack2nodes.add(di.getName());
          rack2di.add(cluster.getNameNode().namesystem.getDatanode(di));
        }
      }
      LOG.info("Decommission rack2 nodes");
      writeConfigFile(excludeFile, rack2nodes);
      cluster.getNameNode().namesystem.refreshNodes(conf);
      waitState(rack2di, AdminStates.DECOMMISSIONED);
      for (int i = 0; i < numFiles; i++) {
        if (i == 2) {
          continue;
        }
        String file = parentDir + "file" + i;
        Path filePath = new Path(file);
        TestRaidDfs.createTestFile(fs, filePath, 1, 1, blockSize);
        printLocatedBlocks(filePath);
      }
      LOG.info("Created " + (numFiles - 1) + " files");
      // create fake parity file
      Codec code = Codec.getCodec("xor");
      long numStripes = RaidNode.numStripes(numFiles, code.stripeLength);
      Path parityPath = new Path(code.parityDirectory, "dir");
      TestRaidDfs.createTestFile(
          fs, parityPath, 1, (int) numStripes * code.parityLength, blockSize);
      LOG.info("Create parity file: " + parityPath);
      printLocatedBlocks(parityPath);

      LOG.info("Bring back rack2 nodes out of decommission");
      writeConfigFile(excludeFile, null);
      cluster.getNameNode().namesystem.refreshNodes(conf);
      waitState(rack2di, AdminStates.NORMAL);

      Path smallFilePath = new Path(parentDir + "file2");
      TestRaidDfs.createTestFile(fs, smallFilePath, 4, 1, 256L);
      assertEquals(
          "all datanodes should have replicas", hosts2.length, printLocatedBlocks(smallFilePath));
      LOG.info("Created small file: " + smallFilePath);

      LOG.info("Reduce replication to 3");
      dfs.setReplication(smallFilePath, (short) 3);
      long startTime = System.currentTimeMillis();
      while (System.currentTimeMillis() - startTime < 120000
          && printLocatedBlocks(smallFilePath) == 4) {
        Thread.sleep(1000);
      }
      LocatedBlocks lbs = dfs.getLocatedBlocks(smallFilePath, 0L, Integer.MAX_VALUE);
      boolean hasRack1 = false;
      for (DatanodeInfo di : lbs.getLocatedBlocks().get(0).getLocations()) {
        if (di.getNetworkLocation().contains("rack1")) {
          hasRack1 = true;
          break;
        }
      }
      assertTrue("We should keep the nodes in rack1", hasRack1);
    } finally {
      closeCluster();
    }
  }
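
printLocatedBlocks is another helper the listing does not show; the assertions use its return value as the replica count of the file. A sketch under that assumption (not the original implementation):

  // Hypothetical sketch of printLocatedBlocks: log the block locations of a file and
  // return how many replica locations its blocks have, which the assertions compare.
  private int printLocatedBlocks(Path filePath) throws IOException {
    LocatedBlocks lbs = dfs.getLocatedBlocks(filePath, 0L, Integer.MAX_VALUE);
    int locations = 0;
    StringBuilder sb = new StringBuilder("Path " + filePath + ":");
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      sb.append(" ").append(lb);
      locations = Math.max(locations, lb.getLocations().length);
    }
    LOG.info(sb.toString());
    return locations;
  }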
Code Example #5
  /** Raids the given test files and waits until the parity files (or HAR archive entries) appear. */
  private void raidTestFiles(Path raidPath, Path[] filePaths, boolean doHar)
      throws IOException, ClassNotFoundException {
    // create RaidNode
    raidConf = new Configuration(conf);
    raidConf.set(RaidNode.RAID_LOCATION_KEY, RAID_DIR);
    raidConf.setInt("raid.blockfix.interval", 1000);
    // the RaidNode does the raiding inline (instead of submitting to MR node)
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    rnode = RaidNode.createRaidNode(null, raidConf);

    for (Path filePath : filePaths) {
      long waitStart = System.currentTimeMillis();
      boolean raided = false;

      Path parityFilePath = new Path(RAID_DIR, filePath.toString().substring(1));

      while (!raided) {
        try {
          FileStatus[] listPaths = dfs.listStatus(raidPath);
          if (listPaths != null) {
            if (doHar) {
              // case with HAR
              for (FileStatus f : listPaths) {
                if (f.getPath().toString().endsWith(".har")) {
                  // check if the parity file is in the index
                  final Path indexPath = new Path(f.getPath(), "_index");
                  final FileStatus indexFileStatus = dfs.getFileStatus(indexPath);
                  final HarIndex harIndex =
                      new HarIndex(dfs.open(indexPath), indexFileStatus.getLen());
                  final HarIndex.IndexEntry indexEntry =
                      harIndex.findEntryByFileName(parityFilePath.toString());
                  if (indexEntry != null) {
                    LOG.info(
                        "raid file "
                            + parityFilePath.toString()
                            + " found in Har archive: "
                            + f.getPath().toString()
                            + " ts="
                            + indexEntry.mtime);
                    raided = true;
                    break;
                  }
                }
              }

            } else {
              // case without HAR
              for (FileStatus f : listPaths) {
                Path found = new Path(f.getPath().toUri().getPath());
                if (parityFilePath.equals(found)) {
                  LOG.info("raid file found: " + f.getPath().toString());
                  raided = true;
                  break;
                }
              }
            }
          }
        } catch (FileNotFoundException ignore) {
        }
        if (!raided) {
          if (System.currentTimeMillis() > waitStart + 40000L) {
            LOG.error("parity file not created after 40s");
            throw new IOException("parity file not created after 40s");
          } else {
            try {
              Thread.sleep(1000);
            } catch (InterruptedException ignore) {
            }
          }
        }
      }
    }

    rnode.stop();
    rnode.join();
    rnode = null;
    LOG.info("test file raided");
  }
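
For completeness, a test case driving raidTestFiles might look roughly like the following; the file paths, sizes, and method name are illustrative assumptions, not the original test:

  // Illustrative caller of raidTestFiles (assumed paths and sizes, not the original test).
  @Test
  public void testRaidFiles() throws Exception {
    Path[] testFiles = new Path[] {new Path("/user/test/file1"), new Path("/user/test/file2")};
    for (Path p : testFiles) {
      // 3 replicas, 2 blocks of 8192 bytes each.
      TestRaidDfs.createTestFile(dfs, p, 3, 2, 8192L);
    }
    // Expect plain parity files (doHar == false) under the configured RAID_DIR.
    raidTestFiles(new Path(RAID_DIR), testFiles, false);
  }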