Example #1
 @Test
 public void testFsUriSetProperly() throws Exception {
   HMaster master = UTIL.getMiniHBaseCluster().getMaster();
   MasterFileSystem fs = master.getMasterFileSystem();
   Path masterRoot = FSUtils.getRootDir(fs.conf);
   Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf());
   // make sure the fs and the found root dir have the same scheme
   LOG.debug("from fs uri:" + FileSystem.getDefaultUri(fs.getFileSystem().getConf()));
   LOG.debug("from configuration uri:" + FileSystem.getDefaultUri(fs.conf));
   // make sure the set uri matches by forcing it.
   assertEquals(masterRoot, rootDir);
 }
Example #2
  /**
   * Remove the region from the table directory, archiving the region's hfiles.
   *
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to remove the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for region to be deleted
   * @throws IOException if the request cannot be completed
   */
  public static void deleteRegionFromFileSystem(
      final Configuration conf,
      final FileSystem fs,
      final Path tableDir,
      final HRegionInfo regionInfo)
      throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETING region " + regionDir);
    }

    // Archive region
    Path rootDir = FSUtils.getRootDir(conf);
    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

    // Delete empty region dir
    if (!fs.delete(regionDir, true)) {
      LOG.warn("Failed delete of " + regionDir);
    }
  }
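A minimal usage sketch for the helper above, assuming the HBase 1.x-era APIs used throughout these examples; the table name "demo" and the throwaway HRegionInfo are hypothetical:

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.FSUtils;

 public class DeleteRegionSketch {
   public static void main(String[] args) throws Exception {
     Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
     Path rootDir = FSUtils.getRootDir(conf);          // ${hbase.rootdir}
     FileSystem fs = rootDir.getFileSystem(conf);
     TableName tn = TableName.valueOf("demo");         // hypothetical table
     Path tableDir = FSUtils.getTableDir(rootDir, tn);
     HRegionInfo hri = new HRegionInfo(tn);            // placeholder region spanning the whole key range
     // Archives the region's hfiles, then removes the now-empty region directory.
     HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, tableDir, hri);
   }
 }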
Example #3
 /**
  * Get the directory to archive a store directory
  *
  * @param conf {@link Configuration} to read for the archive directory name.
  * @param region parent region information under which the store currently lives
  * @param tabledir directory for the table under which the store currently lives
  * @param family name of the family in the store
  * @return {@link Path} to the directory to archive the given store or <tt>null</tt> if it should
  *     not be archived
  */
 public static Path getStoreArchivePath(
     Configuration conf, HRegionInfo region, Path tabledir, byte[] family) throws IOException {
   TableName tableName = FSUtils.getTableName(tabledir);
   Path rootDir = FSUtils.getRootDir(conf);
   Path tableArchiveDir = getTableArchivePath(rootDir, tableName);
   return HStore.getStoreHomedir(tableArchiveDir, region, family);
 }
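In stock HBase this helper lives in HFileArchiveUtil; a hedged sketch of resolving a store's archive directory with it, using placeholder table/region/family names:

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;

 public class StoreArchivePathSketch {
   public static void main(String[] args) throws Exception {
     Configuration conf = HBaseConfiguration.create();
     TableName tn = TableName.valueOf("demo");                         // placeholder table
     Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), tn);
     HRegionInfo region = new HRegionInfo(tn);                         // placeholder region
     // Typically resolves somewhere under ${hbase.rootdir}/archive for this region and family.
     Path archiveDir =
         HFileArchiveUtil.getStoreArchivePath(conf, region, tableDir, Bytes.toBytes("cf"));
     System.out.println("store archive dir: " + archiveDir);
   }
 }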
Example #4
 /** Returns the HDFS size of the given region in bytes. */
 public long getHdfsSize(HRegionInfo info) throws IOException {
   Path tableDir =
       HTableDescriptor.getTableDir(
           FSUtils.getRootDir(hbaseConf_), Bytes.toBytes(hbaseTableName_));
   FileSystem fs = tableDir.getFileSystem(hbaseConf_);
   Path regionDir = tableDir.suffix("/" + info.getEncodedName());
   return fs.getContentSummary(regionDir).getLength();
 }
Example #5
 /** Deletes a table's descriptor file from the file system if it exists. Used in unit tests. */
 public static void deleteTableDescriptorIfExists(String tableName, Configuration conf)
     throws IOException {
   FileSystem fs = FSUtils.getCurrentFileSystem(conf);
   FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
   // The below deleteDirectory works for either file or directory.
   if (status != null && fs.exists(status.getPath())) {
     FSUtils.deleteDirectory(fs, status.getPath());
   }
 }
Example #6
  /** Creates, flushes, and closes a new region. */
  public static HRegion createHDFSRegionDir(
      Configuration conf, HRegionInfo hri, HTableDescriptor htd) throws IOException {
    // Create HRegion
    Path root = FSUtils.getRootDir(conf);
    HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

    // Close the new region to flush to disk. Close log file too.
    region.close();
    return region;
  }
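A hedged sketch of the same create-then-close sequence as a standalone program; the table and family names are hypothetical, and the null WAL argument mirrors the helper above:

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.FSUtils;

 public class CreateRegionDirSketch {
   public static void main(String[] args) throws Exception {
     Configuration conf = HBaseConfiguration.create();
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo")); // hypothetical table
     htd.addFamily(new HColumnDescriptor("cf"));
     HRegionInfo hri = new HRegionInfo(htd.getTableName());                  // whole key range
     // Same steps as createHDFSRegionDir above: create under the root dir, then close to flush.
     Path root = FSUtils.getRootDir(conf);
     HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
     region.close();
   }
 }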
Example #7
 public void consolidate() throws IOException {
   if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
     Path rootDir = FSUtils.getRootDir(conf);
     LOG.info("Using old Snapshot Format");
     // write a copy of descriptor to the snapshot directory
     new FSTableDescriptors(conf, fs, rootDir)
         .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(htd), false);
   } else {
     LOG.debug("Convert to Single Snapshot Manifest");
     convertToV2SingleManifest();
   }
 }
Example #8
 @Test
 public void testReadingArchiveDirectoryFromFS() throws IOException {
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   try {
     new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
         .get(HConstants.HFILE_ARCHIVE_DIRECTORY);
     fail("Shouldn't be able to read a table descriptor for the archive directory.");
   } catch (IOException e) {
     LOG.debug(
         "Correctly got error when reading a table descriptor from the archive directory: "
             + e.getMessage());
   }
 }
Example #9
  protected HRegionInfo createRegion(
      Configuration conf, final Table htbl, byte[] startKey, byte[] endKey) throws IOException {
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HTableDescriptor htd = htbl.getTableDescriptor();
    HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    FSDataOutputStream out = fs.create(riPath);
    out.write(hri.toDelimitedByteArray());
    out.close();

    // add to meta.
    MetaTableAccessor.addRegionToMeta(meta, hri);
    meta.close();
    return hri;
  }
Example #10
  protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey)
      throws IOException {

    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);

    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      regions = rl.getAllRegionLocations();
    }

    for (HRegionLocation e : regions) {
      HRegionInfo hri = e.getRegionInfo();
      ServerName hsa = e.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
        fs.delete(p, true);

        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + hsa.toString());
    }

    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
  }
Example #11
  @Override
  public void start(CoprocessorEnvironment e) throws IOException {
    region = (HRegion) ((RegionCoprocessorEnvironment) e).getRegion();
    String[] name = region.getTableDesc().getNameAsString().split(":");
    if (name.length == 2) {
      namespace = name[0];
      tableName = name[1];
    } else {
      tableName = name[0];
    }
    regionName = region.getRegionInfo().getEncodedName();

    path = HConfiguration.getConfiguration().getBackupPath() + "/" + tableName + "/" + regionName;
    conf = HConfiguration.unwrapDelegate();
    rootDir = FSUtils.getRootDir(conf);
    fs = FSUtils.getCurrentFileSystem(conf);
    backupDir =
        new Path(
            rootDir,
            BackupRestoreConstants.BACKUP_DIR + "/data/splice/" + tableName + "/" + regionName);
    preparing = false;
  }
Example #12
 public MasterFileSystem(Server master, MasterServices services,
     MasterMetrics metrics, boolean masterRecovery)
 throws IOException {
   this.conf = master.getConfiguration();
   this.master = master;
   this.services = services;
   this.metrics = metrics;
   // Set filesystem to be that of this.rootdir else we get complaints about
   // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
   // default localfs.  Presumption is that rootdir is fully-qualified before
   // we get to here with appropriate fs scheme.
    // Set the HBase root directory
   this.rootdir = FSUtils.getRootDir(conf);
    // Temporary directory for HBase table creation and deletion: ${hbase.rootdir}/.tmp
   this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
   // Cover both bases, the old way of setting default fs and the new.
   // We're supposed to run on 0.20 and 0.21 anyways.
   this.fs = this.rootdir.getFileSystem(conf);
   String fsUri = this.fs.getUri().toString();
   conf.set("fs.default.name", fsUri);
   conf.set("fs.defaultFS", fsUri);
   // make sure the fs has the same conf
   fs.setConf(conf);
   this.distributedLogSplitting =
     conf.getBoolean("hbase.master.distributed.log.splitting", true);
   if (this.distributedLogSplitting) {
     this.splitLogManager = new SplitLogManager(master.getZooKeeper(),
         master.getConfiguration(), master, master.getServerName().toString());
     this.splitLogManager.finishInitialization(masterRecovery);
   } else {
     this.splitLogManager = null;
   }
   // setup the filesystem variable
   // set up the archived logs path
   this.oldLogDir = createInitialFileSystemLayout();
 }
Example #13
 /**
  * Create a new HTableDescriptor in HDFS. Happens when we are creating a table. If forceCreation
  * is true, any existing table descriptor will be overwritten.
  *
  * @param htableDescriptor
  * @param conf
  * @param forceCreation True if we are to overwrite existing file.
  */
 static boolean createTableDescriptor(
     final HTableDescriptor htableDescriptor, final Configuration conf, boolean forceCreation)
     throws IOException {
   FileSystem fs = FSUtils.getCurrentFileSystem(conf);
   return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor, forceCreation);
 }
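A hedged fragment showing a possible call site for this helper, assumed to live in the same (unshown) declaring class; "demo" and "cf" are placeholders:

   // Sketch only: assumes this runs inside the class that declares createTableDescriptor above.
   Configuration conf = HBaseConfiguration.create();
   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
   htd.addFamily(new HColumnDescriptor("cf"));
   boolean wrote = createTableDescriptor(htd, conf, false); // false: keep an existing descriptor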
Example #14
  /**
    * Tests the scenario reported in HBASE-6901: all files are bulk loaded and excluded from
    * minor compaction. Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException is
    * thrown.
   */
  @Ignore("Flakey: See HBASE-9051")
  @Test
  public void testExcludeAllFromMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    util.startMiniCluster();
    try (Connection conn = ConnectionFactory.createConnection();
        Admin admin = conn.getAdmin();
        Table table = util.createTable(TABLE_NAME, FAMILIES);
        RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath =
          new Path(
              FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
              new Path(
                  admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
                  Bytes.toString(FAMILIES[0])));
      assertEquals(0, fs.listStatus(storePath).length);

      // Generate two bulk load files
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);

      for (int i = 0; i < 2; i++) {
        Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
        runIncrementalPELoad(
            conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME), testDir);
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
      }

      // Ensure data shows up
      int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME);
      try {
        quickPoll(
            new Callable<Boolean>() {
              @Override
              public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
              }
            },
            5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME);
      quickPoll(
          new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

    } finally {
      util.shutdownMiniCluster();
    }
  }
Example #15
  @Test
  public void testExcludeMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      Admin admin = util.getHBaseAdmin();
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath =
          HStore.getStoreHomedir(
              FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
              admin.getTableRegions(TABLE_NAME).get(0),
              FAMILIES[0]);
      assertEquals(0, fs.listStatus(storePath).length);

      // put some data in it and flush to create a storefile
      Put p = new Put(Bytes.toBytes("test"));
      p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
      table.put(p);
      admin.flush(TABLE_NAME);
      assertEquals(1, util.countRows(table));
      quickPoll(
          new Callable<Boolean>() {
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

      // Generate a bulk load file with more rows
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows + 1,
          util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME);
      try {
        quickPoll(
            new Callable<Boolean>() {
              public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
              }
            },
            5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME);
      quickPoll(
          new Callable<Boolean>() {
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
Example #16
  @Override
  public int run(String[] args) throws Exception {
    Path rootRegionDir = null;
    int numThreads = 1;
    long numIterations = 10000;
    int numFamilies = 1;
    boolean noSync = false;
    boolean verify = false;
    boolean verbose = false;
    boolean cleanup = true;
    boolean noclosefs = false;
    long roll = Long.MAX_VALUE;
    // Process command line args
    for (int i = 0; i < args.length; i++) {
      String cmd = args[i];
      try {
        if (cmd.equals("-threads")) {
          numThreads = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-iterations")) {
          numIterations = Long.parseLong(args[++i]);
        } else if (cmd.equals("-path")) {
          rootRegionDir = new Path(args[++i]);
        } else if (cmd.equals("-families")) {
          numFamilies = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-qualifiers")) {
          numQualifiers = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-keySize")) {
          keySize = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-valueSize")) {
          valueSize = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-nosync")) {
          noSync = true;
        } else if (cmd.equals("-verify")) {
          verify = true;
        } else if (cmd.equals("-verbose")) {
          verbose = true;
        } else if (cmd.equals("-nocleanup")) {
          cleanup = false;
        } else if (cmd.equals("-noclosefs")) {
          noclosefs = true;
        } else if (cmd.equals("-roll")) {
          roll = Long.parseLong(args[++i]);
        } else if (cmd.equals("-h")) {
          printUsageAndExit();
        } else if (cmd.equals("--help")) {
          printUsageAndExit();
        } else {
          System.err.println("UNEXPECTED: " + cmd);
          printUsageAndExit();
        }
      } catch (Exception e) {
        printUsageAndExit();
      }
    }

    // Run HLog Performance Evaluation
    // First set the fs from configs.  In case we are on hadoop1
    FSUtils.setFsDefault(getConf(), FSUtils.getRootDir(getConf()));
    FileSystem fs = FileSystem.get(getConf());
    LOG.info("FileSystem: " + fs);
    try {
      if (rootRegionDir == null) {
        rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("HLogPerformanceEvaluation");
      }
      rootRegionDir = rootRegionDir.makeQualified(fs);
      cleanRegionRootDir(fs, rootRegionDir);
      // Initialize Table Descriptor
      HTableDescriptor htd = createHTableDescriptor(numFamilies);
      final long whenToRoll = roll;
      HLog hlog =
          new FSHLog(fs, rootRegionDir, "wals", getConf()) {
            int appends = 0;

            @Override
            protected void doWrite(
                HRegionInfo info, HLogKey logKey, WALEdit logEdit, HTableDescriptor htd)
                throws IOException {
              this.appends++;
              if (this.appends % whenToRoll == 0) {
                LOG.info("Rolling after " + appends + " edits");
                rollWriter();
              }
              super.doWrite(info, logKey, logEdit, htd);
            }
          };
      hlog.rollWriter();
      HRegion region = null;
      try {
        region = openRegion(fs, rootRegionDir, htd, hlog);
        long putTime =
            runBenchmark(new HLogPutBenchmark(region, htd, numIterations, noSync), numThreads);
        logBenchmarkResult(
            "Summary: threads=" + numThreads + ", iterations=" + numIterations,
            numIterations * numThreads,
            putTime);
        if (region != null) {
          closeRegion(region);
          region = null;
        }
        if (verify) {
          Path dir = ((FSHLog) hlog).getDir();
          long editCount = 0;
          FileStatus[] fsss = fs.listStatus(dir);
          if (fsss.length == 0) throw new IllegalStateException("No WAL found");
          for (FileStatus fss : fsss) {
            Path p = fss.getPath();
            if (!fs.exists(p)) throw new IllegalStateException(p.toString());
            editCount += verify(p, verbose);
          }
          long expected = numIterations * numThreads;
          if (editCount != expected) {
            throw new IllegalStateException("Counted=" + editCount + ", expected=" + expected);
          }
        }
      } finally {
        if (region != null) closeRegion(region);
        // Remove the root dir for this test region
        if (cleanup) cleanRegionRootDir(fs, rootRegionDir);
      }
    } finally {
      // We may be called inside a test that wants to keep on using the fs.
      if (!noclosefs) fs.close();
    }

    return (0);
  }
Example #17
  @Override
  public int run(String[] args) throws Exception {
    Path rootRegionDir = null;
    int numThreads = 1;
    long numIterations = 1000000;
    int numFamilies = 1;
    int syncInterval = 0;
    boolean noSync = false;
    boolean verify = false;
    boolean verbose = false;
    boolean cleanup = true;
    boolean noclosefs = false;
    long roll = Long.MAX_VALUE;
    boolean compress = false;
    String cipher = null;
    int numRegions = 1;
    String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
    boolean trace = spanReceivers != null && !spanReceivers.isEmpty();
    double traceFreq = 1.0;
    // Process command line args
    for (int i = 0; i < args.length; i++) {
      String cmd = args[i];
      try {
        if (cmd.equals("-threads")) {
          numThreads = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-iterations")) {
          numIterations = Long.parseLong(args[++i]);
        } else if (cmd.equals("-path")) {
          rootRegionDir = new Path(args[++i]);
        } else if (cmd.equals("-families")) {
          numFamilies = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-qualifiers")) {
          numQualifiers = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-keySize")) {
          keySize = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-valueSize")) {
          valueSize = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-syncInterval")) {
          syncInterval = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-nosync")) {
          noSync = true;
        } else if (cmd.equals("-verify")) {
          verify = true;
        } else if (cmd.equals("-verbose")) {
          verbose = true;
        } else if (cmd.equals("-nocleanup")) {
          cleanup = false;
        } else if (cmd.equals("-noclosefs")) {
          noclosefs = true;
        } else if (cmd.equals("-roll")) {
          roll = Long.parseLong(args[++i]);
        } else if (cmd.equals("-compress")) {
          compress = true;
        } else if (cmd.equals("-encryption")) {
          cipher = args[++i];
        } else if (cmd.equals("-regions")) {
          numRegions = Integer.parseInt(args[++i]);
        } else if (cmd.equals("-traceFreq")) {
          traceFreq = Double.parseDouble(args[++i]);
        } else if (cmd.equals("-h")) {
          printUsageAndExit();
        } else if (cmd.equals("--help")) {
          printUsageAndExit();
        } else {
          System.err.println("UNEXPECTED: " + cmd);
          printUsageAndExit();
        }
      } catch (Exception e) {
        printUsageAndExit();
      }
    }

    if (compress) {
      Configuration conf = getConf();
      conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
    }

    if (cipher != null) {
      // Set up WAL for encryption
      Configuration conf = getConf();
      conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
      conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
      conf.setClass(
          "hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class, WAL.Reader.class);
      conf.setClass(
          "hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class, Writer.class);
      conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
      conf.set(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, cipher);
    }

    if (numThreads < numRegions) {
      LOG.warn("Number of threads is less than the number of regions; some regions will sit idle.");
    }

    // Internal config. goes off number of threads; if more threads than handlers, stuff breaks.
    // In regionserver, number of handlers == number of threads.
    getConf().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, numThreads);

    // Run WAL Performance Evaluation
    // First set the fs from configs.  In case we are on hadoop1
    FSUtils.setFsDefault(getConf(), FSUtils.getRootDir(getConf()));
    FileSystem fs = FileSystem.get(getConf());
    LOG.info("FileSystem: " + fs);

    SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
    TraceScope scope = Trace.startSpan("WALPerfEval", trace ? Sampler.ALWAYS : Sampler.NEVER);

    try {
      if (rootRegionDir == null) {
        rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("WALPerformanceEvaluation");
      }
      rootRegionDir = rootRegionDir.makeQualified(fs);
      cleanRegionRootDir(fs, rootRegionDir);
      FSUtils.setRootDir(getConf(), rootRegionDir);
      final WALFactory wals = new WALFactory(getConf(), null, "wals");
      final HRegion[] regions = new HRegion[numRegions];
      final Runnable[] benchmarks = new Runnable[numRegions];
      final MockRegionServerServices mockServices = new MockRegionServerServices(getConf());
      final LogRoller roller = new LogRoller(mockServices, mockServices);
      Threads.setDaemonThreadRunning(roller.getThread(), "WALPerfEval.logRoller");

      try {
        for (int i = 0; i < numRegions; i++) {
          // Initialize Table Descriptor
          // a table per desired region means we can avoid carving up the key space
          final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
          regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
          benchmarks[i] =
              Trace.wrap(
                  new WALPutBenchmark(
                      regions[i], htd, numIterations, noSync, syncInterval, traceFreq));
        }
        ConsoleReporter.enable(this.metrics, 30, TimeUnit.SECONDS);
        long putTime = runBenchmark(benchmarks, numThreads);
        logBenchmarkResult(
            "Summary: threads="
                + numThreads
                + ", iterations="
                + numIterations
                + ", syncInterval="
                + syncInterval,
            numIterations * numThreads,
            putTime);

        for (int i = 0; i < numRegions; i++) {
          if (regions[i] != null) {
            closeRegion(regions[i]);
            regions[i] = null;
          }
        }
        if (verify) {
          LOG.info("verifying written log entries.");
          Path dir =
              new Path(
                  FSUtils.getRootDir(getConf()), DefaultWALProvider.getWALDirectoryName("wals"));
          long editCount = 0;
          FileStatus[] fsss = fs.listStatus(dir);
          if (fsss.length == 0) throw new IllegalStateException("No WAL found");
          for (FileStatus fss : fsss) {
            Path p = fss.getPath();
            if (!fs.exists(p)) throw new IllegalStateException(p.toString());
            editCount += verify(wals, p, verbose);
          }
          long expected = numIterations * numThreads;
          if (editCount != expected) {
            throw new IllegalStateException("Counted=" + editCount + ", expected=" + expected);
          }
        }
      } finally {
        mockServices.stop("test clean up.");
        for (int i = 0; i < numRegions; i++) {
          if (regions[i] != null) {
            closeRegion(regions[i]);
          }
        }
        if (null != roller) {
          LOG.info("shutting down log roller.");
          Threads.shutdown(roller.getThread());
        }
        wals.shutdown();
        // Remove the root dir for this test region
        if (cleanup) cleanRegionRootDir(fs, rootRegionDir);
      }
    } finally {
      // We may be called inside a test that wants to keep on using the fs.
      if (!noclosefs) fs.close();
      scope.close();
      if (receiverHost != null) receiverHost.closeReceivers();
    }

    return (0);
  }
Example #18
 /**
  * Get the full path to the archive directory on the configured {@link FileSystem}
  *
  * @param conf to look for archive directory name and root directory. Cannot be null. Notes for
  *     testing: requires a FileSystem root directory to be specified.
  * @return the full {@link Path} to the archive directory, as defined by the configuration
  * @throws IOException if an unexpected error occurs
  */
 public static Path getArchivePath(Configuration conf) throws IOException {
   return getArchivePath(FSUtils.getRootDir(conf));
 }
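A hedged sketch of the call; in stock HBase the archive root typically resolves to ${hbase.rootdir}/archive:

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;

 public class ArchivePathSketch {
   public static void main(String[] args) throws Exception {
     Configuration conf = HBaseConfiguration.create(); // hbase.rootdir must be set
     Path archiveRoot = HFileArchiveUtil.getArchivePath(conf);
     FileSystem fs = archiveRoot.getFileSystem(conf);
     System.out.println(archiveRoot + " exists=" + fs.exists(archiveRoot));
   }
 }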