Example #1
 /*
  * Write some entries to the log file:
  * 7 different tables named "testtb-%d",
  * 10 regions per table named "tableName-region-%d",
  * 50 entries per region with row key "row-%d".
  */
 private void writeTestLog(final Path logFile) throws IOException {
   fs.mkdirs(logFile.getParent());
   HLog.Writer writer = HLog.createWriter(fs, logFile, conf);
   try {
     for (int i = 0; i < 7; ++i) {
       byte[] tableName = getTableName(i);
       for (int j = 0; j < 10; ++j) {
         byte[] regionName = getRegionName(tableName, j);
         for (int k = 0; k < 50; ++k) {
           byte[] rowkey = Bytes.toBytes("row-" + k);
           HLogKey key =
               new HLogKey(
                   regionName,
                   tableName,
                   (long) k,
                   System.currentTimeMillis(),
                   HConstants.DEFAULT_CLUSTER_ID);
           WALEdit edit = new WALEdit();
           edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
           writer.append(new HLog.Entry(key, edit));
         }
       }
     }
   } finally {
     writer.close();
   }
 }
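The helpers getTableName and getRegionName are not shown in this example. A minimal sketch consistent with the naming scheme described in the comment (the method bodies below are assumptions, not the original test code) could look like:

  private byte[] getTableName(int i) {
    // "testtb-%d" naming scheme from the comment above
    return Bytes.toBytes("testtb-" + i);
  }

  private byte[] getRegionName(final byte[] tableName, int j) {
    // "tableName-region-%d" naming scheme from the comment above
    return Bytes.toBytes(Bytes.toString(tableName) + "-region-" + j);
  }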
Example #2
 @Override
 protected void chore() {
   try {
     FileStatus[] files = this.fs.listStatus(this.oldLogDir);
     int nbDeletedLog = 0;
     FILE:
     for (FileStatus file : files) {
       Path filePath = file.getPath();
       if (HLog.validateHLogFilename(filePath.getName())) {
         for (LogCleanerDelegate logCleaner : logCleanersChain) {
           if (!logCleaner.isLogDeletable(filePath)) {
            // this log is not deletable, continue to the next log file
             continue FILE;
           }
         }
         // delete this log file if it passes all the log cleaners
         this.fs.delete(filePath, true);
         nbDeletedLog++;
       } else {
         LOG.warn("Found a wrongly formated file: " + file.getPath().getName());
         this.fs.delete(filePath, true);
         nbDeletedLog++;
       }
       if (nbDeletedLog >= maxDeletedLogs) {
         break;
       }
     }
   } catch (IOException e) {
     e = RemoteExceptionHandler.checkIOException(e);
     LOG.warn("Error while cleaning the logs", e);
   }
 }
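As far as this chore is concerned, each delegate in logCleanersChain only has to answer isLogDeletable(Path). Purely as an illustration (the real LogCleanerDelegate contract may require more, such as configuration hooks; the fs and ttlMillis fields here are hypothetical), a time-to-live style check could be written as:

  // Hypothetical TTL-based check for a log cleaner delegate; only the
  // isLogDeletable(Path) signature is taken from the chore above.
  public boolean isLogDeletable(Path filePath) {
    try {
      long age = System.currentTimeMillis()
          - this.fs.getFileStatus(filePath).getModificationTime();
      return age > this.ttlMillis; // old enough, safe to delete
    } catch (IOException e) {
      LOG.warn("Unable to stat " + filePath + ", keeping it for now", e);
      return false; // when in doubt, keep the log
    }
  }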
Example #3
 /*
  * Verify that every log in the table directory contains only the specified
  * table and regions.
  */
 private void verifyRecoverEdits(
     final Path tableDir, final byte[] tableName, final Map<byte[], byte[]> regionsMap)
     throws IOException {
   for (FileStatus regionStatus : FSUtils.listStatus(fs, tableDir)) {
     assertTrue(regionStatus.getPath().getName().startsWith(Bytes.toString(tableName)));
     Path regionEdits = HLog.getRegionDirRecoveredEditsDir(regionStatus.getPath());
     byte[] regionName = Bytes.toBytes(regionStatus.getPath().getName());
     assertFalse(regionsMap.containsKey(regionName));
     for (FileStatus logStatus : FSUtils.listStatus(fs, regionEdits)) {
       HLog.Reader reader = HLog.getReader(fs, logStatus.getPath(), conf);
       try {
         HLog.Entry entry;
         while ((entry = reader.next()) != null) {
           HLogKey key = entry.getKey();
           assertArrayEquals(tableName, key.getTablename());
           assertArrayEquals(regionName, key.getEncodedRegionName());
         }
       } finally {
         reader.close();
       }
     }
   }
 }
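Note that regionsMap is keyed on raw byte[], which only behaves as intended if the map compares key contents rather than references. A sketch of how such a map is typically constructed (an assumption about code outside this example, using the standard HBase Bytes.BYTES_COMPARATOR idiom):

  // byte[] keys do not compare by content in a plain HashMap; a TreeMap with
  // Bytes.BYTES_COMPARATOR is the usual HBase idiom for byte[]-keyed maps.
  Map<byte[], byte[]> regionsMap =
      new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);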
Example #4
  void process() throws IOException {
    try {
      for (HRegionInfo[] regionsToMerge = next();
          regionsToMerge != null;
          regionsToMerge = next()) {
        if (!merge(regionsToMerge)) {
          return;
        }
      }
    } finally {
      try {
        hlog.closeAndDelete();
      } catch (IOException e) {
        LOG.error(e);
      }
    }
  }
Example #5
 @After
 public void tearDown() throws IOException {
   IOException ex = null;
   try {
     region.close();
   } catch (IOException e) {
     LOG.warn("Caught Exception", e);
     ex = e;
   }
   try {
     hlog.closeAndDelete();
   } catch (IOException e) {
     LOG.warn("Caught Exception", e);
     ex = e;
   }
   if (ex != null) {
     throw ex;
   }
 }
Example #6
 @After
 public void tearDown() throws Exception {
   HLog hlog = region.getLog();
   region.close();
   hlog.closeAndDelete();
 }
Example #7
  public void splitLog(final List<ServerName> serverNames) throws IOException {
    long splitTime = 0, splitLogSize = 0;
    List<Path> logDirs = new ArrayList<Path>();
     for (ServerName serverName : serverNames) {
      Path logDir = new Path(this.rootdir,
        HLog.getHLogDirectoryName(serverName.toString()));
      Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
      // rename the directory so a rogue RS doesn't create more HLogs
      if (fs.exists(logDir)) {
        if (!this.fs.rename(logDir, splitDir)) {
          throw new IOException("Failed fs.rename for log split: " + logDir);
        }
        logDir = splitDir;
        LOG.debug("Renamed region directory: " + splitDir);
      } else if (!fs.exists(splitDir)) {
        LOG.info("Log dir for server " + serverName + " does not exist");
        continue;
      }
      logDirs.add(splitDir);
    }

    if (logDirs.isEmpty()) {
      LOG.info("No logs to split");
      return;
    }

    if (distributedLogSplitting) {
      splitLogManager.handleDeadWorkers(serverNames);
      splitTime = EnvironmentEdgeManager.currentTimeMillis();
      splitLogSize = splitLogManager.splitLogDistributed(logDirs);
      splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;
    } else {
       for (Path logDir : logDirs) {
        // splitLogLock ensures that dead region servers' logs are processed
        // one at a time
        this.splitLogLock.lock();
        try {
          HLogSplitter splitter = HLogSplitter.createLogSplitter(
            conf, rootdir, logDir, oldLogDir, this.fs);
          try {
            // If FS is in safe mode, just wait till out of it.
            FSUtils.waitOnSafeMode(conf, conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 1000));
            splitter.splitLog();
          } catch (OrphanHLogAfterSplitException e) {
            LOG.warn("Retrying splitting because of:", e);
            // An HLogSplitter instance can only be used once. Get a new instance.
            splitter = HLogSplitter.createLogSplitter(conf, rootdir, logDir,
              oldLogDir, this.fs);
            splitter.splitLog();
          }
          splitTime = splitter.getTime();
          splitLogSize = splitter.getSize();
        } finally {
          this.splitLogLock.unlock();
        }
      }
    }

    if (this.metrics != null) {
      this.metrics.addSplit(splitTime, splitLogSize);
    }
  }
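A hedged usage sketch for the method above: only the splitLog(List<ServerName>) signature comes from the code itself, while the owning object (called masterFs here) and the dead server's ServerName are assumptions.

  // Hypothetical call site: split the WALs left behind by a single dead server.
  List<ServerName> deadServers = new ArrayList<ServerName>();
  deadServers.add(deadServerName); // ServerName of the crashed region server
  masterFs.splitLog(deadServers);  // returns once the logs have been split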