public static void main(String[] args) throws IOException {
    Path baseDir = null;
    String localPath = null;
    String preservePath = null;
    String sIgnoreTablesFilename = null;
    String sNoPreserveFilename = null;
    String sDateString = null;
    long size = 0;

    // UNIX timestamps (seconds since the epoch) for right now
    long now = new java.util.Date().getTime() / 1000;
    long maxDate = now;

    for (int i = 0; i < args.length; i++) {
      if (args[i].equals("--hdfs-path")) {
        baseDir = new Path(args[++i]);
        continue;
      }
      if (args[i].equals("--local-path")) {
        localPath = args[++i];
        continue;
      }
      if (args[i].equals("--preserve-path")) {
        preservePath = args[++i];
        continue;
      }
      if (args[i].equals("--no-preserve")) {
        sNoPreserveFilename = args[++i];
        continue;
      }
      if (args[i].equals("--ignore-tables")) {
        sIgnoreTablesFilename = args[++i];
        continue;
      }
      if (args[i].equals("--sleep")) {
        try {
          m_nSleepSeconds = Integer.parseInt(args[++i]);
        } catch (Exception e) {
          System.err.println("ERROR: " + e.toString() + "\n");
          usage();
        }
        continue;
      }
      if (args[i].equals("--dry-run")) {
        m_bDryRun = true;
        continue;
      }
      if (args[i].equals("--date")) {
        sDateString = args[++i];
        continue;
      }
      if (args[i].equals("--max-date")) {
        maxDate = Long.parseLong(args[++i]);
        continue;
      }
      if (args[i].equals("--max-bytes")) {
        size = Long.parseLong(args[++i]);
        continue;
      }

      System.err.println("ERROR: unknown arg " + args[i]);
      usage();
    }

    if (baseDir == null || localPath == null || preservePath == null || sDateString == null) {
      usage();
    }

    long minDate;

    if ("yesterday".equals(sDateString)) {
      // figure out yesterday's date; use add() rather than roll() so the
      // larger fields (month/year) carry correctly across boundaries
      Calendar cal = Calendar.getInstance();
      cal.add(Calendar.DAY_OF_YEAR, -1);

      // yesterday midnight
      cal.set(Calendar.HOUR_OF_DAY, 0);
      cal.set(Calendar.MINUTE, 0);
      cal.set(Calendar.SECOND, 0);
      cal.set(Calendar.MILLISECOND, 0);

      minDate = cal.getTimeInMillis() / 1000;

      // yesterday end of day
      cal.set(Calendar.HOUR_OF_DAY, 23);
      cal.set(Calendar.MINUTE, 59);
      cal.set(Calendar.SECOND, 59);
      cal.set(Calendar.MILLISECOND, 999);

      maxDate = cal.getTimeInMillis() / 1000;
    } else if ("last-week".equals(sDateString)) {
      minDate = maxDate - (7 * 24 * 60 * 60);
    } else if ("last-day".equals(sDateString)) {
      minDate = maxDate - (24 * 60 * 60);
    } else {
      // UNIX timestamp (seconds since the epoch) of the last backup
      minDate = Long.parseLong(sDateString);
    }

    long tmpDate = 0;
    BackupHdfs bak = new BackupHdfs();

    // initialize the list of tables to ignore
    if (sIgnoreTablesFilename != null) {
      bak.initializeTablesToIgnore(sIgnoreTablesFilename);
    }

    // initialize list of files to not preserve
    if (sNoPreserveFilename != null) {
      bak.initializeNoPreserve(sNoPreserveFilename);
    }

    ArrayList<Path> pathList = new ArrayList<Path>(2000);
    HashMap<Path, Long> hmTimestamps = new HashMap<Path, Long>();

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Proceed only if the HDFS path is a directory
    if (fs.getFileStatus(baseDir).isDir()) {
      Calendar cal = Calendar.getInstance();

      System.err.println("");
      cal.setTimeInMillis(minDate * 1000);
      System.err.println("min date = " + cal.getTime().toString());

      cal.setTimeInMillis(maxDate * 1000);
      System.err.println("max date = " + cal.getTime().toString());

      System.err.println("");
      System.err.println("Searching filesystem: " + baseDir.toUri().getPath());

      bak.checkDir(fs, minDate, maxDate, baseDir, pathList, hmTimestamps);

      System.err.println("");
      System.err.println("Skipped " + m_nIgnoredTables + " files due to ignored tables");

      System.err.println("");
      System.err.println("Number of files to backup = " + pathList.size());

      System.err.println("Total bytes to backup = " + prettyPrintBytes(m_nTotalBytes));

      System.err.println("");
      System.err.println("sorting list of files...");
      Collections.sort(pathList, new DateComparator(hmTimestamps));
      System.err.println("done");

      System.err.println("");
      System.err.println("starting backup...");
      tmpDate = bak.backupFiles(localPath, preservePath, fs, pathList, size);

      bak.closeFiles();

      System.err.println("");
      System.err.println("backup completed...");
    }

    if (tmpDate == 0) {
      // Size limit not reached: print the timestamp for right now
      System.out.println(maxDate);
    } else {
      // Print out date for last file backed up
      System.err.println("Size limit reached.");
      System.out.println(tmpDate);
    }

    System.exit(0);
  }
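A minimal invocation sketch for the driver above, assuming the main method belongs to the BackupHdfs class it instantiates; every path and value below is hypothetical, and --date also accepts "yesterday", "last-week", or a UNIX timestamp from a previous run:

// Hypothetical launcher for the backup driver above.
public class BackupHdfsDemo {
  public static void main(String[] argv) throws java.io.IOException {
    String[] args = {
      "--hdfs-path", "/user/hive/warehouse", // HDFS tree to scan (hypothetical)
      "--local-path", "/mnt/backup",         // local destination root
      "--preserve-path", "/mnt/preserved",   // where overwritten local files are moved
      "--date", "last-day",                  // back up files modified in the last 24h
      "--max-bytes", "0"                     // 0 = no size limit
    };
    BackupHdfs.main(args); // prints the resume timestamp on stdout, then exits
  }
}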
Example #2
 public H2OHdfsInputStream(Path p, long offset, ProgressMonitor pmon) throws IOException {
   super(offset, pmon);
   _path = p;
   _fs = FileSystem.get(p.toUri(), CONF);
   setExpectedSz(_fs.getFileStatus(p).getLen());
   open();
 }
Example #3
 public static void addFolder2(Path p, ArrayList<String> keys, ArrayList<String> failed)
     throws IOException {
   FileSystem fs = FileSystem.get(p.toUri(), PersistHdfs.CONF);
   if (!fs.exists(p)) {
     failed.add("Path does not exist: '" + p.toString() + "'");
     return;
   }
   addFolder2(fs, p, keys, failed);
 }
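A hedged usage sketch for addFolder2; the caller is assumed to live in the same class, the HDFS URI is hypothetical, and java.util.ArrayList plus org.apache.hadoop.fs.Path are assumed imported:

 // Hypothetical caller: missing paths are reported via 'failed', not thrown.
 public static void loadFolderDemo() throws IOException {
   ArrayList<String> keys = new ArrayList<String>();
   ArrayList<String> failed = new ArrayList<String>();
   addFolder2(new Path("hdfs://namenode:8020/data/in"), keys, failed); // hypothetical URI
   System.out.println("keys=" + keys.size() + ", failed=" + failed);
 }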
Example #4
 // Loading/Writing ice to HDFS
 PersistHdfs(URI uri) {
   try {
     _iceRoot = new Path(uri + "/ice" + H2O.SELF_ADDRESS.getHostAddress() + "-" + H2O.API_PORT);
     // Make the directory as-needed
     FileSystem fs = FileSystem.get(_iceRoot.toUri(), CONF);
     fs.mkdirs(_iceRoot);
   } catch (Exception e) {
     throw Log.errRTExcept(e);
   }
 }
 /**
  * Add a file path to the current set of classpath entries. It adds the file to cache as well.
  * Intended to be used by user code.
  *
  * @param file Path of the file to be added
  * @param conf Configuration that contains the classpath setting
  * @param fs FileSystem with respect to which {@code file} should be interpreted.
  */
 public static void addFileToClassPath(Path file, Configuration conf, FileSystem fs)
     throws IOException {
   String filepath = file.toUri().getPath();
   String classpath = conf.get("mapred.job.classpath.files");
   conf.set(
       "mapred.job.classpath.files",
       classpath == null ? filepath : classpath + System.getProperty("path.separator") + filepath);
   URI uri = fs.makeQualified(file).toUri();
   addCacheFile(uri, conf);
 }
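A short usage sketch, assumed to sit in the same class as addFileToClassPath; the jar path is hypothetical:

 // Hypothetical caller: ships a jar through the distributed cache and appends
 // it to "mapred.job.classpath.files" so tasks see it on their classpath.
 public static void addJarDemo() throws IOException {
   Configuration conf = new Configuration();
   FileSystem fs = FileSystem.get(conf);
   addFileToClassPath(new Path("/libs/my-udfs.jar"), conf, fs); // hypothetical jar
   System.out.println(conf.get("mapred.job.classpath.files"));
 }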
Example #6
 public static void addFolder(Path p, JsonArray succeeded, JsonArray failed) throws IOException {
   FileSystem fs = FileSystem.get(p.toUri(), PersistHdfs.CONF);
   if (!fs.exists(p)) {
     JsonObject o = new JsonObject();
     o.addProperty(Constants.FILE, p.toString());
     o.addProperty(Constants.ERROR, "Path does not exist!");
     failed.add(o);
     return;
   }
   addFolder(fs, p, succeeded, failed);
 }
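A hedged usage sketch for this JSON-reporting variant; same-class caller, hypothetical URI, and Gson's JsonArray assumed from the JsonObject usage above:

 // Hypothetical caller collecting per-path results as JSON.
 public static void importDemo() throws IOException {
   JsonArray succeeded = new JsonArray();
   JsonArray failed = new JsonArray();
   addFolder(new Path("hdfs://namenode:8020/datasets"), succeeded, failed); // hypothetical URI
   System.out.println("ok=" + succeeded.size() + ", failed=" + failed.size());
 }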
Example #7
  /**
   * Make a path relative to a root path. absPath is assumed to descend from
   * root; if it does not, the returned path is null.
   */
  static String makeRelative(Path root, Path absPath) {
    if (!absPath.isAbsolute()) {
      throw new IllegalArgumentException("!absPath.isAbsolute(), absPath=" + absPath);
    }
    String p = absPath.toUri().getPath();

    StringTokenizer pathTokens = new StringTokenizer(p, "/");
    for (StringTokenizer rootTokens = new StringTokenizer(root.toUri().getPath(), "/");
        rootTokens.hasMoreTokens(); ) {
      if (!rootTokens.nextToken().equals(pathTokens.nextToken())) {
        return null;
      }
    }
    StringBuilder sb = new StringBuilder();
    for (; pathTokens.hasMoreTokens(); ) {
      sb.append(pathTokens.nextToken());
      if (pathTokens.hasMoreTokens()) {
        sb.append(Path.SEPARATOR);
      }
    }
    return sb.length() == 0 ? "." : sb.toString();
  }
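To make the token-by-token walk concrete, a small sketch of the expected results (hypothetical paths; assumed to live in the same class, run with -ea for the asserts):

  // Illustrates makeRelative's contract with hypothetical paths.
  static void makeRelativeDemo() {
    Path root = new Path("/user/hive");
    assert "warehouse/t1".equals(makeRelative(root, new Path("/user/hive/warehouse/t1")));
    assert ".".equals(makeRelative(root, new Path("/user/hive")));  // nothing left below root
    assert makeRelative(root, new Path("/user/oozie/x")) == null;   // not a descendant of root
    // makeRelative(root, new Path("x/y")) throws IllegalArgumentException: input not absolute
  }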
  /**
   * Add an archive path to the current set of classpath entries. It adds the archive to cache as
   * well. Intended to be used by user code.
   *
   * @param archive Path of the archive to be added
   * @param conf Configuration that contains the classpath setting
   * @param fs FileSystem with respect to which {@code archive} should be interpreted.
   */
  public static void addArchiveToClassPath(Path archive, Configuration conf, FileSystem fs)
      throws IOException {
    String archivepath = archive.toUri().getPath();
    String classpath = conf.get("mapred.job.classpath.archives");
    conf.set(
        "mapred.job.classpath.archives",
        classpath == null
            ? archivepath
            : classpath + System.getProperty("path.separator") + archivepath);
    URI uri = fs.makeQualified(archive).toUri();

    addCacheArchive(uri, conf);
  }
  private static URI addArchiveToClassPathHelper(Path archive, Configuration conf)
      throws IOException {

    String classpath = conf.get("mapred.job.classpath.archives");

    // the scheme/authority use ':' as a separator; put the unqualified path in the classpath
    String archivePath = archive.toUri().getPath();

    conf.set(
        "mapred.job.classpath.archives",
        classpath == null
            ? archivePath
            : classpath + System.getProperty("path.separator") + archivePath);
    return archive.makeQualified(archive.getFileSystem(conf)).toUri();
  }
 /**
  * Delete a local path with asyncDiskService if available, or otherwise synchronously with local
  * file system.
  */
 private static void deleteLocalPath(
     MRAsyncDiskService asyncDiskService, LocalFileSystem fs, Path path) throws IOException {
   boolean deleted = false;
   if (asyncDiskService != null) {
     // Try to delete using asyncDiskService
     String localPathToDelete = path.toUri().getPath();
     deleted = asyncDiskService.moveAndDeleteAbsolutePath(localPathToDelete);
     if (!deleted) {
       LOG.warn(
           "Cannot find DistributedCache path "
               + localPathToDelete
               + " on any of the asyncDiskService volumes!");
     }
   }
   if (!deleted) {
     // If no asyncDiskService, we will delete the files synchronously
     fs.delete(path, true);
   }
   LOG.info("Deleted path " + path);
 }
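A hedged sketch of the synchronous fallback: passing null for the async service forces the LocalFileSystem branch. The caller is assumed to sit in the same class, and the path is hypothetical:

 // Hypothetical caller: with no MRAsyncDiskService available, deleteLocalPath
 // falls through to the synchronous fs.delete() branch.
 public static void deleteDemo() throws IOException {
   LocalFileSystem lfs = FileSystem.getLocal(new Configuration());
   deleteLocalPath(null, lfs, new Path("/tmp/cache-entry")); // hypothetical path
 }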
Example #11
  /**
   * Initialize DFSCopyFileMapper specific job-configuration.
   *
   * @param conf : The dfs/mapred configuration.
   * @param jobConf : The handle to the jobConf object to be initialized.
   * @param args Arguments
   */
  private static void setup(Configuration conf, JobConf jobConf, final Arguments args)
      throws IOException {
    jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString());

    // set boolean values
    final boolean update = args.flags.contains(Options.UPDATE);
    final boolean overwrite = !update && args.flags.contains(Options.OVERWRITE);
    jobConf.setBoolean(Options.UPDATE.propertyname, update);
    jobConf.setBoolean(Options.OVERWRITE.propertyname, overwrite);
    jobConf.setBoolean(
        Options.IGNORE_READ_FAILURES.propertyname,
        args.flags.contains(Options.IGNORE_READ_FAILURES));
    jobConf.setBoolean(
        Options.PRESERVE_STATUS.propertyname, args.flags.contains(Options.PRESERVE_STATUS));

    final String randomId = getRandomId();
    JobClient jClient = new JobClient(jobConf);
    Path jobDirectory = new Path(jClient.getSystemDir(), NAME + "_" + randomId);
    jobConf.set(JOB_DIR_LABEL, jobDirectory.toString());

    FileSystem dstfs = args.dst.getFileSystem(conf);
    boolean dstExists = dstfs.exists(args.dst);
    boolean dstIsDir = false;
    if (dstExists) {
      dstIsDir = dstfs.getFileStatus(args.dst).isDir();
    }

    // default logPath
    Path logPath = args.log;
    if (logPath == null) {
      String filename = "_distcp_logs_" + randomId;
      if (!dstExists || !dstIsDir) {
        Path parent = args.dst.getParent();
        if (!dstfs.exists(parent)) {
          dstfs.mkdirs(parent);
        }
        logPath = new Path(parent, filename);
      } else {
        logPath = new Path(args.dst, filename);
      }
    }
    FileOutputFormat.setOutputPath(jobConf, logPath);

    // create src list, dst list
    FileSystem jobfs = jobDirectory.getFileSystem(jobConf);

    Path srcfilelist = new Path(jobDirectory, "_distcp_src_files");
    jobConf.set(SRC_LIST_LABEL, srcfilelist.toString());
    SequenceFile.Writer src_writer =
        SequenceFile.createWriter(
            jobfs,
            jobConf,
            srcfilelist,
            LongWritable.class,
            FilePair.class,
            SequenceFile.CompressionType.NONE);

    Path dstfilelist = new Path(jobDirectory, "_distcp_dst_files");
    SequenceFile.Writer dst_writer =
        SequenceFile.createWriter(
            jobfs, jobConf, dstfilelist, Text.class, Text.class, SequenceFile.CompressionType.NONE);

    Path dstdirlist = new Path(jobDirectory, "_distcp_dst_dirs");
    jobConf.set(DST_DIR_LIST_LABEL, dstdirlist.toString());
    SequenceFile.Writer dir_writer =
        SequenceFile.createWriter(
            jobfs,
            jobConf,
            dstdirlist,
            Text.class,
            FilePair.class,
            SequenceFile.CompressionType.NONE);

    // handle the case where the destination directory doesn't exist
    // and we've only a single src directory OR we're updating/overwriting
    // the contents of the destination directory.
    final boolean special = (args.srcs.size() == 1 && !dstExists) || update || overwrite;
    int srcCount = 0, cnsyncf = 0, dirsyn = 0;
    long fileCount = 0L, byteCount = 0L, cbsyncs = 0L;
    try {
      for (Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext(); ) {
        final Path src = srcItr.next();
        FileSystem srcfs = src.getFileSystem(conf);
        FileStatus srcfilestat = srcfs.getFileStatus(src);
        Path root = special && srcfilestat.isDir() ? src : src.getParent();
        if (srcfilestat.isDir()) {
          ++srcCount;
        }

        Stack<FileStatus> pathstack = new Stack<FileStatus>();
        for (pathstack.push(srcfilestat); !pathstack.empty(); ) {
          FileStatus cur = pathstack.pop();
          FileStatus[] children = srcfs.listStatus(cur.getPath());
          for (int i = 0; i < children.length; i++) {
            boolean skipfile = false;
            final FileStatus child = children[i];
            final String dst = makeRelative(root, child.getPath());
            ++srcCount;

            if (child.isDir()) {
              pathstack.push(child);
            } else {
              // skip file if the src and the dst files are the same.
              skipfile = update && sameFile(srcfs, child, dstfs, new Path(args.dst, dst));
              // skip file if it exceed file limit or size limit
              skipfile |=
                  fileCount == args.filelimit || byteCount + child.getLen() > args.sizelimit;

              if (!skipfile) {
                ++fileCount;
                byteCount += child.getLen();

                if (LOG.isTraceEnabled()) {
                  LOG.trace("adding file " + child.getPath());
                }

                ++cnsyncf;
                cbsyncs += child.getLen();
                if (cnsyncf > SYNC_FILE_MAX || cbsyncs > BYTES_PER_MAP) {
                  src_writer.sync();
                  dst_writer.sync();
                  cnsyncf = 0;
                  cbsyncs = 0L;
                }
              }
            }

            if (!skipfile) {
              src_writer.append(
                  new LongWritable(child.isDir() ? 0 : child.getLen()), new FilePair(child, dst));
            }

            dst_writer.append(new Text(dst), new Text(child.getPath().toString()));
          }

          if (cur.isDir()) {
            String dst = makeRelative(root, cur.getPath());
            dir_writer.append(new Text(dst), new FilePair(cur, dst));
            if (++dirsyn > SYNC_FILE_MAX) {
              dirsyn = 0;
              dir_writer.sync();
            }
          }
        }
      }
    } finally {
      checkAndClose(src_writer);
      checkAndClose(dst_writer);
      checkAndClose(dir_writer);
    }

    FileStatus dststatus = null;
    try {
      dststatus = dstfs.getFileStatus(args.dst);
    } catch (FileNotFoundException fnfe) {
      LOG.info(args.dst + " does not exist.");
    }

    // create the destination directory if copying more than one file
    if (dststatus == null) {
      if (srcCount > 1 && !dstfs.mkdirs(args.dst)) {
        throw new IOException("Failed to create " + args.dst);
      }
    }

    final Path sorted = new Path(jobDirectory, "_distcp_sorted");
    checkDuplication(jobfs, dstfilelist, sorted, conf);

    if (dststatus != null && args.flags.contains(Options.DELETE)) {
      deleteNonexisting(dstfs, dststatus, sorted, jobfs, jobDirectory, jobConf, conf);
    }

    Path tmpDir =
        new Path(
            (dstExists && !dstIsDir) || (!dstExists && srcCount == 1)
                ? args.dst.getParent()
                : args.dst,
            "_distcp_tmp_" + randomId);
    jobConf.set(TMP_DIR_LABEL, tmpDir.toUri().toString());
    LOG.info("srcCount=" + srcCount);
    jobConf.setInt(SRC_COUNT_LABEL, srcCount);
    jobConf.setLong(TOTAL_SIZE_LABEL, byteCount);
    setMapCount(byteCount, jobConf);
  }
Example #12
 public static void addFolder(Path p, JsonArray succeeded, JsonArray failed) throws IOException {
   FileSystem fs = FileSystem.get(p.toUri(), PersistHdfs.CONF);
   addFolder(fs, p, succeeded, failed);
 }
  /**
   * Method to walk the HDFS filesystem recursively to find all files to back up.
   *
   * @param fs FileSystem object from HDFS
   * @param minDate oldest modification date for files to be backed up
   * @param maxDate newest modification date for files to be backed up
   * @param p path in HDFS to search for files
   * @param pathList will be filled with all qualifying files under p
   * @param hmTimestamps hashmap of timestamps for later sorting
   */
  public void checkDir(
      FileSystem fs,
      long minDate,
      long maxDate,
      Path p,
      ArrayList<Path> pathList,
      HashMap<Path, Long> hmTimestamps) {
    long tmpDate;
    FileStatus[] fStat;

    try {
      String sPath = p.toUri().getPath();

      // If this is a directory
      if (fs.getFileStatus(p).isDir()) {
        // ignore certain directories
        if ("dfstmp".equals(p.getName())
            || "tmp".equals(p.getName())
            || "jobtracker".equals(p.getName())
            || sPath.startsWith("/mapred")
            || "ops".equals(p.getName())
            || p.getName().startsWith("_distcp_logs")) {
          return;
        }

        // dump the mkdir and chmod commands for this
        // directory -- skip root directory only
        {
          FileStatus stat = fs.getFileStatus(p);

          if (!sPath.equals("/")) {
            m_wrMkdirs.println("hadoop fs -mkdir " + sPath);
          }

          m_wrChmods.println(
              "hadoop fs -chown " + stat.getOwner() + ":" + stat.getGroup() + " " + sPath);

          m_wrChmods.println(
              "hadoop fs -chmod "
                  + Long.toOctalString(stat.getPermission().toShort())
                  + " "
                  + sPath);
        }

        fStat = fs.listStatus(p);

        // Do a recursive call to all elements
        for (int i = 0; i < fStat.length; i++) {
          checkDir(fs, minDate, maxDate, fStat[i].getPath(), pathList, hmTimestamps);
        }
      } else {
        // If not a directory then we've found a file

        // ignore crc files
        if (p.getName().endsWith(".crc")) {
          return;
        }

        // ignore other files
        if (sPath.startsWith("/user/oozie/etl/workflows/")) {
          return;
        }

        // try to get the table name from the path. There are
        // various types of tables, from those replicated from
        // another database to regular hive tables to
        // partitioned hive tables.  We use table names to
        // both exclude some from the backup, and for the rest
        // to dump out the schema and partition name.
        if (m_ignoreTables != null && m_ignoreTables.doIgnoreFile(sPath)) {
          m_nIgnoredTables++;

          if (m_nIgnoredTables < 5) {
            System.out.println("Skipping ignore-table file: " + sPath);
          } else if (m_nIgnoredTables == 5) {
            System.out.println("(...not showing other skipped tables...)");
          }
          return;
        }

        FileStatus stat = fs.getFileStatus(p);

        tmpDate = stat.getModificationTime() / 1000;

        // store the chmods/chowns for all files
        m_wrChmods.println(
            "hadoop fs -chown " + stat.getOwner() + ":" + stat.getGroup() + " " + sPath);

        m_wrChmods.println("hadoop fs -chmod " + stat.getPermission().toShort() + " " + sPath);

        // check dates.  is the file too old?
        if (tmpDate < minDate) {
          return;
        }

        // is the file too recent?
        if (tmpDate > maxDate) {
          // System.out.println("file too recent: " + sPath);
          return;
        }

        // file timestamp is ok
        pathList.add(p);

        hmTimestamps.put(p, Long.valueOf(tmpDate));

        // track the total bytes needed for the backup
        m_nTotalBytes += fs.getContentSummary(p).getLength();
      }
    } catch (IOException e) {
      System.err.println("ERROR: could not open " + p + ": " + e);

      // System.exit(1) ;
    }
  }
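The DateComparator that main applies to this list is not shown above; a plausible reconstruction, assuming it orders paths oldest-first by the timestamps checkDir collects:

  // Hypothetical reconstruction: sorts paths by the modification times stored
  // in hmTimestamps, oldest first, so backupFiles copies in timestamp order.
  class DateComparator implements java.util.Comparator<Path> {
    private final HashMap<Path, Long> m_timestamps;

    DateComparator(HashMap<Path, Long> timestamps) {
      m_timestamps = timestamps;
    }

    public int compare(Path a, Path b) {
      return m_timestamps.get(a).compareTo(m_timestamps.get(b));
    }
  }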
  /**
   * Method to copy files from HDFS to the local filesystem.
   *
   * @param localPath path on the machine's local filesystem
   * @param preservePath where an existing local file is moved before being overwritten
   * @param fs FileSystem object from HDFS
   * @param pathList list of paths for files that might need to be backed up
   * @param size max size in bytes to be backed up; 0 means no limit
   * @return date of the last file backed up if the size limit was reached, else zero
   */
  public long backupFiles(
      String localPath, String preservePath, FileSystem fs, ArrayList<Path> pathList, long size) {
    Path fsPath;
    long tmpSize = 0;
    long tmpDate = 0;

    // Start iterating over all paths
    for (Path hdfsPath : pathList) {
      try {
        long nFileSize = fs.getContentSummary(hdfsPath).getLength();
        tmpSize = tmpSize + nFileSize;

        if ((tmpSize <= size) || (size == 0)) {
          FileStatus stat = fs.getFileStatus(hdfsPath);

          System.err.println(
              "File "
                  + hdfsPath.toUri().getPath()
                  + " "
                  + nFileSize
                  + " bytes, "
                  + "perms: "
                  + stat.getOwner()
                  + "/"
                  + stat.getGroup()
                  + ", "
                  + stat.getPermission().toString());

          tmpDate = stat.getModificationTime() / 1000;

          String sFsPath = localPath + hdfsPath.toUri().getPath();
          fsPath = new Path(sFsPath);

          File f = new File(sFsPath);

          // COMMENTED OUT: until a few backup cycles have run
          // and the mtime has in fact been set on all copied
          // files.
          //
          // ignore it if the file exists and has the same mtime
          // if (f.exists() && f.isFile() && f.lastModified() == stat.getModificationTime())
          // {
          // System.out.println("no need to backup " + f.toString() + ", mtime matches hdfs");
          // continue;
          // }

          if (!m_bDryRun) {
            // check if we need to back up the local file
            // (not directory), if it already exists.
            if (f.exists() && f.isFile()) {
              // ignore files with substrings in the
              // no-preserve file
              if (doPreserveFile(sFsPath)) {
                // move it to the backup path
                String sNewPath = preservePath + hdfsPath.toUri().getPath();
                File newFile = new File(sNewPath);

                // create the directory structure for the new file if needed
                if (!newFile.getParentFile().exists()) {
                  if (!newFile.getParentFile().mkdirs()) {
                    System.err.println("Failed to mkdirs " + newFile.getParentFile().toString());
                    System.exit(1);
                  }
                }

                // rename existing file to new location
                if (!f.renameTo(newFile)) {
                  System.err.println(
                      "Failed to renameTo " + f.toString() + " to " + newFile.toString());
                  System.exit(1);
                }

                System.out.println("preserved " + f.toString() + " into " + newFile.toString());
              } else {
                System.out.println("skipped preservation of " + f.toString());
              }
            }

            // copy from hdfs to local filesystem
            fs.copyToLocalFile(hdfsPath, fsPath);

            // set the mtime to match hdfs file
            f.setLastModified(stat.getModificationTime());

            // compare checksums on both files
            compareChecksums(fs, hdfsPath, sFsPath);
          }

          // don't print the progress after every file -- go
          // by at least 1% increments
          long nPercentDone = (long) (100 * tmpSize / m_nTotalBytes);
          if (nPercentDone > m_nLastPercentBytesDone) {
            System.out.println(
                "progress: copied "
                    + prettyPrintBytes(tmpSize)
                    + ", "
                    + nPercentDone
                    + "% done"
                    + ", tstamp="
                    + tmpDate);

            m_nLastPercentBytesDone = nPercentDone;
          }

          if (m_nSleepSeconds > 0) {
            try {
              Thread.sleep(1000 * m_nSleepSeconds);
            } catch (Exception e2) {
              // ignore
            }
          }
        } else {
          return tmpDate;
        }
      } catch (IOException e) {
        System.err.println("FATAL ERROR: Something wrong with the file");
        System.err.println(e);
        System.out.println(tmpDate);
        System.exit(1);

        return 0;
      }
    }

    return 0;
  }
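doPreserveFile and initializeNoPreserve are referenced above but not shown; a plausible sketch of the pair, assuming the no-preserve file holds one substring pattern per line (java.io.BufferedReader/FileReader and java.util.List/ArrayList assumed imported):

  // Hypothetical reconstruction of the no-preserve machinery used above.
  private List<String> m_noPreserve = new ArrayList<String>();

  public void initializeNoPreserve(String filename) throws IOException {
    BufferedReader rd = new BufferedReader(new FileReader(filename));
    String line;
    while ((line = rd.readLine()) != null) {
      if (!line.trim().isEmpty()) {
        m_noPreserve.add(line.trim()); // each line is a substring to match
      }
    }
    rd.close();
  }

  // A local file is preserved unless its path contains a no-preserve substring.
  public boolean doPreserveFile(String sFsPath) {
    for (String pattern : m_noPreserve) {
      if (sFsPath.contains(pattern)) {
        return false;
      }
    }
    return true;
  }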