Example 1
0
 private static void ignoreAndWait(final Exception e, boolean printException) {
   H2O.ignore(e, "Hit HDFS reset problem, retrying...", printException);
   try {
     Thread.sleep(500);
   } catch (InterruptedException ie) {
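     // interrupted during the back-off sleep: just fall through and return so the caller can retry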
   }
 }
Example 2
0
    /**
     * The run method lives for the life of the JobTracker, and removes Jobs that are not still
     * running, but which finished a long time ago.
     */
    public void run() {
      while (shouldRun) {
        try {
          Thread.sleep(RETIRE_JOB_CHECK_INTERVAL);
        } catch (InterruptedException ie) {
        }

        synchronized (jobs) {
          synchronized (jobInitQueue) {
            synchronized (jobsByArrival) {
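              // retire jobs that are neither RUNNING nor PREP and finished more than RETIRE_JOB_INTERVAL ago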
              for (Iterator it = jobs.keySet().iterator(); it.hasNext(); ) {
                String jobid = (String) it.next();
                JobInProgress job = (JobInProgress) jobs.get(jobid);

                if (job.getStatus().getRunState() != JobStatus.RUNNING
                    && job.getStatus().getRunState() != JobStatus.PREP
                    && (job.getFinishTime() + RETIRE_JOB_INTERVAL < System.currentTimeMillis())) {
                  it.remove();

                  jobInitQueue.remove(job);
                  jobsByArrival.remove(job);
                }
              }
            }
          }
        }
      }
    }
Example 3
0
 public static InputStream openStream(Key k, ProgressMonitor pmon) throws IOException {
   H2OHdfsInputStream res = null;
   Path p = new Path(k.toString());
   try {
     res = new H2OHdfsInputStream(p, 0, pmon);
   } catch (IOException e) {
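     // first attempt failed: back off briefly, log the failure, then retry once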
     try {
       Thread.sleep(1000);
     } catch (Exception ex) {
     }
     Log.warn("Error while opening HDFS key " + k.toString() + ", will wait and retry.");
     res = new H2OHdfsInputStream(p, 0, pmon);
   }
   return res;
 }
Example 4
0
    /**
     * The run method lives for the life of the JobTracker, and removes TaskTrackers that have not
     * checked in for some time.
     */
    public void run() {
      while (shouldRun) {
        //
        // Thread runs periodically to check whether trackers should be expired.
        // The sleep interval must be no more than half the maximum expiry time
        // for a task tracker.
        //
        try {
          Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL / 3);
        } catch (InterruptedException ie) {
        }

        //
        // Loop through all expired items in the queue
        //
        synchronized (taskTrackers) {
          synchronized (trackerExpiryQueue) {
            long now = System.currentTimeMillis();
            TaskTrackerStatus leastRecent = null;
            while ((trackerExpiryQueue.size() > 0)
                && ((leastRecent = (TaskTrackerStatus) trackerExpiryQueue.first()) != null)
                && (now - leastRecent.getLastSeen() > TASKTRACKER_EXPIRY_INTERVAL)) {

              // Remove profile from head of queue
              trackerExpiryQueue.remove(leastRecent);
              String trackerName = leastRecent.getTrackerName();

              // Figure out if last-seen time should be updated, or if tracker is dead
              TaskTrackerStatus newProfile =
                  (TaskTrackerStatus) taskTrackers.get(leastRecent.getTrackerName());
              // Items might leave the taskTracker set through other means; the
              // status stored in 'taskTrackers' might be null, which means the
              // tracker has already been destroyed.
              if (newProfile != null) {
                if (now - newProfile.getLastSeen() > TASKTRACKER_EXPIRY_INTERVAL) {
                  // Remove completely
                  updateTaskTrackerStatus(trackerName, null);
                  lostTaskTracker(leastRecent.getTrackerName());
                } else {
                  // Update time by inserting latest profile
                  trackerExpiryQueue.add(newProfile);
                }
              }
            }
          }
        }
      }
    }
Example 5
0
 public static void startTracker(Configuration conf) throws IOException {
   if (tracker != null) throw new IOException("JobTracker already running.");
   while (true) {
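     // keep retrying construction until the JobTracker comes up, logging and sleeping between attempts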
     try {
       tracker = new JobTracker(conf);
       break;
     } catch (IOException e) {
       LOG.log(Level.WARNING, "Starting tracker", e);
     }
     try {
       Thread.sleep(1000);
     } catch (InterruptedException e) {
     }
   }
   tracker.offerService();
 }
Example 6
0
  private static String lock(String lock) {
    String realPath = "";
    String parent = "/lock";
    String lockName = parent + "/" + lock;

    logger.debug("Getting lock " + lockName);

    try {
      if (zkInstance.exists(parent, false) == null)
        zkInstance.create(parent, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.fromFlag(0));
    } catch (Exception E) {
      logger.error("Error creating lock node: " + E.toString());
      return null;
    }

    List<String> children = new LinkedList<String>();
    try {
      // List <ACL> ACLList = zkInstance.getACL(lockName, zkInstance.exists(lock, false));

      realPath =
          zkInstance.create(
              lockName, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
      // children = zkInstance.getChildren(realPath, false);
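      // spin until no other sequential node for this lock sorts before ours; only then do we hold the lock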
      checkLock:
      while (true) {
        children = zkInstance.getChildren(parent, false);
        for (String curChild : children) {
          String child = parent + "/" + curChild;
          // System.out.println(child + " " + realPath + " " +
          // Integer.toString(child.compareTo(realPath)));
          if (child.compareTo(realPath) < 0
              && child.length() == realPath.length()
              && curChild.startsWith(lock)) {
            // System.out.println(child + " cmp to " + realPath);
            Thread.sleep(300);
            continue checkLock;
          }
        }
        logger.info("Got lock " + lockName);
        return realPath;
      }
    } catch (Exception E) {
      logger.error("Exception while trying to get lock " + lockName + " :" + E.toString());
      E.printStackTrace();
      return null;
    }
  }
Example 7
0
 /**
  * Map method. Copies one file from source file system to destination.
  *
  * @param key src len
  * @param value FilePair (FileStatus src, Path dst)
  * @param out Log of failed copies
  * @param reporter used to report progress and update job counters
  */
 public void map(
     LongWritable key,
     FilePair value,
     OutputCollector<WritableComparable<?>, Text> out,
     Reporter reporter)
     throws IOException {
   final FileStatus srcstat = value.input;
   final Path relativedst = new Path(value.output);
   try {
     copy(srcstat, relativedst, out, reporter);
   } catch (IOException e) {
     ++failcount;
     reporter.incrCounter(Counter.FAIL, 1);
     updateStatus(reporter);
     final String sfailure = "FAIL " + relativedst + " : " + StringUtils.stringifyException(e);
     out.collect(null, new Text(sfailure));
     LOG.info(sfailure);
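     // best-effort cleanup of the partially copied temp file, retried a few times with a short pause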
     try {
       for (int i = 0; i < 3; ++i) {
         try {
           final Path tmp = new Path(job.get(TMP_DIR_LABEL), relativedst);
           if (destFileSys.delete(tmp, true)) break;
         } catch (Throwable ex) {
           // ignore, we are just cleaning up
           LOG.debug("Ignoring cleanup exception", ex);
         }
         // update status, so we don't get timed out
         updateStatus(reporter);
         Thread.sleep(3 * 1000);
       }
     } catch (InterruptedException inte) {
       throw (IOException) new IOException().initCause(inte);
     }
   } finally {
     updateStatus(reporter);
   }
 }
Example 8
0
    public void incrementAndThrottle(int bytes) {
      if (bytesPerSec < 1) { // no throttle at all
        return;
      }
      long currentTime = System.currentTimeMillis();
      long timeDiff = currentTime - lastTime;
      if (timeDiff == 0) {
        timeDiff = 1;
      }

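      // estimate the instantaneous rate for this chunk and, if it exceeds the limit,
      // sleep just long enough to bring it back under bytesPerSec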
      double bytesPerSec = (bytes / (double) timeDiff) * 1000;
      if (bytesPerSec > this.bytesPerSec) {
        // Throttle
        double exceededByFactorOf = bytesPerSec / this.bytesPerSec;
        try {
          long mustSleep = (long) ((exceededByFactorOf - 1) * timeDiff);
          Thread.sleep(mustSleep);
        } catch (InterruptedException e) {
          e.printStackTrace();
        }
      }

      lastTime = System.currentTimeMillis();
    }
  /**
   * Moves files from HDFS to the local filesystem.
   *
   * @param localPath path on the local machine's filesystem to copy files into
   * @param preservePath path under which an existing local copy is moved before being overwritten
   * @param fs FileSystem object for HDFS
   * @param pathList list of paths for files that might need to be backed up
   * @param size maximum size in bytes to be backed up
   * @return modification time of the last file backed up if the size limit was reached, else zero
   */
  public long backupFiles(
      String localPath, String preservePath, FileSystem fs, ArrayList<Path> pathList, long size) {
    Path fsPath;
    long tmpSize = 0;
    long tmpDate = 0;

    // Start iterating over all paths
    for (Path hdfsPath : pathList) {
      try {
        long nFileSize = fs.getContentSummary(hdfsPath).getLength();
        tmpSize = tmpSize + nFileSize;

        if ((tmpSize <= size) || (size == 0)) {
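          // a size limit of 0 means no limit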
          FileStatus stat = fs.getFileStatus(hdfsPath);

          System.err.println(
              "File "
                  + hdfsPath.toUri().getPath()
                  + " "
                  + nFileSize
                  + " bytes, "
                  + "perms: "
                  + stat.getOwner()
                  + "/"
                  + stat.getGroup()
                  + ", "
                  + stat.getPermission().toString());

          tmpDate = stat.getModificationTime() / 1000;

          String sFsPath = localPath + hdfsPath.toUri().getPath();
          fsPath = new Path(sFsPath);

          File f = new File(sFsPath);

          // COMMENTED OUT: until a few backup cycles run
          // and the mtime gets in fact set on all copied
          // files.
          //
          // ignore it if the file exists and has the same mtime
          // if (f.exists() && f.isFile() && f.lastModified() == stat.getModificationTime())
          // {
          // System.out.println("no need to backup " + f.toString() + ", mtime matches hdfs");
          // continue;
          // }

          if (false == m_bDryRun) {
            // check if we need to back up the local file
            // (not directory), if it already exists.
            if (f.exists() && f.isFile()) {
              // ignore files with substrings in the
              // no-preserve file
              if (true == doPreserveFile(sFsPath)) {
                // move it to the backup path
                String sNewPath = preservePath + hdfsPath.toUri().getPath();
                File newFile = new File(sNewPath);

                // create directory structure for new file?
                if (false == newFile.getParentFile().exists()) {
                  if (false == newFile.getParentFile().mkdirs()) {
                    System.err.println("Failed to mkdirs " + newFile.getParentFile().toString());
                    System.exit(1);
                  }
                }

                // rename existing file to new location
                if (false == f.renameTo(newFile)) {
                  System.err.println(
                      "Failed to renameTo " + f.toString() + " to " + newFile.toString());
                  System.exit(1);
                }

                System.out.println("preserved " + f.toString() + " into " + newFile.toString());
              } else {
                System.out.println("skipped preservation of " + f.toString());
              }
            }

            // copy from hdfs to local filesystem
            fs.copyToLocalFile(hdfsPath, fsPath);

            // set the mtime to match hdfs file
            f.setLastModified(stat.getModificationTime());

            // compare checksums on both files
            compareChecksums(fs, hdfsPath, sFsPath);
          }

          // don't print the progress after every file -- go
          // by at least 1% increments
          long nPercentDone = (long) (100 * tmpSize / m_nTotalBytes);
          if (nPercentDone > m_nLastPercentBytesDone) {
            System.out.println(
                "progress: copied "
                    + prettyPrintBytes(tmpSize)
                    + ", "
                    + nPercentDone
                    + "% done"
                    + ", tstamp="
                    + tmpDate);

            m_nLastPercentBytesDone = nPercentDone;
          }

          if (m_nSleepSeconds > 0) {
            try {
              Thread.sleep(1000 * m_nSleepSeconds);
            } catch (Exception e2) {
              // ignore
            }
          }
        } else {
          return tmpDate;
        }
      } catch (IOException e) {
        System.err.println("FATAL ERROR: Something wrong with the file");
        System.err.println(e);
        System.out.println(tmpDate);
        System.exit(1);

        return 0;
      }
    }

    return 0;
  }