Code example #1
0
    /**
     * Decides whether the cache directory must be rebuilt before use.
     *
     * <p>Returns {@code true} when (a) the validator reports the cache invalid and no rebuild has
     * happened yet, (b) the lock was not released cleanly by the previous owner, or (c) any cache
     * property differs from the value persisted in the properties file.
     *
     * @param lock the file lock protecting this cache directory
     * @return true if the cache contents must be re-initialized
     */
    public boolean requiresInitialization(FileLock lock) {
      if (!didRebuild) {
        if (validator != null && !validator.isValid()) {
          LOGGER.debug(
              "Invalidating {} as cache validator return false.",
              DefaultPersistentDirectoryCache.this);
          return true;
        }
      }

      if (!lock.getUnlockedCleanly()) {
        // An unclean unlock in the initial state just means the cache was never built; only warn
        // when real content may have been corrupted.
        if (!lock.getState().isInInitialState()) {
          LOGGER.warn(
              "Invalidating {} as it was not closed cleanly.",
              DefaultPersistentDirectoryCache.this);
        }
        return true;
      }

      Properties cachedProperties = GUtil.loadProperties(propertiesFile);
      for (Map.Entry<?, ?> entry : properties.entrySet()) {
        String previousValue = cachedProperties.getProperty(entry.getKey().toString());
        String currentValue = entry.getValue().toString();
        // Compare from currentValue (never null): getProperty() returns null for a property that
        // was added since the cache was written, which previously caused an NPE here.
        if (!currentValue.equals(previousValue)) {
          LOGGER.debug(
              "Invalidating {} as cache property {} has changed from {} to {}.",
              DefaultPersistentDirectoryCache.this,
              entry.getKey(),
              previousValue,
              currentValue);
          return true;
        }
      }
      return false;
    }
Code example #2
0
File: HdfsSpout.java — project: CloudsDocker/storm
 /**
  * Releases the given file lock (if any) and logs the outcome. A null lock is a no-op;
  * release failures are logged rather than propagated.
  */
 private static void releaseLockAndLog(FileLock fLock, String spoutId) {
   if (fLock == null) {
     return;
   }
   try {
     fLock.release();
     LOG.debug("Spout {} released FileLock. SpoutId = {}", fLock.getLockFile(), spoutId);
   } catch (IOException e) {
     LOG.error("Unable to delete lock file : " + fLock.getLockFile() + " SpoutId =" + spoutId, e);
   }
 }
Code example #3
0
File: HdfsSpout.java — project: CloudsDocker/storm
  /**
   * If clocks are in sync, acquires the oldest expired lock. Otherwise, on the first call it only
   * remembers the oldest expired lock; on a later call it takes ownership of that lock if it has
   * not been updated in the meantime (a conservative two-phase check that works without
   * synchronized clocks).
   *
   * @return the acquired lock, or null if no lock could be acquired on this call
   * @throws IOException on HDFS access failure
   */
  private FileLock getOldestExpiredLock() throws IOException {
    // 1 - acquire lock on dir
    DirLock dirlock = DirLock.tryLock(hdfs, lockDirPath);
    if (dirlock == null) {
      // Dir lock is held by someone else; try to steal it only if that owner appears dead.
      dirlock = DirLock.takeOwnershipIfStale(hdfs, lockDirPath, lockTimeoutSec);
      if (dirlock == null) {
        LOG.debug("Spout {} could not take over ownership of DirLock for {}", spoutId, lockDirPath);
        return null;
      }
      LOG.debug(
          "Spout {} now took over ownership of abandoned DirLock for {}", spoutId, lockDirPath);
    } else {
      LOG.debug("Spout {} now owns DirLock for {}", spoutId, lockDirPath);
    }

    try {
      // 2 - if clocks are in sync then simply take ownership of the oldest expired lock
      if (clocksInSync) {
        return FileLock.acquireOldestExpiredLock(hdfs, lockDirPath, lockTimeoutSec, spoutId);
      }

      // 3 - if clocks are not in sync ..
      if (lastExpiredLock == null) {
        // just make a note of the oldest expired lock now and check if its still unmodified after
        // lockTimeoutSec
        lastExpiredLock = FileLock.locateOldestExpiredLock(hdfs, lockDirPath, lockTimeoutSec);
        lastExpiredLockTime = System.currentTimeMillis();
        return null;
      }
      // see if lockTimeoutSec time has elapsed since we last selected the lock file
      // NOTE(review): returning null when hasExpired(...) is true reads as inverted relative to
      // the comment above — verify hasExpired's semantics (it may return true while the timeout
      // has NOT yet elapsed, i.e. "candidate not yet ripe").
      if (hasExpired(lastExpiredLockTime)) {
        return null;
      }

      // If lock file has expired, then own it
      FileLock.LogEntry lastEntry = FileLock.getLastEntry(hdfs, lastExpiredLock.getKey());
      if (lastEntry.equals(lastExpiredLock.getValue())) {
        // Unchanged since we noted it: the owner is presumed dead, take the lock over.
        FileLock result =
            FileLock.takeOwnership(hdfs, lastExpiredLock.getKey(), lastEntry, spoutId);
        lastExpiredLock = null;
        return result;
      } else {
        // if lock file has been updated since last time, then leave this lock file alone
        lastExpiredLock = null;
        return null;
      }
    } finally {
      // Always give back the directory lock, whatever happened above.
      dirlock.release();
      LOG.debug("Released DirLock {}, SpoutID {} ", dirlock.getLockFile(), spoutId);
    }
  }
Code example #4
0
File: HdfsSpout.java — project: CloudsDocker/storm
  /**
   * Selects the next file to consume. First attempts to resume an abandoned file by claiming its
   * expired lock; failing that, scans the source directory oldest-first, locks the first eligible
   * file and renames it to its in-progress name. Returns null when nothing is available or on IO
   * failure (which is logged, not thrown).
   */
  private FileReader pickNextFile() {
    try {
      // 1) If there are any abandoned files, pick oldest one
      lock = getOldestExpiredLock();
      if (lock != null) {
        LOG.debug(
            "Spout {} now took over ownership of abandoned FileLock {}",
            spoutId,
            lock.getLockFile());
        Path abandoned = getFileForLockFile(lock.getLockFile(), sourceDirPath);
        String resumeFromOffset = lock.getLastLogEntry().fileOffset;
        LOG.info("Resuming processing of abandoned file : {}", abandoned);
        return createFileReader(abandoned, resumeFromOffset);
      }

      // 2) If no abandoned files, then pick oldest file in sourceDirPath, lock it and rename it
      for (Path candidate : HdfsUtils.listFilesByModificationTime(hdfs, sourceDirPath, 0)) {
        String fileName = candidate.getName();
        if (fileName.endsWith(inprogress_suffix) || fileName.endsWith(ignoreSuffix)) {
          continue; // already being processed, or explicitly marked to be skipped
        }
        lock = FileLock.tryLock(hdfs, candidate, lockDirPath, spoutId);
        if (lock == null) {
          LOG.debug("Unable to get FileLock for {}, so skipping it.", candidate);
          continue; // could not lock, so try another file.
        }
        try {
          Path inProgress = renameToInProgressFile(candidate);
          FileReader reader = createFileReader(inProgress);
          LOG.info("Processing : {} ", candidate);
          return reader;
        } catch (Exception e) {
          // Give the lock back so another spout (or a later attempt) can take this file.
          LOG.error("Skipping file " + candidate, e);
          releaseLockAndLog(lock, spoutId);
        }
      }

      return null;
    } catch (IOException e) {
      LOG.error("Unable to select next file for consumption " + sourceDirPath, e);
      return null;
    }
  }
Code example #5
0
 /**
  * Ends the current unit of work. The file lock is released when it was contended for, or when
  * it was held in Shared mode. Returns false when no lock is currently held.
  */
 private boolean onEndWork() {
   if (fileLock == null) {
     return false;
   }
   boolean mustRelease = contended || fileLock.getMode() == Shared;
   if (mustRelease) {
     closeFileLock();
   }
   return true;
 }
Code example #6
0
 /**
  * Stops the caches, pushes the final lock state to each of them, then closes the file lock.
  * The nested try/finally blocks guarantee that (a) the lock is closed even if stopping or
  * notifying the caches fails, and (b) the lock/state/contention fields are reset no matter
  * what — the exact ordering here is deliberate.
  */
 private void closeFileLock() {
   try {
     cacheClosedCount++;
     try {
       // Close the caches and then notify them of the final state, in case the caches do work on
       // close
       new CompositeStoppable().add(caches).stop();
       FileLock.State state = fileLock.getState();
       for (MultiProcessSafePersistentIndexedCache cache : caches) {
         cache.onEndWork(state);
       }
     } finally {
       // Release the OS-level lock even if stop()/onEndWork() threw above.
       fileLock.close();
     }
   } finally {
     // Reset bookkeeping unconditionally so a failed close cannot leave stale state behind.
     fileLock = null;
     stateAtOpen = null;
     contended = false;
   }
 }
Code example #7
0
 /**
  * Releases the file lock (if held) and closes the file channel (if open). Each step has its
  * own error handling so a failure to release the lock can no longer leak the channel (the
  * original skipped {@code fileChannel.close()} when {@code release()} threw). Failures are
  * logged rather than propagated; {@code fileChannel} is always cleared afterwards.
  */
 protected void releaseFileLockAndCloseFileChannel() {
   try {
     if (fileLock != null) {
       fileLock.release();
     }
   } catch (IOException e) {
     stringLogger.warn("Could not close [" + storageFileName + "]", e);
   } finally {
     try {
       if (fileChannel != null) {
         fileChannel.close();
       }
     } catch (IOException e) {
       stringLogger.warn("Could not close [" + storageFileName + "]", e);
     }
     fileChannel = null;
   }
 }
Code example #8
0
 /**
  * Rebuilds the cache directory: deletes everything in the base dir except the lock file and
  * the properties file, runs the optional init action, then persists the current cache
  * properties and records that a rebuild happened.
  *
  * @param fileLock the lock protecting this cache directory; its lock file is preserved
  */
 public void initialize(FileLock fileLock) {
   // listFiles() returns null when the directory does not exist or cannot be read — guard
   // against that instead of NPE-ing in the for-each.
   File[] contents = getBaseDir().listFiles();
   if (contents != null) {
     for (File file : contents) {
       if (fileLock.isLockFile(file) || file.equals(propertiesFile)) {
         continue;
       }
       GFileUtils.forceDelete(file);
     }
   }
   if (initAction != null) {
     initAction.execute(DefaultPersistentDirectoryCache.this);
   }
   GUtil.saveProperties(properties, propertiesFile);
   didRebuild = true;
 }
Code example #9
0
File: HdfsSpout.java — project: CloudsDocker/storm
 /** Commits the given progress into the lock file, but only once the commit threshold is reached. */
 private void commitProgress(FileOffset position) {
   if (position == null || lock == null || !canCommitNow()) {
     return;
   }
   try {
     String pos = position.toString();
     lock.heartbeat(pos);
     LOG.debug("{} Committed progress. {}", spoutId, pos);
     // Reset both commit triggers (ack count and elapsed-time timer).
     acksSinceLastCommit = 0;
     commitTimeElapsed.set(false);
     setupCommitElapseTimer();
   } catch (IOException e) {
     LOG.error("Unable to commit progress Will retry later. Spout ID = " + spoutId, e);
   }
 }
Code example #10
0
  /**
   * Begins a unit of work: takes an exclusive lock on the cache file, records the lock state at
   * open, notifies every participant cache, and registers for contention callbacks. Returns
   * false when a lock is already held (i.e. work has already started).
   */
  private boolean onStartWork() {
    if (fileLock != null) {
      return false; // already inside a unit of work
    }
    fileLock = lockManager.lock(
        lockFile, lockOptions.withMode(Exclusive), cacheDisplayName, operations.getDescription());
    stateAtOpen = fileLock.getState();
    for (UnitOfWorkParticipant participant : caches) {
      participant.onStartWork(operations.getDescription(), stateAtOpen);
    }
    // Let other processes ask us to hand the lock over.
    lockManager.allowContention(fileLock, whenContended());
    return true;
  }
Code example #11
0
 /**
  * Opens this cache access with the given lock mode. Calling this with {@link
  * org.gradle.cache.internal.FileLockManager.LockMode#Exclusive} will lock the cache for exclusive
  * access from all other threads (including those in this process and all other processes), until
  * {@link #close()} is called.
  *
  * @param lockOptions the lock mode and options to open with; a mode of {@code None} records the
  *     options but acquires no file lock
  * @throws IllegalStateException if the cache is already in use, or the file lock is already open
  */
 public void open(LockOptions lockOptions) {
   // In-process mutex first; the file lock below guards against other processes.
   lock.lock();
   try {
     if (owner != null) {
       throw new IllegalStateException(
           String.format("Cannot open the %s, as it is already in use.", cacheDisplayName));
     }
     // Remember the options even for LockMode.None, before the early return below.
     this.lockOptions = lockOptions;
     if (lockOptions.getMode() == FileLockManager.LockMode.None) {
       return;
     }
     if (fileLock != null) {
       throw new IllegalStateException("File lock " + lockFile + " is already open.");
     }
     // Order matters: acquire the lock, snapshot its state, claim ownership, and only then
     // allow other processes to contend for the lock.
     fileLock = lockManager.lock(lockFile, lockOptions, cacheDisplayName);
     stateAtOpen = fileLock.getState();
     takeOwnership(String.format("Access %s", cacheDisplayName));
     lockManager.allowContention(fileLock, whenContended());
   } finally {
     lock.unlock();
   }
 }