Code example #1
File: LastFileReader.java  Project: geniot/elex
  /**
   * Something is wrong with this file. If there is no data in this file (the file length is <= the
   * file header size), then move this last file aside and search the next "last" file. If the last
   * file does have data in it, rethrow the exception back to the application, since we're not sure
   * what to do now.
   *
   * @param cause is a DatabaseException or ChecksumException.
   */
  private Long attemptToMoveBadFile(Exception cause)
      throws IOException, ChecksumException, DatabaseException {

    String fileName = fileManager.getFullFileNames(window.currentFileNum())[0];
    File problemFile = new File(fileName);

    if (problemFile.length() <= FileManager.firstLogEntryOffset()) {
      fileManager.clear(); // close all existing files
      /* Move this file aside. */
      Long lastNum = fileManager.getFollowingFileNum(window.currentFileNum(), false);
      if (!fileManager.renameFile(window.currentFileNum(), FileManager.BAD_SUFFIX)) {
        throw EnvironmentFailureException.unexpectedState(
            "Could not rename file: 0x" + Long.toHexString(window.currentFileNum()));
      }

      return lastNum;
    }
    /* There's data in this file, throw up to the app. */
    if (cause instanceof DatabaseException) {
      throw (DatabaseException) cause;
    }
    if (cause instanceof ChecksumException) {
      throw (ChecksumException) cause;
    }
    throw EnvironmentFailureException.unexpectedException(cause);
  }
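The same idea, moving aside a log file that holds nothing beyond its header, can be sketched with only standard JDK file APIs. A minimal sketch: the ".bad" suffix, the header-size constant, and the helper name below are hypothetical stand-ins for the FileManager values used above.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class BadFileMover {

  /** Hypothetical stand-in for FileManager.firstLogEntryOffset(). */
  private static final long FILE_HEADER_SIZE = 38;

  /**
   * If the file holds nothing beyond its header, rename it aside with a ".bad" suffix and report
   * success; otherwise leave it alone so the caller can rethrow the original exception.
   */
  static boolean moveAsideIfEmpty(Path logFile) throws IOException {
    if (Files.size(logFile) > FILE_HEADER_SIZE) {
      return false; // Real data present; let the caller propagate the original error.
    }
    Path badFile = logFile.resolveSibling(logFile.getFileName() + ".bad");
    Files.move(logFile, badFile, StandardCopyOption.ATOMIC_MOVE);
    return true;
  }
}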
Code example #2
File: VLSNReader.java  Project: lzimm/qluster
  void setPosition(long startLsn)
      throws ChecksumException, FileNotFoundException, DatabaseException {

    if (startLsn == DbLsn.NULL_LSN) {
      return;
    }

    /*
     * An assertion: a reposition should never make the reader lose ground.
     */
    if (forward) {
      if (DbLsn.compareTo(getLastLsn(), startLsn) > 0) {
        throw EnvironmentFailureException.unexpectedState(
            "Feeder forward scanning should not be repositioned to "
                + " a position earlier than the current position. Current"
                + " lsn = "
                + DbLsn.getNoFormatString(getLastLsn())
                + " reposition = "
                + DbLsn.getNoFormatString(startLsn));
      }
    } else {
      if (DbLsn.compareTo(getLastLsn(), startLsn) < 0) {
        throw EnvironmentFailureException.unexpectedState(
            "Feeder backward scanning should not be repositioned to "
                + " a position later than the current position. Current"
                + " lsn = "
                + DbLsn.getNoFormatString(getLastLsn())
                + " reposition = "
                + DbLsn.getNoFormatString(startLsn));
      }
    }

    long fileNum = DbLsn.getFileNumber(startLsn);
    long offset = DbLsn.getFileOffset(startLsn);

    if (window.containsLsn(fileNum, offset)) {
      window.positionBuffer(offset);
    } else {
      window.slideAndFill(fileNum, offset, offset);
    }

    if (forward) {
      nextEntryOffset = offset;
    } else {
      currentEntryPrevOffset = offset;
    }
    nReposition++;
  }
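setPosition splits the startLsn into a file number and a file offset via DbLsn. A minimal sketch of that style of packing, assuming, purely for illustration, a 32-bit file number in the high half and a 32-bit offset in the low half; this is not necessarily JE's actual DbLsn layout.

/** Illustrative LSN packing: file number in the high 32 bits, offset in the low 32 bits. */
final class LsnSketch {

  static long makeLsn(long fileNum, long offset) {
    return (fileNum << 32) | (offset & 0xFFFFFFFFL);
  }

  static long getFileNumber(long lsn) {
    return lsn >>> 32;
  }

  static long getFileOffset(long lsn) {
    return lsn & 0xFFFFFFFFL;
  }

  public static void main(String[] args) {
    long lsn = makeLsn(0x3, 0x1A2B);
    // Round-trips: the packed value decodes back to the original file number and offset.
    System.out.println("file=" + getFileNumber(lsn) + " offset=" + getFileOffset(lsn));
  }
}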
Code example #3
    /*
     * Reposition to the specified file, and fill starting at
     * startOffset. Position the window's buffer to point at the log entry
     * indicated by targetOffset.
     */
    public void slideAndFill(
        long windowfileNum, long windowStartOffset, long targetOffset, boolean forward)
        throws ChecksumException, FileNotFoundException, DatabaseException {

      FileHandle fileHandle = fileManager.getFileHandle(windowfileNum);
      try {
        startOffset = windowStartOffset;
        setFileNum(windowfileNum, fileHandle.getLogVersion());
        boolean foundData = fillFromFile(fileHandle, targetOffset);

        /*
         * When reading backwards, we need to guarantee there is no log
         * gap; throw an EnvironmentFailureException if one exists.
         */
        if (!foundData && !forward) {
          throw EnvironmentFailureException.unexpectedState(
              "Detected a log file gap when reading backwards. "
                  + "Target position = "
                  + DbLsn.getNoFormatString(DbLsn.makeLsn(windowfileNum, targetOffset))
                  + " starting position = "
                  + DbLsn.getNoFormatString(DbLsn.makeLsn(windowfileNum, windowStartOffset))
                  + " end position = "
                  + DbLsn.getNoFormatString(DbLsn.makeLsn(windowfileNum, endOffset)));
        }
      } finally {
        fileHandle.release();
      }
    }
Code example #4
File: LogFileFeeder.java  Project: prat0318/dbms
  /**
   * Returns the SHA1 hash associated with the file.
   *
   * @param file the file to digest
   * @param length the number of leading bytes of the file to include in the digest
   * @return the message digest computed over the first {@code length} bytes of the file
   * @throws IOException
   * @throws DatabaseException
   */
  static MessageDigest getSHA1Digest(File file, long length) throws IOException, DatabaseException {

    MessageDigest messageDigest = null;

    try {
      messageDigest = MessageDigest.getInstance("SHA1");
    } catch (NoSuchAlgorithmException e) {
      throw EnvironmentFailureException.unexpectedException(e);
    }
    final FileInputStream fileStream = new FileInputStream(file);
    try {
      ByteBuffer buffer = ByteBuffer.allocate(TRANSFER_BYTES);
      for (long bytes = length; bytes > 0; ) {
        int readSize = (int) Math.min(TRANSFER_BYTES, bytes);
        int readBytes = fileStream.read(buffer.array(), 0, readSize);
        if (readBytes == -1) {
          throw new IOException("Premature EOF. Was expecting: " + readSize);
        }
        messageDigest.update(buffer.array(), 0, readBytes);
        bytes -= readBytes;
      }
    } finally {
      fileStream.close();
    }
    return messageDigest;
  }
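The same "hash only the first length bytes" loop can be written against the JDK's DigestInputStream, which updates the digest as a side effect of read(). A self-contained sketch, not the feeder's own helper; the class and method names are illustrative.

import java.io.FileInputStream;
import java.io.IOException;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class PrefixDigest {

  /** Hashes the first {@code length} bytes of {@code fileName} with SHA-1. */
  static byte[] sha1Prefix(String fileName, long length)
      throws IOException, NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("SHA-1");
    try (DigestInputStream in = new DigestInputStream(new FileInputStream(fileName), md)) {
      byte[] buffer = new byte[8192];
      long remaining = length;
      while (remaining > 0) {
        int read = in.read(buffer, 0, (int) Math.min(buffer.length, remaining));
        if (read == -1) {
          throw new IOException("Premature EOF, " + remaining + " bytes still expected");
        }
        remaining -= read; // DigestInputStream feeds every byte read into the digest.
      }
    }
    return md.digest();
  }
}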
Code example #5
File: ReplayTxn.java  Project: geniot/elex
  /**
   * Rollback all write operations that are logged with an LSN > the matchpointLsn parameter. This
   * is logically a truncation of the log entries written by this transaction. Any log entries
   * created by this transaction are marked obsolete.
   *
   * <p>Note that this is by no means a complete implementation of what would be needed to support
   * user visible savepoints. This method only rolls back write operations and doesn't handle other
   * types of state, like read locks and open cursors.
   *
   * <p>There are several key assumptions: - the transaction does not hold read locks. - the
   * transaction will either be resumed, and any rolled back operations will be repeated, or the
   * transaction will be aborted in its entirety.
   *
   * <p>If all operations in the transaction are rolled back, this transaction is also unregistered
   * and closed.
   *
   * <p>Rolling back a log entry through rollback is akin to truncating the transactional log. The
   * on-disk entries should not be referred to by anything in the in-memory tree or the transaction
   * chain. JE's append-only storage, and the fact that the transactional log entries are intertwined
   * through the physical log, prohibit any log truncation. To mimic log truncation, any rolled-back
   * log entry is marked as obsolete. Since only the last version of any data record is alive, any
   * future uses of this transaction must use the obsoleteDupsAllowed option (see
   * Txn.countObsoleteExact) to prevent asserts about duplicate obsolete offsets. For example,
   * suppose the transaction logs this:
   *
   * <p>100 LNa (version1)
   *     200 LNa (version2)
   *     300 LNa (version3)
   *
   * <p>At this point in time, LSN 100 and 200 are obsolete.
   *
   * <p>Now, suppose we roll back to LSN 100. LSNs 200 and 300 are marked obsolete by the
   * rollback (although LSN 200 was already obsolete). It is true that for an instant in time LSN
   * 100 is incorrectly marked as obsolete, when it's really alive. But this transaction is going to
   * either abort or resume exactly as it was before, so LSN 100 is going to be obsolete again.
   *
   * <p>Suppose txn.abort() is called. The abort() logic will mark LSN 100 as obsolete, since it is
   * the latest version of the record in the transaction. Using the obsoleteDupsAllowed option
   * avoids an assertion on the double recording of LSN 100.
   *
   * <p>Alternatively, suppose LNa (version2) is retransmitted and logged as LSN 400. Normal
   * execution of LN.log() marks LSN 100 as obsolete, which would trigger the assertion were it not
   * for obsoleteDupsAllowed.
   *
   * @return list of LSNs that were rolled back
   */
  public Collection<Long> rollback(long matchpointLsn) throws DatabaseException {

    List<Long> rollbackLsns = new ArrayList<Long>();
    LoggerUtils.finest(logger, envImpl, "Partial Rollback of " + this);
    synchronized (this) {
      checkState(true);

      /* This transaction didn't log anything; there is nothing to roll back. */
      if (lastLoggedLsn == NULL_LSN) {
        return rollbackLsns;
      }

      /*
       * This transaction doesn't include any operations that are after
       * the matchpointLsn. There is nothing to roll back.
       */
      if (DbLsn.compareTo(lastLoggedLsn, matchpointLsn) <= 0) {
        return rollbackLsns;
      }

      setRollback();
      undoWrites(matchpointLsn, rollbackLsns);
    }

    /*
     * The call to undoWrites() may have rolled everything back, and set
     * lastLoggedLsn to NULL_LSN.
     */
    if (lastLoggedLsn == NULL_LSN) {
      /* Everything was rolled back. */
      try {

        /*
         * Purge any databaseImpls not needed as a result of the abort.
         * Be sure to do this outside the synchronization block, to
         * avoid conflict w/checkpointer.
         */
        cleanupDatabaseImpls(false);
      } finally {
        close(false /* isCommit */);
      }
    }

    /*
     * We don't expect there to be any database handles associated with
     * a ReplayTxn, because only DatabaseImpls are used. Because of that,
     * there should be no cleanup needed.
     */
    if (handleToHandleLockMap != null) {
      throw EnvironmentFailureException.unexpectedState(
          "Replay Txn " + getId() + " has a handleToHandleLockMap");
    }

    /*
     * There is no need to call cleanupDatabaseImpls if the txn still holds
     * locks. The operations in this txn will either be entirely aborted,
     * or will be repeated, so any cleanup will happen when the txn ends.
     */
    return rollbackLsns;
  }
Code example #6
File: NameIdPair.java  Project: prat0318/dbms
 public void update(NameIdPair other) {
   if (!name.equals(other.getName())) {
     throw EnvironmentFailureException.unexpectedState(
         "Pair name mismatch: " + name + " <> " + other.getName());
   }
   setId(other.getId());
 }
Code example #7
File: NameIdPair.java  Project: prat0318/dbms
 public NameIdPair(String name, int id) {
   if (name == null) {
     throw EnvironmentFailureException.unexpectedState("name argument was null");
   }
   this.name = name;
   this.id = id;
 }
Code example #8
File: ReplayTxn.java  Project: geniot/elex
  /**
   * Commits the txn being replayed.
   *
   * @param syncPolicy to be used for the commit.
   * @param clientRepContext the replication context; it encapsulates the VLSN associated with the
   *     txn.
   * @return the commit LSN
   * @throws DatabaseException
   */
  public long commit(
      SyncPolicy syncPolicy, ReplicationContext clientRepContext, int commitMasterNodeId)
      throws DatabaseException {

    LoggerUtils.fine(logger, envImpl, "commit called for " + getId());
    setRepContext(clientRepContext);
    Durability durability = null;
    if (syncPolicy == SyncPolicy.SYNC) {
      durability = Durability.COMMIT_SYNC;
    } else if (syncPolicy == SyncPolicy.NO_SYNC) {
      durability = Durability.COMMIT_NO_SYNC;
    } else if (syncPolicy == SyncPolicy.WRITE_NO_SYNC) {
      durability = Durability.COMMIT_WRITE_NO_SYNC;
    } else {
      throw EnvironmentFailureException.unexpectedState("Unknown sync policy: " + syncPolicy);
    }

    /*
     * Set the master id before commit is called, so getReplicatorNodeId()
     * will return this value and write the originating node's id into
     * the commit record on this log.
     */
    this.masterNodeId = commitMasterNodeId;
    long lsn = super.commit(durability);
    endTime = System.currentTimeMillis();

    return lsn;
  }
Code example #9
  /** @return true if the current entry is part of the replication stream. */
  public boolean entryIsReplicated() {

    if (currentEntryHeader == null) {
      throw EnvironmentFailureException.unexpectedState(
          "entryIsReplicated should not be used before reader is " + "initialized");
    }
    return currentEntryHeader.getReplicated();
  }
Code example #10
  /*
   * Tests internal node removal APIs.
   */
  @Test
  public void testRemoveMember() {
    createGroup(groupSize);
    ReplicatedEnvironment master = repEnvInfo[0].getEnv();
    assertTrue(master.getState().isMaster());

    RepNode masterRep = repEnvInfo[0].getRepNode();

    /* Reduce the group size all the way down to one. */
    for (int i = 1; i < groupSize; i++) {
      assertTrue(!RepInternal.isClosed(repEnvInfo[i].getEnv()));
      masterRep.removeMember(repEnvInfo[i].getEnv().getNodeName());
      assertEquals((groupSize - i), masterRep.getGroup().getElectableGroupSize());
    }

    /* Close the replica handles. */
    for (int i = groupSize - 1; i > 0; i--) {
      repEnvInfo[i].closeEnv();
    }

    /* Attempting to re-open them with the same node names should fail. */
    for (int i = 1; i < groupSize; i++) {
      try {
        repEnvInfo[i].openEnv();
        fail("Exception expected");
      } catch (EnvironmentFailureException e) {
        /* Expected, the master should reject the attempt. */
        assertEquals(EnvironmentFailureReason.HANDSHAKE_ERROR, e.getReason());
      }
    }

    /* Doing the same but with different node names should be ok. */
    for (int i = 1; i < groupSize; i++) {
      final RepEnvInfo ri = repEnvInfo[i];
      final ReplicationConfig repConfig = ri.getRepConfig();
      TestUtils.removeLogFiles("RemoveRepEnvironments", ri.getEnvHome(), false);

      repConfig.setNodeName("ReplaceNode_" + i);
      ri.openEnv();
      assertEquals(i + 1, masterRep.getGroup().getElectableGroupSize());
    }
    master.close();
  }
Code example #11
File: ReplayTxn.java  Project: geniot/elex
 public void setLastAppliedVLSN(VLSN justApplied) {
   if (justApplied.compareTo(lastApplied) <= 0) {
     throw EnvironmentFailureException.unexpectedState(
         "Txn "
             + getId()
             + " attempted VLSN = "
             + justApplied
             + " txnLastApplied = "
             + lastApplied);
   }
   this.lastApplied = justApplied;
 }
Code example #12
File: SharedLatch.java  Project: prat0318/dbms
  /**
   * Acquire a latch for exclusive/write access. If the thread already holds the latch for shared
   * access, it cannot be upgraded and EnvironmentFailureException will be thrown.
   *
   * <p>Wait for the latch if some other thread is holding it. If there are threads waiting for
   * access, they will be granted the latch on a FIFO basis if fair latches are enabled (as of JE
   * 5.0 they are never enabled). When the method returns, the latch is held for exclusive access.
   *
   * @throws EnvironmentFailureException if the latch is already held by the current thread for
   *     shared access.
   */
  public void acquireExclusive() {
    try {
      if (isWriteLockedByCurrentThread()) {
        throw EnvironmentFailureException.unexpectedState("Latch already held: " + name);
      }

      writeLock().lock();

      assert (noteLatch ? noteLatch() : true); // intentional side effect;
    } finally {
      assert EnvironmentImpl.maybeForceYield();
    }
  }
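isWriteLockedByCurrentThread() and writeLock() are ReentrantReadWriteLock methods, so the latch appears to be built on that JDK lock. A minimal stand-alone sketch of the same guard against a second exclusive acquisition by the owning thread; IllegalStateException stands in for EnvironmentFailureException, and the class name is made up.

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ExclusiveGuardSketch {

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final String name;

  ExclusiveGuardSketch(String name) {
    this.name = name;
  }

  /** Acquires the write lock, refusing a second exclusive acquisition by the same thread. */
  void acquireExclusive() {
    if (lock.isWriteLockedByCurrentThread()) {
      // JE throws EnvironmentFailureException.unexpectedState here; a plain exception stands in.
      throw new IllegalStateException("Latch already held: " + name);
    }
    lock.writeLock().lock();
  }

  void release() {
    if (lock.isWriteLockedByCurrentThread()) {
      lock.writeLock().unlock();
    }
  }
}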
Code example #13
  /**
   * Converts the given DBIN slot, leaving bin/index set to the inserted BIN slot.
   *
   * <p>Enter/leave with bin field latched, although bin field may change.
   *
   * <p>If slot is inserted into current bin, leave bin field unchanged and set index field to
   * inserted slot.
   *
   * <p>If slot is inserted into a different bin, set bin/index fields to inserted slot.
   */
  private void convertDbinSlot(final DBIN dbin, final int dbinIndex, final byte[] binKey) {

    final byte[] newKey = DupKeyData.replaceData(binKey, dbin.getKey(dbinIndex));

    if (DEBUG) {
      System.out.println("DupConvert DBIN LN " + Key.dumpString(newKey, 0));
    }

    /*
     * If the current BIN can hold the new slot, don't bother to do a
     * search to find it.
     */
    if (bin.needsSplitting() || !bin.isKeyInBounds(newKey)) {

      /* Compact keys after finishing with a BIN. */
      bin.compactMemory();

      /* Evict without latches, before moving to a new BIN. */
      bin.releaseLatch();
      envImpl.daemonEviction(false /*backgroundIO*/);

      /* Find a BIN for insertion, split if necessary. */
      bin = dbin.getDatabase().getTree().searchSplitsAllowed(newKey, CacheMode.UNCHANGED);
    }

    final int newIndex =
        bin.insertEntry1(
            null /*ln*/,
            newKey,
            null /*data*/,
            dbin.getLsn(dbinIndex),
            dbin.getState(dbinIndex),
            false);

    if ((newIndex & IN.INSERT_SUCCESS) == 0) {
      throw EnvironmentFailureException.unexpectedState(
          "Key not inserted: " + Key.dumpString(newKey, 0) + " DB: " + dbin.getDatabase().getId());
    }

    index = newIndex & ~IN.INSERT_SUCCESS;

    /*
     * Evict LN from DBIN slot. Although we don't explicitly load DBIN LNs,
     * it may have been loaded by recovery.
     */
    dbin.detachNode(dbinIndex, false /*updateLsn*/, -1 /*lsn*/);

    nConverted += 1;
  }
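insertEntry1 evidently packs a success flag into the returned slot index: the IN.INSERT_SUCCESS bit is tested and then masked off to recover the index. A tiny illustration of that return-value encoding with a made-up flag constant; it mirrors the decode above but is not JE's IN class.

public class PackedResultSketch {

  /** Hypothetical flag bit, analogous to IN.INSERT_SUCCESS. */
  static final int INSERT_SUCCESS = 1 << 31;

  static int encode(boolean success, int index) {
    return success ? (index | INSERT_SUCCESS) : index;
  }

  public static void main(String[] args) {
    int result = encode(true, 42);
    boolean success = (result & INSERT_SUCCESS) != 0; // Test the flag bit.
    int index = result & ~INSERT_SUCCESS;             // Mask it off to recover the slot index.
    System.out.println("success=" + success + " index=" + index); // success=true index=42
  }
}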
Code example #14
File: SharedLatch.java  Project: prat0318/dbms
  /**
   * Probe a latch for exclusive access, but don't block if it's not available.
   *
   * @return true if the latch was acquired, false if it is not available.
   * @throws EnvironmentFailureException if the latch is already held by the calling thread.
   */
  public boolean acquireExclusiveNoWait() {
    try {
      if (isWriteLockedByCurrentThread()) {
        throw EnvironmentFailureException.unexpectedState("Latch already held: " + name);
      }

      boolean ret = writeLock().tryLock();

      /* Intentional side effect. */
      assert ((noteLatch & ret) ? noteLatch() : true);
      return ret;
    } finally {
      assert EnvironmentImpl.maybeForceYield();
    }
  }
Code example #15
File: LockManager.java  Project: lzimm/qluster
  /**
   * Attempt to acquire a lock of <i>type</i> on <i>nodeId</i>. If the lock acquisition would result
   * in a deadlock, throw an exception.<br>
   * If the requested lock is not currently available, block until it is or until timeout
   * milliseconds have elapsed.<br>
   * If a lock of <i>type</i> is already held, return EXISTING.<br>
   * If a WRITE lock is held and a READ lock is requested, return PROMOTION.<br>
   * If a lock request is for a lock that is not currently held, return either NEW or DENIED
   * depending on whether the lock is granted or not.<br>
   *
   * @param nodeId The NodeId to lock.
   * @param locker The Locker to lock this on behalf of.
   * @param type The lock type requested.
   * @param timeout milliseconds to time out after if lock couldn't be obtained. 0 means block
   *     indefinitely. Not used if nonBlockingRequest is true.
   * @param nonBlockingRequest if true, means don't block if lock can't be acquired, and ignore the
   *     timeout parameter.
   * @return a LockGrantType indicating whether the request was fulfilled or not. LockGrantType.NEW
   *     means the lock grant was fulfilled and the caller did not previously hold the lock.
   *     PROMOTION means the lock was granted and it was a promotion from READ to WRITE. EXISTING
   *     means the lock was already granted (not a promotion). DENIED means the lock was not granted
   *     because the timeout passed without acquiring the lock or timeout was 0 and the lock was not
   *     immediately available.
   * @throws LockConflictException if lock could not be acquired.
   * @throws IllegalArgumentException via db/cursor read/write methods, if non-transactional access
   *     to a replicated environment is attempted, and read-uncommitted is not specified.
   */
  public LockGrantType lock(
      long nodeId,
      Locker locker,
      LockType type,
      long timeout,
      boolean nonBlockingRequest,
      DatabaseImpl database)
      throws LockConflictException, DatabaseException {

    assert timeout >= 0;

    /* No lock needed for dirty-read, return as soon as possible. */
    if (type == LockType.NONE) {
      return LockGrantType.NONE_NEEDED;
    }

    /*
     * Assert that a replication-defined locker is used for locks on
     * replicated databases.  Two cases are exempt from this rule:
     * - Only NameLNs that identify replicated DBs are replicated, not
     *   all NameLNs in the naming DB, so the naming DB is exempt.
     * - Non-preemption is permissible for selected internal operations
     *   because we can ensure that they are not long running and will not
     *   hold locks interminably.  A BasicLocker is often used internally
     *   in such cases.
     */
    if (envImpl.isReplicated()
        && database != null
        && database.isReplicated()
        && !database.getId().equals(DbTree.NAME_DB_ID)
        && (locker.getPreemptable() || type.isWriteLock())
        && !locker.isReplicationDefined()) {
      throw EnvironmentFailureException.unexpectedState("Locker: " + locker.getClass().getName());
    }

    /*
     * Lock on locker before latching the lockTable to avoid having another
     * notifier perform the notify before the waiter is actually waiting.
     */
    synchronized (locker) {
      return lockInternal(nodeId, locker, type, timeout, nonBlockingRequest, database);
    }
  }
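The timeout and nonBlockingRequest contract described in the javadoc maps onto a familiar JDK pattern: probe once, wait with a bound, or wait indefinitely. A sketch with ReentrantLock, offered only as an analogy for that behavior; it models neither lock types, promotion, nor deadlock detection, and the names are invented.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class LockRequestSketch {

  enum Grant { NEW, DENIED }

  /**
   * Non-blocking request: probe once. timeout == 0: wait indefinitely.
   * Otherwise: wait up to {@code timeoutMs} milliseconds.
   */
  static Grant request(ReentrantLock lock, long timeoutMs, boolean nonBlockingRequest)
      throws InterruptedException {
    final boolean acquired;
    if (nonBlockingRequest) {
      acquired = lock.tryLock();
    } else if (timeoutMs == 0) {
      lock.lock();
      acquired = true;
    } else {
      acquired = lock.tryLock(timeoutMs, TimeUnit.MILLISECONDS);
    }
    return acquired ? Grant.NEW : Grant.DENIED;
  }
}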
Code example #16
File: LogFileFeeder.java  Project: prat0318/dbms
  public LogFileFeeder(FeederManager feederManager, SocketChannel channel)
      throws DatabaseException {
    super(feederManager.getEnvImpl(), "Log File Feeder");

    this.feederManager = feederManager;
    logger = feederManager.logger;
    this.namedChannel = new NamedChannel(channel, feederManager.nameIdPair);

    try {
      messageDigest = MessageDigest.getInstance("SHA1");
    } catch (NoSuchAlgorithmException e) {
      LoggerUtils.severe(
          logger,
          feederManager.getEnvImpl(),
          "The SHA1 algorithm was not made available " + "by the security provider");
      throw EnvironmentFailureException.unexpectedException(e);
    }
  }
Code example #17
File: NameIdPair.java  Project: prat0318/dbms
 @Override
 public boolean equals(Object obj) {
   if (this == obj) {
     return true;
   }
   if (obj == null) {
     return false;
   }
   if (getClass() != obj.getClass()) {
     return false;
   }
   NameIdPair other = (NameIdPair) obj;
   if (id != other.id) {
     return false;
   }
   if (!name.equals(other.name)) {
     throw EnvironmentFailureException.unexpectedState(
         "Ids: " + id + " were equal." + " But names: " + name + ", " + other.name + " weren't!");
   }
   return true;
 }
Code example #18
  /**
   * Transfer a lock from another transaction to this one. Used for master -> replica transitions,
   * when a node has to transform a MasterTxn into a ReplayTxn. Another approach would be to have
   * this importunate ReplayTxn call lock() on the lsn, but that path is not available because we do
   * not have a handle on a databaseImpl.
   */
  public void stealLockFromMasterTxn(Long lsn) {

    LockAttemptResult result = lockManager.stealLock(lsn, this, LockType.WRITE);

    /*
     * Assert, and if something strange happened, opt to invalidate
     * the environment and wipe the slate clean.
     */
    if (!result.success) {
      throw EnvironmentFailureException.unexpectedState(
          envImpl,
          "Transferring from master to replica state, txn "
              + getId()
              + " was unable to transfer lock for "
              + DbLsn.getNoFormatString(lsn)
              + ", lock grant type="
              + result.lockGrant);
    }

    addLock(Long.valueOf(lsn), LockType.WRITE, result.lockGrant);
    addLogInfo(lsn);
  }
Code example #19
File: SharedLatch.java  Project: prat0318/dbms
  /**
   * Release an exclusive or shared latch. If there are other thread(s) waiting for the latch, they
   * are woken up and granted the latch.
   */
  public void release() {
    try {
      if (isWriteLockedByCurrentThread()) {
        writeLock().unlock();
        /* Intentional side effect. */
        assert (noteLatch ? unNoteLatch() : true);
        return;
      }

      if (exclusiveOnly) {
        return;
      }

      boolean assertionsEnabled = false;
      assert assertionsEnabled = true; // Intentional side effect: true only when -ea is enabled.
      if (assertionsEnabled) {
        if (readers.remove(Thread.currentThread())) {
          readLock().unlock();
        } else {
          throw EnvironmentFailureException.unexpectedState("Latch not held: " + name);
        }
      } else {

        /*
         * There's no way to tell if a read lock is held by the current
         * thread, so just try unlocking it.
         */
        readLock().unlock();
      }
      /* Intentional side effect. */
      assert (noteLatch ? unNoteLatch() : true);
    } catch (IllegalMonitorStateException IMSE) {
      IMSE.printStackTrace();
      return;
    }
  }
Code example #20
 /** @see Loggable#getLogSize */
 @Override
 public int getLogSize() {
   throw EnvironmentFailureException.unexpectedState();
 }
Code example #21
File: NameIdPair.java  Project: prat0318/dbms
 public void setId(int id) {
   if ((id != this.id) && !hasNullId()) {
     throw EnvironmentFailureException.unexpectedState("Id was already not null: " + this.id);
   }
   this.id = id;
 }
Code example #22
File: ReplayTxn.java  Project: geniot/elex
 @Override
 @SuppressWarnings("unused")
 public long commit(Durability durability) {
   throw EnvironmentFailureException.unexpectedState(
       "Replay Txn abort semantics require use of internal commit api");
 }
Code example #23
File: NameIdPair.java  Project: prat0318/dbms
 @Override
 @SuppressWarnings("unused")
 public void setId(int id) {
   throw EnvironmentFailureException.unexpectedState("Read only NameIdPair");
 }
Code example #24
File: ReplayTxn.java  Project: geniot/elex
 @Override
 public void abort() {
   throw EnvironmentFailureException.unexpectedState(
       "Replay Txn abort semantics require use of internal abort api");
 }
Code example #25
File: ReplayTxn.java  Project: geniot/elex
 @Override
 @SuppressWarnings("unused")
 public long abort(boolean forceFlush) {
   throw EnvironmentFailureException.unexpectedState(
       "Replay Txn abort semantics require use of internal abort api");
 }
Code example #26
File: ReplayTxn.java  Project: geniot/elex
  /**
   * Roll back the changes to this txn's write-locked nodes up to but not including the entry at the
   * specified matchpoint. When we log a transactional entry, we record the LSN of the original,
   * before-this-transaction version as the abort LSN. This means that if there are multiple updates
   * to a given record in a single transaction, each update only references that original version
   * and its true predecessor.
   *
   * <p>This was done to streamline abort processing, so that an undo reverts directly to the
   * original version rather than stepping through all the intermediates. The intermediates are
   * skipped. However, undo to a matchpoint may need to stop at an intermediate point, so we need to
   * create a true chain of versions.
   *
   * <p>To do so, we read the transaction backwards from the last logged LSN to reconstruct a
   * transaction chain that links intermediate versions of records. For example, suppose our
   * transaction looks like this and that we are undoing up to LSN 250
   *
   * <p>lsn=100 node=A (version 1)
   *    lsn=200 node=B (version 1) <-- matchpointLsn
   *    lsn=300 node=C (version 1)
   *    lsn=400 node=A (version 2)
   *    lsn=500 node=B (version 2)
   *    lsn=600 node=A (version 3)
   *    lsn=700 node=A (version 4)
   *
   * <p>To set up the old versions, we walk from LSN 700 -> 100:
   *    700 (A) rolls back to 600
   *    600 (A) rolls back to 400
   *    500 (B) rolls back to 200
   *    400 (A) rolls back to 100
   *    300 (C) rolls back to an empty slot (NULL_LSN).
   *
   * <p>A partial rollback also requires resetting the lastLoggedLsn field, because these operations
   * are no longer in the btree and their on-disk entries are no longer valid.
   *
   * <p>Lastly, the appropriate write locks must be released.
   *
   * @param matchpointLsn the rollback should go up to but not include this LSN.
   */
  private void undoWrites(long matchpointLsn, List<Long> rollbackLsns) throws DatabaseException {

    /*
     * Generate a map of nodeId -> list of intermediate LSNs for each node,
     * to re-create the transaction chain.
     */
    TreeLocation location = new TreeLocation();
    Long undoLsn = lastLoggedLsn;
    TxnChain chain = new TxnChain(undoLsn, id, matchpointLsn, undoDatabases, envImpl);

    try {
      while ((undoLsn != DbLsn.NULL_LSN) && DbLsn.compareTo(undoLsn, matchpointLsn) > 0) {

        UndoReader undo = new UndoReader(envImpl, undoLsn, undoDatabases);

        RevertInfo revertTo = chain.pop();

        logFinest(undoLsn, undo, revertTo);

        /*
         * When we undo this log entry, we've logically truncated
         * it from the log. Remove it from the btree and mark it
         * obsolete.
         */
        RecoveryManager.rollbackUndo(logger, Level.FINER, undo, revertTo, location, undoLsn);

        countObsoleteInexact(undoLsn, undo);
        rollbackLsns.add(undoLsn);

        /*
         * Move on to the previous log entry for this txn and update
         * what is considered to be the end of the transaction chain.
         */
        undoLsn = undo.logEntry.getUserTxn().getLastLsn();
        lastLoggedLsn = undoLsn;
      }

      /*
       * Correct the fields which hold LSN and VLSN state that may
       * now be changed.
       */
      lastApplied = chain.getLastValidVLSN();
      if (!updateLoggedForTxn()) {
        firstLoggedLsn = NULL_LSN;
      }

    } catch (DatabaseException e) {
      LoggerUtils.traceAndLogException(
          envImpl, "Txn", "undo", "For LSN=" + DbLsn.getNoFormatString(undoLsn), e);
      throw e;
    } catch (RuntimeException e) {
      throw EnvironmentFailureException.unexpectedException(
          "Txn undo for LSN=" + DbLsn.getNoFormatString(undoLsn), e);
    }

    if (lastLoggedLsn == DbLsn.NULL_LSN) {
      /*
       * The whole txn is rolled back, and it may not appear again. This
       * is the equivalent of an abort. Do any delete processing for an
       * abort which is needed.
       *
       * Set database state for deletes before releasing any write
       * locks.
       */
      setDeletedDatabaseState(false);
    }

    /* Clear any write locks that are no longer needed. */
    clearWriteLocks(chain.getRemainingLockedNodes());
  }
Code example #27
File: LogFileFeeder.java  Project: prat0318/dbms
  /**
   * Send files in response to request messages. The request sequence looks like the following:
   *
   * <p>[FileReq | StatReq]+ Done
   *
   * <p>The response sequence to a FileReq looks like:
   *
   * <p>FileStart <file byte stream> FileEnd
   *
   * <p>and the response to a StatReq is simply a StatResp.
   */
  private void sendRequestedFiles(Protocol protocol)
      throws IOException, ProtocolException, DatabaseException {

    try {
      while (true) {
        FileReq fileReq = protocol.read(namedChannel.getChannel(), FileReq.class);
        final String fileName = fileReq.getFileName();

        /*
         * Calculate the full path for a specified log file name,
         * especially when this Feeder is configured to run with sub
         * directories.
         */
        FileManager fMgr = feederManager.getEnvImpl().getFileManager();
        File file = new File(fMgr.getFullFileName(fileName));

        if (!file.exists()) {
          throw EnvironmentFailureException.unexpectedState("Log file not found: " + fileName);
        }
        /* Freeze the length and last modified date. */
        final long length = file.length();
        final long lastModified = file.lastModified();
        byte[] digest = null;
        FileInfoResp resp = null;
        Protocol.FileInfoResp cachedResp = feederManager.statResponses.get(fileName);
        byte[] cachedDigest =
            ((cachedResp != null)
                    && (cachedResp.getFileLength() == length)
                    && (cachedResp.getLastModifiedTime() == lastModified))
                ? cachedResp.getDigestSHA1()
                : null;

        if (fileReq instanceof FileInfoReq) {
          if (cachedDigest != null) {
            digest = cachedDigest;
          } else if (((FileInfoReq) fileReq).getNeedSHA1()) {
            digest = getSHA1Digest(file, length).digest();
          } else {
            // Digest not requested
            digest = new byte[0];
          }
          resp = protocol.new FileInfoResp(fileName, length, lastModified, digest);
        } else {
          protocol.write(protocol.new FileStart(fileName, length, lastModified), namedChannel);
          digest = sendFileContents(file, length);
          if ((cachedDigest != null) && !Arrays.equals(cachedDigest, digest)) {
            throw EnvironmentFailureException.unexpectedState(
                "Inconsistent cached and computed digests");
          }
          resp = protocol.new FileEnd(fileName, length, lastModified, digest);
        }
        /* Cache for subsequent requests, if it was computed. */
        if (digest.length > 0) {
          feederManager.statResponses.put(fileName, resp);
        }
        protocol.write(resp, namedChannel);
      }
    } catch (ProtocolException pe) {
      if (pe.getUnexpectedMessage() instanceof Protocol.Done) {
        return;
      }
      throw pe;
    }
  }
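The cachedDigest computation above only trusts a stored digest when both the frozen length and the last-modified time still match the file. A stand-alone sketch of that cache policy using a ConcurrentHashMap; the CachedStat record and method names are hypothetical, not the feeder's Protocol classes.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class DigestCacheSketch {

  /** Hypothetical cached entry: digest plus the file attributes it was computed for. */
  record CachedStat(long length, long lastModified, byte[] digest) {}

  private final ConcurrentMap<String, CachedStat> cache = new ConcurrentHashMap<>();

  /** Returns the cached digest only if the file still has the frozen length and mtime. */
  byte[] cachedDigest(String fileName, long length, long lastModified) {
    CachedStat stat = cache.get(fileName);
    return (stat != null && stat.length() == length && stat.lastModified() == lastModified)
        ? stat.digest()
        : null;
  }

  void remember(String fileName, long length, long lastModified, byte[] digest) {
    cache.put(fileName, new CachedStat(length, lastModified, digest));
  }
}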
Code example #28
 /** @see Loggable#writeToLog */
 @Override
 public void writeToLog(ByteBuffer logBuffer) {
   throw EnvironmentFailureException.unexpectedState();
 }