Code example #1
 /** Message contains a Command. Execute it against *this* object and return result. */
 @Override
 public void handle(Message req, org.jgroups.blocks.Response response) throws Exception {
   if (isValid(req)) {
     ReplicableCommand cmd = null;
     try {
       cmd =
           (ReplicableCommand)
               req_marshaller.objectFromBuffer(
                   req.getRawBuffer(), req.getOffset(), req.getLength());
       if (cmd == null)
         throw new NullPointerException("Unable to execute a null command!  Message was " + req);
       if (req.getSrc() instanceof SiteAddress) {
         executeCommandFromRemoteSite(cmd, req, response);
       } else {
         executeCommandFromLocalCluster(cmd, req, response);
       }
     } catch (InterruptedException e) {
       log.shutdownHandlingCommand(cmd);
       reply(response, new ExceptionResponse(new CacheException("Cache is shutting down")));
     } catch (Throwable x) {
       if (cmd == null) log.errorUnMarshallingCommand(x);
       else log.exceptionHandlingCommand(cmd, x);
       reply(response, new ExceptionResponse(new CacheException("Problems invoking command.", x)));
     }
   } else {
     reply(response, null);
   }
 }
Code example #2
   protected Object invalidateAcrossCluster(
      boolean synchronous,
      InvocationContext ctx,
      Object[] keys,
      boolean useFuture,
      final Object retvalForFuture)
      throws Throwable {
    if (!isLocalModeForced(ctx)) {
      // increment invalidations counter if statistics maintained
      incrementInvalidations();
      final InvalidateCommand command = commandsFactory.buildInvalidateCommand(keys);
      if (log.isDebugEnabled())
        log.debug("Cache [" + rpcManager.getTransport().getAddress() + "] replicating " + command);
      // voila, invalidated!
      if (useFuture) {
        NotifyingNotifiableFuture<Object> future = new NotifyingFutureImpl(retvalForFuture);
        rpcManager.broadcastRpcCommandInFuture(command, future);
        return future;
      } else {
        rpcManager.broadcastRpcCommand(command, synchronous, false);
      }
    }

    return retvalForFuture;
  }
Code example #3
  public void enlist(Transaction transaction, LocalTransaction localTransaction) {
    if (!localTransaction.isEnlisted()) {
      SynchronizationAdapter sync =
          new SynchronizationAdapter(
              localTransaction,
              txCoordinator,
              commandsFactory,
              rpcManager,
              this,
              clusteringLogic,
              configuration);
      if (transactionSynchronizationRegistry != null) {
        try {
          transactionSynchronizationRegistry.registerInterposedSynchronization(sync);
        } catch (Exception e) {
          log.failedSynchronizationRegistration(e);
          throw new CacheException(e);
        }

      } else {

        try {
          transaction.registerSynchronization(sync);
        } catch (Exception e) {
          log.failedSynchronizationRegistration(e);
          throw new CacheException(e);
        }
      }
      ((SyncLocalTransaction) localTransaction).setEnlisted(true);
    }
  }
Code example #4
  @Override
  protected boolean isProperWriter(InvocationContext ctx, FlagAffectedCommand command, Object key) {
    if (command.hasFlag(Flag.SKIP_OWNERSHIP_CHECK)) return true;

    if (loaderConfig.shared()) {
      if (!dm.getPrimaryLocation(key).equals(address)) {
        log.tracef(
            "Skipping cache store since the cache loader is shared "
                + "and the caller is not the first owner of the key %s",
            key);
        return false;
      }
    } else {
      if (isUsingLockDelegation && !command.hasFlag(Flag.CACHE_MODE_LOCAL)) {
        if (ctx.isOriginLocal() && !dm.getPrimaryLocation(key).equals(address)) {
          // The command will be forwarded back to the originator, and the value will be stored then
          // (while holding the lock on the primary owner).
          log.tracef(
              "Skipping cache store on the originator because it is not the primary owner "
                  + "of key %s",
              key);
          return false;
        }
      }
      if (!dm.getWriteConsistentHash().isKeyLocalToNode(address, key)) {
        log.tracef("Skipping cache store since the key is not local: %s", key);
        return false;
      }
    }
    return true;
  }
Code example #5
  @Override
  public void markRehashCompleted(int viewId) throws InterruptedException {
    waitForJoinToStart();

    if (viewId < lastViewId) {
      if (trace)
        log.tracef(
            "Ignoring old rehash completed confirmation for view %d, last view is %d",
            viewId, lastViewId);
      return;
    }

    if (viewId > lastViewId) {
      throw new IllegalStateException(
          "Received rehash completed confirmation before confirming it ourselves");
    }

    if (trace)
      log.tracef(
          "Rehash completed on node %s, data container has %d keys",
          getSelf(), dataContainer.size());
    receivedRehashCompletedNotification = true;
    synchronized (rehashInProgressMonitor) {
      // we know for sure the rehash task is waiting for this confirmation, so the CH hasn't been
      // replaced
      if (trace) log.tracef("Updating last rehashed CH to %s", this.lastSuccessfulCH);
      lastSuccessfulCH = this.consistentHash;
      rehashInProgressMonitor.notifyAll();
    }
    joinCompletedLatch.countDown();
  }
Code example #6
 public static void waitForRehashToComplete(Cache... caches) {
   // give it 1 second to start rehashing
   // TODO Should look at the last committed view instead and check if it contains all the caches
   LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   for (Cache c : caches) {
     CacheViewsManager cacheViewsManager =
         TestingUtil.extractGlobalComponent(c.getCacheManager(), CacheViewsManager.class);
     RpcManager rpcManager = TestingUtil.extractComponent(c, RpcManager.class);
     while (cacheViewsManager.getCommittedView(c.getName()).getMembers().size() != caches.length) {
       if (System.currentTimeMillis() > giveup) {
         String message =
             String.format(
                 "Timed out waiting for rehash to complete on node %s, expected member list is %s, current member list is %s!",
                 rpcManager.getAddress(),
                 Arrays.toString(caches),
                 cacheViewsManager.getCommittedView(c.getName()));
         log.error(message);
         throw new RuntimeException(message);
       }
       LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
     }
     log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
   }
 }
Code example #7
File: CacheImpl.java Project: oranheim/infinispan
  private Object executeCommandAndCommitIfNeeded(InvocationContext ctx, VisitableCommand command) {
    final boolean txInjected =
        ctx.isInTxScope() && ((TxInvocationContext) ctx).isImplicitTransaction();
    Object result;
    try {
      result = invoker.invoke(ctx, command);
    } catch (RuntimeException e) {
      if (txInjected) tryRollback();
      throw e;
    }

    if (txInjected) {
      if (trace)
        log.tracef("Committing transaction as it was implicit: %s", getOngoingTransaction());
      try {
        transactionManager.commit();
      } catch (Throwable e) {
        log.couldNotCompleteInjectedTransaction(e);
        tryRollback();
        throw new CacheException("Could not commit implicit transaction", e);
      }
    }

    return result;
  }
Code example #8
    private void replayModificationsInTransaction(PrepareCommand command, boolean onePhaseCommit)
        throws Throwable {
      TransactionManager tm = txManager();
      boolean replaySuccessful = false;
      try {

        tm.begin();
        replayModifications(command);
        replaySuccessful = true;
      } finally {
        LocalTransaction localTx = txTable().getLocalTransaction(tm.getTransaction());
        // possible for the tx to be null if we got an exception during applying modifications
        if (localTx != null) {
          localTx.setFromRemoteSite(true);

          if (onePhaseCommit) {
            if (replaySuccessful) {
              log.tracef(
                  "Committing remotely originated tx %s as it is 1PC",
                  command.getGlobalTransaction());
              tm.commit();
            } else {
              log.tracef("Rolling back remotely originated tx %s", command.getGlobalTransaction());
              tm.rollback();
            }
          } else { // Wait for a remote commit/rollback.
            remote2localTx.put(command.getGlobalTransaction(), localTx.getGlobalTransaction());
            tm.suspend();
          }
        }
      }
    }
Code example #9
  private FileEntry allocateExistingEntry(FileEntry free, int len) {
    int remainder = free.size - len;
    // If the entry is sufficiently larger than the configured threshold, split it
    if ((remainder >= SMALLEST_ENTRY_SIZE) && (len <= (free.size * fragmentationFactor))) {
      try {
        // Add remainder of the space as a fileEntry
        FileEntry newFreeEntry = new FileEntry(free.offset + len, remainder);
        addNewFreeEntry(newFreeEntry);
        FileEntry newEntry = new FileEntry(free.offset, len);
        if (trace)
          log.tracef(
              "Split entry at %d:%d, allocated %d:%d, free %d:%d, %d free entries",
              free.offset,
              free.size,
              newEntry.offset,
              newEntry.size,
              newFreeEntry.offset,
              newFreeEntry.size,
              freeList.size());
        return newEntry;
      } catch (IOException e) {
        throw new PersistenceException("Cannot add new free entry", e);
      }
    }

    if (trace)
      log.tracef(
          "Existing free entry allocated at %d:%d, %d free entries",
          free.offset, free.size, freeList.size());
    return free;
  }
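The split condition above is easier to follow with concrete numbers. The sketch below only re-runs the same arithmetic; every constant in it (a 100-byte free entry, a 60-byte request, a SMALLEST_ENTRY_SIZE stand-in of 8 and a fragmentationFactor of 0.75) is an illustrative assumption, not a value taken from the original store.

// Illustrative arithmetic only; all constants are assumed, not taken from the file store.
static void splitArithmeticSketch() {
  int freeOffset = 0, freeSize = 100;  // existing free entry
  int len = 60;                        // requested allocation
  int smallestEntrySize = 8;           // stand-in for SMALLEST_ENTRY_SIZE
  double fragmentationFactor = 0.75;   // assumed configuration value

  int remainder = freeSize - len;      // 40
  boolean split = remainder >= smallestEntrySize && len <= freeSize * fragmentationFactor; // true
  // split == true: allocated entry at offset freeOffset with size 60, new free entry at offset 60 with size 40.
  // Otherwise the whole 100-byte free entry would be returned and the remainder wasted.
}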
Code example #10
  @Override
  public void stop() {
    try {
      if (stopChannel && channel != null && channel.isOpen()) {
        log.disconnectAndCloseJGroups();

        // Unregistering before disconnecting/closing because
        // after that the cluster name is null
        if (globalStatsEnabled) {
          JmxConfigurator.unregisterChannel(
              (JChannel) channel, mbeanServer, domain, channel.getClusterName());
        }

        channel.disconnect();
        channel.close();
      }
    } catch (Exception toLog) {
      log.problemClosingChannel(toLog);
    }

    channel = null;
    if (dispatcher != null) {
      log.stoppingRpcDispatcher();
      dispatcher.stop();
    }

    members = Collections.emptyList();
    coordinator = null;
    isCoordinator = false;
    dispatcher = null;
  }
Code example #11
 private Cache getCache(String regionName, String typeKey, Properties properties) {
   TypeOverrides regionOverride = typeOverrides.get(regionName);
   if (!definedConfigurations.contains(regionName)) {
     String templateCacheName = null;
     Configuration regionCacheCfg = null;
     if (regionOverride != null) {
       if (log.isDebugEnabled())
         log.debug("Cache region specific configuration exists: " + regionOverride);
       regionOverride = overrideStatisticsIfPresent(regionOverride, properties);
       regionCacheCfg = regionOverride.createInfinispanConfiguration();
       String cacheName = regionOverride.getCacheName();
        if (cacheName != null) {
          // Region specific override with a given cache name
          templateCacheName = cacheName;
        } else {
          // Region specific override without cache name, so template cache name is generic for data type.
          templateCacheName = typeOverrides.get(typeKey).getCacheName();
        }
     } else {
       // No region specific overrides, template cache name is generic for data type.
       templateCacheName = typeOverrides.get(typeKey).getCacheName();
       regionCacheCfg = typeOverrides.get(typeKey).createInfinispanConfiguration();
     }
     // Configure transaction manager
     regionCacheCfg = configureTransactionManager(regionCacheCfg, templateCacheName, properties);
     // Apply overrides
     manager.defineConfiguration(regionName, templateCacheName, regionCacheCfg);
     definedConfigurations.add(regionName);
   }
   Cache cache = manager.getCache(regionName);
   if (!cache.getStatus().allowInvocations()) {
     cache.start();
   }
   return createCacheWrapper(cache.getAdvancedCache());
 }
Code example #12
  protected void startJGroupsChannelIfNeeded() {
    if (startChannel) {
      String clusterName = configuration.getClusterName();
      try {
        channel.connect(clusterName);
      } catch (Exception e) {
        throw new CacheException("Unable to start JGroups Channel", e);
      }

      try {
        // Normally this would be done by CacheManagerJmxRegistration but
        // the channel is not started when the cache manager starts but
        // when first cache starts, so it's safer to do it here.
        globalStatsEnabled = configuration.isExposeGlobalJmxStatistics();
        if (globalStatsEnabled) {
          String groupName =
              String.format("type=channel,cluster=%s", ObjectName.quote(clusterName));
          mbeanServer = JmxUtil.lookupMBeanServer(configuration);
          domain = JmxUtil.buildJmxDomain(configuration, mbeanServer, groupName);
          JmxConfigurator.registerChannel(
              (JChannel) channel, mbeanServer, domain, clusterName, true);
        }
      } catch (Exception e) {
        throw new CacheException("Channel connected, but unable to register MBeans", e);
      }
    }
    address = fromJGroupsAddress(channel.getAddress());
    if (!startChannel) {
      // the channel was already started externally, we need to initialize our member list
      viewAccepted(channel.getView());
    }
    if (log.isInfoEnabled()) log.localAndPhysicalAddress(getAddress(), getPhysicalAddresses());
  }
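As a short aside on the ObjectName.quote call above: characters such as ':' or ',' are illegal in an unquoted JMX key property value, so the cluster name is wrapped in its quoted form before being placed in the group name. A minimal sketch, with an invented cluster name:

import javax.management.ObjectName;

public class QuoteSketch {
  public static void main(String[] args) {
    // "my:cluster" is an invented name containing a character that would be
    // illegal in an unquoted ObjectName property value.
    String quoted = ObjectName.quote("my:cluster");
    String groupName = String.format("type=channel,cluster=%s", quoted);
    System.out.println(groupName); // prints: type=channel,cluster="my:cluster"
  }
}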
Code example #13
  private void invalidateAcrossCluster(boolean synchronous, Object[] keys, InvocationContext ctx)
      throws Throwable {
    // increment invalidations counter if statistics maintained
    incrementInvalidations();
    final InvalidateCommand invalidateCommand =
        commandsFactory.buildInvalidateCommand(InfinispanCollections.<Flag>emptySet(), keys);
    if (log.isDebugEnabled())
      log.debug("Cache [" + rpcManager.getAddress() + "] replicating " + invalidateCommand);

    ReplicableCommand command = invalidateCommand;
    if (ctx.isInTxScope()) {
      TxInvocationContext txCtx = (TxInvocationContext) ctx;
      // A Prepare command containing the invalidation command in its 'modifications' list is sent
      // to the remote nodes so that the invalidation is executed in the same transaction and locks
      // can be acquired and released properly.
      // This is 1PC on purpose, as an optimisation, even if the current TX is 2PC.
      // If the cache uses 2PC it's possible that the remotes will commit the invalidation and the
      // originator rolls back, but this does not impact consistency and the speed benefit is worth it.
      command =
          commandsFactory.buildPrepareCommand(
              txCtx.getGlobalTransaction(),
              Collections.<WriteCommand>singletonList(invalidateCommand),
              true);
    }
    rpcManager.invokeRemotely(null, command, rpcManager.getDefaultRpcOptions(synchronous));
  }
Code example #14
  @Override
  public void handleRebalanceCompleted(
      String cacheName, Address node, int topologyId, Throwable throwable, int viewId)
      throws Exception {
    if (throwable != null) {
      // TODO We could try to update the pending CH such that nodes reporting errors are not
      // considered to hold any state
      // For now we are just logging the error and proceeding as if the rebalance was successful
      // everywhere
      log.rebalanceError(cacheName, node, throwable);
    }

    CLUSTER.rebalanceCompleted(cacheName, node, topologyId);

    ClusterCacheStatus cacheStatus = cacheStatusMap.get(cacheName);
    if (cacheStatus == null || !cacheStatus.isRebalanceInProgress()) {
      log.debugf(
          "Ignoring rebalance confirmation from %s "
              + "for cache %s because it doesn't have a cache status entry",
          node, cacheName);
      return;
    }

    cacheStatus.doConfirmRebalance(node, topologyId);
  }
Code example #15
  @Override
  public void copyForUpdate(DataContainer container, boolean writeSkewCheck) {
    if (isChanged()) return; // already copied

    // mark entry as changed.
    setChanged();

    if (writeSkewCheck) {
      // check for write skew.
      InternalCacheEntry ice = container.get(key);
      Object actualValue = ice == null ? null : ice.getValue();

      // Note that this identity-check is intentional.  We don't *want* to call actualValue.equals()
      // since that defeats the purpose.
      // The implicit "versioning" we have in R_R creates a new wrapper "value" instance for every
      // update.
      if (actualValue != null && actualValue != value) {
        String errormsg =
            new StringBuilder()
                .append("Detected write skew on key [")
                .append(getKey())
                .append("].  Another process has changed the entry since we last read it!")
                .toString();
        if (log.isWarnEnabled()) log.warn(errormsg + ".  Unable to copy entry for update.");
        throw new CacheException(errormsg);
      }
    }
    // make a backup copy
    oldValue = value;
  }
Code example #16
  public void cleanupTimedOutTransactions() {
    if (trace)
      log.tracef(
          "About to cleanup remote transactions older than %d ms",
          configuration.transaction().completedTxTimeout());
    long beginning = timeService.time();
    long cutoffCreationTime =
        beginning - TimeUnit.MILLISECONDS.toNanos(configuration.transaction().completedTxTimeout());
    List<GlobalTransaction> toKill = new ArrayList<>();

    // Check remote transactions.
    for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) {
      GlobalTransaction gtx = e.getKey();
      RemoteTransaction remoteTx = e.getValue();
      if (remoteTx != null) {
        if (trace) log.tracef("Checking transaction %s", gtx);
        // Check the time.
        if (remoteTx.getCreationTime() - cutoffCreationTime < 0) {
          long duration =
              timeService.timeDuration(
                  remoteTx.getCreationTime(), beginning, TimeUnit.MILLISECONDS);
          log.remoteTransactionTimeout(gtx, duration);
          toKill.add(gtx);
        }
      }
    }

    // Rollback the orphaned transactions and release any held locks.
    for (GlobalTransaction gtx : toKill) {
      killTransaction(gtx);
    }
  }
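The age check in the loop above subtracts the cutoff rather than comparing timestamps directly because the values come from a nanosecond clock, where only differences are meaningful and the counter may wrap. A minimal standalone sketch of the same ordering check; the helper name is invented:

// Minimal sketch, not part of the original class: nanosecond timestamps are
// ordered via subtraction so the comparison stays correct even if the counter
// wraps past Long.MAX_VALUE; this mirrors the check used in the loop above.
static boolean isOlderThanCutoff(long creationTimeNanos, long cutoffNanos) {
  return creationTimeNanos - cutoffNanos < 0;
}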
Code example #17
  private void addNewRunnables() throws InterruptedException {
    RunnableEntry addList;
    synchronized (this) {
      addList = newRunnables;
      newRunnables = null;
      if (addList == null && activeRunnables == null) {
        wait();
      }
    }

    if (addList == null) {
      return;
    } else if (activeRunnables == null) {
      activeRunnables = addList;
      if (log.isTraceEnabled()) {
        log.tracef("Adding pending tasks. Active=%s", count(activeRunnables));
      }
      return;
    }

    RunnableEntry last = activeRunnables;
    while (last.next != null) {
      last = last.next;
    }
    last.next = addList;
    if (log.isTraceEnabled()) {
      log.tracef("Adding pending tasks. Active=%s", count(activeRunnables));
    }
  }
Code example #18
  @Stop(priority = 20)
  @Override
  public void stop() {
    if (trace) {
      log.tracef(
          "Shutting down StateConsumer of cache %s on node %s", cacheName, rpcManager.getAddress());
    }

    try {
      synchronized (this) {
        // cancel all inbound transfers
        taskQueue.clear();
        for (Iterator<List<InboundTransferTask>> it = transfersBySource.values().iterator();
            it.hasNext(); ) {
          List<InboundTransferTask> inboundTransfers = it.next();
          it.remove();
          for (InboundTransferTask inboundTransfer : inboundTransfers) {
            inboundTransfer.cancel();
          }
        }
        transfersBySource.clear();
        transfersBySegment.clear();
      }
    } catch (Throwable t) {
      log.errorf(
          t,
          "Failed to stop StateConsumer of cache %s on node %s",
          cacheName,
          rpcManager.getAddress());
    }
  }
Code example #19
  @Override
  public final void store(InternalCacheEntry ed) throws CacheLoaderException {
    if (trace) {
      log.tracef("store(%s)", ed);
    }
    if (ed == null) {
      return;
    }
    if (ed.canExpire() && ed.isExpired(System.currentTimeMillis())) {
      if (containsKey(ed.getKey())) {
        if (trace) {
          log.tracef("Entry %s is expired!  Removing!", ed);
        }
        remove(ed.getKey());
      } else {
        if (trace) {
          log.tracef("Entry %s is expired!  Not doing anything.", ed);
        }
      }
      return;
    }

    L keyHashCode = getLockFromKey(ed.getKey());
    lockForWriting(keyHashCode);
    try {
      storeLockSafe(ed, keyHashCode);
    } finally {
      unlock(keyHashCode);
    }
    if (trace) {
      log.tracef("exit store(%s)", ed);
    }
  }
Code example #20
 private List<TransactionInfo> getTransactions(
     Address source, Set<Integer> segments, int topologyId) {
   if (trace) {
     log.tracef(
         "Requesting transactions for segments %s of cache %s from node %s",
         segments, cacheName, source);
   }
   // get transactions and locks
   try {
     StateRequestCommand cmd =
         commandsFactory.buildStateRequestCommand(
             StateRequestCommand.Type.GET_TRANSACTIONS,
             rpcManager.getAddress(),
             topologyId,
             segments);
     Map<Address, Response> responses =
         rpcManager.invokeRemotely(
             Collections.singleton(source), cmd, ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS, timeout);
     Response response = responses.get(source);
     if (response instanceof SuccessfulResponse) {
       return (List<TransactionInfo>) ((SuccessfulResponse) response).getResponseValue();
     }
     log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, null);
   } catch (CacheException e) {
     log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, e);
   }
   return null;
 }
Code example #21
File: CacheImpl.java Project: oranheim/infinispan
 /**
  * If this is a transactional cache and autoCommit is set to true then starts a transaction if
  * this is not a transactional call.
  */
 private InvocationContext getInvocationContextWithImplicitTransaction(
     EnumSet<Flag> explicitFlags, ClassLoader explicitClassLoader) {
   InvocationContext invocationContext;
   boolean txInjected = false;
   if (config.isTransactionalCache()) {
     Transaction transaction = getOngoingTransaction();
     if (transaction == null && config.isTransactionAutoCommit()) {
       try {
         transactionManager.begin();
         transaction = transactionManager.getTransaction();
         txInjected = true;
         if (trace) log.trace("Implicit transaction started!");
       } catch (Exception e) {
         throw new CacheException("Could not start transaction", e);
       }
     }
     invocationContext = getInvocationContext(transaction, explicitFlags, explicitClassLoader);
   } else {
     invocationContext = getInvocationContextForWrite(explicitFlags, explicitClassLoader);
   }
   if (txInjected) {
     ((TxInvocationContext) invocationContext).setImplicitTransaction(true);
     if (trace) log.tracef("Marked tx as implicit.");
   }
   return invocationContext;
 }
Code example #22
  protected final void commitContextEntries(
      InvocationContext ctx, FlagAffectedCommand command, Metadata metadata) {
    final Flag stateTransferFlag = extractStateTransferFlag(ctx, command);

    if (stateTransferFlag == null) {
      // it is a normal operation
      stopStateTransferIfNeeded(command);
    }

    if (ctx instanceof SingleKeyNonTxInvocationContext) {
      SingleKeyNonTxInvocationContext singleKeyCtx = (SingleKeyNonTxInvocationContext) ctx;
      commitEntryIfNeeded(ctx, command, singleKeyCtx.getCacheEntry(), stateTransferFlag, metadata);
    } else {
      Set<Map.Entry<Object, CacheEntry>> entries = ctx.getLookedUpEntries().entrySet();
      Iterator<Map.Entry<Object, CacheEntry>> it = entries.iterator();
      final Log log = getLog();
      while (it.hasNext()) {
        Map.Entry<Object, CacheEntry> e = it.next();
        CacheEntry entry = e.getValue();
        if (!commitEntryIfNeeded(ctx, command, entry, stateTransferFlag, metadata)) {
          if (trace) {
            if (entry == null)
              log.tracef("Entry for key %s is null : not calling commitUpdate", toStr(e.getKey()));
            else
              log.tracef(
                  "Entry for key %s is not changed(%s): not calling commitUpdate",
                  toStr(e.getKey()), entry);
          }
        }
      }
    }
  }
Code example #23
  @Override
  public void applyState(
      ConsistentHash consistentHash,
      Map<Object, InternalCacheValue> state,
      Address sender,
      int viewId)
      throws InterruptedException {
    waitForJoinToStart();

    if (viewId < lastViewId) {
      log.debugf(
          "Rejecting state pushed by node %s for old rehash %d (last view id is %d)",
          sender, viewId, lastViewId);
      return;
    }

    log.debugf("Applying new state from %s: received %d keys", sender, state.size());
    if (trace) log.tracef("Received keys: %s", state.keySet());
    int retryCount = 3; // in case we have issues applying state.
    Map<Object, InternalCacheValue> pendingApplications = state;
    for (int i = 0; i < retryCount; i++) {
      pendingApplications = applyStateMap(pendingApplications, true);
      if (pendingApplications.isEmpty()) break;
    }
    // one last go
    if (!pendingApplications.isEmpty()) applyStateMap(pendingApplications, false);

    if (trace) log.tracef("After applying state data container has %d keys", dataContainer.size());
  }
Code example #24
 /** Same comment as for {@link #prepare(javax.transaction.xa.Xid)} applies for commit. */
 public void commit(Xid externalXid, boolean isOnePhase) throws XAException {
   Xid xid = convertXid(externalXid);
   LocalXaTransaction localTransaction = getLocalTransactionAndValidate(xid);
   if (trace)
     log.tracef(
         "Committing transaction %s. One phase? %s",
         localTransaction.getGlobalTransaction(), isOnePhase);
   if (isOnePhase && !configuration.isOnePhaseCommit()) {
      // isOnePhase being true means that we're the only participant in the distributed transaction
      // and TM does the 1PC optimization. We run a 2PC though, as running only 1PC has a high
      // chance of leaving the cluster in inconsistent state.
     try {
       txCoordinator.prepare(localTransaction);
       txCoordinator.commit(localTransaction, false);
     } catch (XAException e) {
       if (trace)
         log.tracef("Couldn't commit 1PC transaction %s, trying to rollback.", localTransaction);
       try {
         rollback(xid);
         throw new XAException(XAException.XA_HEURRB); // this is a heuristic rollback
       } catch (XAException e1) {
         log.couldNotRollbackPrepared1PcTransaction(localTransaction, e1);
         // inform the TM that a resource manager error has occurred in the transaction branch
         // (XAER_RMERR).
         throw new XAException(XAException.XAER_RMERR);
       }
     }
   } else {
     txCoordinator.commit(localTransaction, configuration.isOnePhaseCommit());
   }
   forgetSuccessfullyCompletedTransaction(recoveryManager, xid, localTransaction);
 }
Code example #25
File: Streams.java Project: n1hility/infinispan
  /**
   * Copy a limited number of bytes from the input stream to the output stream.
   *
   * @param input Stream to read bytes from.
   * @param output Stream to write bytes to.
   * @param buffer The buffer to use while copying.
   * @param length The maximum number of bytes to copy.
   * @return The total number of bytes copied.
   * @throws IOException Failed to copy bytes.
   */
  public static long copySome(
      final InputStream input, final OutputStream output, final byte buffer[], final long length)
      throws IOException {
    long total = 0;
    int read;
    int readLength;

    boolean trace = log.isTraceEnabled();

    // setup the initial readLength, if length is less than the buffer
    // size, then we only want to read that much
    readLength = Math.min((int) length, buffer.length);
    if (trace) {
      log.tracef("initial read length: %d", readLength);
    }

    while (readLength != 0 && (read = input.read(buffer, 0, readLength)) != -1) {
      if (trace) log.tracef("read bytes: %d", read);
      output.write(buffer, 0, read);
      total += read;
      if (trace) log.tracef("total bytes read: %d", total);

      // update the readLength
      readLength = Math.min((int) (length - total), buffer.length);
      if (trace) log.tracef("next read length: %d", readLength);
    }

    return total;
  }
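A hypothetical usage sketch for the method above; the in-memory streams, buffer size and 1024-byte limit are invented, and it assumes the Streams class shown here is on the classpath.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class CopySomeUsage {
  public static void main(String[] args) throws IOException {
    byte[] buffer = new byte[256];
    ByteArrayInputStream in = new ByteArrayInputStream(new byte[4096]);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Copies at most 1024 bytes even though the source holds 4096.
    long copied = Streams.copySome(in, out, buffer, 1024);
    System.out.println(copied + " bytes copied"); // 1024 bytes copied
  }
}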
Code example #26
  public Xid[] recover(int flag) throws XAException {
    if (!configuration.isTransactionRecoveryEnabled()) {
      log.recoveryIgnored();
      return RecoveryManager.RecoveryIterator.NOTHING;
    }
    if (trace) log.trace("recover called: " + flag);

    if (isFlag(flag, TMSTARTRSCAN)) {
      recoveryIterator = recoveryManager.getPreparedTransactionsFromCluster();
      if (trace) log.tracef("Fetched a new recovery iterator: %s", recoveryIterator);
    }
    if (isFlag(flag, TMENDRSCAN)) {
      if (trace) log.trace("Flushing the iterator");
      return recoveryIterator.all();
    } else {
      // as per the spec: "TMNOFLAGS this flag must be used when no other flags are specified."
      if (!isFlag(flag, TMSTARTRSCAN) && !isFlag(flag, TMNOFLAGS))
        throw new IllegalArgumentException(
            "TMNOFLAGS this flag must be used when no other flags are specified."
                + " Received "
                + flag);
      return recoveryIterator.hasNext()
          ? recoveryIterator.next()
          : RecoveryManager.RecoveryIterator.NOTHING;
    }
  }
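For context, a transaction manager typically drives an XAResource recovery scan as sketched below. This is standard XA usage rather than Infinispan code; the class and method names are invented.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;

public class RecoveryScanSketch {
  // Hedged sketch of a standard XA recovery scan against a resource such as the one above.
  static List<Xid> scanInDoubtTransactions(XAResource xaResource) throws XAException {
    List<Xid> inDoubt = new ArrayList<>();
    Xid[] batch = xaResource.recover(XAResource.TMSTARTRSCAN); // open the scan
    while (batch != null && batch.length > 0) {
      inDoubt.addAll(Arrays.asList(batch));
      batch = xaResource.recover(XAResource.TMNOFLAGS);        // fetch the next batch
    }
    Xid[] rest = xaResource.recover(XAResource.TMENDRSCAN);    // close the scan
    if (rest != null) {
      inDoubt.addAll(Arrays.asList(rest));
    }
    return inDoubt;
  }
}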
Code example #27
  public NotifyingNotifiableFuture<Object> flushCache(
      Collection<Object> keys, Object retval, Address origin) {
    if (trace) log.tracef("Invalidating L1 caches for keys %s", keys);

    NotifyingNotifiableFuture<Object> future = new AggregatingNotifyingFutureImpl(retval, 2);

    Collection<Address> invalidationAddresses = buildInvalidationAddressList(keys, origin);

    int nodes = invalidationAddresses.size();

    if (nodes > 0) {
      // No need to invalidate at all if there is no one to invalidate!
      boolean multicast = isUseMulticast(nodes);

      if (trace)
        log.tracef(
            "There are %s nodes involved in invalidation. Threshold is: %s; using multicast: %s",
            nodes, threshold, multicast);

      if (multicast) {
        if (trace) log.tracef("Invalidating keys %s via multicast", keys);
        InvalidateCommand ic = commandsFactory.buildInvalidateFromL1Command(origin, false, keys);
        rpcManager.broadcastRpcCommandInFuture(ic, future);
      } else {
        InvalidateCommand ic = commandsFactory.buildInvalidateFromL1Command(origin, false, keys);

        // Ask the caches who have requested from us to remove
        if (trace) log.tracef("Keys %s needs invalidation on %s", keys, invalidationAddresses);
        rpcManager.invokeRemotelyInFuture(
            invalidationAddresses, ic, true, future, rpcTimeout, true);
        return future;
      }
    } else if (trace) log.trace("No L1 caches to invalidate");
    return future;
  }
Code example #28
 @Override
 public final void run() {
   try {
     state.waitForStart();
   } catch (InterruptedException e1) {
     state.errorManage(e1);
     return;
   }
   try {
     beforeLoop();
   } catch (IOException e) {
     log.error("unexpected error", e);
     state.errorManage(e);
   }
   while (!state.needToQuit()) {
     try {
       testLoop();
     } catch (Exception e) {
       log.error("unexpected error", e);
       state.errorManage(e);
     }
   }
   try {
     cleanup();
   } catch (IOException e) {
     log.error("unexpected error", e);
     state.errorManage(e);
   }
 }
Code example #29
  /**
   * This method calculates the minimum view ID known by the current node. It is only used in a
   * clustered cache, and is invoked either when a view change is detected or when a transaction
   * completes whose view ID is not the same as the current view ID.
   *
   * <p>This method is guarded by minViewRecalculationLock to prevent concurrent updates to the
   * minimum view ID field.
   *
   * @param idOfRemovedTransaction the view ID associated with the transaction that triggered this
   *     recalculation, or -1 if triggered by a view change event.
   */
  @GuardedBy("minViewRecalculationLock")
  private void calculateMinViewId(int idOfRemovedTransaction) {
    minViewRecalculationLock.lock();
    try {
      // We should only need to re-calculate the minimum view ID if the transaction being completed
      // has the same ID as the smallest known transaction ID, to check what the new smallest is.
      // We do this check
      // again here, since this is now within a synchronized method.
      if (idOfRemovedTransaction == -1
          || (idOfRemovedTransaction == minTxViewId && idOfRemovedTransaction < currentViewId)) {
        int minViewIdFound = currentViewId;

        for (CacheTransaction ct : localTransactions.values()) {
          int viewId = ct.getViewId();
          if (viewId < minViewIdFound) minViewIdFound = viewId;
        }
        for (CacheTransaction ct : remoteTransactions.values()) {
          int viewId = ct.getViewId();
          if (viewId < minViewIdFound) minViewIdFound = viewId;
        }
        if (minViewIdFound > minTxViewId) {
          log.tracef("Changing minimum view ID from %s to %s", minTxViewId, minViewIdFound);
          minTxViewId = minViewIdFound;
        } else {
          log.tracef("Minimum view ID still is %s; nothing to change", minViewIdFound);
        }
      }
    } finally {
      minViewRecalculationLock.unlock();
    }
  }
Code example #30
  public void cleanupCompletedTransactions() {
    if (!completedTransactions.isEmpty()) {
      try {
        log.tracef(
            "About to cleanup completed transaction. Initial size is %d",
            completedTransactions.size());
        // this iterator is weakly consistent and will never throw ConcurrentModificationException
        Iterator<Map.Entry<GlobalTransaction, Long>> iterator =
            completedTransactions.entrySet().iterator();
        long timeout = configuration.transaction().completedTxTimeout();

        int removedEntries = 0;
        long beginning = timeService.time();
        while (iterator.hasNext()) {
          Map.Entry<GlobalTransaction, Long> e = iterator.next();
          long ageMillis = timeService.timeDuration(e.getValue(), TimeUnit.MILLISECONDS);
          if (ageMillis >= timeout) {
            iterator.remove();
            removedEntries++;
          }
        }
        long duration = timeService.timeDuration(beginning, TimeUnit.MILLISECONDS);

        log.tracef(
            "Finished cleaning up completed transactions. %d transactions were removed, total duration was %d millis, "
                + "current number of completed transactions is %d",
            removedEntries, duration, completedTransactions.size());
      } catch (Exception e) {
        log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage());
      }
    }
  }