private void testStoredEntry(
     InternalCacheEntry entry,
     Object expectedValue,
     long expectedLifespan,
     String src,
     Object key) {
   assert entry != null : src + " entry for key " + key + " should NOT be null";
   assert entry.getValue().equals(expectedValue)
       : src
           + " should contain value "
           + expectedValue
           + " under key "
           + entry.getKey()
           + " but was "
           + entry.getValue()
           + ". Entry is "
           + entry;
   assert entry.getLifespan() == expectedLifespan
       : src
           + " expected lifespan for key "
           + key
           + " to be "
           + expectedLifespan
           + " but was "
           + entry.getLifespan()
           + ". Entry is "
           + entry;
 }
Code example #2
File: CacheImpl.java Project: oranheim/infinispan
 public void compact() {
   for (InternalCacheEntry e : dataContainer) {
     if (e.getKey() instanceof MarshalledValue) {
       ((MarshalledValue) e.getKey()).compact(true, true);
     }
     if (e.getValue() instanceof MarshalledValue) {
       ((MarshalledValue) e.getValue()).compact(true, true);
     }
   }
 }
Code example #3
  private Object realRemoteGet(
      InvocationContext ctx, Object key, boolean storeInL1, boolean isWrite) throws Throwable {
    if (trace) log.tracef("Doing a remote get for key %s", key);

    boolean acquireRemoteLock = false;
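    // With pessimistic caches, a transactional write must also acquire the remote lock,
    // unless this key is already among the transaction's affected keys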
    if (ctx.isInTxScope()) {
      TxInvocationContext txContext = (TxInvocationContext) ctx;
      acquireRemoteLock =
          isWrite && isPessimisticCache && !txContext.getAffectedKeys().contains(key);
    }
    // attempt a remote lookup
    InternalCacheEntry ice = dm.retrieveFromRemoteSource(key, ctx, acquireRemoteLock);

    if (acquireRemoteLock) {
      ((TxInvocationContext) ctx).addAffectedKey(key);
    }

    if (ice != null) {
      if (storeInL1) {
        if (isL1CacheEnabled) {
          if (trace) log.tracef("Caching remotely retrieved entry for key %s in L1", key);
          // This should be fail-safe
          try {
            long lifespan =
                ice.getLifespan() < 0
                    ? configuration.getL1Lifespan()
                    : Math.min(ice.getLifespan(), configuration.getL1Lifespan());
            PutKeyValueCommand put =
                cf.buildPutKeyValueCommand(
                    ice.getKey(), ice.getValue(), lifespan, -1, ctx.getFlags());
            lockAndWrap(ctx, key, ice);
            invokeNextInterceptor(ctx, put);
          } catch (Exception e) {
            // Couldn't store in L1 for some reason.  But don't fail the transaction!
            log.infof("Unable to store entry %s in L1 cache", key);
            log.debug("Inability to store in L1 caused by", e);
          }
        } else {
          CacheEntry ce = ctx.lookupEntry(key);
          if (ce == null || ce.isNull() || ce.isLockPlaceholder() || ce.getValue() == null) {
            if (ce != null && ce.isChanged()) {
              ce.setValue(ice.getValue());
            } else {
              if (isWrite) lockAndWrap(ctx, key, ice);
              else ctx.putLookedUpEntry(key, ice);
            }
          }
        }
      } else {
        if (trace) log.tracef("Not caching remotely retrieved entry for key %s in L1", key);
      }
      return ice.getValue();
    }
    return null;
  }
Code example #4
  /**
   * In the case of remotely originating transactions we don't get a chance to visit the individual
   * commands; we receive this "batch" instead. We therefore need the before-apply snapshot of some
   * types to route the cleanup commands to the correct indexes. Note that we don't need to visit
   * the CommitCommand, as the indexing context is registered as a transaction sync.
   */
  @Override
  public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command)
      throws Throwable {
    final WriteCommand[] writeCommands = command.getModifications();
    final Object[] stateBeforePrepare = new Object[writeCommands.length];
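    // Capture the previous value(s) for each write command before the prepare is applied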

    for (int i = 0; i < writeCommands.length; i++) {
      final WriteCommand writeCommand = writeCommands[i];
      if (writeCommand instanceof PutKeyValueCommand) {
        InternalCacheEntry internalCacheEntry =
            dataContainer.get(((PutKeyValueCommand) writeCommand).getKey());
        stateBeforePrepare[i] = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
      } else if (writeCommand instanceof PutMapCommand) {
        stateBeforePrepare[i] = getPreviousValues(((PutMapCommand) writeCommand).getMap().keySet());
      } else if (writeCommand instanceof RemoveCommand) {
        InternalCacheEntry internalCacheEntry =
            dataContainer.get(((RemoveCommand) writeCommand).getKey());
        stateBeforePrepare[i] = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
      } else if (writeCommand instanceof ReplaceCommand) {
        InternalCacheEntry internalCacheEntry =
            dataContainer.get(((ReplaceCommand) writeCommand).getKey());
        stateBeforePrepare[i] = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
      }
    }

    final Object toReturn = super.visitPrepareCommand(ctx, command);

    if (ctx.isTransactionValid()) {
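      // Re-visit each modification with its before-prepare state inside a transactional event context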
      final TransactionContext transactionContext = makeTransactionalEventContext();
      for (int i = 0; i < writeCommands.length; i++) {
        final WriteCommand writeCommand = writeCommands[i];
        if (writeCommand instanceof PutKeyValueCommand) {
          processPutKeyValueCommand(
              (PutKeyValueCommand) writeCommand, ctx, stateBeforePrepare[i], transactionContext);
        } else if (writeCommand instanceof PutMapCommand) {
          processPutMapCommand(
              (PutMapCommand) writeCommand,
              ctx,
              (Map<Object, Object>) stateBeforePrepare[i],
              transactionContext);
        } else if (writeCommand instanceof RemoveCommand) {
          processRemoveCommand(
              (RemoveCommand) writeCommand, ctx, stateBeforePrepare[i], transactionContext);
        } else if (writeCommand instanceof ReplaceCommand) {
          processReplaceCommand(
              (ReplaceCommand) writeCommand, ctx, stateBeforePrepare[i], transactionContext);
        } else if (writeCommand instanceof ClearCommand) {
          processClearCommand((ClearCommand) writeCommand, ctx, transactionContext);
        }
      }
    }
    return toReturn;
  }
Code example #5
  public void testEvictBeforeRead()
      throws CacheLoaderException, ExecutionException, InterruptedException {
    cache = cacheManager.getCache();
    cache.put("a", "b");
    assert cache.get("a").equals("b");
    CacheLoader cl = TestingUtil.getCacheLoader(cache);
    assert cl != null;
    InternalCacheEntry se = cl.load("a");
    assert se != null;
    assert se.getValue().equals("b");

    // clear the cache
    cache.getAdvancedCache().withFlags(SKIP_CACHE_STORE).clear();
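    // SKIP_CACHE_STORE means the clear only touches memory, so the loader still holds the entry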

    se = cl.load("a");
    assert se != null;
    assert se.getValue().equals("b");

    // now attempt a concurrent get and evict.
    ExecutorService e = Executors.newFixedThreadPool(1);
    sdi.enabled = true;

    log.info("test::doing the get");

    // call the get
    Future<String> future =
        e.submit(
            new Callable<String>() {
              public String call() throws Exception {
                return (String) cache.get("a");
              }
            });

    // now run the evict.
    log.info("test::before the evict");
    cache.evict("a");
    log.info("test::after the evict");

    // make sure the get call, which would have gone past the cache loader interceptor first, gets
    // the correct value.
    assert future.get().equals("b");

    // disable the SlowDownInterceptor
    sdi.enabled = false;

    // and check that the key actually has been evicted
    assert !TestingUtil.extractComponent(cache, DataContainer.class).containsKey("a");

    e.shutdownNow();
  }
Code example #6
 @Override
 protected String toString(InternalCacheEntry ice) {
   if (ice == null) return null;
   StringBuilder sb = new StringBuilder(256);
   sb.append(ice.getClass().getSimpleName());
   sb.append("[key=").append(ice.getKey()).append(", value=").append(ice.getValue());
   sb.append(", created=").append(ice.getCreated()).append(", isCreated=").append(ice.isCreated());
   sb.append(", lastUsed=")
       .append(ice.getLastUsed())
       .append(", isChanged=")
       .append(ice.isChanged());
   sb.append(", expires=")
       .append(ice.getExpiryTime())
       .append(", isExpired=")
       .append(ice.isExpired(System.currentTimeMillis()));
   sb.append(", canExpire=")
       .append(ice.canExpire())
       .append(", isEvicted=")
       .append(ice.isEvicted());
   sb.append(", isRemoved=").append(ice.isRemoved()).append(", isValid=").append(ice.isValid());
   sb.append(", lifespan=")
       .append(ice.getLifespan())
       .append(", maxIdle=")
       .append(ice.getMaxIdle());
   return sb.append(']').toString();
 }
Code example #7
    @Override
    protected void commitSingleEntry(
        CacheEntry entry,
        Metadata metadata,
        FlagAffectedCommand command,
        InvocationContext ctx,
        Flag trackFlag,
        boolean l1Invalidation) {
      // Cache flags before they're reset
      // TODO: Can the reset be done after notification instead?
      boolean created = entry.isCreated();
      boolean removed = entry.isRemoved();
      boolean expired;
      if (removed && entry instanceof MVCCEntry) {
        expired = ((MVCCEntry) entry).isExpired();
      } else {
        expired = false;
      }

      InternalCacheEntry previousEntry = dataContainer.peek(entry.getKey());
      Object previousValue = null;
      Metadata previousMetadata = null;
      if (previousEntry != null) {
        previousValue = previousEntry.getValue();
        previousMetadata = previousEntry.getMetadata();
      }
      commitManager.commit(entry, metadata, trackFlag, l1Invalidation);

      // Notify after events if necessary
      notifyCommitEntry(
          created, removed, expired, entry, ctx, command, previousValue, previousMetadata);
    }
Code example #8
  public void testStateTransfer() throws Exception {
    // Insert initial data in the cache
    Set<Object> keys = new HashSet<Object>();
    for (int i = 0; i < NUM_KEYS; i++) {
      Object key = "key" + i;
      keys.add(key);
      cache(0).put(key, key);
    }

    log.trace("State transfer happens here");
    // add a third node
    addClusterEnabledCacheManager(dccc);
    waitForClusterToForm();

    log.trace("Checking the values from caches...");
    int keysOnJoiner = 0;
    for (Object key : keys) {
      log.tracef("Checking key: %s", key);
      // check them directly in data container
      InternalCacheEntry d0 = advancedCache(0).getDataContainer().get(key);
      InternalCacheEntry d1 = advancedCache(1).getDataContainer().get(key);
      InternalCacheEntry d2 = advancedCache(2).getDataContainer().get(key);
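      // after the join: node 0 still has the key, node 1 must not, and the joiner may have received it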
      assertEquals(key, d0.getValue());
      assertNull(d1);
      if (d2 != null) {
        keysOnJoiner++;
      }
    }

    assertTrue("The joiner should receive at least one key", keysOnJoiner > 0);
  }
Code example #9
 private void assertContainerEntry(
     Class<? extends InternalCacheEntry> type, String expectedValue) {
   assert dc.containsKey("k");
   InternalCacheEntry entry = dc.get("k");
   assertEquals(type, entry.getClass());
   assertEquals(expectedValue, entry.getValue());
 }
Code example #10
 private Object getValue(Object key) {
   log.tracef("Checking key: %s", key);
   InternalCacheEntry d0 = advancedCache(0).getDataContainer().get(key);
   InternalCacheEntry d1 = advancedCache(1).getDataContainer().get(key);
   InternalCacheEntry d2 = advancedCache(2).getDataContainer().get(key);
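   // exactly one node is expected to miss the entry; the two existing copies must agree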
   if (d0 == null) {
     assert sameValue(d1, d2);
     return d1.getValue();
   } else if (d1 == null) {
     assert sameValue(d0, d2);
     return d0.getValue();
   } else if (d2 == null) {
     assert sameValue(d0, d1);
     return d0.getValue();
   }
   throw new RuntimeException();
 }
Code example #11
 private Map<Object, Object> getPreviousValues(Set<Object> keySet) {
   HashMap<Object, Object> previousValues = new HashMap<Object, Object>();
   for (Object key : keySet) {
     InternalCacheEntry internalCacheEntry = dataContainer.get(key);
     Object previousValue = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
     previousValues.put(key, previousValue);
   }
   return previousValues;
 }
Code example #12
 private Object remoteGet(InvocationContext ctx, Object key, GetKeyValueCommand command)
     throws Throwable {
   if (trace) log.tracef("Doing a remote get for key %s", key);
   InternalCacheEntry ice = dm.retrieveFromRemoteSource(key, ctx, false, command);
   command.setRemotelyFetchedValue(ice);
   if (ice != null) {
     return ice.getValue();
   }
   return null;
 }
Code example #13
  @Override
  public void notifyCacheEntriesEvicted(
      Collection<InternalCacheEntry<? extends K, ? extends V>> entries,
      InvocationContext ctx,
      FlagAffectedCommand command) {
    if (!entries.isEmpty()) {
      if (isNotificationAllowed(command, cacheEntriesEvictedListeners)) {
        EventImpl<K, V> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
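        // Expose the evicted entries as an immutable key/value map on the event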
        Map<K, V> evictedKeysAndValues =
            transformCollectionToMap(
                entries,
                new InfinispanCollections.MapMakerFunction<
                    K, V, InternalCacheEntry<? extends K, ? extends V>>() {
                  @Override
                  public Map.Entry<K, V> transform(
                      final InternalCacheEntry<? extends K, ? extends V> input) {
                    return new Map.Entry<K, V>() {
                      @Override
                      public K getKey() {
                        return input.getKey();
                      }

                      @Override
                      public V getValue() {
                        return input.getValue();
                      }

                      @Override
                      public V setValue(V value) {
                        throw new UnsupportedOperationException();
                      }
                    };
                  }
                });

        e.setEntries(evictedKeysAndValues);
        for (CacheEntryListenerInvocation<K, V> listener : cacheEntriesEvictedListeners)
          listener.invoke(e);
      }

      // For backward compat
      if (isNotificationAllowed(command, cacheEntryEvictedListeners)) {
        for (InternalCacheEntry<? extends K, ? extends V> ice : entries) {
          EventImpl<K, V> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
          e.setKey(ice.getKey());
          e.setValue(ice.getValue());
          boolean isLocalNodePrimaryOwner =
              clusteringDependentLogic.localNodeIsPrimaryOwner(ice.getKey());
          for (CacheEntryListenerInvocation<K, V> listener : cacheEntryEvictedListeners)
            listener.invoke(e, isLocalNodePrimaryOwner);
        }
      }
    }
  }
Code example #14
 private MVCCEntry wrapInternalCacheEntryForPut(
     InvocationContext ctx, Object key, InternalCacheEntry cacheEntry) {
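   // Wrap the container entry in an MVCCEntry and register it in the invocation context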
   MVCCEntry mvccEntry =
       createWrappedEntry(
           key,
           cacheEntry.getValue(),
           cacheEntry.getVersion(),
           false,
           false,
           cacheEntry.getLifespan());
   ctx.putLookedUpEntry(key, mvccEntry);
   return mvccEntry;
 }
Code example #15
 private void commitClearCommand(
     DataContainer<Object, Object> dataContainer,
     ClearCacheEntry<Object, Object> cacheEntry,
     InvocationContext context,
     FlagAffectedCommand command) {
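   // Snapshot the current entries so a removal notification can be fired for each one after the clear commits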
   List<InternalCacheEntry<Object, Object>> copyEntries =
       new ArrayList<>(dataContainer.entrySet());
   cacheEntry.commit(dataContainer, null);
   for (InternalCacheEntry entry : copyEntries) {
     notifier.notifyCacheEntryRemoved(
         entry.getKey(), entry.getValue(), entry.getMetadata(), false, context, command);
   }
 }
Code example #16
    @Override
    public boolean contains(Object o) {
      if (!(o instanceof Map.Entry)) {
        return false;
      }

      @SuppressWarnings("rawtypes")
      Map.Entry e = (Map.Entry) o;
      InternalCacheEntry ice = entries.get(e.getKey());
      if (ice == null) {
        return false;
      }
      return ice.getValue().equals(e.getValue());
    }
Code example #17
 @Test
 public void testSkip() throws IOException, CacheLoaderException {
   final long expected = (DATA_SIZE - 1);
   final byte[] data = randomData(DATA_SIZE);
   when(mockEntry.getValue()).thenReturn(data);
   when(mockStore.load(MOCK_FIRST_CHUNK)).thenReturn(mockEntry);
   final long actual = testObj.skip(expected);
   assertEquals(expected, actual);
   verify(mockStore).load(anyString());
   verify(mockStore).load(MOCK_FIRST_CHUNK);
   verify(mockEntry).getValue();
   assertTrue(testObj.read() > -1);
   assertEquals(-1, testObj.read());
 }
Code example #18
 /**
  * Attempts the L1 update and sets the value. If the L1 update was marked as skipped, this will
  * instead just set the value to release blockers. A null value may be provided, in which case
  * the L1 update is not run and other waiters are simply notified that a null was given.
  */
 public void runL1UpdateIfPossible(InternalCacheEntry ice) {
   Object value = null;
   try {
     if (ice != null) {
       value = ice.getValue();
       Object key;
       if (sync.attemptUpdateToRunning() && !dc.containsKey((key = ice.getKey()))) {
          // Acquire the transfer lock to ensure that we don't have a rehash and change to become
          // an owner; note we check the ownership in the following if
         stateTransferLock.acquireSharedTopologyLock();
         try {
            // Now we can update the L1 if there isn't a value already there and we haven't now
            // become a write owner
           if (!dc.containsKey(key) && !cdl.localNodeIsOwner(key)) {
             log.tracef("Caching remotely retrieved entry for key %s in L1", key);
             long lifespan =
                 ice.getLifespan() < 0 ? l1Lifespan : Math.min(ice.getLifespan(), l1Lifespan);
             // Make a copy of the metadata stored internally, adjust
             // lifespan/maxIdle settings and send them a modification
             Metadata newMetadata =
                 ice.getMetadata().builder().lifespan(lifespan).maxIdle(-1).build();
             dc.put(key, ice.getValue(), newMetadata);
           } else {
             log.tracef("Data container contained value after rehash for key %s", key);
           }
         } finally {
           stateTransferLock.releaseSharedTopologyLock();
         }
       }
     }
   } finally {
     sync.innerSet(value);
   }
 }
Code example #19
 @Override
 public CacheEntry wrapEntryForDelta(InvocationContext ctx, Object deltaKey, Delta delta)
     throws InterruptedException {
   CacheEntry cacheEntry = getFromContext(ctx, deltaKey);
   DeltaAwareCacheEntry deltaAwareEntry = null;
   if (cacheEntry != null) {
     deltaAwareEntry = wrapEntryForDelta(ctx, deltaKey, cacheEntry);
   } else {
     InternalCacheEntry ice = getFromContainer(deltaKey);
     if (ice != null) {
       deltaAwareEntry = newDeltaAwareCacheEntry(ctx, deltaKey, (DeltaAware) ice.getValue());
     }
   }
   if (deltaAwareEntry != null) deltaAwareEntry.appendDelta(delta);
   return deltaAwareEntry;
 }
Code example #20
 public Object getValueAt(int rowIndex, int columnIndex) {
   if (data.size() > rowIndex) {
     InternalCacheEntry e = data.get(rowIndex);
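      // column mapping: 0 = key, 1 = value, 2 = lifespan, 3 = maxIdle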
     switch (columnIndex) {
       case 0:
         return e.getKey();
       case 1:
         return e.getValue();
       case 2:
         return e.getLifespan();
       case 3:
         return e.getMaxIdle();
     }
   }
   return "NULL!";
 }
Code example #21
  @Override
  public void notifyCacheEntriesEvicted(
      Collection<InternalCacheEntry> entries, InvocationContext ctx, FlagAffectedCommand command) {
    if (!entries.isEmpty()) {
      if (isNotificationAllowed(command, cacheEntriesEvictedListeners)) {
        EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
        Map<Object, Object> evictedKeysAndValues =
            transformCollectionToMap(
                entries,
                new InfinispanCollections.MapMakerFunction<Object, Object, InternalCacheEntry>() {
                  @Override
                  public Map.Entry<Object, Object> transform(final InternalCacheEntry input) {
                    return new Map.Entry<Object, Object>() {
                      @Override
                      public Object getKey() {
                        return input.getKey();
                      }

                      @Override
                      public Object getValue() {
                        return input.getValue();
                      }

                      @Override
                      public Object setValue(Object value) {
                        throw new UnsupportedOperationException();
                      }
                    };
                  }
                });

        e.setEntries(evictedKeysAndValues);
        for (ListenerInvocation listener : cacheEntriesEvictedListeners) listener.invoke(e);
      }

      // For backward compat
      if (isNotificationAllowed(command, cacheEntryEvictedListeners)) {
        for (InternalCacheEntry ice : entries) {
          EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
          e.setKey(ice.getKey());
          e.setValue(ice.getValue());
          for (ListenerInvocation listener : cacheEntryEvictedListeners) listener.invoke(e);
        }
      }
    }
  }
Code example #22
File: ExpiryTest.java Project: nmldiegues/stibt
  public void testIdleExpiryInPut() throws InterruptedException {
    Cache<String, String> cache = cm.getCache();
    long idleTime = IDLE_TIMEOUT;
    cache.put("k", "v", -1, MILLISECONDS, idleTime, MILLISECONDS);

    DataContainer dc = cache.getAdvancedCache().getDataContainer();
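    // inspect the entry directly in the data container to verify the lifespan and maxIdle settings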
    InternalCacheEntry se = dc.get("k", null);
    assert se.getKey().equals("k");
    assert se.getValue().equals("v");
    assert se.getLifespan() == -1;
    assert se.getMaxIdle() == idleTime;
    assert !se.isExpired();
    assert cache.get("k").equals("v");
    Thread.sleep(idleTime + 100);
    assert se.isExpired();
    assert cache.get("k") == null;
  }
Code example #23
 @Test
 public void testAvailable() throws IOException, CacheLoaderException {
   final byte[] data = randomData(DATA_SIZE);
   when(mockEntry.getValue()).thenReturn(data);
   when(mockStore.load(MOCK_FIRST_CHUNK)).thenReturn(mockEntry);
   assertEquals(0, testObj.available());
   final int partition = 435;
   testObj.skip(partition);
   // part of the first buffer remains
   assertEquals(DATA_SIZE - partition, testObj.available());
   testObj.skip(DATA_SIZE - partition);
   // none of the first buffer remains
   assertEquals(0, testObj.available());
   testObj.skip(1);
   // no buffers remain
   assertEquals(-1, testObj.available());
 }
Code example #24
 @SuppressWarnings("unchecked")
 protected <KIn, KOut> KOut loadValueFromCacheLoader(KIn key) {
   KOut value = null;
   CacheLoader cl = resolveCacheLoader();
   if (cl != null) {
     try {
       InternalCacheEntry entry = cl.load(key);
       if (entry != null) {
         Object loadedValue = entry.getValue();
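          // unwrap MarshalledValues so the caller receives the original object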
         if (loadedValue instanceof MarshalledValue) {
           value = (KOut) ((MarshalledValue) loadedValue).get();
         } else {
           value = (KOut) loadedValue;
         }
       }
     } catch (CacheLoaderException e) {
       throw new CacheException("Could not load key/value entries from cacheloader", e);
     }
   }
   return value;
 }
Code example #25
 @Test
 public void testBufferedRead() throws IOException, CacheLoaderException {
   final byte[] data = randomData(DATA_SIZE);
   when(mockEntry.getValue()).thenReturn(data);
   when(mockStore.load(anyString())).thenReturn(mockEntry).thenReturn(mockEntry).thenReturn(null);
   final int partition = 234;
   final int expected = (DATA_SIZE - partition);
   final byte[] buffer = new byte[DATA_SIZE];
   long actual = testObj.read(buffer, 0, expected);
   // can read less than a block of data
   assertEquals(expected, actual);
   // will not load the next chunk if more data is available
   actual = testObj.read(buffer, 0, DATA_SIZE);
   assertEquals(partition, actual);
   actual = testObj.read(buffer, 0, DATA_SIZE);
   // will load the next chunk if no data is available
   assertEquals(DATA_SIZE, actual);
   // and will report the end of the data accurately
   actual = testObj.read(buffer, 0, DATA_SIZE);
   assertEquals(-1, actual);
 }
Code example #26
 @Test
 public void testSkipMultipleBuffers() throws IOException, CacheLoaderException {
   final byte[] data = randomData(DATA_SIZE);
   when(mockEntry.getValue()).thenReturn(data);
   when(mockStore.load(anyString())).thenReturn(mockEntry).thenReturn(mockEntry).thenReturn(null);
   long expected = (DATA_SIZE);
   // ask for more than the buffer
   long actual = testObj.skip(DATA_SIZE + 1);
   // we should skip only one complete buffer
   assertEquals(expected, actual);
   // ok, skip all but the last byte remaining
   expected = (DATA_SIZE - 1);
   actual = testObj.skip(expected);
   // new buffer, mostly skipped
   assertEquals(expected, actual);
   // we should still have 1 more byte
   assertTrue(testObj.read() > -1);
   // but only the one
   assertEquals(-1, testObj.read());
   // and we only had two cacheEntries
   verify(mockEntry, times(2)).getValue();
 }
Code example #27
  public void testEntrySetWithEvictedEntriesAndFlags() {
    final int numKeys = 300;
    for (int i = 0; i < numKeys; i++) {
      cache.put(i, i);
    }

    AdvancedCache<Object, Object> flagCache =
        cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
    DataContainer dc = flagCache.getDataContainer();
    assertFalse("Data Container should not have all keys", numKeys == dc.size());
    Set<Map.Entry<Object, Object>> entrySet = flagCache.entrySet();
    assertEquals(dc.size(), entrySet.size());

    Set<InternalCacheEntry> entries = dc.entrySet();
    Map<Object, Object> map = new HashMap<Object, Object>(entrySet.size());
    for (Map.Entry<Object, Object> entry : entrySet) {
      map.put(entry.getKey(), entry.getValue());
    }

    for (InternalCacheEntry entry : entries) {
      assertEquals("Key/Value mismatch!", entry.getValue(), map.get(entry.getKey()));
    }
  }
Code example #28
  private void doApplyState(
      Address sender, int segmentId, Collection<InternalCacheEntry> cacheEntries) {
    log.debugf(
        "Applying new state for segment %d of cache %s from node %s: received %d cache entries",
        segmentId, cacheName, sender, cacheEntries.size());
    if (trace) {
      List<Object> keys = new ArrayList<Object>(cacheEntries.size());
      for (InternalCacheEntry e : cacheEntries) {
        keys.add(e.getKey());
      }
      log.tracef(
          "Received keys %s for segment %d of cache %s from node %s",
          keys, segmentId, cacheName, sender);
    }

    // CACHE_MODE_LOCAL avoids handling by StateTransferInterceptor and any potential locks in
    // StateTransferLock
    EnumSet<Flag> flags =
        EnumSet.of(
            PUT_FOR_STATE_TRANSFER,
            CACHE_MODE_LOCAL,
            IGNORE_RETURN_VALUES,
            SKIP_REMOTE_LOOKUP,
            SKIP_SHARED_CACHE_STORE,
            SKIP_OWNERSHIP_CHECK,
            SKIP_XSITE_BACKUP);
    for (InternalCacheEntry e : cacheEntries) {
      try {
        InvocationContext ctx;
        if (transactionManager != null) {
          // cache is transactional
          transactionManager.begin();
          Transaction transaction = transactionManager.getTransaction();
          ctx = icc.createInvocationContext(transaction);
          ((TxInvocationContext) ctx).setImplicitTransaction(true);
        } else {
          // non-tx cache
          ctx = icc.createSingleKeyNonTxInvocationContext();
        }

        PutKeyValueCommand put =
            useVersionedPut
                ? commandsFactory.buildVersionedPutKeyValueCommand(
                    e.getKey(),
                    e.getValue(),
                    e.getLifespan(),
                    e.getMaxIdle(),
                    e.getVersion(),
                    flags)
                : commandsFactory.buildPutKeyValueCommand(
                    e.getKey(), e.getValue(), e.getLifespan(), e.getMaxIdle(), flags);

        boolean success = false;
        try {
          interceptorChain.invoke(ctx, put);
          success = true;
        } finally {
          if (ctx.isInTxScope()) {
            if (success) {
              ((LocalTransaction) ((TxInvocationContext) ctx).getCacheTransaction())
                  .setFromStateTransfer(true);
              try {
                transactionManager.commit();
              } catch (Throwable ex) {
                log.errorf(
                    ex,
                    "Could not commit transaction created by state transfer of key %s",
                    e.getKey());
                if (transactionManager.getTransaction() != null) {
                  transactionManager.rollback();
                }
              }
            } else {
              transactionManager.rollback();
            }
          }
        }
      } catch (Exception ex) {
        log.problemApplyingStateForKey(ex.getMessage(), e.getKey(), ex);
      }
    }
    log.debugf("Finished applying state for segment %d of cache %s", segmentId, cacheName);
  }
Code example #29
  private void testLockMigration(int nodeThatPuts, boolean commit) throws Exception {
    Map<Object, Transaction> key2Tx = new HashMap<Object, Transaction>();
    for (int i = 0; i < NUM_KEYS; i++) {
      Object key = getKeyForCache(0);
      if (key2Tx.containsKey(key)) continue;

      // put a key to have some data in cache
      cache(nodeThatPuts).put(key, key);

      // start a TX that locks the key and then we suspend it
      tm(nodeThatPuts).begin();
      Transaction tx = tm(nodeThatPuts).getTransaction();
      advancedCache(nodeThatPuts).lock(key);
      tm(nodeThatPuts).suspend();
      key2Tx.put(key, tx);

      assertLocked(0, key);
    }

    log.trace("Lock transfer happens here");

    // add a third node hoping that some of the previously created keys will be migrated to it
    addClusterEnabledCacheManager(dccc);
    waitForClusterToForm();

    // search for a key that was migrated to third node and the suspended TX that locked it
    Object migratedKey = null;
    Transaction migratedTransaction = null;
    ConsistentHash consistentHash = advancedCache(2).getDistributionManager().getConsistentHash();
    for (Object key : key2Tx.keySet()) {
      if (consistentHash.locatePrimaryOwner(key).equals(address(2))) {
        migratedKey = key;
        migratedTransaction = key2Tx.get(key);
        log.trace("Migrated key = " + migratedKey);
        log.trace(
            "Migrated transaction = "
                + ((DummyTransaction) migratedTransaction).getEnlistedResources());
        break;
      }
    }

    // we do not focus on the other transactions so we commit them now
    log.trace("Committing all transactions except the migrated one.");
    for (Object key : key2Tx.keySet()) {
      if (!key.equals(migratedKey)) {
        Transaction tx = key2Tx.get(key);
        tm(nodeThatPuts).resume(tx);
        tm(nodeThatPuts).commit();
      }
    }

    if (migratedKey == null) {
      // this could happen in extreme cases
      log.trace("No key migrated to new owner - test cannot be performed!");
    } else {
      // the migrated TX is resumed and committed or rolled back. we expect the migrated key to be
      // unlocked now
      tm(nodeThatPuts).resume(migratedTransaction);
      if (commit) {
        tm(nodeThatPuts).commit();
      } else {
        tm(nodeThatPuts).rollback();
      }

      // there should not be any locks
      assertNotLocked(cache(0), migratedKey);
      assertNotLocked(cache(1), migratedKey);
      assertNotLocked(cache(2), migratedKey);

      // if a new TX tries to write to the migrated key this should not fail, the key should not be
      // locked
      tm(nodeThatPuts).begin();
      cache(nodeThatPuts)
          .put(
              migratedKey,
              "someValue"); // this should not result in TimeoutException due to key still locked
      tm(nodeThatPuts).commit();
    }

    log.trace("Checking the values from caches...");
    for (Object key : key2Tx.keySet()) {
      log.tracef("Checking key: %s", key);
      Object expectedValue = key;
      if (key.equals(migratedKey)) {
        expectedValue = "someValue";
      }
      // check them directly in data container
      InternalCacheEntry d0 = advancedCache(0).getDataContainer().get(key);
      InternalCacheEntry d1 = advancedCache(1).getDataContainer().get(key);
      InternalCacheEntry d2 = advancedCache(2).getDataContainer().get(key);
      int c = 0;
      if (d0 != null && !d0.isExpired(TIME_SERVICE.wallClockTime())) {
        assertEquals(expectedValue, d0.getValue());
        c++;
      }
      if (d1 != null && !d1.isExpired(TIME_SERVICE.wallClockTime())) {
        assertEquals(expectedValue, d1.getValue());
        c++;
      }
      if (d2 != null && !d2.isExpired(TIME_SERVICE.wallClockTime())) {
        assertEquals(expectedValue, d2.getValue());
        c++;
      }
      assertEquals(1, c);

      // look at them also via cache API
      assertEquals(expectedValue, cache(0).get(key));
      assertEquals(expectedValue, cache(1).get(key));
      assertEquals(expectedValue, cache(2).get(key));
    }
  }
Code example #30
 private boolean sameValue(InternalCacheEntry d1, InternalCacheEntry d2) {
   return d1.getValue().equals(d2.getValue());
 }