/** This method retains both ohOldValue and ohNewValue. */
@Retained(OffHeapIdentifier.TEST_OFF_HEAP_REGION_BASE_LISTENER)
private void setEventData(EntryEvent e) {
  // Release any values retained by a previous event before overwriting them.
  close();
  EntryEventImpl event = (EntryEventImpl) e;
  this.ohOldValue = event.getOffHeapOldValue();
  this.ohNewValue = event.getOffHeapNewValue();
}
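For completeness, a minimal sketch of the matching release side, assuming the listener keeps its retained values in the ohOldValue/ohNewValue fields used above (the close() call implies such a method exists). OffHeapHelper.release is a no-op for values that are not off-heap, so the same code is safe for heap regions.

@Released(OffHeapIdentifier.TEST_OFF_HEAP_REGION_BASE_LISTENER)
public void close() {
  // Hypothetical counterpart to setEventData: release whatever was
  // retained so the off-heap references are not leaked.
  OffHeapHelper.release(this.ohOldValue);
  OffHeapHelper.release(this.ohNewValue);
  this.ohOldValue = null;
  this.ohNewValue = null;
}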
@Override
public boolean destroy(
    LocalRegion region,
    EntryEventImpl event,
    boolean inTokenMode,
    boolean cacheWrite,
    @Unretained Object expectedOldValue,
    boolean forceDestroy,
    boolean removeRecoveredEntry)
    throws CacheWriterException, EntryNotFoundException, TimeoutException,
        RegionClearedException {
  Object key = event.getKey();
  if (key instanceof CompactCompositeRegionKey) {
    // Snapshot the key bytes out of the value before the destroy releases
    // the value they currently point into.
    byte[] keyBytes = ((CompactCompositeRegionKey) key).snapshotKeyFromValue(false);
    if (keyBytes != null) {
      this._setRawKey(keyBytes);
    }
  }
  return super.destroy(region, event, inTokenMode, cacheWrite, expectedOldValue,
      forceDestroy, removeRecoveredEntry);
}
private HDFSGatewayEventImpl getNewEvent(Object key, Object value, Region r1, int bid,
    int tailKey) throws Exception {
  EntryEventImpl ev1 = EntryEventImpl.create((LocalRegion) r1, Operation.CREATE, key, value,
      null, false, (DistributedMember) c.getMyId());
  ev1.setEventId(new EventID(this.c.getDistributedSystem()));
  HDFSGatewayEventImpl event =
      new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, ev1, null, true, bid);
  event.setShadowKey((long) tailKey);
  return event;
}
private byte[] getBytes(Object o) {
  if (o instanceof byte[]) {
    return (byte[]) o;
  }
  if (o instanceof CachedDeserializable) {
    return ((CachedDeserializable) o).getSerializedValue();
  }
  return EntryEventImpl.serialize(o);
}
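A quick sanity check for getBytes, assuming EntryEventImpl's static serialize/deserialize pair round-trip a plain object (test name hypothetical):

public void testGetBytesRoundTrip() {
  // A byte[] input passes through untouched.
  byte[] raw = new byte[] {1, 2, 3};
  assertSame(raw, getBytes(raw));
  // Anything else is serialized; deserializing should give back an equal object.
  assertEquals("hello", EntryEventImpl.deserialize(getBytes("hello")));
}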
private void checkQueueGet(String key, KeyValue expected, String... entries) throws Exception {
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setTotalNumBuckets(1);
  RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
  PartitionedRegion r1 =
      (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");

  // Create the buckets.
  r1.put("blah", "blah");

  // Hack to get the queue.
  HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
  HDFSBucketRegionQueue brq = (HDFSBucketRegionQueue)
      ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

  int seq = 0;
  for (String s : entries) {
    if (s.equals("roll")) {
      brq.rolloverSkipList();
    } else {
      String[] kv = s.split("-");
      hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
    }
  }

  byte[] bkey = EntryEventImpl.serialize(key);
  HDFSGatewayEventImpl evt = hopqueue.get(r1, bkey, 0);
  if (expected == null) {
    assertNull(evt);
  } else {
    assertEquals(expected.key, evt.getKey());
    assertEquals(expected.value, evt.getDeserializedValue());
  }
}
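For context, a hypothetical caller showing checkQueueGet's small DSL: each "key-value" token enqueues an event, "roll" rolls the current skip list over, and the expected KeyValue (or null for a miss) is matched against the queue's get:

public void testQueueGetAfterRollover() throws Exception {
  // The newest event for the key should win, even across a rollover.
  checkQueueGet("K1", new KeyValue("K1", "V2"), "K1-V1", "roll", "K1-V2");
}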
@Override
public void cmdExecute(Message msg, ServerConnection servConn, long start)
    throws IOException, InterruptedException {
  Part regionNamePart = null, keyPart = null, callbackArgPart = null;
  String regionName = null;
  Object callbackArg = null, key = null;
  Part eventPart = null;
  CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
  CacheServerStats stats = servConn.getCacheServerStats();
  servConn.setAsTrue(REQUIRES_RESPONSE);

  {
    long oldStart = start;
    start = DistributionStats.getStatTime();
    stats.incReadInvalidateRequestTime(start - oldStart);
  }

  // Retrieve the data from the message parts.
  regionNamePart = msg.getPart(0);
  keyPart = msg.getPart(1);
  eventPart = msg.getPart(2);
  if (msg.getNumberOfParts() > 3) {
    callbackArgPart = msg.getPart(3);
    try {
      callbackArg = callbackArgPart.getObject();
    } catch (Exception e) {
      writeException(msg, e, false, servConn);
      servConn.setAsTrue(RESPONDED);
      return;
    }
  }
  regionName = regionNamePart.getString();
  try {
    key = keyPart.getStringOrObject();
  } catch (Exception e) {
    writeException(msg, e, false, servConn);
    servConn.setAsTrue(RESPONDED);
    return;
  }

  if (logger.fineEnabled()) {
    logger.fine(servConn.getName() + ": Received invalidate request ("
        + msg.getPayloadLength() + " bytes) from " + servConn.getSocketString()
        + " for region " + regionName + " key " + key);
  }

  // Process the invalidate request.
  if (key == null || regionName == null) {
    StringBuilder errMessage = new StringBuilder();
    if (key == null) {
      if (logger.warningEnabled()) {
        logger.warning(
            LocalizedStrings.BaseCommand__THE_INPUT_KEY_FOR_THE_0_REQUEST_IS_NULL,
            "invalidate");
      }
      errMessage.append(
          LocalizedStrings.BaseCommand__THE_INPUT_KEY_FOR_THE_0_REQUEST_IS_NULL
              .toLocalizedString("invalidate"));
    }
    if (regionName == null) {
      if (logger.warningEnabled()) {
        logger.warning(
            LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL,
            "invalidate");
      }
      errMessage.append(
          LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL
              .toLocalizedString("invalidate"));
    }
    writeErrorResponse(msg, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), servConn);
    servConn.setAsTrue(RESPONDED);
  } else {
    LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
    if (region == null) {
      String reason = LocalizedStrings.BaseCommand__0_WAS_NOT_FOUND_DURING_1_REQUEST
          .toLocalizedString(regionName, "invalidate");
      writeRegionDestroyedEx(msg, regionName, reason, servConn);
      servConn.setAsTrue(RESPONDED);
    } else {
      // Invalidate the entry. The optimized byte array encodes the event id;
      // the first read consumes the threadId, the second the sequenceId.
      ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
      long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
      long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
      EventID eventId =
          new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
      Breadcrumbs.setEventId(eventId);

      VersionTag tag = null;
      try {
        // txtodo: doesn't seem like there is any notion of authzInvalidate
        AuthorizeRequest authzRequest = servConn.getAuthzRequest();
        if (authzRequest != null) {
          InvalidateOperationContext invalidateContext =
              authzRequest.invalidateAuthorize(regionName, key, callbackArg);
          callbackArg = invalidateContext.getCallbackArg();
        }
        EntryEventImpl clientEvent = new EntryEventImpl(eventId);

        // msg.isRetry might be set by v7.0 and later clients.
        if (msg.isRetry()) {
          clientEvent.setPossibleDuplicate(true);
          if (region.getAttributes().getConcurrencyChecksEnabled()) {
            // Recover the version tag from other servers.
            clientEvent.setRegion(region);
            if (!recoverVersionTagForRetriedOperation(clientEvent)) {
              clientEvent.setPossibleDuplicate(false); // no-one has seen this event
            }
          }
        }

        region.basicBridgeInvalidate(key, callbackArg, servConn.getProxyID(), true,
            clientEvent);
        tag = clientEvent.getVersionTag();
        servConn.setModificationInfo(true, regionName, key);
      } catch (EntryNotFoundException e) {
        // Don't send an exception back to the client if this exception
        // happens. Just log it and continue.
        if (logger.infoEnabled()) {
          logger.info(LocalizedStrings.BaseCommand_DURING_0_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
              new Object[] {"invalidate", key});
        }
      } catch (RegionDestroyedException rde) {
        writeException(msg, rde, false, servConn);
        servConn.setAsTrue(RESPONDED);
        return;
      } catch (Exception e) {
        // If an interrupted exception is thrown, rethrow it.
        checkForInterrupt(servConn, e);

        // If an exception occurs during the invalidate, preserve the connection.
        writeException(msg, e, false, servConn);
        servConn.setAsTrue(RESPONDED);
        if (e instanceof GemFireSecurityException) {
          // Fine logging for security exceptions since these are already
          // logged by the security logger.
          if (logger.fineEnabled()) {
            logger.fine(servConn.getName() + ": Unexpected Security exception", e);
          }
        } else if (logger.warningEnabled()) {
          logger.warning(LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION,
              servConn.getName(), e);
        }
        return;
      }

      // Update the statistics and write the reply.
      {
        long oldStart = start;
        start = DistributionStats.getStatTime();
        stats.incProcessInvalidateTime(start - oldStart);
      }
      if (region instanceof PartitionedRegion) {
        PartitionedRegion pr = (PartitionedRegion) region;
        if (pr.isNetworkHop() != (byte) 0) {
          writeReplyWithRefreshMetadata(msg, servConn, pr, pr.isNetworkHop(), tag);
          pr.setIsNetworkHop((byte) 0);
          pr.setMetadataVersion(Byte.valueOf((byte) 0));
        } else {
          writeReply(msg, servConn, tag);
        }
      } else {
        writeReply(msg, servConn, tag);
      }
      servConn.setAsTrue(RESPONDED);
      if (logger.fineEnabled()) {
        logger.fine(servConn.getName() + ": Sent invalidate response for region "
            + regionName + " key " + key);
      }
      stats.incWriteInvalidateResponseTime(DistributionStats.getStatTime() - start);
    }
  }
}
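Command handlers like this one are stateless and, by convention in this code base, exposed through a singleton accessor; a sketch of that wiring, with the class name Invalidate assumed:

private final static Invalidate singleton = new Invalidate();

public static Command getCommand() {
  return singleton;
}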
public void testHopQueueWithOneBucket() throws Exception {
  this.c.close();
  this.c = createCache();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setTotalNumBuckets(1);
  RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
  PartitionedRegion r1 =
      (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
  r1.put("K9", "x1");
  r1.put("K8", "x2");

  // Hack to get the queue.
  HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
  HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)
      ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
  EntryEventImpl ev1 = EntryEventImpl.create((LocalRegion) r1, Operation.CREATE,
      (Object) "K1", (Object) "V1", null, false, (DistributedMember) c.getMyId());

  // Put some keys with multiple updates.
  hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
  hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
  hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
  hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
  hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
  hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

  assertTrue("skip list size should be 6",
      getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);

  // Peek a key; it should be the lowest.
  Object[] l = hopqueue.peek(1, 0).toArray();
  assertTrue("First key should be K1 but is " + ((HDFSGatewayEventImpl) l[0]).getKey(),
      ((HDFSGatewayEventImpl) l[0]).getKey().equals("K1"));
  assertTrue("peeked skip list size should be 6",
      getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
  assertTrue("skip list size should be 0",
      getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);

  // Try to fetch the key; it would be in the peeked skip list but still available.
  Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
  assertTrue("First key should be K1", ((HDFSGatewayEventImpl) o).getKey().equals("K1"));
  assertTrue("combined skip list sizes should be 6",
      (getSortedEventQueue(hdfsBQ).getPeeked().size()
          + getSortedEventQueue(hdfsBQ).currentSkipList.size()) == 6);

  o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
  Object v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
  assertTrue("key should be K2 with value V2a but the value was " + v,
      ((String) v).equals("V2a"));

  o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
  v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
  assertTrue("key should be K3 with value V3b but the value was " + v,
      ((String) v).equals("V3b"));
}