Example #1
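  // Returns true only when the owning region is partitioned and its GfxdPartitionResolver
  // reports that this.actualColumnName participates in the partitioning scheme; a null tqi,
  // a non-partitioned region, or a missing resolver all leave the result false.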
  boolean isUsedInPartitioning() {
    boolean ok = false;
    // TODO: Asif: handle the case where the region turns out to be null.
    // Ideally the Bug 39923 workaround should ensure that the region is not null, but we do
    // this check only for Update type statements. For Select queries it may still be null,
    // hence the check.

    if (this.tqi == null) {
      return ok;
    }
    Region rgnOwningColumn = this.tqi.getRegion();
    assert rgnOwningColumn != null;
    RegionAttributes ra = rgnOwningColumn.getAttributes();
    // If the region is a replicated region, or a PR with just itself as a member,
    // then we should go with Derby's Activation object.
    DataPolicy policy = ra.getDataPolicy();

    if (policy.withPartitioning()) {
      PartitionedRegion pr = (PartitionedRegion) rgnOwningColumn;
      GfxdPartitionResolver rslvr = (GfxdPartitionResolver) pr.getPartitionResolver();
      ok = rslvr != null && rslvr.isUsedInPartitioning(this.actualColumnName);
    }
    return ok;
  }
Example #2
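 // Builds one map per locally hosted bucket of the named partitioned region, recording bucket id,
 // entry count, byte size, host and node, whether this member is the bucket's primary, and the
 // configured total bucket count; a missing, non-partitioned, or accessor-only region yields an
 // empty list.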
 @Override
 public ArrayList<HashMap> doExecute(Map<String, Object> args) {
   Cache cache = CacheFactory.getAnyInstance();
   String regionPath = (String) args.get(ApiConstant.REGIONPATH);
   Region region = cache.getRegion(regionPath);
   ArrayList<HashMap> list = new ArrayList<HashMap>();
   if (region instanceof PartitionedRegion) {
     DistributedMember member = cache.getDistributedSystem().getDistributedMember();
     PartitionedRegion pr = (PartitionedRegion) region;
     if (pr.getDataStore() != null) {
       Set<BucketRegion> set2 = pr.getDataStore().getAllLocalBucketRegions();
       for (BucketRegion br : set2) {
         HashMap map = new HashMap();
         map.put("BucketId", br.getId());
         map.put("Size", br.size());
         map.put("Bytes", br.getTotalBytes());
         map.put("host", getHost());
         map.put("node", System.getProperty("NODE_NAME"));
         InternalDistributedMember m = pr.getBucketPrimary(br.getId());
         if (m != null && member.getId().equals(m.getId())) {
           map.put("type", "primary");
         } else {
           map.put("type", "redundant");
         }
         map.put("TotalNumBuckets", pr.getPartitionAttributes().getTotalNumBuckets());
         list.add(map);
       }
     }
   }
   return list;
 }
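 // Applies the bucket profiles carried in this message to the RegionAdvisor of the target
 // partitioned region; locally-destroyed, region-destroyed and cache-closed conditions are only
 // logged at debug level, and the finally block always sends a ReplyMessage when a processorId
 // was supplied.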
 @Override
 protected void process(DistributionManager dm) {
   try {
     PartitionedRegion pr = PartitionedRegion.getPRFromId(this.prId);
     for (Map.Entry<Integer, BucketAdvisor.BucketProfile> profile : this.profiles.entrySet()) {
       pr.getRegionAdvisor().putBucketProfile(profile.getKey(), profile.getValue());
     }
   } catch (PRLocallyDestroyedException fre) {
     if (logger.isDebugEnabled()) logger.debug("<region locally destroyed> ///{}", this);
   } catch (RegionDestroyedException e) {
     if (logger.isDebugEnabled()) logger.debug("<region destroyed> ///{}", this);
   } catch (CancelException e) {
     if (logger.isDebugEnabled()) logger.debug("<cache closed> ///{}", this);
   } catch (VirtualMachineError err) {
     SystemFailure.initiateFailure(err);
     // If this ever returns, rethrow the error.  We're poisoned
     // now, so don't let this thread continue.
     throw err;
   } catch (Throwable ignore) {
     // Whenever you catch Error or Throwable, you must also
     // catch VirtualMachineError (see above).  However, there is
     // _still_ a possibility that you are dealing with a cascading
     // error condition, so you also need to check to see if the JVM
     // is still usable:
     SystemFailure.checkFailure();
   } finally {
     if (this.processorId != 0) {
       ReplyMessage.send(getSender(), this.processorId, null, dm);
     }
   }
 }
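 // Wires up the LRU statistics for this clock hand: a bucket region borrows the stats of its
 // parent partitioned region's eviction controller, otherwise the stats are recovered from a
 // PlaceHolderDiskRegion or an initializing PartitionedRegion, and created fresh through
 // ccHelper.initStats() as a last resort.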
 public NewLRUClockHand(
     Object region, EnableLRU ccHelper, InternalRegionArguments internalRegionArgs) {
   setBucketRegion(region);
   this.lock = new HeadLock();
   // behavior relies on a single evicted node in the pipe when the pipe is empty.
   initHeadAndTail();
   if (this.bucketRegion != null) {
     this.stats =
         internalRegionArgs.getPartitionedRegion() != null
             ? internalRegionArgs.getPartitionedRegion().getEvictionController().stats
             : null;
   } else {
     LRUStatistics tmp = null;
     if (region instanceof PlaceHolderDiskRegion) {
       tmp = ((PlaceHolderDiskRegion) region).getPRLRUStats();
     } else if (region instanceof PartitionedRegion) {
       tmp = ((PartitionedRegion) region).getPRLRUStatsDuringInitialization(); // bug 41938
       PartitionedRegion pr = (PartitionedRegion) region;
       if (tmp != null) {
         pr.getEvictionController().stats = tmp;
       }
     }
     if (tmp == null) {
       StatisticsFactory sf = GemFireCacheImpl.getExisting("").getDistributedSystem();
       tmp = ccHelper.initStats(region, sf);
     }
     this.stats = tmp;
   }
 }
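  // Index removal is refused for client (pool-backed) regions; for a partitioned region the drop
  // is first propagated through PartitionedRegion.removeIndexes(false) so the local bucket
  // indexes go away, and the region's own IndexManager is cleared afterwards.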
  public void removeIndexes(Region region) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    // Removing indexes on a partitioned region will require sending a message and
    // removing all the local indexes on the local bucket regions.
    if (region instanceof PartitionedRegion) {
      try {
        // not remotely originated
        ((PartitionedRegion) region).removeIndexes(false);
      } catch (ForceReattemptException ex) {
        // TODO: throw a proper exception relating to index removal.
        logger.info(
            LocalizedMessage.create(
                LocalizedStrings.DefaultQueryService_EXCEPTION_REMOVING_INDEX___0),
            ex);
      }
    }
    IndexManager indexManager = IndexUtils.getIndexManager(region, false);
    if (indexManager == null) return;

    indexManager.removeIndexes();
  }
  public void removeIndex(Index index) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    Region region = index.getRegion();
    if (region instanceof PartitionedRegion) {
      try {
        ((PartitionedRegion) region).removeIndex(index, false);
      } catch (ForceReattemptException ex) {
        logger.info(
            LocalizedMessage.create(
                LocalizedStrings.DefaultQueryService_EXCEPTION_REMOVING_INDEX___0),
            ex);
      }
      return;
    }
    // Get the write lock for indexes on a replicated region;
    // for a PR the lock is taken in PartitionedRegion.removeIndex.
    ((AbstractIndex) index).acquireIndexWriteLockForRemove();
    try {
      IndexManager indexManager = ((LocalRegion) index.getRegion()).getIndexManager();
      indexManager.removeIndex(index);
    } finally {
      ((AbstractIndex) index).releaseIndexWriteLockForRemove();
    }
  }
Example #7
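  // Resolves this compiled region path for execution: inside a partitioned-region context the
  // matching bucket region (looked up by the context's bucket id) is substituted for the named
  // region, which is what lets equijoins across colocated PRs run locally; a region that cannot
  // be found raises CacheClosedException or RegionNotFoundException.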
  public Object evaluate(ExecutionContext context) throws RegionNotFoundException {
    Region rgn;
    Cache cache = context.getCache();
    // do PR bucketRegion substitution here for expressions that evaluate to a Region.
    PartitionedRegion pr = context.getPartitionedRegion();

    if (pr != null && pr.getFullPath().equals(this.regionPath)) {
      rgn = context.getBucketRegion();
    } else if (pr != null) {
      // Asif: This is a tricky solution to allow equijoin queries on a PartitionedRegion to run
      // locally. We may be dealing with an equijoin, possibly across PRs, so use the context's
      // bucket region to get the bucket ID and then retrieve this region's corresponding bucket.
      BucketRegion br = context.getBucketRegion();
      int bucketID = br.getId();
      // Is current region a partitioned region
      rgn = cache.getRegion(this.regionPath);
      if (rgn.getAttributes().getDataPolicy().withPartitioning()) {
        // convert it into bucket region.
        PartitionedRegion prLocal = (PartitionedRegion) rgn;
        rgn = prLocal.getDataStore().getLocalBucketById(bucketID);
      }

    } else {
      rgn = cache.getRegion(this.regionPath);
    }

    if (rgn == null) {
      // if we couldn't find the region because the cache is closed, throw
      // a CacheClosedException
      if (cache.isClosed()) {
        throw new CacheClosedException();
      }
      throw new RegionNotFoundException(
          LocalizedStrings.CompiledRegion_REGION_NOT_FOUND_0.toLocalizedString(this.regionPath));
    }

    if (context.isCqQueryContext()) {
      return new QRegion(rgn, true, context);
    } else {
      return new QRegion(rgn, false, context);
    }
  }
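  // Picks a PartitionedRegionBridge or a plain RegionMBeanBridge based on the region's partition
  // attributes, and attaches a DiskRegionBridge plus per-directory statistics whenever the region
  // is backed by a disk store.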
  public static RegionMBeanBridge getInstance(Region region) {

    if (region.getAttributes().getPartitionAttributes() != null) {

      RegionMBeanBridge bridge = new PartitionedRegionBridge(region);
      PartitionedRegion parRegion = ((PartitionedRegion) region);
      DiskStoreImpl dsi = parRegion.getDiskStore();
      if (dsi != null) {
        DiskRegionStats stats = parRegion.getDiskRegionStats();

        DiskRegionBridge diskRegionBridge = new DiskRegionBridge(stats);
        bridge.addDiskRegionBridge(diskRegionBridge);

        for (DirectoryHolder dh : dsi.getDirectoryHolders()) {
          diskRegionBridge.addDirectoryStats(dh.getDiskDirectoryStats());
        }

      }

      return bridge;

    } else {
      RegionMBeanBridge bridge = new RegionMBeanBridge(region);

      LocalRegion localRegion = ((LocalRegion) region);
      DiskStoreImpl dsi = localRegion.getDiskStore();
      if (dsi != null) {
        DiskRegionBridge diskRegionBridge =
            new DiskRegionBridge(localRegion.getDiskRegion().getStats());
        bridge.addDiskRegionBridge(diskRegionBridge);

        for (DirectoryHolder dh : dsi.getDirectoryHolders()) {
          diskRegionBridge.addDirectoryStats(dh.getDiskDirectoryStats());
        }
      }
      return bridge;
    }
  }
  private void checkQueueGet(String key, KeyValue expected, String... entries) throws Exception {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");

    // create the buckets
    r1.put("blah", "blah");

    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue brq =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    int seq = 0;
    for (String s : entries) {
      if (s.equals("roll")) {
        brq.rolloverSkipList();
      } else {
        String[] kv = s.split("-");
        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
      }
    }

    byte[] bkey = EntryEventImpl.serialize(key);
    HDFSGatewayEventImpl evt = hopqueue.get(r1, bkey, 0);
    if (expected == null) {
      assertNull(evt);

    } else {
      assertEquals(expected.key, evt.getKey());
      assertEquals(expected.value, evt.getDeserializedValue());
    }
  }
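  // A minimal sketch of how the helper above might be driven (hypothetical test; it assumes the
  // KeyValue holder used for "expected" exposes a (key, value) constructor matching the
  // expected.key / expected.value fields read above, and that the cache is fresh so "r1" can be
  // created):
  public void testQueueGetNewestUpdateWinsSketch() throws Exception {
    // the newest update for a key should win, even across a skip-list rollover
    checkQueueGet("K2", new KeyValue("K2", "V2a"), "K2-V2", "roll", "K2-V2a");
  }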
Example #10
  /**
   * Initializes without children.
   *
   * @param region The region from which RegionInfo is extracted.
   */
  @SuppressWarnings({"unchecked", "rawtypes"})
  private void init(Region region) {
    if (region == null) {
      return;
    }
    DistributedMember member =
        CacheFactory.getAnyInstance().getDistributedSystem().getDistributedMember();
    setName(region.getName());
    setFullPath(region.getFullPath());
    GemfireRegionAttributeInfo attrInfo = new GemfireRegionAttributeInfo();
    RegionAttributes<?, ?> attr = region.getAttributes();
    attrInfo.setAttribute(GemfireRegionAttributeInfo.DATA_POLICY, attr.getDataPolicy().toString());
    attrInfo.setAttribute(GemfireRegionAttributeInfo.SCOPE, attr.getScope().toString());
    if (region instanceof PartitionedRegion) {
      PartitionedRegion pr = (PartitionedRegion) region;
      PartitionAttributes pattr = pr.getPartitionAttributes();
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.LOCAL_MAX_MEMORY, pattr.getLocalMaxMemory() + "");
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.REDUNDANT_COPIES, pattr.getRedundantCopies() + "");
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.TOTAL_MAX_MEMORY, pattr.getTotalMaxMemory() + "");
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.TOTAL_NUM_BUCKETS, pattr.getTotalNumBuckets() + "");

      // data store is null if it's a proxy, i.e., LOCAL_MAX_MEMORY=0
      if (pr.getDataStore() != null) {
        Set<BucketRegion> localBucketSet = pr.getDataStore().getAllLocalBucketRegions();
        List<BucketInfo> primaryList = new ArrayList<BucketInfo>();
        List<BucketInfo> redundantList = new ArrayList<BucketInfo>();
        this.size = 0;
        for (BucketRegion br : localBucketSet) {
          BucketInfo bucketInfo =
              new GemfireBucketInfo(
                  br.getId(), br.getBucketAdvisor().isPrimary(), br.size(), br.getTotalBytes());
          //					InternalDistributedMember m = pr.getBucketPrimary(br.getId());
          //					if (m.getId().equals(member.getId())) {
          if (bucketInfo.isPrimary()) {
            primaryList.add(bucketInfo);
            this.size += bucketInfo.getSize();
          } else {
            redundantList.add(bucketInfo);
          }
        }
        Collections.sort(primaryList);
        Collections.sort(redundantList);
        setPrimaryBucketInfoList(primaryList);
        setRedundantBucketInfoList(redundantList);
      }
    } else {
      this.size = region.size();
    }
    setAttrInfo(attrInfo);
    temporalType = GemfireTemporalManager.getTemporalType(region);
    if (!region.isDestroyed() && !region.isEmpty()) {
      Set<Map.Entry> regionEntrySet = region.entrySet();
      for (Map.Entry entry : regionEntrySet) {
        Object key = entry.getKey();
        Object value = entry.getValue();
        keyTypeName = key.getClass().getName();
        valueTypeName = value.getClass().getName();
        break;
      }
    }
  }
Example #11
  @Override
  public void cmdExecute(Message msg, ServerConnection servConn, long start)
      throws IOException, InterruptedException {
    Part regionNamePart = null, keyPart = null, callbackArgPart = null;
    String regionName = null;
    Object callbackArg = null, key = null;
    Part eventPart = null;
    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
    CacheServerStats stats = servConn.getCacheServerStats();
    servConn.setAsTrue(REQUIRES_RESPONSE);

    {
      long oldStart = start;
      start = DistributionStats.getStatTime();
      stats.incReadInvalidateRequestTime(start - oldStart);
    }
    // Retrieve the data from the message parts
    regionNamePart = msg.getPart(0);
    keyPart = msg.getPart(1);
    eventPart = msg.getPart(2);
    //    callbackArgPart = null; (redundant assignment)
    if (msg.getNumberOfParts() > 3) {
      callbackArgPart = msg.getPart(3);
      try {
        callbackArg = callbackArgPart.getObject();
      } catch (Exception e) {
        writeException(msg, e, false, servConn);
        servConn.setAsTrue(RESPONDED);
        return;
      }
    }
    regionName = regionNamePart.getString();
    try {
      key = keyPart.getStringOrObject();
    } catch (Exception e) {
      writeException(msg, e, false, servConn);
      servConn.setAsTrue(RESPONDED);
      return;
    }
    if (logger.fineEnabled()) {
      logger.fine(
          servConn.getName()
              + ": Received invalidate request ("
              + msg.getPayloadLength()
              + " bytes) from "
              + servConn.getSocketString()
              + " for region "
              + regionName
              + " key "
              + key);
    }

    // Process the invalidate request
    if (key == null || regionName == null) {
      StringBuilder errMessage = new StringBuilder();
      if (key == null) {
        if (logger.warningEnabled()) {
          logger.warning(
              LocalizedStrings.BaseCommand__THE_INPUT_KEY_FOR_THE_0_REQUEST_IS_NULL, "invalidate");
        }
        errMessage.append(
            LocalizedStrings.BaseCommand__THE_INPUT_KEY_FOR_THE_0_REQUEST_IS_NULL.toLocalizedString(
                "invalidate"));
      }
      if (regionName == null) {
        if (logger.warningEnabled()) {
          logger.warning(
              LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL,
              "invalidate");
        }
        errMessage.append(
            LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL
                .toLocalizedString("invalidate"));
      }
      writeErrorResponse(msg, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), servConn);
      servConn.setAsTrue(RESPONDED);
    } else {
      LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
      if (region == null) {
        String reason =
            LocalizedStrings.BaseCommand__0_WAS_NOT_FOUND_DURING_1_REQUEST.toLocalizedString(
                regionName, "invalidate");
        writeRegionDestroyedEx(msg, regionName, reason, servConn);
        servConn.setAsTrue(RESPONDED);
      } else {
        // Invalidate the entry
        ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
        long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
        long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
        EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);

        Breadcrumbs.setEventId(eventId);

        VersionTag tag = null;
        try {
          // txtodo: there doesn't seem to be any notion of authzInvalidate
          AuthorizeRequest authzRequest = servConn.getAuthzRequest();
          if (authzRequest != null) {
            InvalidateOperationContext invalidateContext =
                authzRequest.invalidateAuthorize(regionName, key, callbackArg);
            callbackArg = invalidateContext.getCallbackArg();
          }
          EntryEventImpl clientEvent = new EntryEventImpl(eventId);

          // msg.isRetry might be set by v7.0 and later clients
          if (msg.isRetry()) {
            //            if (logger.fineEnabled()) {
            //              logger.fine("DEBUG: encountered isRetry in Invalidate");
            //            }
            clientEvent.setPossibleDuplicate(true);
            if (region.getAttributes().getConcurrencyChecksEnabled()) {
              // recover the version tag from other servers
              clientEvent.setRegion(region);
              if (!recoverVersionTagForRetriedOperation(clientEvent)) {
                clientEvent.setPossibleDuplicate(false); // no-one has seen this event
              }
            }
          }

          region.basicBridgeInvalidate(key, callbackArg, servConn.getProxyID(), true, clientEvent);
          tag = clientEvent.getVersionTag();
          servConn.setModificationInfo(true, regionName, key);
        } catch (EntryNotFoundException e) {
          // Don't send an exception back to the client if this
          // exception happens. Just log it and continue.
          if (logger.infoEnabled()) {
            logger.info(
                LocalizedStrings.BaseCommand_DURING_0_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
                new Object[] {"invalidate", key});
          }
        } catch (RegionDestroyedException rde) {
          writeException(msg, rde, false, servConn);
          servConn.setAsTrue(RESPONDED);
          return;
        } catch (Exception e) {
          // If an interrupted exception is thrown, rethrow it
          checkForInterrupt(servConn, e);

          // If an exception occurs during the invalidate, preserve the connection
          writeException(msg, e, false, servConn);
          servConn.setAsTrue(RESPONDED);
          if (e instanceof GemFireSecurityException) {
            // Fine logging for security exceptions since these are already
            // logged by the security logger
            if (logger.fineEnabled())
              logger.fine(servConn.getName() + ": Unexpected Security exception", e);
          } else if (logger.warningEnabled()) {
            logger.warning(
                LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION, servConn.getName(), e);
          }
          return;
        }

        // Update the statistics and write the reply
        {
          long oldStart = start;
          start = DistributionStats.getStatTime();
          stats.incProcessInvalidateTime(start - oldStart);
        }
        if (region instanceof PartitionedRegion) {
          PartitionedRegion pr = (PartitionedRegion) region;
          if (pr.isNetworkHop() != (byte) 0) {
            writeReplyWithRefreshMetadata(msg, servConn, pr, pr.isNetworkHop(), tag);
            pr.setIsNetworkHop((byte) 0);
            pr.setMetadataVersion(Byte.valueOf((byte) 0));
          } else {
            writeReply(msg, servConn, tag);
          }
        } else {
          writeReply(msg, servConn, tag);
        }
        servConn.setAsTrue(RESPONDED);
        if (logger.fineEnabled()) {
          logger.fine(
              servConn.getName()
                  + ": Sent invalidate response for region "
                  + regionName
                  + " key "
                  + key);
        }
        stats.incWriteInvalidateResponseTime(DistributionStats.getStatTime() - start);
      }
    }
  }
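  // Exercises the single-bucket HDFS queue: six events for three keys fill the current skip
  // list, peek(1, 0) returns the event with the lowest key (K1) while the whole batch moves into
  // the peeked set, peeked entries stay visible to get(), and per-key lookups always return the
  // value with the highest sequence number.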
  public void testHopQueueWithOneBucket() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    EntryEventImpl ev1 =
        EntryEventImpl.create(
            (LocalRegion) r1,
            Operation.CREATE,
            (Object) "K1",
            (Object) "V1",
            null,
            false,
            (DistributedMember) c.getMyId());
    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

    assertTrue(
        " skip list size should be  6 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);

    // peek a key; it should be the lowest
    Object[] l = hopqueue.peek(1, 0).toArray();

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) l[0]).getKey(),
        ((HDFSGatewayEventImpl) l[0]).getKey().equals("K1"));
    assertTrue(
        " Peeked skip list size should be  6 ",
        getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
    assertTrue(
        " skip list size should be  0 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);

    // try to fetch the key; it is in the peeked skip list but still available
    Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    assertTrue("First key should be K1", ((HDFSGatewayEventImpl) o).getKey().equals("K1"));

    assertTrue(
        " combined peeked and current skip list size should be  6",
        (getSortedEventQueue(hdfsBQ).getPeeked().size()
                + getSortedEventQueue(hdfsBQ).currentSkipList.size())
            == 6);

    o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
    Object v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K2 with value V2a but the value was " + v, ((String) v).equals("V2a"));

    o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K3 with value V3b but the value was " + v, ((String) v).equals("V3b"));
  }
  public void testPeekABatch() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

    getSortedEventQueue(hdfsBQ).rollover(true);

    hopqueue.put(getNewEvent("K1", "V12", r1, 0, 11));
    hopqueue.put(getNewEvent("K5", "V3a", r1, 0, 12));
    hopqueue.put(getNewEvent("K5", "V3b", r1, 0, 13));

    assertTrue(
        " skip list size should be  3 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(),
        getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);
    assertTrue(
        " skip list size should be  6 but is "
            + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 6);

    Object o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    Object o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    Object v1 = ((HDFSGatewayEventImpl) o1).getDeserializedValue();
    Object v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue(
        " key should be K3 with value V3b but the value was " + v1, ((String) v1).equals("V3b"));
    assertTrue(
        " key should be K1 with value V12 but the value was " + v2, ((String) v2).equals("V12"));

    ArrayList a = hdfsBQ.peekABatch();
    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) a.get(0)).getKey(),
        ((HDFSGatewayEventImpl) a.get(0)).getKey().equals("K1"));
    assertTrue(
        "Second key should be K2 but is " + ((HDFSGatewayEventImpl) a.get(1)).getKey(),
        ((HDFSGatewayEventImpl) a.get(1)).getKey().equals("K2"));
    assertTrue(
        "Third key should be K2 but is " + ((HDFSGatewayEventImpl) a.get(2)).getKey(),
        ((HDFSGatewayEventImpl) a.get(2)).getKey().equals("K2"));

    assertTrue(
        " Peeked skip list size should be 6 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
    assertTrue(
        " queueOfLists size should be  2 ", getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);

    assertTrue(
        " skip list size should be  3 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);

    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    v1 = ((HDFSGatewayEventImpl) o1).getDeserializedValue();
    v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue(
        " key should be K3 with value V3b but the value was " + v1, ((String) v1).equals("V3b"));
    assertTrue(
        " key should be K1 with value V12 but the value was " + v2, ((String) v2).equals("V12"));

    java.util.Iterator<KeyToSeqNumObject> iter1 =
        getSortedEventQueue(hdfsBQ).getPeeked().iterator();
    assertTrue("key in peeked list should be 3 ", iter1.next().getSeqNum() == 3);
    assertTrue("key in peeked list should be 6 ", iter1.next().getSeqNum() == 6);
    assertTrue("key in peeked list should be 2 ", iter1.next().getSeqNum() == 2);
    assertTrue("key in peeked list should be 9 ", iter1.next().getSeqNum() == 9);
    assertTrue("key in peeked list should be 8 ", iter1.next().getSeqNum() == 8);
    assertTrue("key in peeked list should be 7 ", iter1.next().getSeqNum() == 7);
    assertTrue(" Peeked list should not have any more elements. ", iter1.hasNext() == false);

    java.util.Iterator<KeyToSeqNumObject> iter2 =
        getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
    assertTrue("key in peeked list should be 11", iter2.next().getSeqNum() == 11);
    assertTrue("key in peeked list should be 13", iter2.next().getSeqNum() == 13);
    assertTrue("key in peeked list should be 12 ", iter2.next().getSeqNum() == 12);

    iter2 = getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
    HashSet<Long> hs = new HashSet<Long>();
    hs.add((long) 11);
    hs.add((long) 13);
    hs.add((long) 12);
    hs.add((long) 3);
    hs.add((long) 6);
    hs.add((long) 2);
    hs.add((long) 9);
    hs.add((long) 8);
    hs.add((long) 7);

    hdfsBQ.hdfsEventQueue.handleRemainingElements(hs);

    ArrayList a1 = hdfsBQ.peekABatch();
    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue(" key should K3 should not have been found ", o1 == null);
    assertTrue(
        " key should K1 with value V12 but the value was " + v2, ((String) v2).equals("V12"));

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) a1.get(0)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(0)).getKey().equals("K1"));
    assertTrue(
        "Second key should be K5 but is " + ((HDFSGatewayEventImpl) a1.get(1)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(1)).getKey().equals("K5"));
    assertTrue(
        "Third key should be K5 but is " + ((HDFSGatewayEventImpl) a1.get(2)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(2)).getKey().equals("K5"));

    assertTrue(
        " Peeked skip list size should be  3 ",
        getSortedEventQueue(hdfsBQ).getPeeked().size() == 3);
    assertTrue(
        " skip list size should be  0 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(),
        getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);
    assertTrue(
        " skip list size should be  3 but is "
            + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 3);
    assertTrue(
        " queueOfLists size should be  2 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);
  }
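  // Answers a client's partition-attributes request with the partitioned region's total bucket
  // count, the partition resolver class name when one is configured, and the full path of the
  // colocation leader region (null when the region is not colocated with another PR).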
  @SuppressWarnings("unchecked")
  @Override
  public void cmdExecute(Message msg, ServerConnection servConn, long start)
      throws IOException, ClassNotFoundException, InterruptedException {
    String regionFullPath = null;
    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
    regionFullPath = msg.getPart(0).getString();
    String errMessage = "";
    if (regionFullPath == null) {
      logger.warn(
          LocalizedMessage.create(
              LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL));
      errMessage =
          LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL
              .toLocalizedString();
      writeErrorResponse(
          msg, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR, errMessage.toString(), servConn);
      servConn.setAsTrue(RESPONDED);
    } else {
      Region region = crHelper.getRegion(regionFullPath);
      if (region == null) {
        logger.warn(
            LocalizedMessage.create(
                LocalizedStrings
                    .GetClientPartitionAttributes_REGION_NOT_FOUND_FOR_SPECIFIED_REGION_PATH,
                regionFullPath));
        errMessage =
            LocalizedStrings.GetClientPartitionAttributes_REGION_NOT_FOUND.toLocalizedString()
                + regionFullPath;
        writeErrorResponse(
            msg,
            MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
            errMessage.toString(),
            servConn);
        servConn.setAsTrue(RESPONDED);
      } else {
        try {
          Message responseMsg = servConn.getResponseMessage();
          responseMsg.setTransactionId(msg.getTransactionId());
          responseMsg.setMessageType(MessageType.RESPONSE_CLIENT_PARTITION_ATTRIBUTES);

          PartitionedRegion prRegion = (PartitionedRegion) region;

          PartitionResolver partitionResolver = prRegion.getPartitionResolver();
          int numParts = 2; // MINIMUM PARTS
          if (partitionResolver != null) {
            numParts++;
          }
          responseMsg.setNumberOfParts(numParts);
          // PART 1
          responseMsg.addObjPart(prRegion.getTotalNumberOfBuckets());

          // PART 2
          if (partitionResolver != null) {
            // strip the leading "class " prefix produced by Class.toString()
            responseMsg.addObjPart(partitionResolver.getClass().toString().substring(6));
          }

          // PART 3
          String leaderRegionPath = null;
          PartitionedRegion leaderRegion = null;
          String leaderRegionName = prRegion.getColocatedWith();
          if (leaderRegionName != null) {
            Cache cache = prRegion.getCache();
            while (leaderRegionName != null) {
              leaderRegion = (PartitionedRegion) cache.getRegion(leaderRegionName);
              if (leaderRegion.getColocatedWith() == null) {
                leaderRegionPath = leaderRegion.getFullPath();
                break;
              } else {
                leaderRegionName = leaderRegion.getColocatedWith();
              }
            }
          }
          responseMsg.addObjPart(leaderRegionPath);
          responseMsg.send();
          msg.flush();
        } catch (Exception e) {
          writeException(msg, e, false, servConn);
        } finally {
          servConn.setAsTrue(Command.RESPONDED);
        }
      }
    }
  }