@Override
  protected void setUp() throws Exception {
    super.setUp();
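    // Permit running against a standalone (non-HA) HDFS filesystem, which is what
    // these unit tests use (assumption based on the property name).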
    System.setProperty(HDFSStoreImpl.ALLOW_STANDALONE_HDFS_FILESYSTEM_PROP, "true");

    // These are logged by HDFS when it is stopped.
    TestUtils.addExpectedException("sleep interrupted");
    TestUtils.addExpectedException("java.io.InterruptedIOException");

    testDataDir = new Path("test-case");

    cache = createCache();

    configureHdfsStoreFactory();
    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);

    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
    region = regionfactory.create(getName());

    // disable compaction by default and clear existing queues
    HDFSCompactionManager compactionManager = HDFSCompactionManager.getInstance(hdfsStore);
    compactionManager.reset();

    director = HDFSRegionDirector.getInstance();
    director.setCache(cache);
    regionManager = ((LocalRegion) region).getHdfsRegionManager();
    stats = director.getHdfsRegionStats("/" + getName());
    storeStats = hdfsStore.getStats();
    blockCache = hdfsStore.getBlockCache();
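    // Signal the hoplog organizer code that it is running inside a JUnit test.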
    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
  }
  private void checkQueueIteration(List<KeyValue> expected, String... entries) throws Exception {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    Region r1 = rf1.setPartitionAttributes(paf.create()).create("r1");

    // create the buckets
    r1.put("blah", "blah");

    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue brq =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

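    // Each entry is either "key-value", which enqueues an event, or the literal
    // "roll", which forces the bucket queue to roll over its current skip list.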
    int seq = 0;
    for (String s : entries) {
      if (s.equals("roll")) {
        brq.rolloverSkipList();
      } else {
        String[] kv = s.split("-");
        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
      }
    }

    Iterator<HDFSGatewayEventImpl> iter = brq.iterator(r1);
    List<KeyValue> actual = new ArrayList<KeyValue>();
    while (iter.hasNext()) {
      HDFSGatewayEventImpl evt = iter.next();
      actual.add(new KeyValue((String) evt.getKey(), (String) evt.getDeserializedValue()));
    }

    assertEquals(expected, actual);
  }
  @Test
  public void testDisabledThresholds() throws Exception {
    final InternalResourceManager irm = this.cache.getResourceManager();
    final OffHeapMemoryMonitor monitor = irm.getOffHeapMonitor();

    final RegionFactory regionFactory = this.cache.createRegionFactory(RegionShortcut.LOCAL);
    regionFactory.setOffHeap(true);
    final EvictionAttributesImpl evictionAttrs = new EvictionAttributesImpl();
    evictionAttrs.setAlgorithm(EvictionAlgorithm.NONE);
    regionFactory.setEvictionAttributes(evictionAttrs);
    final Region region = regionFactory.create("testDefaultThresholdsRegion");
    TestMemoryThresholdListener listener = new TestMemoryThresholdListener();
    irm.addResourceListener(ResourceType.OFFHEAP_MEMORY, listener);

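    // With both thresholds disabled (the default), these puts should consume
    // off-heap memory without generating any resource events.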
    region.put("1", new Byte[550000]);
    region.put("2", new Byte[200000]);
    assertEquals(0, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(0, irm.getStats().getOffHeapEvictionStopEvents());
    assertEquals(0, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(0, irm.getStats().getOffHeapSafeEvents());
    assertEquals(0, listener.getEvictionThresholdCalls());
    assertEquals(0, listener.getCriticalThresholdCalls());

    // Enable eviction threshold and make sure event is generated
    monitor.setEvictionThreshold(50f);
    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(0, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(1, listener.getEvictionThresholdCalls());
    assertEquals(0, listener.getCriticalThresholdCalls());

    // Enable critical threshold and make sure event is generated
    region.put("3", new Byte[200000]);
    monitor.setCriticalThreshold(70f);
    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(1, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(2, listener.getEvictionThresholdCalls());
    assertEquals(1, listener.getCriticalThresholdCalls());

    // Disable thresholds and verify events
    monitor.setEvictionThreshold(0f);
    monitor.setCriticalThreshold(0f);

    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(1, irm.getStats().getOffHeapEvictionStopEvents());
    assertEquals(1, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(1, irm.getStats().getOffHeapSafeEvents());

    assertEquals(2, listener.getEvictionThresholdCalls());
    assertEquals(2, listener.getCriticalThresholdCalls());
    assertEquals(0, listener.getNormalCalls());
    assertEquals(2, listener.getEvictionDisabledCalls());
    assertEquals(2, listener.getCriticalDisabledCalls());
  }
  @Test
  public void testGetRegionFactoryWithIsGlobalScope() throws Exception {
    serverOptions = JSONFormatter.fromJSON("{ \"scope\": \"GLOBAL\" }");
    new ScopeOption(serverOptions).setOptionOnRegionFactory(regionFactory);

    Region region = regionFactory.create(getCurrentTestName());
    assertThat(region.getAttributes().getScope(), equalTo(Scope.GLOBAL));
  }
  /**
   * Validates and sets the Data Policy on the RegionFactory used to create and configure the Region
   * from this FactoryBean.
   *
   * @param regionFactory the RegionFactory used by this FactoryBean to create and configure the
   *     Region.
   * @param persistent a boolean value indicating whether the Region should be persistent and
   *     persist its data to disk.
   * @param dataPolicy the configured Data Policy for the Region.
   * @see #resolveDataPolicy(com.gemstone.gemfire.cache.RegionFactory, Boolean, String)
   * @see com.gemstone.gemfire.cache.DataPolicy
   * @see com.gemstone.gemfire.cache.RegionFactory
   */
  protected void resolveDataPolicy(
      RegionFactory<K, V> regionFactory, Boolean persistent, DataPolicy dataPolicy) {
    if (dataPolicy != null) {
      assertDataPolicyAndPersistentAttributesAreCompatible(dataPolicy);
      regionFactory.setDataPolicy(dataPolicy);
      setDataPolicy(dataPolicy);
    } else {
      resolveDataPolicy(regionFactory, persistent, (String) null);
    }
  }
  private void checkQueueGet(String key, KeyValue expected, String... entries) throws Exception {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");

    // create the buckets
    r1.put("blah", "blah");

    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue brq =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

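    // Same entry grammar as checkQueueIteration: "key-value" enqueues an event,
    // "roll" rolls over the current skip list.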
    int seq = 0;
    for (String s : entries) {
      if (s.equals("roll")) {
        brq.rolloverSkipList();
      } else {
        String[] kv = s.split("-");
        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
      }
    }

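    // Queue lookups are keyed on the serialized form of the region key.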
    byte[] bkey = EntryEventImpl.serialize(key);
    HDFSGatewayEventImpl evt = hopqueue.get(r1, bkey, 0);
    if (expected == null) {
      assertNull(evt);

    } else {
      assertEquals(expected.key, evt.getKey());
      assertEquals(expected.value, evt.getDeserializedValue());
    }
  }
  /**
   * Validates the configured Data Policy and may override it, taking into account the 'persistent'
   * attribute and constraints for the Region type.
   *
   * @param regionFactory the GemFire RegionFactory used to create the desired Region.
    * @param persistent a boolean value indicating whether the Region should persist its data to
    *     disk.
   * @param dataPolicy requested Data Policy as set by the user in the Spring GemFire configuration
   *     meta-data.
   * @see com.gemstone.gemfire.cache.DataPolicy
   * @see com.gemstone.gemfire.cache.RegionFactory
   */
  protected void resolveDataPolicy(
      RegionFactory<K, V> regionFactory, Boolean persistent, String dataPolicy) {
    if (dataPolicy != null) {
      DataPolicy resolvedDataPolicy = new DataPolicyConverter().convert(dataPolicy);

      Assert.notNull(
          resolvedDataPolicy, String.format("Data Policy '%1$s' is invalid.", dataPolicy));
      assertDataPolicyAndPersistentAttributesAreCompatible(resolvedDataPolicy);

      regionFactory.setDataPolicy(resolvedDataPolicy);
      setDataPolicy(resolvedDataPolicy);
    } else {
      DataPolicy regionAttributesDataPolicy = getDataPolicy(getAttributes(), DataPolicy.DEFAULT);
      DataPolicy resolvedDataPolicy =
          (isPersistent() && DataPolicy.DEFAULT.equals(regionAttributesDataPolicy)
              ? DataPolicy.PERSISTENT_REPLICATE
              : regionAttributesDataPolicy);

      assertDataPolicyAndPersistentAttributesAreCompatible(resolvedDataPolicy);

      regionFactory.setDataPolicy(resolvedDataPolicy);
      setDataPolicy(resolvedDataPolicy);
    }
  }
  protected <K, V> void mergePartitionAttributes(
      final RegionFactory<K, V> regionFactory, final RegionAttributes<K, V> regionAttributes) {

    // NOTE PartitionAttributes are created by certain RegionShortcuts; the null check is needed
    // since RegionAttributes.getPartitionAttributes() can technically return null!
    // NOTE most likely, the PartitionAttributes will never be null, since the
    // PartitionRegionFactoryBean always sets a PartitionAttributesFactoryBean BeanBuilder on the
    // RegionAttributesFactoryBean "partitionAttributes" property.
    if (regionAttributes.getPartitionAttributes() != null) {
      PartitionAttributes partitionAttributes = regionAttributes.getPartitionAttributes();
      PartitionAttributesFactory partitionAttributesFactory =
          new PartitionAttributesFactory(partitionAttributes);
      RegionShortcutWrapper shortcutWrapper = RegionShortcutWrapper.valueOf(shortcut);

      // NOTE however, since the default value of redundancy is 0, we need to account for
      // 'redundant' RegionShortcut types, which specify a redundancy of 1.
      if (shortcutWrapper.isRedundant() && partitionAttributes.getRedundantCopies() == 0) {
        partitionAttributesFactory.setRedundantCopies(1);
      }

      // NOTE and, since the default value of localMaxMemory is based on the system memory, we
      // need to account for 'proxy' RegionShortcut types, which specify a local max memory of 0.
      if (shortcutWrapper.isProxy()) {
        partitionAttributesFactory.setLocalMaxMemory(0);
      }

      // NOTE internally, RegionFactory.setPartitionAttributes handles merging the
      // PartitionAttributes, hooray!
      regionFactory.setPartitionAttributes(partitionAttributesFactory.create());
    }
  }
  /**
   * Intelligently merges the given RegionAttributes with the configuration setting of the
   * RegionFactory. This method is used to merge the RegionAttributes and PartitionAttributes with
   * the RegionFactory that is created when the user specified a RegionShortcut. This method gets
   * called by the createRegionFactory method depending upon the value passed to the
   * Cache.createRegionFactory() method (i.e. whether there was a RegionShortcut specified or not).
   *
   * @param <K> the Class type of the Region key.
   * @param <V> the Class type of the Region value.
   * @param regionFactory the GemFire RegionFactory used to configure and create the Region that is
   *     the product of this RegionFactoryBean.
   * @param regionAttributes the RegionAttributes containing the Region configuration settings to
   *     merge to the RegionFactory.
   * @return the RegionFactory with the configuration settings of the RegionAttributes merged.
   * @see #isUserSpecifiedEvictionAttributes(com.gemstone.gemfire.cache.RegionAttributes)
   * @see #validateRegionAttributes(com.gemstone.gemfire.cache.RegionAttributes)
   * @see com.gemstone.gemfire.cache.RegionAttributes
   * @see com.gemstone.gemfire.cache.RegionFactory
   */
  @SuppressWarnings("unchecked")
  protected <K, V> RegionFactory<K, V> mergeRegionAttributes(
      final RegionFactory<K, V> regionFactory, final RegionAttributes<K, V> regionAttributes) {

    if (regionAttributes != null) {
      // NOTE this validation may not be strictly required, depending on how the RegionAttributes
      // were "created", but...
      validateRegionAttributes(regionAttributes);

      regionFactory.setCloningEnabled(regionAttributes.getCloningEnabled());
      regionFactory.setCompressor(regionAttributes.getCompressor());
      regionFactory.setConcurrencyChecksEnabled(regionAttributes.getConcurrencyChecksEnabled());
      regionFactory.setConcurrencyLevel(regionAttributes.getConcurrencyLevel());
      regionFactory.setCustomEntryIdleTimeout(regionAttributes.getCustomEntryIdleTimeout());
      regionFactory.setCustomEntryTimeToLive(regionAttributes.getCustomEntryTimeToLive());
      regionFactory.setDiskSynchronous(regionAttributes.isDiskSynchronous());
      regionFactory.setEnableAsyncConflation(regionAttributes.getEnableAsyncConflation());
      regionFactory.setEnableSubscriptionConflation(
          regionAttributes.getEnableSubscriptionConflation());
      regionFactory.setEntryIdleTimeout(regionAttributes.getEntryIdleTimeout());
      regionFactory.setEntryTimeToLive(regionAttributes.getEntryTimeToLive());

      // NOTE EvictionAttributes are created by certain RegionShortcuts; need the null check!
      if (isUserSpecifiedEvictionAttributes(regionAttributes)) {
        regionFactory.setEvictionAttributes(regionAttributes.getEvictionAttributes());
      }

      regionFactory.setIgnoreJTA(regionAttributes.getIgnoreJTA());
      regionFactory.setIndexMaintenanceSynchronous(
          regionAttributes.getIndexMaintenanceSynchronous());
      regionFactory.setInitialCapacity(regionAttributes.getInitialCapacity());
      regionFactory.setKeyConstraint(regionAttributes.getKeyConstraint());
      regionFactory.setLoadFactor(regionAttributes.getLoadFactor());
      regionFactory.setLockGrantor(regionAttributes.isLockGrantor());
      regionFactory.setMembershipAttributes(regionAttributes.getMembershipAttributes());
      regionFactory.setMulticastEnabled(regionAttributes.getMulticastEnabled());
      mergePartitionAttributes(regionFactory, regionAttributes);
      regionFactory.setPoolName(regionAttributes.getPoolName());
      regionFactory.setRegionIdleTimeout(regionAttributes.getRegionIdleTimeout());
      regionFactory.setRegionTimeToLive(regionAttributes.getRegionTimeToLive());
      regionFactory.setStatisticsEnabled(regionAttributes.getStatisticsEnabled());
      regionFactory.setSubscriptionAttributes(regionAttributes.getSubscriptionAttributes());
      regionFactory.setValueConstraint(regionAttributes.getValueConstraint());
    }

    return regionFactory;
  }
  @Override
  @SuppressWarnings("deprecation")
  protected Region<K, V> lookupFallback(GemFireCache gemfireCache, String regionName)
      throws Exception {
    Assert.isTrue(
        gemfireCache instanceof Cache,
        String.format("Unable to create Regions from '%1$s'.", gemfireCache));

    Cache cache = (Cache) gemfireCache;

    RegionFactory<K, V> regionFactory = createRegionFactory(cache);

    if (hubId != null) {
      enableGateway = (enableGateway == null || enableGateway);
      Assert.isTrue(enableGateway, "The 'hubId' requires the 'enableGateway' property to be true.");
      regionFactory.setGatewayHubId(hubId);
    }

    if (enableGateway != null) {
      if (enableGateway) {
        Assert.notNull(
            hubId, "The 'enableGateway' property requires the 'hubId' property to be set.");
      }
      regionFactory.setEnableGateway(enableGateway);
    }

    if (!ObjectUtils.isEmpty(gatewaySenders)) {
      Assert.isTrue(
          hubId == null,
          "It is invalid to configure a region with both a hubId and gatewaySenders."
              + " Note that the enableGateway and hubId properties are deprecated since Gemfire 7.0");

      for (Object gatewaySender : gatewaySenders) {
        regionFactory.addGatewaySenderId(((GatewaySender) gatewaySender).getId());
      }
    }

    if (!ObjectUtils.isEmpty(asyncEventQueues)) {
      for (Object asyncEventQueue : asyncEventQueues) {
        regionFactory.addAsyncEventQueueId(((AsyncEventQueue) asyncEventQueue).getId());
      }
    }

    if (!ObjectUtils.isEmpty(cacheListeners)) {
      for (CacheListener<K, V> listener : cacheListeners) {
        regionFactory.addCacheListener(listener);
      }
    }

    if (cacheLoader != null) {
      regionFactory.setCacheLoader(cacheLoader);
    }

    if (cacheWriter != null) {
      regionFactory.setCacheWriter(cacheWriter);
    }

    resolveDataPolicy(regionFactory, persistent, dataPolicy);

    if (isDiskStoreConfigurationAllowed()) {
      regionFactory.setDiskStoreName(diskStoreName);
    }

    if (scope != null) {
      regionFactory.setScope(scope);
    }

    if (attributes != null) {
      Assert.state(
          !attributes.isLockGrantor() || (scope == null) || scope.isGlobal(),
          "Lock Grantor only applies to a 'GLOBAL' scoped Region.");
    }

    postProcess(regionFactory);

    Region<K, V> region =
        (getParent() != null
            ? regionFactory.createSubregion(getParent(), regionName)
            : regionFactory.create(regionName));

    if (log.isInfoEnabled()) {
      if (getParent() != null) {
        log.info(
            String.format(
                "Created new Cache sub-Region [%1$s] under parent Region [%2$s].",
                regionName, getParent().getName()));
      } else {
        log.info(String.format("Created new Cache Region [%1$s].", regionName));
      }
    }

    if (snapshot != null) {
      region.loadSnapshot(snapshot.getInputStream());
    }

    if (attributes != null && attributes.isLockGrantor()) {
      region.becomeLockGrantor();
    }

    return region;
  }
  public void testHopQueueWithOneBucket() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    EntryEventImpl ev1 =
        EntryEventImpl.create(
            (LocalRegion) r1,
            Operation.CREATE,
            (Object) "K1",
            (Object) "V1",
            null,
            false,
            (DistributedMember) c.getMyId());
    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

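    // All six events should be in the current skip list; the queue does not
    // conflate, so every update to K2 and K3 is retained.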
    assertTrue(
        " skip list size should be  6 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);

    // peek a key; it should be the lowest
    Object[] l = hopqueue.peek(1, 0).toArray();

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) l[0]).getKey(),
        ((HDFSGatewayEventImpl) l[0]).getKey().equals("K1"));
    assertTrue(
        " Peeked skip list size should be  6 ",
        getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
    assertTrue(
        " skip list size should be  0 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);

    // try to fetch the key; it is in the peeked skip list but still retrievable
    Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    assertTrue("First key should be K1", ((HDFSGatewayEventImpl) o).getKey().equals("K1"));

    assertTrue(
        " combined skip list sizes should be  6",
        (getSortedEventQueue(hdfsBQ).getPeeked().size()
                + getSortedEventQueue(hdfsBQ).currentSkipList.size())
            == 6);

    o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
    Object v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K2 with value V2a but the value was " + v, ((String) v).equals("V2a"));

    o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K3 with value V3b but the value was " + v, ((String) v).equals("V3b"));
  }
  public void testPeekABatch() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

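    // Roll the first six events into queueOfLists so subsequent puts land in a
    // fresh skip list.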
    getSortedEventQueue(hdfsBQ).rollover(true);

    hopqueue.put(getNewEvent("K1", "V12", r1, 0, 11));
    hopqueue.put(getNewEvent("K5", "V3a", r1, 0, 12));
    hopqueue.put(getNewEvent("K5", "V3b", r1, 0, 13));

    assertTrue(
        " skip list size should be  3 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(),
        getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);
    assertTrue(
        " first list in queueOfLists should have size 6 but is "
            + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 6);

    Object o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    Object o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    Object v1 = ((HDFSGatewayEventImpl) o1).getDeserializedValue();
    Object v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue(
        " key K3 should have value V3b but the value was " + v1, ((String) v1).equals("V3b"));
    assertTrue(
        " key K1 should have value V12 but the value was " + v2, ((String) v2).equals("V12"));

    ArrayList a = hdfsBQ.peekABatch();
    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) a.get(0)).getKey(),
        ((HDFSGatewayEventImpl) a.get(0)).getKey().equals("K1"));
    assertTrue(
        "Second key should be K2 but is " + ((HDFSGatewayEventImpl) a.get(1)).getKey(),
        ((HDFSGatewayEventImpl) a.get(1)).getKey().equals("K2"));
    assertTrue(
        "Third key should be K2 but is " + ((HDFSGatewayEventImpl) a.get(2)).getKey(),
        ((HDFSGatewayEventImpl) a.get(2)).getKey().equals("K2"));

    assertTrue(
        " Peeked skip list size should be 6 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
    assertTrue(
        " queueOfLists size should be  2 ", getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);

    assertTrue(
        " skip list size should be  3 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);

    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    v1 = ((HDFSGatewayEventImpl) o1).getDeserializedValue();
    v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue(
        " key K3 should have value V3b but the value was " + v1, ((String) v1).equals("V3b"));
    assertTrue(
        " key K1 should have value V12 but the value was " + v2, ((String) v2).equals("V12"));

    java.util.Iterator<KeyToSeqNumObject> iter1 =
        getSortedEventQueue(hdfsBQ).getPeeked().iterator();
    assertTrue("key in peeked list should be 3 ", iter1.next().getSeqNum() == 3);
    assertTrue("key in peeked list should be 6 ", iter1.next().getSeqNum() == 6);
    assertTrue("key in peeked list should be 2 ", iter1.next().getSeqNum() == 2);
    assertTrue("key in peeked list should be 9 ", iter1.next().getSeqNum() == 9);
    assertTrue("key in peeked list should be 8 ", iter1.next().getSeqNum() == 8);
    assertTrue("key in peeked list should be 7 ", iter1.next().getSeqNum() == 7);
    assertTrue(" Peeked list should not have any more elements. ", iter1.hasNext() == false);

    java.util.Iterator<KeyToSeqNumObject> iter2 =
        getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
    assertTrue("key in peeked list should be 11", iter2.next().getSeqNum() == 11);
    assertTrue("key in peeked list should be 13", iter2.next().getSeqNum() == 13);
    assertTrue("key in peeked list should be 12 ", iter2.next().getSeqNum() == 12);

    iter2 = getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
    HashSet<Long> hs = new HashSet<Long>();
    hs.add(11L);
    hs.add(13L);
    hs.add(12L);
    hs.add(3L);
    hs.add(6L);
    hs.add(2L);
    hs.add(9L);
    hs.add(8L);
    hs.add(7L);

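    // Report every queued sequence number as handled; the previously peeked batch
    // should be purged, leaving only the post-rollover events (11, 12, 13).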
    hdfsBQ.hdfsEventQueue.handleRemainingElements(hs);

    ArrayList a1 = hdfsBQ.peekABatch();
    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertNull(" key K3 should not have been found ", o1);
    assertTrue(
        " key K1 should have value V12 but the value was " + v2, ((String) v2).equals("V12"));

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) a1.get(0)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(0)).getKey().equals("K1"));
    assertTrue(
        "Second key should be K5 but is " + ((HDFSGatewayEventImpl) a1.get(1)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(1)).getKey().equals("K5"));
    assertTrue(
        "Third key should be K5 but is " + ((HDFSGatewayEventImpl) a1.get(2)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(2)).getKey().equals("K5"));

    assertTrue(
        " Peeked skip list size should be  3 ",
        getSortedEventQueue(hdfsBQ).getPeeked().size() == 3);
    assertTrue(
        " skip list size should be  0 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(),
        getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);
    assertTrue(
        " first list in queueOfLists should have size 3 but is "
            + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 3);
    assertTrue(
        " queueOfLists size should be 2 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);
  }