  private HDFSGatewayEventImpl getNewEvent(
      Object key, Object value, Region r1, int bid, int tailKey) throws Exception {
    EntryEventImpl ev1 =
        EntryEventImpl.create(
            (LocalRegion) r1,
            Operation.CREATE,
            key,
            value,
            null,
            false,
            (DistributedMember) c.getMyId());
    ev1.setEventId(new EventID(this.c.getDistributedSystem()));
    HDFSGatewayEventImpl event =
        new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, ev1, null, true, bid);
    event.setShadowKey((long) tailKey);
    return event;
  }
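
  // Verifies ordering and retrieval for a single-bucket queue: queued events land in the
  // bucket's sorted skip list, peek() moves them to the peeked set, and get() still returns
  // the latest value queued for a key.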
  public void testHopQueueWithOneBucket() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
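    // put a couple of entries so bucket 0 is created before the queue bucket is looked up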
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
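    // bucket 0 of the queue region, which holds the sorted skip list of pending events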
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    EntryEventImpl ev1 =
        EntryEventImpl.create(
            (LocalRegion) r1,
            Operation.CREATE,
            (Object) "K1",
            (Object) "V1",
            null,
            false,
            (DistributedMember) c.getMyId());
    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

    assertTrue(
        " skip list size should be  6 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);

    // peek a key; it should be the lowest, K1
    Object[] l = hopqueue.peek(1, 0).toArray();
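    // the peek should have moved all six events from the skip list into the peeked set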

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) l[0]).getKey(),
        ((HDFSGatewayEventImpl) l[0]).getKey().equals("K1"));
    assertTrue(
        " Peeked skip list size should be 6 ",
        getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
    assertTrue(
        " skip list size should be 0 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);

    // try to fetch the key; it is in the peeked skip list but should still be available via get()
    Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    assertTrue("First key should be K1", ((HDFSGatewayEventImpl) o).getKey().equals("K1"));

    assertTrue(
        " combined peeked and skip list size should be 6",
        (getSortedEventQueue(hdfsBQ).getPeeked().size()
                + getSortedEventQueue(hdfsBQ).currentSkipList.size())
            == 6);

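    // for keys with multiple updates, get() should return the most recent value queued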
    o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
    Object v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(
        " key should be K2 with value V2a but the value was " + v, ((String) v).equals("V2a"));

    o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(
        " key should be K3 with value V3b but the value was " + v, ((String) v).equals("V3b"));
  }