private void checkQueueIteration(List<KeyValue> expected, String... entries) throws Exception {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    Region r1 = rf1.setPartitionAttributes(paf.create()).create("r1");

    // create the buckets
    r1.put("blah", "blah");

    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue brq =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    int seq = 0;
    for (String s : entries) {
      if (s.equals("roll")) {
        brq.rolloverSkipList();
      } else {
        String[] kv = s.split("-");
        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
        getSortedEventQueue(brq).rollover(true);
      }
    }

    Iterator<HDFSGatewayEventImpl> iter = brq.iterator(r1);
    List<KeyValue> actual = new ArrayList<KeyValue>();
    while (iter.hasNext()) {
      HDFSGatewayEventImpl evt = iter.next();
      actual.add(new KeyValue((String) evt.getKey(), (String) evt.getDeserializedValue()));
    }

    assertEquals(expected, actual);
  }
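For context, a hypothetical call to this helper might look as follows; the special "roll" entry forces a skip-list rollover between puts (the keys, values, and expected list here are illustrative, not from the source).

    // Hypothetical usage sketch: put K1-V1, roll the skip list over, put K2-V2;
    // the bucket iterator should still yield both entries in key order.
    List<KeyValue> expected = new ArrayList<KeyValue>();
    expected.add(new KeyValue("K1", "V1"));
    expected.add(new KeyValue("K2", "V2"));
    checkQueueIteration(expected, "K1-V1", "roll", "K2-V2");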
Code Example #2
 @Override
 public Region createRegion(String regionName, Class valueConstraint) {
   PartitionAttributesFactory paf = new PartitionAttributesFactory();
   AttributesFactory af = new AttributesFactory();
   af.setPartitionAttributes(paf.create());
   af.setValueConstraint(valueConstraint);
   Region r1 = CacheUtils.createRegion(regionName, af.create(), false);
   return r1;
 }
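A minimal usage sketch, assuming the usual Portfolio query-test domain class is on the classpath (region name and key are illustrative):

   // Hypothetical usage: the value constraint lets the query engine type-check
   // field references against Portfolio.
   Region portfolios = createRegion("portfolios", Portfolio.class);
   portfolios.put("1", new Portfolio(1));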
Code Example #3
  protected AttributesFactory getServerCacheAttributesFactory(boolean enableStorage) {
    AttributesFactory factory = new AttributesFactory();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    factory.setDataPolicy(DataPolicy.PARTITION);
    paf.setRedundantCopies(0).setTotalNumBuckets(1);
    if (!enableStorage) {
      paf.setLocalMaxMemory(0);
    }

    factory.setPartitionAttributes(paf.create());
    return factory;
  }
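As a sketch of the two modes (the cache handle and region names are illustrative): with enableStorage false the attributes describe an accessor that hosts no buckets, because localMaxMemory is 0; with true, a regular datastore.

    RegionAttributes accessorAttrs = getServerCacheAttributesFactory(false).create();
    Region accessor = cache.createRegion("example", accessorAttrs); // hosts no buckets

    RegionAttributes storeAttrs = getServerCacheAttributesFactory(true).create();
    Region datastore = cache.createRegion("example2", storeAttrs); // hosts the single bucket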
Code Example #4
 /**
  * Commented out this test as it is for some reason causing OOM when run in the suite; this is
  * due to the presence of the PartitionedRegion. Tests a where clause formed with a
  * CompiledComparison nesting a CompiledIN.
  */
 public void _testBug40333_InPartitionedRegion_2() throws Exception {
   CacheUtils.startCache();
   final Cache cache = CacheUtils.getCache();
   AttributesFactory attributesFactory = new AttributesFactory();
   PartitionAttributesFactory paf = new PartitionAttributesFactory();
   paf.setTotalNumBuckets(10);
   PartitionAttributes pa = paf.create();
   attributesFactory.setPartitionAttributes(pa);
   RegionAttributes ra = attributesFactory.create();
   final Region region = cache.createRegion("new_pos", ra);
   String queryStr =
       " select distinct r.name, pVal, r.\"type\"  "
           + " from /new_pos r , r.positions.values pVal where "
           + " ( r.name IN Set('name_11' , 'name_12') OR false ) AND pVal.mktValue < 1.00";
   this.bug40333Simulation(region, queryStr);
 }
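For reference, a sketch of executing the same query directly through the QueryService (this is not the author's bug40333Simulation, which is defined elsewhere):

   // Sketch: the region would need Portfolio-style entries with a positions map
   // for the result to be non-empty.
   Query query = cache.getQueryService().newQuery(queryStr);
   SelectResults results = (SelectResults) query.execute();
   assertNotNull(results);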
Code Example #5
  private void checkQueueGet(String key, KeyValue expected, String... entries) throws Exception {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");

    // create the buckets
    r1.put("blah", "blah");

    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue brq =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    int seq = 0;
    for (String s : entries) {
      if (s.equals("roll")) {
        brq.rolloverSkipList();
      } else {
        String[] kv = s.split("-");
        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
      }
    }

    byte[] bkey = EntryEventImpl.serialize(key);
    HDFSGatewayEventImpl evt = hopqueue.get(r1, bkey, 0);
    if (expected == null) {
      assertNull(evt);

    } else {
      assertEquals(expected.key, evt.getKey());
      assertEquals(expected.value, evt.getDeserializedValue());
    }
  }
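Hypothetical calls to this helper (keys and values are illustrative): the most recent update for a key wins, and a key that was never queued comes back null.

    checkQueueGet("K1", new KeyValue("K1", "V2"), "K1-V1", "K1-V2"); // newest update wins
    checkQueueGet("K4", null, "K1-V1", "roll", "K2-V2"); // absent key yields null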
Code Example #6
  protected <K, V> void mergePartitionAttributes(
      final RegionFactory<K, V> regionFactory, final RegionAttributes<K, V> regionAttributes) {

    // NOTE PartitionAttributes are created by certain RegionShortcuts; need the null check since
    // RegionAttributes
    // can technically return null!
    // NOTE most likely, the PartitionAttributes will never be null since the
    // PartitionRegionFactoryBean always
    // sets a PartitionAttributesFactoryBean BeanBuilder on the RegionAttributesFactoryBean
    // "partitionAttributes"
    // property.
    if (regionAttributes.getPartitionAttributes() != null) {
      PartitionAttributes partitionAttributes = regionAttributes.getPartitionAttributes();
      PartitionAttributesFactory partitionAttributesFactory =
          new PartitionAttributesFactory(partitionAttributes);
      RegionShortcutWrapper shortcutWrapper = RegionShortcutWrapper.valueOf(shortcut);

      // NOTE however, since the default value of redundancy is 0, we need to account for
      // 'redundant'
      // RegionShortcut types, which specify a redundancy of 1.
      if (shortcutWrapper.isRedundant() && partitionAttributes.getRedundantCopies() == 0) {
        partitionAttributesFactory.setRedundantCopies(1);
      }

      // NOTE and, since the default value of localMaxMemory is based on the system memory, we need
      // to account for
      // 'proxy' RegionShortcut types, which specify a local max memory of 0.
      if (shortcutWrapper.isProxy()) {
        partitionAttributesFactory.setLocalMaxMemory(0);
      }

      // NOTE internally, RegionFactory.setPartitionAttributes handles merging the
      // PartitionAttributes, hooray!
      regionFactory.setPartitionAttributes(partitionAttributesFactory.create());
    }
  }
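A sketch of the merge in action, assuming the surrounding class's shortcut field is PARTITION_REDUNDANT and a cache handle plus regionAttributes are in scope: explicit attributes that left redundantCopies at its default of 0 end up merged to one redundant copy.

    RegionFactory<String, Object> factory =
        cache.createRegionFactory(RegionShortcut.PARTITION_REDUNDANT);
    mergePartitionAttributes(factory, regionAttributes);
    Region<String, Object> region = factory.create("merged");
    // region.getAttributes().getPartitionAttributes().getRedundantCopies() == 1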
Code Example #7
  public void testHopQueueWithOneBucket() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    EntryEventImpl ev1 =
        EntryEventImpl.create(
            (LocalRegion) r1,
            Operation.CREATE,
            (Object) "K1",
            (Object) "V1",
            null,
            false,
            (DistributedMember) c.getMyId());
    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

    assertTrue(
        " skip list size should be  6 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);

    // peek a key; it should be the lowest
    Object[] l = hopqueue.peek(1, 0).toArray();

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) l[0]).getKey(),
        ((HDFSGatewayEventImpl) l[0]).getKey().equals("K1"));
    assertTrue(
        " Peeked skip list size should be  6 ",
        getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
    assertTrue(
        " skip list size should be  0 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);

    // try to fetch the key. it would be in peeked skip list but still available
    Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    assertTrue("First key should be K1", ((HDFSGatewayEventImpl) o).getKey().equals("K1"));

    assertTrue(
        " skip lists size should be  6",
        (getSortedEventQueue(hdfsBQ).getPeeked().size()
                + getSortedEventQueue(hdfsBQ).currentSkipList.size())
            == 6);

    o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
    Object v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K2 with value V2a but the value was " + v, ((String) v).equals("V2a"));

    o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K3 with value V3b but the value was " + v, ((String) v).equals("V3b"));
  }
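The peek and get semantics above can be pictured with standard collections. A minimal model, not the HDFSBucketRegionQueue internals: peeking moves the sorted entries aside into a "peeked" set, from which get() can still serve reads until the batch is removed.

    NavigableMap<String, String> current = new ConcurrentSkipListMap<String, String>();
    current.put("K2", "V2a");
    current.put("K3", "V3b");
    current.put("K1", "V1");

    NavigableMap<String, String> peeked = new ConcurrentSkipListMap<String, String>(current);
    current.clear(); // after the peek the skip list is empty...
    String still = peeked.get("K1"); // ...but "V1" is still retrievable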
Code Example #8
  public void testPeekABatch() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

    getSortedEventQueue(hdfsBQ).rollover(true);

    hopqueue.put(getNewEvent("K1", "V12", r1, 0, 11));
    hopqueue.put(getNewEvent("K5", "V3a", r1, 0, 12));
    hopqueue.put(getNewEvent("K5", "V3b", r1, 0, 13));

    assertTrue(
        " skip list size should be  3 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(),
        getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);
    assertTrue(
        " head of queueOfLists should have size 6 but has "
            + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 6);

    Object o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    Object o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    Object v1 = ((HDFSGatewayEventImpl) o1).getDeserializedValue();
    Object v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue("key K3 should have value V3b but the value was " + v1, ((String) v1).equals("V3b"));
    assertTrue("key K1 should have value V12 but the value was " + v2, ((String) v2).equals("V12"));

    ArrayList a = hdfsBQ.peekABatch();
    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) a.get(0)).getKey(),
        ((HDFSGatewayEventImpl) a.get(0)).getKey().equals("K1"));
    assertTrue(
        "Second key should be K2 but is " + ((HDFSGatewayEventImpl) a.get(1)).getKey(),
        ((HDFSGatewayEventImpl) a.get(1)).getKey().equals("K2"));
    assertTrue(
        "Third key should be K2 but is " + ((HDFSGatewayEventImpl) a.get(2)).getKey(),
        ((HDFSGatewayEventImpl) a.get(2)).getKey().equals("K2"));

    assertTrue(
        " Peeked skip list size should be 6 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
    assertTrue(
        " queueOfLists size should be  2 ", getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);

    assertTrue(
        " skip list size should be  3 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);

    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    v1 = ((HDFSGatewayEventImpl) o1).getDeserializedValue();
    v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue("key K3 should have value V3b but the value was " + v1, ((String) v1).equals("V3b"));
    assertTrue("key K1 should have value V12 but the value was " + v2, ((String) v2).equals("V12"));

    java.util.Iterator<KeyToSeqNumObject> iter1 =
        getSortedEventQueue(hdfsBQ).getPeeked().iterator();
    assertTrue("key in peeked list should be 3 ", iter1.next().getSeqNum() == 3);
    assertTrue("key in peeked list should be 6 ", iter1.next().getSeqNum() == 6);
    assertTrue("key in peeked list should be 2 ", iter1.next().getSeqNum() == 2);
    assertTrue("key in peeked list should be 9 ", iter1.next().getSeqNum() == 9);
    assertTrue("key in peeked list should be 8 ", iter1.next().getSeqNum() == 8);
    assertTrue("key in peeked list should be 7 ", iter1.next().getSeqNum() == 7);
    assertTrue(" Peeked list should not have any more elements. ", iter1.hasNext() == false);

    java.util.Iterator<KeyToSeqNumObject> iter2 =
        getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
    assertTrue("key in peeked list should be 11", iter2.next().getSeqNum() == 11);
    assertTrue("key in peeked list should be 13", iter2.next().getSeqNum() == 13);
    assertTrue("key in peeked list should be 12 ", iter2.next().getSeqNum() == 12);

    iter2 = getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
    HashSet<Long> hs = new HashSet<Long>();
    hs.add(11L);
    hs.add(13L);
    hs.add(12L);
    hs.add(3L);
    hs.add(6L);
    hs.add(2L);
    hs.add(9L);
    hs.add(8L);
    hs.add(7L);

    hdfsBQ.hdfsEventQueue.handleRemainingElements(hs);

    ArrayList a1 = hdfsBQ.peekABatch();
    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    v2 = ((HDFSGatewayEventImpl) o2).getDeserializedValue();
    assertTrue("key K3 should not have been found", o1 == null);
    assertTrue("key K1 should have value V12 but the value was " + v2, ((String) v2).equals("V12"));

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) a1.get(0)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(0)).getKey().equals("K1"));
    assertTrue(
        "Second key should be K5 but is " + ((HDFSGatewayEventImpl) a1.get(1)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(1)).getKey().equals("K5"));
    assertTrue(
        "Third key should be K5 but is " + ((HDFSGatewayEventImpl) a1.get(2)).getKey(),
        ((HDFSGatewayEventImpl) a1.get(2)).getKey().equals("K5"));

    assertTrue(
        " Peeked skip list size should be  3 ",
        getSortedEventQueue(hdfsBQ).getPeeked().size() == 3);
    assertTrue(
        " skip list size should be  0 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(),
        getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);
    assertTrue(
        " head of queueOfLists should have size 3 but has "
            + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 3);
    assertTrue(
        " queueOfLists size should be  2 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.size(),
        getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);
  }
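The rollover behavior this test exercises can likewise be modeled with standard collections (a sketch, not the real hdfsEventQueue): rolling over retires the current sorted list onto a queue of lists, and a batch peek serves the oldest list first, so seq nums 11-13 wait for the next batch.

    Deque<NavigableSet<Long>> queueOfLists = new ArrayDeque<NavigableSet<Long>>();
    NavigableSet<Long> current = new TreeSet<Long>(Arrays.asList(2L, 3L, 6L, 7L, 8L, 9L));
    queueOfLists.add(current); // rollover: retire the filled list
    current = new TreeSet<Long>(Arrays.asList(11L, 12L, 13L));
    queueOfLists.add(current);

    NavigableSet<Long> batch = queueOfLists.peek(); // oldest list first: seq 2,3,6,7,8,9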