@Before
 public void setup() {
   assertThat(factorials, is(notNullValue()));
   assertThat(factorials.getName(), is(equalTo("Factorials")));
   assertThat(
       factorials.getFullPath(), is(equalTo(String.format("%1$sFactorials", Region.SEPARATOR))));
   assertThat(factorials.getAttributes(), is(notNullValue()));
   assertThat(factorials.getAttributes().getDataPolicy(), is(equalTo(DataPolicy.EMPTY)));
   assertThat(
       factorials.getAttributes().getPoolName(),
       is(equalTo(GemfireConstants.DEFAULT_GEMFIRE_POOL_NAME)));
 }
 protected static void assertRegionMetaData(
     final Region<?, ?> region,
     final String expectedName,
     final String expectedFullPath,
     final DataPolicy expectedDataPolicy) {
   assertNotNull(
       String.format("Region (%1$s) was not properly configured and initialized!", expectedName),
       region);
   assertEquals(expectedName, region.getName());
   assertEquals(expectedFullPath, region.getFullPath());
   assertNotNull(
       String.format("Region (%1$s) must have RegionAttributes defined!", expectedName),
       region.getAttributes());
   assertEquals(expectedDataPolicy, region.getAttributes().getDataPolicy());
   assertFalse(region.getAttributes().getDataPolicy().withPersistence());
 }
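 // Hedged usage sketch (not from the original source): shows how the assertRegionMetaData
 // helper above might be invoked from a test. The "example" Region field, its name, and the
 // REPLICATE data policy are illustrative assumptions only.
 @Test
 public void exampleRegionMetaDataIsCorrect() {
   assertRegionMetaData(
       example, "Example", Region.SEPARATOR + "Example", DataPolicy.REPLICATE);
 }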
  boolean isUsedInPartitioning() {
    boolean ok = false;
    // TODO: Asif: Handle the case where the region turns out to be null.
    // Ideally the Bug 39923 workaround should ensure that the region is not null, but we only
    // apply this check for Update type statements. For Select queries it may still be null,
    // hence the check here.

    if (this.tqi == null) {
      return ok;
    }
    Region rgnOwningColumn = this.tqi.getRegion();
    assert rgnOwningColumn != null;
    RegionAttributes ra = rgnOwningColumn.getAttributes();
    // If the region is a replicated region, or a PR with just itself as a member,
    // then we should go with Derby's Activation Object.
    DataPolicy policy = ra.getDataPolicy();

    if (policy.withPartitioning()) {
      PartitionedRegion pr = (PartitionedRegion) rgnOwningColumn;
      GfxdPartitionResolver rslvr = (GfxdPartitionResolver) pr.getPartitionResolver();
      ok = rslvr != null && rslvr.isUsedInPartitioning(this.actualColumnName);
    }
    return ok;
  }
  @Test
  public void testGetRegionFactoryWithIsGlobalScope() throws Exception {
    serverOptions = JSONFormatter.fromJSON("{ \"scope\": \"GLOBAL\" }");
    new ScopeOption(serverOptions).setOptionOnRegionFactory(regionFactory);

    Region region = regionFactory.create(getCurrentTestName());
    assertThat(region.getAttributes().getScope(), equalTo(Scope.GLOBAL));
  }
  protected RegionMBeanBridge(Region<K, V> region) {
    this.region = region;
    this.regAttrs = region.getAttributes();

    this.isStatisticsEnabled = regAttrs.getStatisticsEnabled();

    this.regionAttributesData = RegionMBeanCompositeDataFactory.getRegionAttributesData(regAttrs);
    this.membershipAttributesData =
        RegionMBeanCompositeDataFactory.getMembershipAttributesData(regAttrs);
    this.evictionAttributesData =
        RegionMBeanCompositeDataFactory.getEvictionAttributesData(regAttrs);

    this.regionMonitor =
        new MBeanStatsMonitor(ManagementStrings.REGION_MONITOR.toLocalizedString());

    configureRegionMetrics();

    this.persistentEnabled = region.getAttributes().getDataPolicy().withPersistence();

    this.regionStats = ((LocalRegion) region).getRegionPerfStats();
    if (regionStats != null) {
      regionMonitor.addStatisticsToMonitor(regionStats.getStats()); // fixes 46692
    }

    LocalRegion l = (LocalRegion) region;
    if (l.getEvictionController() != null) {
      LRUStatistics stats = l.getEvictionController().getLRUHelper().getStats();
      if (stats != null) {
        regionMonitor.addStatisticsToMonitor(stats.getStats());
        EvictionAttributes ea = region.getAttributes().getEvictionAttributes();
        if (ea != null && ea.getAlgorithm().isLRUMemory()) {
          this.lruMemoryStats = stats;
        }
      }
    }

    if (regAttrs.getGatewaySenderIds() != null && regAttrs.getGatewaySenderIds().size() > 0) {
      this.isGatewayEnabled = true;
    }

    this.member = GemFireCacheImpl.getInstance().getDistributedSystem().getMemberId();
  }
  /**
   * Tests the compatibility of creating certain kinds of subregions of a local region.
   *
   * @see Region#createSubregion
   */
  public void testIncompatibleSubregions() throws CacheException {
    Region region = createRegion(this.getUniqueName());
    assertEquals(Scope.LOCAL, region.getAttributes().getScope());

    // A region with Scope.LOCAL can only have subregions with
    // Scope.LOCAL.
    try {
      AttributesFactory factory = new AttributesFactory(region.getAttributes());
      factory.setScope(Scope.DISTRIBUTED_NO_ACK);
      RegionAttributes attrs = factory.create();
      region.createSubregion(this.getUniqueName(), attrs);
      fail("Should have thrown an IllegalStateException");

    } catch (IllegalStateException ex) {
      // pass...
    }

    try {
      AttributesFactory factory = new AttributesFactory(region.getAttributes());
      factory.setScope(Scope.DISTRIBUTED_ACK);
      RegionAttributes attrs = factory.create();
      region.createSubregion(this.getUniqueName(), attrs);
      fail("Should have thrown an IllegalStateException");

    } catch (IllegalStateException ex) {
      // pass...
    }

    try {
      AttributesFactory factory = new AttributesFactory(region.getAttributes());
      factory.setScope(Scope.GLOBAL);
      RegionAttributes attrs = factory.create();
      region.createSubregion(this.getUniqueName(), attrs);
      fail("Should have thrown an IllegalStateException");

    } catch (IllegalStateException ex) {
      // pass...
    }
  }
  static <K, V> Exporter<K, V> createExporter(Region<?, ?> region, SnapshotOptions<K, V> options) {
    String pool = region.getAttributes().getPoolName();
    if (pool != null) {
      return new ClientExporter<K, V>(PoolManager.find(pool));

    } else if (InternalDistributedSystem.getAnyInstance().isLoner()
        || region.getAttributes().getDataPolicy().equals(DataPolicy.NORMAL)
        || region.getAttributes().getDataPolicy().equals(DataPolicy.PRELOADED)
        || region instanceof LocalDataSet
        || (((SnapshotOptionsImpl<K, V>) options).isParallelMode()
            && region.getAttributes().getDataPolicy().withPartitioning())) {

      // Avoid function execution:
      //    for loner systems to avoid inlining fn execution
      //    for NORMAL/PRELOAD since they don't support fn execution
      //    for LocalDataSet since we're already running a fn
      //    for parallel ops since we're already running a fn
      return new LocalExporter<K, V>();
    }

    return new WindowedExporter<K, V>();
  }
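  // Hedged illustration (names and assertion assumed, not from the original source) of the
  // dispatch in createExporter above: a region configured with a client pool is always exported
  // through its pool, regardless of data policy.
  static <K, V> void exampleExporterChoice(Region<?, ?> region, SnapshotOptions<K, V> options) {
    Exporter<K, V> exporter = createExporter(region, options);
    if (region.getAttributes().getPoolName() != null) {
      assert exporter instanceof ClientExporter;
    }
  }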
  public static void waitForDestroyEvent(Region r, final Object key) {
    final CertifiableTestCacheListener ccl =
        (CertifiableTestCacheListener) r.getAttributes().getCacheListener();
    WaitCriterion ev =
        new WaitCriterion() {
          public boolean done() {
            return ccl.destroys.contains(key);
          }

          public String description() {
            return "waiting for destroy event for " + key;
          }
        };
    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
    ccl.destroys.remove(key);
  }
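  // Hedged usage sketch (illustrative only): assumes the region was created with a
  // CertifiableTestCacheListener as its cache listener, as waitForDestroyEvent above requires,
  // and that the entry for "key1" exists when the destroy is issued.
  public static void exampleAwaitDestroy(Region<String, String> region) {
    region.destroy("key1"); // the listener records the destroy event
    waitForDestroyEvent(region, "key1"); // blocks until the event is observed, then clears it
  }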
  public Object evaluate(ExecutionContext context) throws RegionNotFoundException {
    Region rgn;
    Cache cache = context.getCache();
    // do PR bucketRegion substitution here for expressions that evaluate to a Region.
    PartitionedRegion pr = context.getPartitionedRegion();

    if (pr != null && pr.getFullPath().equals(this.regionPath)) {
      rgn = context.getBucketRegion();
    } else if (pr != null) {
      // Asif: This is a very tricky solution to allow equijoin queries on PartitionedRegions
      // locally. We may have an equijoin situation, possibly across PRs, so use the context's
      // bucket region to get the bucket ID and then retrieve this region's bucket region.
      BucketRegion br = context.getBucketRegion();
      int bucketID = br.getId();
      // Is current region a partitioned region
      rgn = cache.getRegion(this.regionPath);
      if (rgn.getAttributes().getDataPolicy().withPartitioning()) {
        // convert it into bucket region.
        PartitionedRegion prLocal = (PartitionedRegion) rgn;
        rgn = prLocal.getDataStore().getLocalBucketById(bucketID);
      }

    } else {
      rgn = cache.getRegion(this.regionPath);
    }

    if (rgn == null) {
      // if we couldn't find the region because the cache is closed, throw
      // a CacheClosedException
      if (cache.isClosed()) {
        throw new CacheClosedException();
      }
      throw new RegionNotFoundException(
          LocalizedStrings.CompiledRegion_REGION_NOT_FOUND_0.toLocalizedString(this.regionPath));
    }

    if (context.isCqQueryContext()) {
      return new QRegion(rgn, true, context);
    } else {
      return new QRegion(rgn, false, context);
    }
  }
  public static RegionMBeanBridge getInstance(Region region) {

    if (region.getAttributes().getPartitionAttributes() != null) {

      RegionMBeanBridge bridge = new PartitionedRegionBridge(region);
      PartitionedRegion parRegion = ((PartitionedRegion) region);
      DiskStoreImpl dsi = parRegion.getDiskStore();
      if (dsi != null) {
        DiskRegionStats stats = parRegion.getDiskRegionStats();

        DiskRegionBridge diskRegionBridge = new DiskRegionBridge(stats);
        bridge.addDiskRegionBridge(diskRegionBridge);

        for (DirectoryHolder dh : dsi.getDirectoryHolders()) {
          diskRegionBridge.addDirectoryStats(dh.getDiskDirectoryStats());
        }

      }

      return bridge;

    } else {
      RegionMBeanBridge bridge = new RegionMBeanBridge(region);

      LocalRegion localRegion = ((LocalRegion) region);
      DiskStoreImpl dsi = localRegion.getDiskStore();
      if (dsi != null) {
        DiskRegionBridge diskRegionBridge =
            new DiskRegionBridge(localRegion.getDiskRegion().getStats());
        bridge.addDiskRegionBridge(diskRegionBridge);

        for (DirectoryHolder dh : dsi.getDirectoryHolders()) {
          diskRegionBridge.addDirectoryStats(dh.getDiskDirectoryStats());
        }
      }
      return bridge;
    }
  }
  @SuppressWarnings("rawtypes")
  @Test
  public void testReplicaWithAttributes() throws Exception {
    assertTrue(context.containsBean("replicated-with-attributes"));
    Region region = context.getBean("replicated-with-attributes", Region.class);
    RegionAttributes attrs = region.getAttributes();

    assertEquals(10, attrs.getInitialCapacity());
    assertEquals(true, attrs.getIgnoreJTA());
    assertEquals(false, attrs.getIndexMaintenanceSynchronous());
    assertEquals(String.class, attrs.getKeyConstraint());
    assertEquals(String.class, attrs.getValueConstraint());
    assertEquals(true, attrs.isDiskSynchronous());
    assertEquals(Scope.GLOBAL, attrs.getScope());
    // assertEquals(true, attrs.isLockGrantor());
    assertEquals(true, attrs.getEnableAsyncConflation());
    assertEquals(true, attrs.getEnableSubscriptionConflation());
    assertEquals(0.50, attrs.getLoadFactor(), 0.001);
    assertEquals(false, attrs.getCloningEnabled());
    assertEquals(10, attrs.getConcurrencyLevel());
    assertEquals(true, attrs.getMulticastEnabled());
  }
  /**
   * Tests that if a <code>CacheLoader</code> for a local region invokes {@link
   * LoaderHelper#netSearch}, a {@link CacheLoaderException} is thrown.
   */
  public void testLocalLoaderNetSearch() throws CacheException {
    assertEquals(Scope.LOCAL, getRegionAttributes().getScope());

    final String name = this.getUniqueName();
    final Object key = this.getUniqueName();

    TestCacheLoader loader =
        new TestCacheLoader() {
          public Object load2(LoaderHelper helper) throws CacheLoaderException {

            try {
              helper.netSearch(true);

            } catch (TimeoutException ex) {
              fail("Why did I timeout?", ex);
            }

            return null;
          }
        };

    AttributesFactory factory = new AttributesFactory(getRegionAttributes());
    factory.setCacheLoader(loader);
    Region region = createRegion(name, factory.create());
    assertEquals(Scope.LOCAL, region.getAttributes().getScope());

    try {
      region.get(key);
      fail("Should have thrown a CacheLoaderException");

    } catch (CacheLoaderException ex) {
      String expected =
          com.gemstone.gemfire.internal.cache.LoaderHelperImpl.NET_SEARCH_LOCAL.toLocalizedString();
      String message = ex.getMessage();
      assertTrue("Unexpected message \"" + message + "\"", message.indexOf(expected) != -1);
    }
  }
 public static void acquireConnectionsAndDestroyEntriesK1andK2() {
   try {
     Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r1);
     String poolName = r1.getAttributes().getPoolName();
     assertNotNull(poolName);
     PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
     assertNotNull(pool);
     Connection conn = pool.acquireConnection();
     final Connection conn1;
     if (conn.getServer().getPort() != PORT2) {
       conn1 = pool.acquireConnection(); // Ensure we have a server with the proper port
     } else {
       conn1 = conn;
     }
     assertNotNull(conn1);
     assertEquals(PORT2, conn1.getServer().getPort());
     ServerRegionProxy srp = new ServerRegionProxy(Region.SEPARATOR + REGION_NAME, pool);
     srp.destroyOnForTestsOnly(
         conn1,
         "key1",
         null,
         Operation.DESTROY,
         new EntryEventImpl(new EventID(new byte[] {1}, 100000, 1)),
         null);
     srp.destroyOnForTestsOnly(
         conn1,
         "key2",
         null,
         Operation.DESTROY,
         new EntryEventImpl(new EventID(new byte[] {1}, 100000, 2)),
         null);
   } catch (Exception ex) {
     throw new TestException("Failed in acquireConnectionsAndDestroyEntriesK1andK2", ex);
   }
 }
 /**
  * Appends a description of the given Region's DiskStore to a StringBuffer.
  *
  * @param aRegion The Region possibly containing a DiskStore.
  * @param aStr The StringBuffer to append to.
  */
 public static void logDiskStore(Region aRegion, StringBuffer aStr) {
   if (aRegion.getAttributes().getDiskStoreName() != null) {
     aStr.append(" diskStoreName " + aRegion.getAttributes().getDiskStoreName());
   }
 }
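 // Hedged usage sketch for logDiskStore above (illustrative, not from the original source):
 // builds a one-line description of a Region and appends its disk store name when configured.
 public static String describeRegion(Region aRegion) {
   StringBuffer aStr = new StringBuffer("Region " + aRegion.getFullPath());
   logDiskStore(aRegion, aStr);
   return aStr.toString();
 }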
 private boolean shouldRunInParallel(SnapshotOptions<K, V> options) {
   return ((SnapshotOptionsImpl<K, V>) options).isParallelMode()
       && region.getAttributes().getDataPolicy().withPartitioning()
       && !(region instanceof LocalDataSet);
 }
  /**
   * Initializes without children.
   *
   * @param region The region from which RegionInfo is extracted.
   */
  @SuppressWarnings({"unchecked", "rawtypes"})
  private void init(Region region) {
    if (region == null) {
      return;
    }
    DistributedMember member =
        CacheFactory.getAnyInstance().getDistributedSystem().getDistributedMember();
    setName(region.getName());
    setFullPath(region.getFullPath());
    GemfireRegionAttributeInfo attrInfo = new GemfireRegionAttributeInfo();
    RegionAttributes<?, ?> attr = region.getAttributes();
    attrInfo.setAttribute(GemfireRegionAttributeInfo.DATA_POLICY, attr.getDataPolicy().toString());
    attrInfo.setAttribute(GemfireRegionAttributeInfo.SCOPE, attr.getScope().toString());
    if (region instanceof PartitionedRegion) {
      PartitionedRegion pr = (PartitionedRegion) region;
      PartitionAttributes pattr = pr.getPartitionAttributes();
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.LOCAL_MAX_MEMORY, pattr.getLocalMaxMemory() + "");
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.REDUNDANT_COPIES, pattr.getRedundantCopies() + "");
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.TOTAL_MAX_MEMORY, pattr.getTotalMaxMemory() + "");
      attrInfo.setAttribute(
          GemfireRegionAttributeInfo.TOTAL_NUM_BUCKETS, pattr.getTotalNumBuckets() + "");

      // data store is null if it's a proxy, i.e., LOCAL_MAX_MEMORY=0
      if (pr.getDataStore() != null) {
        Set<BucketRegion> localBucketSet = pr.getDataStore().getAllLocalBucketRegions();
        List<BucketInfo> primaryList = new ArrayList<BucketInfo>();
        List<BucketInfo> redundantList = new ArrayList<BucketInfo>();
        this.size = 0;
        for (BucketRegion br : localBucketSet) {
          BucketInfo bucketInfo =
              new GemfireBucketInfo(
                  br.getId(), br.getBucketAdvisor().isPrimary(), br.size(), br.getTotalBytes());
          //					InternalDistributedMember m = pr.getBucketPrimary(br.getId());
          //					if (m.getId().equals(member.getId())) {
          if (bucketInfo.isPrimary()) {
            primaryList.add(bucketInfo);
            this.size += bucketInfo.getSize();
          } else {
            redundantList.add(bucketInfo);
          }
        }
        Collections.sort(primaryList);
        Collections.sort(redundantList);
        setPrimaryBucketInfoList(primaryList);
        setRedundantBucketInfoList(redundantList);
      }
    } else {
      this.size = region.size();
    }
    setAttrInfo(attrInfo);
    temporalType = GemfireTemporalManager.getTemporalType(region);
    if (!region.isDestroyed() && !region.isEmpty()) {
      Set<Map.Entry> regionEntrySet = region.entrySet();
      for (Map.Entry entry : regionEntrySet) {
        Object key = entry.getKey();
        Object value = entry.getValue();
        keyTypeName = key.getClass().getName();
        valueTypeName = value.getClass().getName();
        break;
      }
    }
  }
 /**
  * Return whether the given region has persistence
  *
  * @param aRegion The Region to check for persistence.
  * @return True if the region has persistence, false otherwise.
  */
 public static boolean withPersistence(Region aRegion) {
   // only GemFire 6.5 (and beyond) has a method for DataPolicy.withPersistence()
   return aRegion.getAttributes().getDataPolicy().withPersistence();
 }
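 // Hedged companion sketch (illustrative only), mirroring logDiskStore above: appends whether
 // the given Region persists its data, using the withPersistence helper.
 public static void logPersistence(Region aRegion, StringBuffer aStr) {
   aStr.append(" withPersistence " + withPersistence(aRegion));
 }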
  @SuppressWarnings({"rawtypes", "unchecked"})
  private void doRegionTest(final RegionShortcut rs, final String rName, boolean compressed) {
    boolean isPersistent =
        rs == RegionShortcut.LOCAL_PERSISTENT
            || rs == RegionShortcut.REPLICATE_PERSISTENT
            || rs == RegionShortcut.PARTITION_PERSISTENT;
    GemFireCacheImpl gfc = createCache(isPersistent);
    Region r = null;
    try {
      gfc.setCopyOnRead(true);
      final MemoryAllocator ma = gfc.getOffHeapStore();
      assertNotNull(ma);
      assertEquals(0, ma.getUsedMemory());
      Compressor compressor = null;
      if (compressed) {
        compressor = SnappyCompressor.getDefaultInstance();
      }
      r = gfc.createRegionFactory(rs).setOffHeap(true).setCompressor(compressor).create(rName);
      assertEquals(true, r.isEmpty());
      assertEquals(0, ma.getUsedMemory());
      Object data = new Integer(123456789);
      r.put("key1", data);
      // System.out.println("After put of Integer value off heap used memory=" +
      // ma.getUsedMemory());
      assertTrue(ma.getUsedMemory() == 0);
      assertEquals(data, r.get("key1"));
      r.invalidate("key1");
      assertEquals(0, ma.getUsedMemory());
      r.put("key1", data);
      assertTrue(ma.getUsedMemory() == 0);
      long usedBeforeUpdate = ma.getUsedMemory();
      r.put("key1", data);
      assertEquals(usedBeforeUpdate, ma.getUsedMemory());
      assertEquals(data, r.get("key1"));
      r.destroy("key1");
      assertEquals(0, ma.getUsedMemory());

      data = new Long(0x007FFFFFL);
      r.put("key1", data);
      if (!compressed) {
        assertTrue(ma.getUsedMemory() == 0);
      }
      assertEquals(data, r.get("key1"));
      data = new Long(0xFF8000000L);
      r.put("key1", data);
      if (!compressed) {
        assertTrue(ma.getUsedMemory() == 0);
      }
      assertEquals(data, r.get("key1"));

      // now let's set data to something that will be stored off heap
      data = new Long(Long.MAX_VALUE);
      r.put("key1", data);
      assertEquals(data, r.get("key1"));
      // System.out.println("After put of Integer value off heap used memory=" +
      // ma.getUsedMemory());
      assertTrue(ma.getUsedMemory() > 0);
      data = new Long(Long.MIN_VALUE);
      r.put("key1", data);
      assertEquals(data, r.get("key1"));
      // System.out.println("After put of Integer value off heap used memory=" +
      // ma.getUsedMemory());
      assertTrue(ma.getUsedMemory() > 0);
      r.invalidate("key1");
      assertEquals(0, ma.getUsedMemory());
      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      usedBeforeUpdate = ma.getUsedMemory();
      r.put("key1", data);
      assertEquals(usedBeforeUpdate, ma.getUsedMemory());
      assertEquals(data, r.get("key1"));
      r.destroy("key1");
      assertEquals(0, ma.getUsedMemory());

      // confirm that byte[] do use off heap
      {
        byte[] originalBytes = new byte[1024];
        Object oldV = r.put("byteArray", originalBytes);
        long startUsedMemory = ma.getUsedMemory();
        assertEquals(null, oldV);
        byte[] readBytes = (byte[]) r.get("byteArray");
        if (originalBytes == readBytes) {
          fail("Expected different byte[] identity");
        }
        if (!Arrays.equals(readBytes, originalBytes)) {
          fail("Expected byte array contents to be equal");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());
        oldV = r.put("byteArray", originalBytes);
        if (!compressed) {
          assertEquals(null, oldV); // we default to old value being null for offheap
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());

        readBytes = (byte[]) r.putIfAbsent("byteArray", originalBytes);
        if (originalBytes == readBytes) {
          fail("Expected different byte[] identity");
        }
        if (!Arrays.equals(readBytes, originalBytes)) {
          fail("Expected byte array contents to be equal");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());
        if (!r.replace("byteArray", readBytes, originalBytes)) {
          fail("Expected replace to happen");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());
        byte[] otherBytes = new byte[1024];
        otherBytes[1023] = 1;
        if (r.replace("byteArray", otherBytes, originalBytes)) {
          fail("Expected replace to not happen");
        }
        if (r.replace("byteArray", "bogus string", originalBytes)) {
          fail("Expected replace to not happen");
        }
        if (r.remove("byteArray", "bogus string")) {
          fail("Expected remove to not happen");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());

        if (!r.remove("byteArray", originalBytes)) {
          fail("Expected remove to happen");
        }
        assertEquals(0, ma.getUsedMemory());
        oldV = r.putIfAbsent("byteArray", "string value");
        assertEquals(null, oldV);
        assertEquals("string value", r.get("byteArray"));
        if (r.replace("byteArray", "string valuE", originalBytes)) {
          fail("Expected replace to not happen");
        }
        if (!r.replace("byteArray", "string value", originalBytes)) {
          fail("Expected replace to happen");
        }
        oldV = r.destroy("byteArray"); // we default to old value being null for offheap
        if (!compressed) {
          assertEquals(null, oldV);
        }
        MyCacheListener listener = new MyCacheListener();
        r.getAttributesMutator().addCacheListener(listener);
        try {
          Object valueToReplace = "string value1";
          r.put("byteArray", valueToReplace);
          assertEquals(null, listener.ohOldValue);
          if (!compressed) {
            assertEquals("string value1", listener.ohNewValue.getDeserializedForReading());
            valueToReplace = listener.ohNewValue;
          }
          if (!r.replace("byteArray", valueToReplace, "string value2")) {
            fail("expected replace to happen");
          }
          if (!compressed) {
            assertEquals("string value2", listener.ohNewValue.getDeserializedForReading());
            assertEquals("string value1", listener.ohOldValue.getDeserializedForReading());
          }
          // make sure that custom equals/hashCode implementations are not used when comparing values.

        } finally {
          r.getAttributesMutator().removeCacheListener(listener);
        }
      }
      assertTrue(ma.getUsedMemory() > 0);
      {
        Object key = "MyValueWithPartialEquals";
        MyValueWithPartialEquals v1 = new MyValueWithPartialEquals("s1");
        MyValueWithPartialEquals v2 = new MyValueWithPartialEquals("s2");
        MyValueWithPartialEquals v3 = new MyValueWithPartialEquals("s1");
        r.put(key, v1);
        try {
          if (r.replace(key, v2, "should not happen")) {
            fail("v1 should not be equal to v2 on an offheap region");
          }
          if (!r.replace(key, v3, "should happen")) {
            fail("v1 should be equal to v3 on an offheap region");
          }
          r.put(key, v1);
          if (r.remove(key, v2)) {
            fail("v1 should not be equal to v2 on an offheap region");
          }
          if (!r.remove(key, v3)) {
            fail("v1 should be equal to v3 on an offheap region");
          }
        } finally {
          r.remove(key);
        }
      }
      {
        Object key = "MyPdxWithPartialEquals";
        MyPdxWithPartialEquals v1 = new MyPdxWithPartialEquals("s", "1");
        MyPdxWithPartialEquals v2 = new MyPdxWithPartialEquals("s", "2");
        MyPdxWithPartialEquals v3 = new MyPdxWithPartialEquals("t", "1");
        r.put(key, v1);
        try {
          if (r.replace(key, v3, "should not happen")) {
            fail("v1 should not be equal to v3 on an offheap region");
          }
          if (!r.replace(key, v2, "should happen")) {
            fail("v1 should be equal to v2 on an offheap region");
          }
          r.put(key, v1);
          if (r.remove(key, v3)) {
            fail("v1 should not be equal to v3 on an offheap region");
          }
          if (!r.remove(key, v2)) {
            fail("v1 should be equal to v2 on an offheap region");
          }
        } finally {
          r.remove(key);
        }
      }
      byte[] value = new byte[1024];
      /*while (value != null) */ {
        r.put("byteArray", value);
      }
      r.remove("byteArray");
      assertEquals(0, ma.getUsedMemory());

      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      r.invalidateRegion();
      assertEquals(0, ma.getUsedMemory());

      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      try {
        r.clear();
        assertEquals(0, ma.getUsedMemory());
      } catch (UnsupportedOperationException ok) {
      }

      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      if (r.getAttributes().getDataPolicy().withPersistence()) {
        r.put("key2", Integer.valueOf(1234567890));
        r.put("key3", new Long(0x007FFFFFL));
        r.put("key4", new Long(0xFF8000000L));
        assertEquals(4, r.size());
        r.close();
        assertEquals(0, ma.getUsedMemory());
        // simple test of recovery
        r = gfc.createRegionFactory(rs).setOffHeap(true).create(rName);
        assertEquals(4, r.size());
        assertEquals(data, r.get("key1"));
        assertEquals(Integer.valueOf(1234567890), r.get("key2"));
        assertEquals(new Long(0x007FFFFFL), r.get("key3"));
        assertEquals(new Long(0xFF8000000L), r.get("key4"));
        closeCache(gfc, true);
        assertEquals(0, ma.getUsedMemory());
        gfc = createCache();
        if (ma != gfc.getOffHeapStore()) {
          fail("identity of offHeapStore changed when cache was recreated");
        }
        r = gfc.createRegionFactory(rs).setOffHeap(true).create(rName);
        assertTrue(ma.getUsedMemory() > 0);
        assertEquals(4, r.size());
        assertEquals(data, r.get("key1"));
        assertEquals(Integer.valueOf(1234567890), r.get("key2"));
        assertEquals(new Long(0x007FFFFFL), r.get("key3"));
        assertEquals(new Long(0xFF8000000L), r.get("key4"));
      }

      r.destroyRegion();
      assertEquals(0, ma.getUsedMemory());

    } finally {
      if (r != null && !r.isDestroyed()) {
        r.destroyRegion();
      }
      closeCache(gfc, false);
    }
  }
 public String getRegionType() {
   return region.getAttributes().getDataPolicy().toString();
 }
 public <K, V> Set<Integer> getLocalBucketSet(Region<K, V> region) {
   if (!region.getAttributes().getDataPolicy().withPartitioning()) {
     return null;
   }
   return this.localBucketSet;
 }
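 // Hedged usage sketch (illustrative only): getLocalBucketSet above returns null for
 // non-partitioned regions, so callers should check for null before using the set.
 public <K, V> int countLocalBuckets(Region<K, V> region) {
   Set<Integer> buckets = getLocalBucketSet(region);
   return buckets == null ? 0 : buckets.size();
 }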
  public Index createIndex(
      String indexName,
      IndexType indexType,
      String indexedExpression,
      String fromClause,
      String imports,
      boolean loadEntries,
      Region region)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index creation on the server is not supported from the client.");
    }
    PartitionedIndex parIndex = null;
    if (region == null) {
      region = getRegionFromPath(imports, fromClause);
    }
    RegionAttributes ra = region.getAttributes();

    // Asif: If the eviction action is overflow-to-disk then do not allow index creation.
    // It is OK to create an index in persist-only mode, since the data will always
    // exist in memory.
    // if(ra.getEvictionAttributes().getAction().isOverflowToDisk() ) {
    //  throw new
    // UnsupportedOperationException(LocalizedStrings.DefaultQueryService_INDEX_CREATION_IS_NOT_SUPPORTED_FOR_REGIONS_WHICH_OVERFLOW_TO_DISK_THE_REGION_INVOLVED_IS_0.toLocalizedString(regionPath));
    // }
    // If it's a PR then create the index on all of the local buckets.
    if (((LocalRegion) region).heapThresholdReached.get()
        && !InternalResourceManager.isLowMemoryExceptionDisabled()) {
      LocalRegion lr = (LocalRegion) region;
      throw new LowMemoryException(
          LocalizedStrings.ResourceManager_LOW_MEMORY_FOR_INDEX.toLocalizedString(region.getName()),
          lr.getHeapThresholdReachedMembers());
    }
    if (region instanceof PartitionedRegion) {
      try {
        parIndex =
            (PartitionedIndex)
                ((PartitionedRegion) region)
                    .createIndex(
                        false,
                        indexType,
                        indexName,
                        indexedExpression,
                        fromClause,
                        imports,
                        loadEntries);
      } catch (ForceReattemptException ex) {
        region
            .getCache()
            .getLoggerI18n()
            .info(
                LocalizedStrings
                    .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR,
                ex);
      } catch (IndexCreationException exx) {
        region
            .getCache()
            .getLoggerI18n()
            .info(
                LocalizedStrings
                    .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR,
                exx);
      }
      return parIndex;

    } else {

      IndexManager indexManager = IndexUtils.getIndexManager(region, true);
      Index index =
          indexManager.createIndex(
              indexName,
              indexType,
              indexedExpression,
              fromClause,
              imports,
              null,
              null,
              loadEntries);

      return index;
    }
  }