@Test
 public void testSizeAllocation() {
   // prevent the cache from closing in reaction to an OutOfOffHeapMemoryException
   System.setProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY, "true");
   GemFireCacheImpl gfc = createCache();
   try {
     MemoryAllocator ma = gfc.getOffHeapStore();
     assertNotNull(ma);
     final long offHeapSize = ma.getFreeMemory();
     assertEquals(0, ma.getUsedMemory());
     MemoryChunk mc1 = ma.allocate(64, null);
     assertEquals(64 + perObjectOverhead(), ma.getUsedMemory());
     assertEquals(offHeapSize - (64 + perObjectOverhead()), ma.getFreeMemory());
     mc1.release();
     assertEquals(offHeapSize, ma.getFreeMemory());
     assertEquals(0, ma.getUsedMemory());
      // do an allocation larger than the slab size
      // TODO: currently the compaction will produce slabs bigger than the max slab size
      // (see the todo comment on compact() in SimpleMemoryAllocator).
      // So we request 20m here since that is the total size.
     try {
       ma.allocate(1024 * 1024 * 20, null);
       fail("Expected an out of heap exception");
     } catch (OutOfOffHeapMemoryException expected) {
     }
     assertEquals(0, ma.getUsedMemory());
     assertFalse(gfc.isClosed());
   } finally {
     System.clearProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY);
     closeCache(gfc, false);
   }
 }
 @Test
 public void testByteArrayAllocation() {
   GemFireCacheImpl gfc = createCache();
   try {
     MemoryAllocator ma = gfc.getOffHeapStore();
     assertNotNull(ma);
     final long offHeapSize = ma.getFreeMemory();
     assertEquals(0, ma.getUsedMemory());
     byte[] data = new byte[] {1, 2, 3, 4, 5, 6, 7, 8};
     MemoryChunk mc1 = (MemoryChunk) ma.allocateAndInitialize(data, false, false, null);
     assertEquals(data.length + perObjectOverhead(), ma.getUsedMemory());
     assertEquals(offHeapSize - (data.length + perObjectOverhead()), ma.getFreeMemory());
     byte[] data2 = new byte[data.length];
     mc1.readBytes(0, data2);
     assertTrue(Arrays.equals(data, data2));
     mc1.release();
     assertEquals(offHeapSize, ma.getFreeMemory());
     assertEquals(0, ma.getUsedMemory());
      // try a small byte[] that doesn't need to be stored off heap.
     data = new byte[] {1, 2, 3, 4, 5, 6, 7};
     StoredObject so1 = ma.allocateAndInitialize(data, false, false, null);
     assertEquals(0, ma.getUsedMemory());
     assertEquals(offHeapSize, ma.getFreeMemory());
      data2 = (byte[]) so1.getDeserializedForReading();
     assertTrue(Arrays.equals(data, data2));
   } finally {
     closeCache(gfc, false);
   }
 }
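  // The two allocator tests above encode an accounting invariant: an off-heap
  // allocation costs its data length plus a fixed per-object header, while a
  // sufficiently small byte[] stays on heap and leaves the off-heap counters
  // untouched. A minimal sketch of that invariant, assuming it sits in the same
  // test class (so perObjectOverhead() and the allocator types are in scope);
  // the helper name is hypothetical.
  private void checkAllocationAccounting(MemoryAllocator ma, byte[] data) {
    final long freeBefore = ma.getFreeMemory();
    StoredObject so = ma.allocateAndInitialize(data, false, false, null);
    if (ma.getUsedMemory() == 0) {
      // small values are stored "inline" on heap; off-heap memory is untouched
      assertEquals(freeBefore, ma.getFreeMemory());
    } else {
      // off-heap values consume their length plus the per-object header
      assertEquals(data.length + perObjectOverhead(), ma.getUsedMemory());
      ((MemoryChunk) so).release();
      assertEquals(freeBefore, ma.getFreeMemory());
    }
  }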
  @Override
  public void cmdExecute(Message msg, ServerConnection servConn, long start)
      throws IOException, ClassNotFoundException {
    servConn.setAsTrue(REQUIRES_RESPONSE);
     int noOfParts = msg.getNumberOfParts();
     if (logger.fineEnabled()) {
       logger.fine(
           servConn.getName()
               + ": Received get pdx id for enum request ("
               + noOfParts
               + " parts) from "
               + servConn.getSocketString());
     }

    EnumInfo enumInfo = (EnumInfo) msg.getPart(0).getObject();
    int enumId = msg.getPart(1).getInt();

    try {
      GemFireCacheImpl cache = (GemFireCacheImpl) servConn.getCache();
      TypeRegistry registry = cache.getPdxRegistry();
      registry.addRemoteEnum(enumId, enumInfo);
    } catch (Exception e) {
      writeException(msg, e, false, servConn);
      servConn.setAsTrue(RESPONDED);
      return;
    }

    writeReply(msg, servConn);
    servConn.setAsTrue(RESPONDED);
  }
 /**
  * Constructs a new <code>CacheClosedException</code> with a message string.
  *
  * @param msg a message string
  */
 public CacheClosedException(String msg) {
   super(msg);
   // bug #43108 - CacheClosedException should include cause of closure
   GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
   if (cache != null) {
     initCause(cache.getDisconnectCause());
   }
 }
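  // Because the constructor above wires in the disconnect cause via initCause,
  // callers can surface why the cache went away. A hedged usage sketch against
  // the public CacheFactory API; the method name is illustrative only.
  private static void reportCacheClosure() {
    try {
      Cache cache = CacheFactory.getAnyInstance(); // throws CacheClosedException if no open cache
      cache.getLogger().fine("cache is open");
    } catch (CacheClosedException e) {
      Throwable cause = e.getCause(); // may be null if no disconnect cause was recorded
      System.err.println("Cache closed" + (cause != null ? ": " + cause : ""));
    }
  }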
  public void begin() throws ResourceException {
    if (DEBUG) {
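       // DEBUG aid: throw and immediately catch an NPE purely to print a
       // stack trace identifying this call site.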
      try {
        throw new NullPointerException("Asif:JCALocalTransaction:begin");
      } catch (NullPointerException npe) {
        npe.printStackTrace();
      }
    }
    try {
      if (!initDone || this.cache.isClosed()) {
        this.init();
      }
      // System.out.println("JCALocalTransaction:Asif: cache is ="+cache +
      // " for tx ="+this);
      LogWriter logger = cache.getLogger();
      if (logger.fineEnabled()) {
        logger.fine("JCALocalTransaction::begin:");
      }
      TransactionManager tm = cache.getJTATransactionManager();
      if (this.tid != null) {
         throw new LocalTransactionException("A transaction is already in progress");
      }
      if (tm != null && tm.getTransaction() != null) {
        if (logger.fineEnabled()) {
          logger.fine("JCAManagedConnection: JTA transaction is on");
        }
         // A JTA transaction is in progress. Assuming the ignore-jta flag is
         // true, explicitly begin a GemFire transaction.
        TXStateProxy tsp = this.gfTxMgr.getTXState();
        if (tsp == null) {
          this.gfTxMgr.begin();
          tsp = this.gfTxMgr.getTXState();
          tsp.setJCATransaction();
          this.tid = tsp.getTransactionId();
          if (logger.fineEnabled()) {
            logger.fine("JCALocalTransaction:begun GFE transaction");
          }
        } else {
          throw new LocalTransactionException("GemFire is already associated with a transaction");
        }
      } else {
        if (logger.fineEnabled()) {
          logger.fine("JCAManagedConnection: JTA Transaction does not exist.");
        }
      }
    } catch (SystemException e) {
      // this.onError();
      throw new ResourceException(e);
    }
    // Not to be invoked for local transactions managed by the container
    // Iterator<ConnectionEventListener> itr = this.listeners.iterator();
    // ConnectionEvent ce = new ConnectionEvent(this,
    // ConnectionEvent.LOCAL_TRANSACTION_STARTED);
    // while (itr.hasNext()) {
    // itr.next().localTransactionStarted(ce);
    // }

  }
  private void dataPersistenceOfPR(String partitionClause) throws Exception {
    Properties props = new Properties();
    Connection conn = TestUtil.getConnection(props);
    char fileSeparator = System.getProperty("file.separator").charAt(0);
    GemFireCacheImpl cache = Misc.getGemFireCache();
    Statement stmt = conn.createStatement();
    if (cache.findDiskStore("TestPersistenceDiskStore") == null) {

      String path = "." + fileSeparator + "test_dir";
      File file = new File(path);
      if (!file.mkdirs() && !file.isDirectory()) {
        throw new DiskAccessException(
            "Could not create directory for " + " default disk store : " + file.getAbsolutePath(),
            (Region) null);
      }
      try {
        Connection conn1;
        conn1 = TestUtil.getConnection();
        Statement stmt1 = conn1.createStatement();
        stmt1.execute("Create DiskStore " + "TestPersistenceDiskStore" + "'" + path + "'");
        conn1.close();
      } catch (SQLException e) {
        throw GemFireXDRuntimeException.newRuntimeException(null, e);
      }
    }

    stmt.execute("create schema trade");
    stmt.execute(
        "create table trade.customers (cid int not null, cust_name varchar(100), tid int, "
            + "primary key (cid))   "
            + partitionClause
            + "  PERSISTENT "
            + "'"
            + "TestPersistenceDiskStore"
            + "'");
    PreparedStatement ps = conn.prepareStatement("insert into trade.customers values (?,?,?)");
    for (int i = 1; i < 31; ++i) {
      ps.setInt(1, i);
      ps.setString(2, "name" + i);
      ps.setInt(3, i);
      ps.executeUpdate();
    }

    conn.close();
    shutDown();

    conn = TestUtil.getConnection();
    stmt = conn.createStatement();

    ResultSet rs = stmt.executeQuery("select * from trade.customers");
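     // cid values 1..30 were inserted above, so the cid column sums to 465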
    int expected = 465;
    int actual = 0;
    while (rs.next()) {
      int val = rs.getInt(1);
      actual += val;
    }
    assertEquals(expected, actual);
  }
 /**
  * Gets an arbitrary open instance of {@link Cache} produced by an earlier call to {@link
  * #create()}.
  *
  * @throws CacheClosedException if a cache has not been created or the only created one is {@link
  *     Cache#isClosed closed}
  */
 public static synchronized Cache getAnyInstance() {
   GemFireCacheImpl instance = GemFireCacheImpl.getInstance();
   if (instance == null) {
     throw new CacheClosedException(
         LocalizedStrings.CacheFactory_A_CACHE_HAS_NOT_YET_BEEN_CREATED.toLocalizedString());
   } else {
     instance.getCancelCriterion().checkCancelInProgress(null);
     return instance;
   }
 }
 @Override
 public void publish(LogRecord record) {
   GemFireCacheImpl gemFireCache = GemFireCacheImpl.getInstance();
   if (gemFireCache != null) {
     Logger rootLogger = Logger.getLogger("");
     rootLogger.removeHandler(this);
     Handler gemfireHandler = gemFireCache.getLogger().getHandler();
     rootLogger.addHandler(gemfireHandler);
     gemfireHandler.publish(record);
   } else {
     super.publish(record);
   }
 }
  @Override
  public void execute(FunctionContext context) {
    InternalLocator locator = InternalLocator.getLocator();
    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
    DistributedMember member = cache.getDistributedSystem().getDistributedMember();
    SharedConfigurationStatus status = locator.getSharedConfigurationStatus().getStatus();

    String memberId = member.getName();
    if (StringUtils.isBlank(memberId)) {
      memberId = member.getId();
    }

    CliFunctionResult result = new CliFunctionResult(memberId, new String[] {status.name()});
    context.getResultSender().lastResult(result);
  }
 @Override
 public void processIncoming(
     DistributionManager dm,
     String adviseePath,
     boolean removeProfile,
     boolean exchangeProfiles,
     final List<Profile> replyProfiles) {
   final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
   if (cache != null && !cache.isClosed()) {
     handleDistributionAdvisee(
         cache.getJmxManagerAdvisor().getAdvisee(),
         removeProfile,
         exchangeProfiles,
         replyProfiles);
   }
 }
  public void commit() throws ResourceException {
    if (DEBUG) {
      try {
        throw new NullPointerException("Asif:JCALocalTransaction:commit");
      } catch (NullPointerException npe) {
        npe.printStackTrace();
      }
    }
    LogWriter logger = cache.getLogger();
    if (logger.fineEnabled()) {
      logger.fine("JCALocalTransaction:invoked commit");
    }
    TXStateProxy tsp = this.gfTxMgr.getTXState();
    if (tsp != null && this.tid != tsp.getTransactionId()) {
      throw new IllegalStateException(
          "Local Transaction associated with Tid = "
              + this.tid
              + " attempting to commit a different transaction");
    }
    try {
      this.gfTxMgr.commit();
      this.tid = null;
    } catch (Exception e) {
      throw new LocalTransactionException(e.toString());
    }
    // Iterator<ConnectionEventListener> itr = this.listeners.iterator();
    // ConnectionEvent ce = new
    // ConnectionEvent(this,ConnectionEvent.LOCAL_TRANSACTION_COMMITTED);
    // while( itr.hasNext()) {
    // itr.next().localTransactionCommitted(ce);
    // }

  }
 public NewLRUClockHand(
     Object region, EnableLRU ccHelper, InternalRegionArguments internalRegionArgs) {
   setBucketRegion(region);
   this.lock = new HeadLock();
   // behavior relies on a single evicted node in the pipe when the pipe is empty.
   initHeadAndTail();
   if (this.bucketRegion != null) {
     this.stats =
         internalRegionArgs.getPartitionedRegion() != null
             ? internalRegionArgs.getPartitionedRegion().getEvictionController().stats
             : null;
   } else {
     LRUStatistics tmp = null;
     if (region instanceof PlaceHolderDiskRegion) {
       tmp = ((PlaceHolderDiskRegion) region).getPRLRUStats();
     } else if (region instanceof PartitionedRegion) {
       tmp = ((PartitionedRegion) region).getPRLRUStatsDuringInitialization(); // bug 41938
       PartitionedRegion pr = (PartitionedRegion) region;
       if (tmp != null) {
         pr.getEvictionController().stats = tmp;
       }
     }
     if (tmp == null) {
       StatisticsFactory sf = GemFireCacheImpl.getExisting("").getDistributedSystem();
       tmp = ccHelper.initStats(region, sf);
     }
     this.stats = tmp;
   }
 }
 private void closeCache(GemFireCacheImpl gfc, boolean keepOffHeapAllocated) {
   gfc.close();
   if (!keepOffHeapAllocated) {
     SimpleMemoryAllocatorImpl.freeOffHeapMemory();
   }
   // TODO cleanup default disk store files
 }
 /* (non-Javadoc)
  * @see com.gemstone.gemfire.distributed.internal.DistributionMessage#process(com.gemstone.gemfire.distributed.internal.DistributionManager)
  */
 @Override
 protected void process(DistributionManager dm) {
   Throwable thr = null;
   JmxManagerProfile p = null;
   try {
     final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
     if (cache != null && !cache.isClosed()) {
       final JmxManagerAdvisor adv = cache.getJmxManagerAdvisor();
       p = this.profile;
       if (p != null) {
         adv.putProfile(p);
       }
     } else {
       if (logger.isDebugEnabled()) {
         logger.debug("No cache {}", this);
       }
     }
   } catch (CancelException e) {
     if (logger.isDebugEnabled()) {
       logger.debug("Cache closed, ", this);
     }
   } catch (VirtualMachineError err) {
     SystemFailure.initiateFailure(err);
     // If this ever returns, rethrow the error.  We're poisoned
     // now, so don't let this thread continue.
     throw err;
   } catch (Throwable t) {
     // Whenever you catch Error or Throwable, you must also
     // catch VirtualMachineError (see above).  However, there is
     // _still_ a possibility that you are dealing with a cascading
     // error condition, so you also need to check to see if the JVM
     // is still usable:
     SystemFailure.checkFailure();
     thr = t;
   } finally {
     if (thr != null) {
       dm.getCancelCriterion().checkCancelInProgress(null);
       logger.info(
           LocalizedMessage.create(
               LocalizedStrings.ResourceAdvisor_MEMBER_CAUGHT_EXCEPTION_PROCESSING_PROFILE,
               new Object[] {p, toString()},
               thr));
     }
   }
 }
 private void init() throws SystemException {
   this.cache = (GemFireCacheImpl) CacheFactory.getAnyInstance();
   LogWriter logger = this.cache.getLogger();
   if (logger.fineEnabled()) {
     logger.fine("JCAManagedConnection:init. Inside init");
   }
   gfTxMgr = cache.getTxManager();
   this.initDone = true;
 }
 private static Cache basicGetInstancePart2(DistributedSystem system, boolean closeOk) {
   GemFireCacheImpl instance = GemFireCacheImpl.getInstance();
   if (instance == null) {
     throw new CacheClosedException(
         LocalizedStrings.CacheFactory_A_CACHE_HAS_NOT_YET_BEEN_CREATED.toLocalizedString());
   } else {
     if (instance.isClosed() && !closeOk) {
       throw instance.getCacheClosedException(
           LocalizedStrings.CacheFactory_THE_CACHE_HAS_BEEN_CLOSED.toLocalizedString(), null);
     }
     if (!instance.getDistributedSystem().equals(system)) {
       throw new CacheClosedException(
           LocalizedStrings
               .CacheFactory_A_CACHE_HAS_NOT_YET_BEEN_CREATED_FOR_THE_GIVEN_DISTRIBUTED_SYSTEM
               .toLocalizedString());
     }
     return instance;
   }
 }
  public void keep_testOutOfOffHeapMemoryErrorClosesCache() {
    // this test is redundant but may be useful
    final GemFireCacheImpl gfc = createCache();
    try {
      MemoryAllocator ma = gfc.getOffHeapStore();
      assertNotNull(ma);
      final long offHeapSize = ma.getFreeMemory();
      assertEquals(0, ma.getUsedMemory());
      MemoryChunk mc1 = ma.allocate(64, null);
      assertEquals(64 + perObjectOverhead(), ma.getUsedMemory());
      assertEquals(offHeapSize - (64 + perObjectOverhead()), ma.getFreeMemory());
      mc1.release();
      assertEquals(offHeapSize, ma.getFreeMemory());
      assertEquals(0, ma.getUsedMemory());
      // do an allocation larger than the slab size
      try {
        ma.allocate(1024 * 1024 * 10, null);
        fail("Expected an out of heap exception");
      } catch (OutOfOffHeapMemoryException expected) {
        // passed
      }
      assertEquals(0, ma.getUsedMemory());

      final WaitCriterion waitForDisconnect =
          new WaitCriterion() {
            public boolean done() {
              return gfc.isClosed();
            }

            public String description() {
              return "Waiting for disconnect to complete";
            }
          };
      dunit.DistributedTestCase.waitForCriterion(waitForDisconnect, 10 * 1000, 100, true);

      assertTrue(gfc.isClosed());
    } finally {
      closeCache(gfc, false);
    }
  }
 /** Flush any pending writes. */
 @Override
 protected synchronized PdxReaderImpl getUnmodifiableReader() {
   if (this.dirtyFields != null) {
     PdxOutputStream os = new PdxOutputStream(basicSize() + PdxWriterImpl.HEADER_SIZE);
     PdxWriterImpl writer;
     if (getPdxType().getHasDeletedField()) {
       // Need a new type that does not have the deleted field
       PdxType pt = new PdxType(getPdxType().getClassName(), !getPdxType().getNoDomainClass());
       GemFireCacheImpl gfc =
           GemFireCacheImpl.getForPdx(
               "PDX registry is unavailable because the Cache has been closed.");
       TypeRegistry tr = gfc.getPdxRegistry();
       writer = new PdxWriterImpl(pt, tr, os);
     } else {
       writer = new PdxWriterImpl(getPdxType(), os);
     }
     for (PdxField f : getPdxType().getFields()) {
       if (f.isDeleted()) {
         continue;
       }
       Object dv = this.dirtyFields[f.getFieldIndex()];
       if (dv != null) {
         if (dv == NULL_TOKEN) {
           dv = null;
         }
         writer.writeField(f, dv);
       } else {
         writer.writeRawField(f, getRaw(f));
       }
     }
     writer.completeByteStreamGeneration();
     ByteBuffer bb = os.toByteBuffer();
     bb.position(PdxWriterImpl.HEADER_SIZE);
     basicSetBuffer(bb.slice());
     this.dirtyFields = null;
   }
   return new PdxReaderImpl(this);
 }
  private static synchronized Cache create(
      DistributedSystem system, boolean existingOk, CacheConfig cacheConfig)
      throws CacheExistsException, TimeoutException, CacheWriterException, GatewayException,
          RegionExistsException {
    GemFireCacheImpl instance = GemFireCacheImpl.getInstance();

    if (instance != null && !instance.isClosed()) {
      if (existingOk) {
        // Check if cache configuration matches.
        cacheConfig.validateCacheConfig(instance);

        return instance;
      } else {
        // instance.creationStack argument is for debugging...
        throw new CacheExistsException(
            instance,
            LocalizedStrings.CacheFactory_0_AN_OPEN_CACHE_ALREADY_EXISTS.toLocalizedString(
                instance),
            instance.creationStack);
      }
    }
    return GemFireCacheImpl.create(system, cacheConfig);
  }
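   // The existingOk branch above is what the public CacheFactory surfaces:
   // re-creating with a compatible configuration hands back the already-open
   // instance, while a conflicting request fails the validateCacheConfig check.
   // A hedged sketch of that round trip; the method name is illustrative only.
   private static void reuseExistingCache() {
     Cache first = new CacheFactory().create();
     Cache second = new CacheFactory().create(); // compatible config: same open instance
     assert first == second;
   }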
  @SuppressWarnings({"rawtypes", "unchecked"})
  protected void initializeRegion() {
    String regionName = DQueue.NAME_PREFIX + this.id;
    Region region = this.cache.getRegion(regionName);
    boolean createdRegion = false;

    // Create the dq region if necessary
    if (region == null) {
      GemFireCacheImpl gfci = (GemFireCacheImpl) this.cache;
      if (gfci.isClient()) {
        // can't use the createRegion method if this cache is a client-cache
        ClientRegionFactory factory = gfci.createClientRegionFactory(ClientRegionShortcut.PROXY);
        factory.setPoolName(this.dqAttributes.getPoolName());
        region = factory.create(regionName);
      } else {
        AttributesFactory factory = new AttributesFactory();
        factory.setDataPolicy(DataPolicy.EMPTY);
        factory.setScope(Scope.LOCAL);
        factory.setPoolName(this.dqAttributes.getPoolName());
        region = this.dqRegion = this.cache.createRegion(regionName, factory.create());
        createdRegion = true;
      }
    }

    this.dqRegion = region;

    if (this.cache.getLogger().fineEnabled()) {
      this.cache
          .getLogger()
          .fine(
              this
                  + ": "
                  + (createdRegion ? "Created" : "Retrieved")
                  + " DQueue region: "
                  + this.dqRegion.getFullPath());
    }
  }
 private HDFSGatewayEventImpl getNewEvent(
     Object key, Object value, Region r1, int bid, int tailKey) throws Exception {
   EntryEventImpl ev1 =
       EntryEventImpl.create(
           (LocalRegion) r1,
           Operation.CREATE,
           key,
           value,
           null,
           false,
           (DistributedMember) c.getMyId());
   ev1.setEventId(new EventID(this.c.getDistributedSystem()));
    HDFSGatewayEventImpl event =
        new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, ev1, null, true, bid);
   event.setShadowKey((long) tailKey);
   return event;
 }
  public void rollback() throws ResourceException {
    if (DEBUG) {
      try {
        throw new NullPointerException("Asif:JJCALocalTransaction:rollback");
      } catch (NullPointerException npe) {
        npe.printStackTrace();
      }
    }
    TXStateProxy tsp = this.gfTxMgr.getTXState();
    if (tsp != null && this.tid != tsp.getTransactionId()) {
       throw new IllegalStateException(
           "Local Transaction associated with Tid = "
               + this.tid
               + " attempting to roll back a different transaction");
    }
    LogWriter logger = cache.getLogger();
    if (logger.fineEnabled()) {
      logger.fine("JCALocalTransaction:invoked rollback");
    }
    try {
      this.gfTxMgr.rollback();
    } catch (IllegalStateException ise) {
      // It is possible that the GFE transaction has already been rolled back.
      if (ise.getMessage()
          .equals(
              LocalizedStrings.TXManagerImpl_THREAD_DOES_NOT_HAVE_AN_ACTIVE_TRANSACTION
                  .toLocalizedString())) {
         // ignore
      } else {
        throw new ResourceException(ise);
      }
    } catch (Exception e) {
      throw new ResourceException(e);
    } finally {
      this.tid = null;
    }
    // Iterator<ConnectionEventListener> itr = this.listeners.iterator();
    // ConnectionEvent ce = new ConnectionEvent(this,
    // ConnectionEvent.LOCAL_TRANSACTION_ROLLEDBACK);
    // while (itr.hasNext()) {
    // itr.next().localTransactionRolledback(ce);
    // }

  }
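   // begin(), commit(), and rollback() above implement the standard JCA
   // LocalTransaction contract. A hedged sketch of the lifecycle a container
   // or resource adapter would drive; the helper method is illustrative only.
   private static void runInLocalTransaction(javax.resource.spi.LocalTransaction tx)
       throws javax.resource.ResourceException {
     tx.begin(); // starts a GemFire transaction when a JTA transaction is on
     try {
       // ... perform region operations through the managed connection ...
       tx.commit(); // clears the tid on success
     } catch (javax.resource.ResourceException e) {
       tx.rollback(); // tolerates a GFE transaction that was already rolled back
       throw e;
     }
   }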
  protected RegionMBeanBridge(Region<K, V> region) {
    this.region = region;
    this.regAttrs = region.getAttributes();

    this.isStatisticsEnabled = regAttrs.getStatisticsEnabled();

    this.regionAttributesData = RegionMBeanCompositeDataFactory.getRegionAttributesData(regAttrs);
    this.membershipAttributesData =
        RegionMBeanCompositeDataFactory.getMembershipAttributesData(regAttrs);
    this.evictionAttributesData =
        RegionMBeanCompositeDataFactory.getEvictionAttributesData(regAttrs);

    this.regionMonitor =
        new MBeanStatsMonitor(ManagementStrings.REGION_MONITOR.toLocalizedString());

    configureRegionMetrics();

    this.persistentEnabled = region.getAttributes().getDataPolicy().withPersistence();

    this.regionStats = ((LocalRegion) region).getRegionPerfStats();
    if (regionStats != null) {
      regionMonitor.addStatisticsToMonitor(regionStats.getStats()); // fixes 46692
    }

    LocalRegion l = (LocalRegion) region;
    if (l.getEvictionController() != null) {
      LRUStatistics stats = l.getEvictionController().getLRUHelper().getStats();
      if (stats != null) {
        regionMonitor.addStatisticsToMonitor(stats.getStats());
        EvictionAttributes ea = region.getAttributes().getEvictionAttributes();
        if (ea != null && ea.getAlgorithm().isLRUMemory()) {
          this.lruMemoryStats = stats;
        }
      }
    }

    if (regAttrs.getGatewaySenderIds() != null && regAttrs.getGatewaySenderIds().size() > 0) {
      this.isGatewayEnabled = true;
    }

    this.member = GemFireCacheImpl.getInstance().getDistributedSystem().getMemberId();
  }
  public void testHopQueueWithOneBucket() throws Exception {
    this.c.close();
    this.c = createCache();
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    PartitionedRegion r1 =
        (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
    r1.put("K9", "x1");
    r1.put("K8", "x2");
    // hack to get the queue.
    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue hdfsBQ =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    EntryEventImpl ev1 =
        EntryEventImpl.create(
            (LocalRegion) r1,
            Operation.CREATE,
            (Object) "K1",
            (Object) "V1",
            null,
            false,
            (DistributedMember) c.getMyId());
    // put some keys with multiple updates.
    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2));
    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8));
    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7));
    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3));
    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6));
    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9));

     assertTrue(
         "skip list size should be 6", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);

     // peek a key; it should be the lowest
    Object[] l = hopqueue.peek(1, 0).toArray();

    assertTrue(
        "First key should be K1 but is " + ((HDFSGatewayEventImpl) l[0]).getKey(),
        ((HDFSGatewayEventImpl) l[0]).getKey().equals("K1"));
     assertTrue(
         "Peeked skip list size should be 6",
         getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
     assertTrue(
         "skip list size should be 0", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);

     // try to fetch the key; it is in the peeked skip list but still available
    Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
    assertTrue("First key should be K1", ((HDFSGatewayEventImpl) o).getKey().equals("K1"));

    assertTrue(
        " skip lists size should be  6",
        (getSortedEventQueue(hdfsBQ).getPeeked().size()
                + getSortedEventQueue(hdfsBQ).currentSkipList.size())
            == 6);

    o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
    Object v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K2 with value V2a but the value was " + v, ((String) v).equals("V2a"));

    o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
    v = ((HDFSGatewayEventImpl) o).getDeserializedValue();
    assertTrue(" key should K3 with value V3b but the value was " + v, ((String) v).equals("V3b"));
  }
  @SuppressWarnings({"rawtypes", "unchecked"})
  private void doRegionTest(final RegionShortcut rs, final String rName, boolean compressed) {
    boolean isPersistent =
        rs == RegionShortcut.LOCAL_PERSISTENT
            || rs == RegionShortcut.REPLICATE_PERSISTENT
            || rs == RegionShortcut.PARTITION_PERSISTENT;
    GemFireCacheImpl gfc = createCache(isPersistent);
    Region r = null;
    try {
      gfc.setCopyOnRead(true);
      final MemoryAllocator ma = gfc.getOffHeapStore();
      assertNotNull(ma);
      assertEquals(0, ma.getUsedMemory());
      Compressor compressor = null;
      if (compressed) {
        compressor = SnappyCompressor.getDefaultInstance();
      }
      r = gfc.createRegionFactory(rs).setOffHeap(true).setCompressor(compressor).create(rName);
      assertEquals(true, r.isEmpty());
      assertEquals(0, ma.getUsedMemory());
      Object data = new Integer(123456789);
      r.put("key1", data);
      // System.out.println("After put of Integer value off heap used memory=" +
      // ma.getUsedMemory());
      assertTrue(ma.getUsedMemory() == 0);
      assertEquals(data, r.get("key1"));
      r.invalidate("key1");
      assertEquals(0, ma.getUsedMemory());
      r.put("key1", data);
      assertTrue(ma.getUsedMemory() == 0);
      long usedBeforeUpdate = ma.getUsedMemory();
      r.put("key1", data);
      assertEquals(usedBeforeUpdate, ma.getUsedMemory());
      assertEquals(data, r.get("key1"));
      r.destroy("key1");
      assertEquals(0, ma.getUsedMemory());

      data = new Long(0x007FFFFFL);
      r.put("key1", data);
      if (!compressed) {
        assertTrue(ma.getUsedMemory() == 0);
      }
      assertEquals(data, r.get("key1"));
      data = new Long(0xFF8000000L);
      r.put("key1", data);
      if (!compressed) {
        assertTrue(ma.getUsedMemory() == 0);
      }
      assertEquals(data, r.get("key1"));

      // now let's set data to something that will be stored off heap
      data = new Long(Long.MAX_VALUE);
      r.put("key1", data);
      assertEquals(data, r.get("key1"));
      // System.out.println("After put of Integer value off heap used memory=" +
      // ma.getUsedMemory());
      assertTrue(ma.getUsedMemory() > 0);
      data = new Long(Long.MIN_VALUE);
      r.put("key1", data);
      assertEquals(data, r.get("key1"));
      // System.out.println("After put of Integer value off heap used memory=" +
      // ma.getUsedMemory());
      assertTrue(ma.getUsedMemory() > 0);
      r.invalidate("key1");
      assertEquals(0, ma.getUsedMemory());
      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      usedBeforeUpdate = ma.getUsedMemory();
      r.put("key1", data);
      assertEquals(usedBeforeUpdate, ma.getUsedMemory());
      assertEquals(data, r.get("key1"));
      r.destroy("key1");
      assertEquals(0, ma.getUsedMemory());

      // confirm that byte[] do use off heap
      {
        byte[] originalBytes = new byte[1024];
        Object oldV = r.put("byteArray", originalBytes);
        long startUsedMemory = ma.getUsedMemory();
        assertEquals(null, oldV);
        byte[] readBytes = (byte[]) r.get("byteArray");
        if (originalBytes == readBytes) {
          fail("Expected different byte[] identity");
        }
        if (!Arrays.equals(readBytes, originalBytes)) {
          fail("Expected byte array contents to be equal");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());
        oldV = r.put("byteArray", originalBytes);
        if (!compressed) {
          assertEquals(null, oldV); // we default to old value being null for offheap
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());

        readBytes = (byte[]) r.putIfAbsent("byteArray", originalBytes);
        if (originalBytes == readBytes) {
          fail("Expected different byte[] identity");
        }
        if (!Arrays.equals(readBytes, originalBytes)) {
          fail("Expected byte array contents to be equal");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());
        if (!r.replace("byteArray", readBytes, originalBytes)) {
          fail("Expected replace to happen");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());
        byte[] otherBytes = new byte[1024];
        otherBytes[1023] = 1;
        if (r.replace("byteArray", otherBytes, originalBytes)) {
          fail("Expected replace to not happen");
        }
        if (r.replace("byteArray", "bogus string", originalBytes)) {
          fail("Expected replace to not happen");
        }
        if (r.remove("byteArray", "bogus string")) {
          fail("Expected remove to not happen");
        }
        assertEquals(startUsedMemory, ma.getUsedMemory());

        if (!r.remove("byteArray", originalBytes)) {
          fail("Expected remove to happen");
        }
        assertEquals(0, ma.getUsedMemory());
        oldV = r.putIfAbsent("byteArray", "string value");
        assertEquals(null, oldV);
        assertEquals("string value", r.get("byteArray"));
        if (r.replace("byteArray", "string valuE", originalBytes)) {
          fail("Expected replace to not happen");
        }
        if (!r.replace("byteArray", "string value", originalBytes)) {
          fail("Expected replace to happen");
        }
        oldV = r.destroy("byteArray"); // we default to old value being null for offheap
        if (!compressed) {
          assertEquals(null, oldV);
        }
        MyCacheListener listener = new MyCacheListener();
        r.getAttributesMutator().addCacheListener(listener);
        try {
          Object valueToReplace = "string value1";
          r.put("byteArray", valueToReplace);
          assertEquals(null, listener.ohOldValue);
          if (!compressed) {
            assertEquals("string value1", listener.ohNewValue.getDeserializedForReading());
            valueToReplace = listener.ohNewValue;
          }
          if (!r.replace("byteArray", valueToReplace, "string value2")) {
            fail("expected replace to happen");
          }
          if (!compressed) {
            assertEquals("string value2", listener.ohNewValue.getDeserializedForReading());
            assertEquals("string value1", listener.ohOldValue.getDeserializedForReading());
          }
          // make sure that custom equals/hashCode methods are not used when comparing values.

        } finally {
          r.getAttributesMutator().removeCacheListener(listener);
        }
      }
      assertTrue(ma.getUsedMemory() > 0);
      {
        Object key = "MyValueWithPartialEquals";
        MyValueWithPartialEquals v1 = new MyValueWithPartialEquals("s1");
        MyValueWithPartialEquals v2 = new MyValueWithPartialEquals("s2");
        MyValueWithPartialEquals v3 = new MyValueWithPartialEquals("s1");
        r.put(key, v1);
        try {
          if (r.replace(key, v2, "should not happen")) {
            fail("v1 should not be equal to v2 on an offheap region");
          }
          if (!r.replace(key, v3, "should happen")) {
            fail("v1 should be equal to v3 on an offheap region");
          }
          r.put(key, v1);
          if (r.remove(key, v2)) {
            fail("v1 should not be equal to v2 on an offheap region");
          }
          if (!r.remove(key, v3)) {
            fail("v1 should be equal to v3 on an offheap region");
          }
        } finally {
          r.remove(key);
        }
      }
      {
        Object key = "MyPdxWithPartialEquals";
        MyPdxWithPartialEquals v1 = new MyPdxWithPartialEquals("s", "1");
        MyPdxWithPartialEquals v2 = new MyPdxWithPartialEquals("s", "2");
        MyPdxWithPartialEquals v3 = new MyPdxWithPartialEquals("t", "1");
        r.put(key, v1);
        try {
          if (r.replace(key, v3, "should not happen")) {
            fail("v1 should not be equal to v3 on an offheap region");
          }
          if (!r.replace(key, v2, "should happen")) {
            fail("v1 should be equal to v2 on an offheap region");
          }
          r.put(key, v1);
          if (r.remove(key, v3)) {
            fail("v1 should not be equal to v3 on an offheap region");
          }
          if (!r.remove(key, v2)) {
            fail("v1 should be equal to v2 on an offheap region");
          }
        } finally {
          r.remove(key);
        }
      }
      byte[] value = new byte[1024];
      r.put("byteArray", value);
      r.remove("byteArray");
      assertEquals(0, ma.getUsedMemory());

      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      r.invalidateRegion();
      assertEquals(0, ma.getUsedMemory());

      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      try {
        r.clear();
        assertEquals(0, ma.getUsedMemory());
      } catch (UnsupportedOperationException ok) {
      }

      r.put("key1", data);
      assertTrue(ma.getUsedMemory() > 0);
      if (r.getAttributes().getDataPolicy().withPersistence()) {
        r.put("key2", Integer.valueOf(1234567890));
        r.put("key3", new Long(0x007FFFFFL));
        r.put("key4", new Long(0xFF8000000L));
        assertEquals(4, r.size());
        r.close();
        assertEquals(0, ma.getUsedMemory());
        // simple test of recovery
        r = gfc.createRegionFactory(rs).setOffHeap(true).create(rName);
        assertEquals(4, r.size());
        assertEquals(data, r.get("key1"));
        assertEquals(Integer.valueOf(1234567890), r.get("key2"));
        assertEquals(new Long(0x007FFFFFL), r.get("key3"));
        assertEquals(new Long(0xFF8000000L), r.get("key4"));
        closeCache(gfc, true);
        assertEquals(0, ma.getUsedMemory());
        gfc = createCache();
        if (ma != gfc.getOffHeapStore()) {
          fail("identity of offHeapStore changed when cache was recreated");
        }
        r = gfc.createRegionFactory(rs).setOffHeap(true).create(rName);
        assertTrue(ma.getUsedMemory() > 0);
        assertEquals(4, r.size());
        assertEquals(data, r.get("key1"));
        assertEquals(Integer.valueOf(1234567890), r.get("key2"));
        assertEquals(new Long(0x007FFFFFL), r.get("key3"));
        assertEquals(new Long(0xFF8000000L), r.get("key4"));
      }

      r.destroyRegion();
      assertEquals(0, ma.getUsedMemory());

    } finally {
      if (r != null && !r.isDestroyed()) {
        r.destroyRegion();
      }
      closeCache(gfc, false);
    }
  }
  /** Creates a <code>BridgeServerResponse</code> in response to the given request. */
  static BridgeServerResponse create(DistributionManager dm, BridgeServerRequest request) {
    BridgeServerResponse m = new BridgeServerResponse();
    m.setRecipient(request.getSender());

    try {
      GemFireCacheImpl cache = (GemFireCacheImpl) CacheFactory.getInstanceCloseOk(dm.getSystem());

      if (request.getCacheId() != System.identityHashCode(cache)) {
        m.bridgeInfo = null;

      } else {
        int operation = request.getOperation();
        switch (operation) {
          case BridgeServerRequest.ADD_OPERATION:
            {
              BridgeServerImpl bridge = (BridgeServerImpl) cache.addBridgeServer();
              m.bridgeInfo = new RemoteBridgeServer(bridge);
              break;
            }

          case BridgeServerRequest.INFO_OPERATION:
            {
              int id = request.getBridgeId();
              // Note that since this is only an informational request
              // it is not necessary to synchronize on allBridgeServersLock
              for (Iterator iter = cache.getBridgeServers().iterator(); iter.hasNext(); ) {
                BridgeServerImpl bridge = (BridgeServerImpl) iter.next();
                if (System.identityHashCode(bridge) == id) {
                  m.bridgeInfo = new RemoteBridgeServer(bridge);
                  break;

                } else {
                  m.bridgeInfo = null;
                }
              }
              break;
            }

          case BridgeServerRequest.START_OPERATION:
            {
              RemoteBridgeServer config = request.getBridgeInfo();
              for (Iterator iter = cache.getBridgeServers().iterator(); iter.hasNext(); ) {
                BridgeServerImpl bridge = (BridgeServerImpl) iter.next();
                if (System.identityHashCode(bridge) == config.getId()) {
                  bridge.configureFrom(config);
                  bridge.start();
                  m.bridgeInfo = new RemoteBridgeServer(bridge);
                  break;

                } else {
                  m.bridgeInfo = null;
                }
              }
              break;
            }

          case BridgeServerRequest.STOP_OPERATION:
            {
              RemoteBridgeServer config = request.getBridgeInfo();
              for (Iterator iter = cache.getBridgeServers().iterator(); iter.hasNext(); ) {
                BridgeServerImpl bridge = (BridgeServerImpl) iter.next();
                if (System.identityHashCode(bridge) == config.getId()) {
                  bridge.stop();
                  m.bridgeInfo = new RemoteBridgeServer(bridge);
                  break;

                } else {
                  m.bridgeInfo = null;
                }
              }
              break;
            }

          default:
            Assert.assertTrue(false, "Unknown bridge server operation: " + operation);
        }
      }

    } catch (CancelException ex) {
      m.bridgeInfo = null;

    } catch (Exception ex) {
      m.exception = ex;
      m.bridgeInfo = null;
    }
    return m;
  }
  private void importOnMember(File snapshot, SnapshotFormat format, SnapshotOptions<K, V> options)
      throws IOException, ClassNotFoundException {
    final LocalRegion local = getLocalRegion(region);

     if (getLoggerI18n().infoEnabled()) {
       getLoggerI18n().info(LocalizedStrings.Snapshot_IMPORT_BEGIN_0, region.getName());
     }

    long count = 0;
    long bytes = 0;
    long start = CachePerfStats.getStatTime();

    // Would be interesting to use a PriorityQueue ordered on isDone()
    // but this is probably close enough in practice.
    LinkedList<Future<?>> puts = new LinkedList<Future<?>>();
    GFSnapshotImporter in = new GFSnapshotImporter(snapshot);

    try {
      int bufferSize = 0;
      Map<K, V> buffer = new HashMap<K, V>();

      SnapshotRecord record;
      while ((record = in.readSnapshotRecord()) != null) {
        bytes += record.getSize();
        K key = record.getKeyObject();

        // Until we modify the semantics of put/putAll to allow null values we
        // have to subvert the API by using Token.INVALID.  Alternatively we could
        // invoke create/invalidate directly but that prevents us from using
        // bulk operations.  The ugly type coercion below is necessary to allow
        // strong typing elsewhere.
        V val = (V) Token.INVALID;
        if (record.hasValue()) {
          byte[] data = record.getValue();
          // If the underlying object is a byte[], we can't wrap it in a
          // CachedDeserializable.  Somewhere along the line the header bytes
          // get lost and we start seeing serialization problems.
          if (data.length > 0 && data[0] == DSCODE.BYTE_ARRAY) {
            // It would be faster to use System.arraycopy() directly but since
            // length field is variable it's probably safest and simplest to
            // keep the logic in the InternalDataSerializer.
            val = record.getValueObject();
          } else {
            val = (V) CachedDeserializableFactory.create(record.getValue());
          }
        }

        if (includeEntry(options, key, val)) {
          buffer.put(key, val);
          bufferSize += record.getSize();
          count++;

          // Push entries into cache using putAll on a separate thread so we
          // can keep the disk busy. Throttle puts so we don't overwhelm the cache.
          if (bufferSize > BUFFER_SIZE) {
            if (puts.size() == IMPORT_CONCURRENCY) {
              puts.removeFirst().get();
            }

            final Map<K, V> copy = new HashMap<K, V>(buffer);
            Future<?> f =
                GemFireCacheImpl.getExisting("Importing region from snapshot")
                    .getDistributionManager()
                    .getWaitingThreadPool()
                    .submit(
                        new Runnable() {
                          @Override
                          public void run() {
                            local.basicImportPutAll(copy, true);
                          }
                        });

            puts.addLast(f);
            buffer.clear();
            bufferSize = 0;
          }
        }
      }

      // send off any remaining entries
      if (!buffer.isEmpty()) {
        local.basicImportPutAll(buffer, true);
      }

      // wait for completion and check for errors
      while (!puts.isEmpty()) {
        puts.removeFirst().get();
      }

      if (getLoggerI18n().infoEnabled()) {
        getLoggerI18n()
            .info(
                LocalizedStrings.Snapshot_IMPORT_END_0_1_2_3,
                new Object[] {count, bytes, region.getName(), snapshot});
      }

    } catch (InterruptedException e) {
      while (!puts.isEmpty()) {
        puts.removeFirst().cancel(true);
      }
      Thread.currentThread().interrupt();
      throw (IOException) new InterruptedIOException().initCause(e);

    } catch (ExecutionException e) {
      while (!puts.isEmpty()) {
        puts.removeFirst().cancel(true);
      }
      throw new IOException(e);

    } finally {
      in.close();
      local.getCachePerfStats().endImport(count, start);
    }
  }
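   // Applications normally reach this import path through the public region
   // snapshot API rather than calling importOnMember directly. A hedged sketch;
   // the snapshot types are assumed to come from com.gemstone.gemfire.cache.snapshot.
   private static void importRegion(Region<String, Object> region, File snapshot) throws Exception {
     RegionSnapshotService<String, Object> service = region.getSnapshotService();
     service.load(snapshot, SnapshotFormat.GEMFIRE); // replays records through the buffered putAll path
   }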
 private static void stopGemfire() {
   GemFireCacheImpl.getExisting().close();
 }
  public void testSimpleOutOfOffHeapMemoryMemberDisconnects() {
    final DistributedSystem system = getSystem();
    final Cache cache = getCache();
    final DistributionManager dm =
        (DistributionManager) ((InternalDistributedSystem) system).getDistributionManager();

    Region<Object, Object> region =
        cache.createRegionFactory(getRegionShortcut()).setOffHeap(true).create(getRegionName());
    OutOfOffHeapMemoryException ooohme;
    try {
      Object value = new byte[1024];
      for (int i = 0; true; i++) {
        region.put("key-" + i, value);
      }
    } catch (OutOfOffHeapMemoryException e) {
      ooohme = e;
    }
    assertNotNull(ooohme);

    with()
        .pollInterval(100, TimeUnit.MILLISECONDS)
        .await()
        .atMost(10, TimeUnit.SECONDS)
        .until(() -> cache.isClosed() && !system.isConnected() && dm.isClosed());

    // wait for cache instance to be nulled out
    with()
        .pollInterval(100, TimeUnit.MILLISECONDS)
        .await()
        .atMost(10, TimeUnit.SECONDS)
        .until(
            () ->
                GemFireCacheImpl.getInstance() == null
                    && InternalDistributedSystem.getAnyInstance() == null);

    assertNull(GemFireCacheImpl.getInstance());

    // verify system was closed out due to OutOfOffHeapMemoryException
    assertFalse(system.isConnected());
    InternalDistributedSystem ids = (InternalDistributedSystem) system;
    try {
      ids.getDistributionManager();
      fail(
          "InternalDistributedSystem.getDistributionManager() should throw DistributedSystemDisconnectedException");
    } catch (DistributedSystemDisconnectedException expected) {
      assertRootCause(expected, OutOfOffHeapMemoryException.class);
    }

    // verify dm was closed out due to OutOfOffHeapMemoryException
    assertTrue(dm.isClosed());
    try {
      dm.throwIfDistributionStopped();
      fail(
          "DistributionManager.throwIfDistributionStopped() should throw DistributedSystemDisconnectedException");
    } catch (DistributedSystemDisconnectedException expected) {
      assertRootCause(expected, OutOfOffHeapMemoryException.class);
    }

    // verify cache was closed out due to OutOfOffHeapMemoryException
    assertTrue(cache.isClosed());
    try {
      cache.getCancelCriterion().checkCancelInProgress(null);
      fail(
          "GemFireCacheImpl.getCancelCriterion().checkCancelInProgress should throw DistributedSystemDisconnectedException");
    } catch (DistributedSystemDisconnectedException expected) {
      assertRootCause(expected, OutOfOffHeapMemoryException.class);
    }
  }