boolean isUsedInPartitioning() {
    boolean ok = false;
    // TODO: Asif: Handle the case where the region turns out to be null.
    // Ideally the Bug 39923 workaround should ensure the region is not null, but that check
    // is only done for Update-type statements. For Select queries the region may still be
    // null, hence the check below.

    if (this.tqi == null) {
      return ok;
    }
    Region rgnOwningColumn = this.tqi.getRegion();
    assert rgnOwningColumn != null;
    RegionAttributes ra = rgnOwningColumn.getAttributes();
    // If the region is a replicated region, or a PR with just itself as a member,
    // then we should go with Derby's Activation object.
    DataPolicy policy = ra.getDataPolicy();

    if (policy.withPartitioning()) {
      PartitionedRegion pr = (PartitionedRegion) rgnOwningColumn;
      GfxdPartitionResolver rslvr = (GfxdPartitionResolver) pr.getPartitionResolver();
      ok = rslvr != null && rslvr.isUsedInPartitioning(this.actualColumnName);
    }
    return ok;
  }
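A hedged sketch of the same routing decision using only the public GemFire API; canPrune is an illustrative name, not from the original source. Member pruning only pays off when the region actually partitions its data:

import com.gemstone.gemfire.cache.DataPolicy;
import com.gemstone.gemfire.cache.Region;

// Illustrative sketch: only a partitioned region benefits from resolver-based
// pruning; a replicated region holds the full data set on every member.
static boolean canPrune(Region<?, ?> region) {
  if (region == null) {
    return false; // mirrors the null guard on this.tqi above
  }
  DataPolicy policy = region.getAttributes().getDataPolicy();
  return policy.withPartitioning();
}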
    @Override
    public void execute(FunctionContext context) {
      try {
        Region<K, V> local =
            PartitionRegionHelper.getLocalDataForContext((RegionFunctionContext) context);
        ParallelArgs<K, V> args = (ParallelArgs<K, V>) context.getArguments();

        File[] files =
            args.getOptions()
                .getMapper()
                .mapImportPath(
                    local.getCache().getDistributedSystem().getDistributedMember(), args.getFile());

        if (files != null) {
          for (File f : files) {
            if (f.isDirectory() || !f.exists()) {
              throw new IOException(
                  LocalizedStrings.Snapshot_INVALID_IMPORT_FILE.toLocalizedString(f));
            }
            local.getSnapshotService().load(f, args.getFormat(), args.getOptions());
          }
        }
        context.getResultSender().lastResult(Boolean.TRUE);

      } catch (Exception e) {
        context.getResultSender().sendException(e);
      }
    }
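For context, a hedged sketch of how a caller might dispatch this import function to the members hosting the region; the function instance and argument object here are assumptions, not taken from the original source:

import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.execute.Execution;
import com.gemstone.gemfire.cache.execute.Function;
import com.gemstone.gemfire.cache.execute.FunctionService;
import com.gemstone.gemfire.cache.execute.ResultCollector;

// Sketch only: run the snapshot-import function on every member hosting the
// region; importFn and args stand in for the Function above and its
// ParallelArgs payload.
static void runImport(Region<?, ?> region, Function importFn, Object args) {
  Execution exec = FunctionService.onRegion(region).withArgs(args);
  ResultCollector<?, ?> rc = exec.execute(importFn);
  rc.getResult(); // blocks until each member sends lastResult or an exception
}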
 @Override
 public ByteBuffer processBinaryStorageCommand(
     Object key, byte[] value, long cas, int flags, Cache cache, RequestReader request) {
   ByteBuffer response = request.getResponse();
   Region<Object, ValueWrapper> r = getMemcachedRegion(cache);
   ValueWrapper val = ValueWrapper.getWrappedValue(value, flags);
   try {
     Object oldVal = r.putIfAbsent(key, val);
     // set status
     if (oldVal == null) {
       if (getLogger().fineEnabled()) {
         getLogger().fine("added key: " + key);
       }
       if (isQuiet()) {
         return null;
       }
       response.putShort(POSITION_RESPONSE_STATUS, ResponseStatus.NO_ERROR.asShort());
       // set cas
       response.putLong(POSITION_CAS, val.getVersion());
     } else {
       if (getLogger().fineEnabled()) {
         getLogger().fine("key: " + key + " not added as is already exists");
       }
       response.putShort(POSITION_RESPONSE_STATUS, ResponseStatus.KEY_EXISTS.asShort());
     }
   } catch (Exception e) {
     response = handleBinaryException(key, request, response, "add", e);
   }
   return response;
 }
  private ByteBuffer processAsciiCommand(ByteBuffer buffer, Cache cache) {
    CharBuffer flb = getFirstLineBuffer();
    getAsciiDecoder().reset();
    getAsciiDecoder().decode(buffer, flb, false);
    flb.flip();
    String firstLine = getFirstLine();
    String[] firstLineElements = firstLine.split(" ");

    assert "decr".equals(firstLineElements[0]);
    String key = firstLineElements[1];
    String decrByStr = stripNewline(firstLineElements[2]);
    Long decrBy = Long.parseLong(decrByStr);
    boolean noReply = firstLineElements.length > 3;

    Region<Object, ValueWrapper> r = getMemcachedRegion(cache);
    String reply = Reply.NOT_FOUND.toString();
    ByteBuffer newVal = ByteBuffer.allocate(8);
    while (true) {
      ValueWrapper oldValWrapper = r.get(key);
      if (oldValWrapper == null) {
        break;
      }
      newVal.clear();
      byte[] oldVal = oldValWrapper.getValue();
      long oldLong = getLongFromByteArray(oldVal);
      long newLong = oldLong - decrBy;
      newVal.putLong(newLong);
      ValueWrapper newValWrapper = ValueWrapper.getWrappedValue(newVal.array(), 0 /*flags*/);
      if (r.replace(key, oldValWrapper, newValWrapper)) {
        reply = newLong + "\r\n";
        break;
      }
    }
    return noReply ? null : asciiCharset.encode(reply);
  }
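The replace-and-retry loop above is the standard optimistic compare-and-swap pattern. A minimal, self-contained sketch of the same technique against a plain ConcurrentMap (all names illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CasDecrementSketch {
  // Atomically decrement the value under key, retrying until the replace
  // succeeds or the key disappears. Returns null if the key is absent,
  // mirroring the NOT_FOUND reply above.
  static Long decrement(ConcurrentMap<String, Long> map, String key, long by) {
    while (true) {
      Long oldVal = map.get(key);
      if (oldVal == null) {
        return null; // nothing to decrement
      }
      // replace succeeds only if the mapping still holds oldVal, which is
      // exactly the r.replace(key, oldValWrapper, newValWrapper) check above
      if (map.replace(key, oldVal, oldVal - by)) {
        return oldVal - by;
      }
      // another thread won the race; loop and retry with the fresh value
    }
  }

  public static void main(String[] args) {
    ConcurrentMap<String, Long> map = new ConcurrentHashMap<>();
    map.put("counter", 10L);
    System.out.println(decrement(map, "counter", 4)); // prints 6
  }
}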
 /** Creates a client cache connected to the server at the given host and port. */
 public static void createClientCache1(String host, Integer port1) throws Exception {
   PORT1 = port1.intValue();
   Properties props = new Properties();
   props.setProperty("mcast-port", "0");
   props.setProperty("locators", "");
   new PutAllDUnitTest("temp").createCache(props);
   props.setProperty("retryAttempts", "2");
   props.setProperty("endpoints", "ep1=" + host + ":" + PORT1);
   props.setProperty("redundancyLevel", "-1");
   props.setProperty("establishCallbackConnection", "true");
   props.setProperty("LBPolicy", "Sticky");
   props.setProperty("readTimeout", "2000");
   props.setProperty("socketBufferSize", "1000");
   props.setProperty("retryInterval", "250");
   props.setProperty("connectionsPerServer", "2");
   AttributesFactory factory = new AttributesFactory();
   factory.setScope(Scope.DISTRIBUTED_ACK);
   PoolImpl p =
       (PoolImpl)
           ClientServerTestCase.configureConnectionPool(
               factory, host, PORT1, -1, true, -1, 2, null);
   CacheListener clientListener = new HAEventIdPropagationListenerForClient1();
   factory.setCacheListener(clientListener);
   RegionAttributes attrs = factory.create();
   cache.createRegion(REGION_NAME, attrs);
   Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
   assertNotNull(region);
   region.registerInterest("ALL_KEYS", InterestResultPolicy.NONE);
   pool = p;
 }
  @Test
  public void get() {
    String key = getKey(getUser("imaPigg"));

    assertThat(usersTemplate.get(key)).isEqualTo(users.get(key));
    assertNullEquals(users.get("mrT"), usersTemplate.get("mrT"));
  }
 /**
  * Update an existing key in region REGION_NAME. The keys to update are specified in keyIntervals.
  *
  * @return true if all keys to be updated have been completed.
  */
 protected boolean updateExistingKey() {
   long nextKey =
       CQUtilBB.getBB().getSharedCounters().incrementAndRead(CQUtilBB.LASTKEY_UPDATE_EXISTING_KEY);
   if (!keyIntervals.keyInRange(KeyIntervals.UPDATE_EXISTING_KEY, nextKey)) {
     Log.getLogWriter().info("All existing keys updated; returning from updateExistingKey");
     return true;
   }
   Object key = NameFactory.getObjectNameForCounter(nextKey);
   QueryObject existingValue = (QueryObject) aRegion.get(key);
    if (existingValue == null)
      throw new TestException("Get of key " + key + " returned unexpected null");
   QueryObject newValue = existingValue.modifyWithNewInstance(QueryObject.NEGATE, 0, true);
   newValue.extra = key; // encode the key in the object for later validation
   if (existingValue.aPrimitiveLong < 0)
     throw new TestException(
         "Trying to update a key which was already updated: " + existingValue.toStringFull());
   Log.getLogWriter()
       .info("Updating existing key " + key + " with value " + TestHelper.toString(newValue));
   aRegion.put(key, newValue);
   Log.getLogWriter()
       .info(
           "Done updating existing key "
               + key
               + " with value "
               + TestHelper.toString(newValue)
               + ", num remaining: "
               + (keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY) - nextKey));
   return (nextKey >= keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY));
 }
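The shared-counter claim pattern above (atomically take the next key index, stop once the interval is exhausted) can be sketched with a plain AtomicLong; the names and the interval bound are illustrative:

import java.util.concurrent.atomic.AtomicLong;

public class WorkClaimSketch {
  static final AtomicLong NEXT_KEY = new AtomicLong();
  static final long LAST_KEY = 100; // end of the assigned interval (illustrative)

  // Mirrors updateExistingKey(): each thread atomically claims the next index,
  // so no two threads ever update the same key. Returns true once the whole
  // interval has been claimed.
  static boolean claimAndUpdateNextKey() {
    long nextKey = NEXT_KEY.incrementAndGet();
    if (nextKey > LAST_KEY) {
      return true; // all keys already claimed
    }
    System.out.println("updating Object_" + nextKey);
    return nextKey >= LAST_KEY;
  }

  public static void main(String[] args) {
    while (!claimAndUpdateNextKey()) { /* keep claiming */ }
  }
}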
 /**
  * Does a putAll and returns the event IDs generated. Each event ID is captured by the listener
  * and stored in a static variable.
  */
 public static Object[] putAll() {
   Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
   assertNotNull(region);
   try {
     Map map = new LinkedHashMap();
     map.put(PUTALL_KEY1, PUTALL_VALUE1);
     map.put(PUTALL_KEY2, PUTALL_VALUE2);
     map.put(PUTALL_KEY3, PUTALL_VALUE3);
     map.put(PUTALL_KEY4, PUTALL_VALUE4);
     map.put(PUTALL_KEY5, PUTALL_VALUE5);
     region.putAll(map, "putAllCallbackArg");
     EventID[] evids = new EventID[5];
     evids[0] = putAlleventId1;
     evids[1] = putAlleventId2;
     evids[2] = putAlleventId3;
     evids[3] = putAlleventId4;
     evids[4] = putAlleventId5;
     assertNotNull(evids[0]);
     assertNotNull(evids[1]);
     assertNotNull(evids[2]);
     assertNotNull(evids[3]);
     assertNotNull(evids[4]);
     return evids;
   } catch (Exception e) {
     fail("put failed due to " + e);
   }
   return null;
 }
  @Test
  public void testFailedIndexUpdateOnCommitForPut() throws Exception {
    Person.THROW_ON_INDEX = true;
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(DataPolicy.REPLICATE);
    SimpleListener sl = new SimpleListener();
    af.setCacheListener(sl);
    Region region = cache.createRegion("sample", af.create());
    qs.createIndex("foo", IndexType.FUNCTIONAL, "index", "/sample");
    Context ctx = cache.getJNDIContext();

    Integer x = 0;
    region.getCache().getCacheTransactionManager().begin();
    region.create(x, new Person("xyz", 45));
    try {
      region.getCache().getCacheTransactionManager().commit();
      fail("commit should have thrown an exception because the index maintenance threw");
    } catch (com.gemstone.gemfire.cache.query.IndexMaintenanceException ie) {
      // this is the desired case
    }
    Person p = (Person) region.get(x);
    assertEquals("object shouldn't have made it into region", null, p);
    assertEquals(0, sl.creates);
    assertEquals(0, sl.updates);
  }
  private void exportOnMember(File snapshot, SnapshotFormat format, SnapshotOptions<K, V> options)
      throws IOException {
    LocalRegion local = getLocalRegion(region);
    Exporter<K, V> exp = createExporter(region, options);

    if (getLoggerI18n().fineEnabled()) {
      getLoggerI18n().fine("Writing to snapshot " + snapshot.getAbsolutePath());
    }

    long count = 0;
    long start = CachePerfStats.getStatTime();
    SnapshotWriter writer = GFSnapshot.create(snapshot, region.getFullPath());
    try {
      if (getLoggerI18n().infoEnabled())
        getLoggerI18n().info(LocalizedStrings.Snapshot_EXPORT_BEGIN_0, region.getName());

      SnapshotWriterSink sink = new SnapshotWriterSink(writer);
      count = exp.export(region, sink, options);

      if (getLoggerI18n().infoEnabled()) {
        getLoggerI18n()
            .info(
                LocalizedStrings.Snapshot_EXPORT_END_0_1_2_3,
                new Object[] {count, sink.getBytesWritten(), region.getName(), snapshot});
      }

    } finally {
      writer.snapshotComplete();
      local.getCachePerfStats().endExport(count, start);
    }
  }
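The public entry point into this export path is the region's snapshot service. A minimal usage sketch against the documented RegionSnapshotService API (the file name is illustrative):

import java.io.File;
import java.io.IOException;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.snapshot.RegionSnapshotService;
import com.gemstone.gemfire.cache.snapshot.SnapshotOptions.SnapshotFormat;

// Sketch: export a single region's entries to a snapshot file, the operation
// that exportOnMember() above implements on each member.
static <K, V> void exportRegion(Region<K, V> region) throws IOException {
  RegionSnapshotService<K, V> svc = region.getSnapshotService();
  svc.save(new File("region.gfd"), SnapshotFormat.GEMFIRE);
}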
  /** Check that remote persistent regions cause conflicts */
  public void testPersistentRestriction() throws Exception {
    final CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
    final String misConfigRegionName = getUniqueName();
    Region misConfigRgn = getCache().createRegion(misConfigRegionName, getDiskRegionAttributes());
    Invoke.invokeInEveryVM(
        new SerializableRunnable("testPersistentRestriction: Illegal Region Configuration") {
          public void run() {
            try {
              getCache().createRegion(misConfigRegionName, getDiskRegionAttributes());
              // rgn1.put("misConfigKey", "oldmisConfigVal");
            } catch (CacheException e) {
              Assert.fail("While creating region", e);
            }
          }
        });
    misConfigRgn.put("misConfigKey", "oldmisConfigVal");

    txMgr.begin();

    try {
      misConfigRgn.put("misConfigKey", "newmisConfigVal");
      fail("Expected an IllegalStateException with information about misconfigured regions");
    } catch (UnsupportedOperationException expected) {
      getSystem().getLogWriter().info("Expected exception: " + expected);
      txMgr.rollback();
    }
    misConfigRgn.destroyRegion();
  }
  @Test
  public void testAllIndexesOnCommitForPut() throws Exception {
    // create region
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(DataPolicy.REPLICATE);
    Region region = cache.createRegion("sample", af.create());

    // put data
    for (int i = 0; i < 10; i++) {
      region.put(i, new Portfolio(i));
    }

    String[] queries = {
      "select * from /sample where ID = 5",
      "select ID from /sample where ID < 5",
      "select ID from /sample where ID > 5",
      "select ID from /sample where ID != 5",
      "select status from /sample where status = 'active'",
      "select status from /sample where status > 'active'",
      "select status from /sample where status < 'active'",
      "select status from /sample where status != 'active'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId = 'IBM'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId < 'VMW'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId > 'IBM'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId != 'IBM'"
    };

    SelectResults[][] sr = new SelectResults[queries.length][2];

    // execute queries without indexes
    for (int i = 0; i < queries.length; i++) {
      sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
    }

    // create indexes
    qs.createKeyIndex("IDIndex", "ID", "/sample");
    qs.createIndex("statusIndex", "status", "/sample");
    qs.createIndex("secIdIndex", "pos.secId", "/sample p, p.positions.values pos");

    // begin transaction
    Context ctx = cache.getJNDIContext();
    UserTransaction utx = (UserTransaction) ctx.lookup("java:/UserTransaction");
    utx.begin();

    // update data
    for (int i = 0; i < 10; i++) {
      region.put(i, new Portfolio(i));
    }

    // execute queries with indexes during transaction
    for (int i = 0; i < queries.length; i++) {
      sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
    }

    // complete transaction
    utx.commit();

    // verify results
    com.gemstone.gemfire.cache.query.CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
  }
 /**
  * Get a random key from the given region that is within this instance's range. If no keys qualify
  * in the region, return null.
  *
  * @param aRegion - The region to get the key from.
  * @param excludeKey - A key to exclude from the result.
  * @return A key from aRegion, or null.
  */
 public Object getRandomKey(Region aRegion, Object excludeKey) {
   long start = System.currentTimeMillis();
   int lower = ((Integer) (lowerKeyRange.get())).intValue();
   int upper = ((Integer) (upperKeyRange.get())).intValue();
   long randomKeyIndex = TestConfig.tab().getRandGen().nextLong(lower, upper);
   long startKeyIndex = randomKeyIndex;
   Object key = NameFactory.getObjectNameForCounter(randomKeyIndex);
    do {
      if (!key.equals(excludeKey) && aRegion.containsKey(key)) break; // found a qualifying key
      randomKeyIndex++; // go to the next key
      if (randomKeyIndex > upper) randomKeyIndex = lower; // wrap around to the bottom of the range
      if (randomKeyIndex == startKeyIndex) { // considered all keys
        key = null;
        break;
      }
      key = NameFactory.getObjectNameForCounter(randomKeyIndex);
    } while (true);
   long end = System.currentTimeMillis();
   Log.getLogWriter()
       .info(
           "Done in TxUtilKeyRange:getRandomKey, key is "
               + key
               + " "
               + aRegion.getFullPath()
               + " getRandomKey took "
               + (end - start)
               + " millis");
   return key;
 }
  private void checkQueueIteration(List<KeyValue> expected, String... entries) throws Exception {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    Region r1 = rf1.setPartitionAttributes(paf.create()).create("r1");

    // create the buckets
    r1.put("blah", "blah");

    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue brq =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    int seq = 0;
    for (String s : entries) {
      if (s.equals("roll")) {
        brq.rolloverSkipList();
      } else {
        String[] kv = s.split("-");
        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
        getSortedEventQueue(brq).rollover(true);
      }
    }

    Iterator<HDFSGatewayEventImpl> iter = brq.iterator(r1);
    List<KeyValue> actual = new ArrayList<KeyValue>();
    while (iter.hasNext()) {
      HDFSGatewayEventImpl evt = iter.next();
      actual.add(new KeyValue((String) evt.getKey(), (String) evt.getDeserializedValue()));
    }

    assertEquals(expected, actual);
  }
 public void xtestNestQueryInWhereClause() throws Exception {
   Region region = CacheUtils.createRegion("Portfolios", Portfolio.class);
   region.put("0", new Portfolio(0));
   region.put("1", new Portfolio(1));
   region.put("2", new Portfolio(2));
   region.put("3", new Portfolio(3));
   Query query =
       CacheUtils.getQueryService()
           .newQuery(
               "SELECT DISTINCT * FROM /Portfolios WHERE NOT (SELECT DISTINCT * FROM positions.values p WHERE p.secId = 'IBM').isEmpty");
   Collection result = (Collection) query.execute();
   Portfolio p = (Portfolio) (result.iterator().next());
   if (!p.positions.containsKey("IBM")) fail(query.getQueryString());
   // query = CacheUtils.getQueryService().newQuery("SELECT DISTINCT * FROM
   // /Portfolios where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios p
   // where p.ID = 0).status");
   // result = (Collection)query.execute();
   // CacheUtils.log(result);
   // query = CacheUtils.getQueryService().newQuery("SELECT DISTINCT * FROM
   // /Portfolios x where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios
   // p where p.ID = x.ID).status");
   // result = (Collection)query.execute();
   // SELECT DISTINCT * FROM /Portfolios where status = ELEMENT(SELECT
   // DISTINCT * FROM /Portfolios where ID = 0).status
   // SELECT DISTINCT * FROM /Portfolios x where status = ELEMENT(SELECT
   // DISTINCT * FROM /Portfolios p where p.ID = x.ID).status
 }
  @Test
  public void testBug40428_2() throws Exception {
    Object shortData1 =
        new Object() {
          public short shortField = 4;
        };
    Object shortData2 =
        new Object() {
          public short shortField = 5;
        };

    Region region = CacheUtils.createRegion("shortFieldTest", Object.class);
    region.put("0", shortData1);
    QueryService qs = CacheUtils.getQueryService();
    String qry = "select * from /shortFieldTest.entries sf where sf.value.shortField < 10 ";
    qs.createIndex(
        "shortIndex", IndexType.FUNCTIONAL, "value.shortField", "/shortFieldTest.entries");
    region.put("1", shortData2);
    Query query = qs.newQuery(qry);
    SelectResults rs = (SelectResults) query.execute();
    assertEquals(2, rs.size());
  }
 @Test
 public void testBug37723() {
   Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
   region.put("0", new Portfolio(0));
   region.put("1", new Portfolio(1));
   region.put("2", new Portfolio(2));
   region.put("3", new Portfolio(3));
   QueryService qs = CacheUtils.getQueryService();
   String qry =
       "select distinct getID, status from /portfolios pf where getID < 10 order by getID desc";
   Query q = qs.newQuery(qry);
   try {
     SelectResults result = (SelectResults) q.execute();
     Iterator itr = result.iterator();
     int j = 3;
     while (itr.hasNext()) {
       Struct struct = (Struct) itr.next();
       assertEquals(j--, ((Integer) struct.get("getID")).intValue());
     }
     qry = "select distinct getID, status from /portfolios pf where getID < 10 order by getID asc";
     q = qs.newQuery(qry);
     result = (SelectResults) q.execute();
     itr = result.iterator();
     j = 0;
     while (itr.hasNext()) {
       Struct struct = (Struct) itr.next();
       assertEquals(j++, ((Integer) struct.get("getID")).intValue());
     }
   } catch (Exception e) {
     e.printStackTrace();
     fail("Test failed because of exception=" + e);
   }
 }
  protected void assertRegionContents(Region<?, ?> region, Object... values) {
    assertThat(region.size(), is(equalTo(values.length)));

    for (Object value : values) {
      assertThat(region.containsValue(value), is(true));
    }
  }
  /**
   * Does operations, pausing when directed by the snapshot controller. Also writes the snapshot
   * to the blackboard and exports the snapshot to disk.
   */
  protected static void snapshotResponder(RecoveryTest testInstance) throws Exception {
    long pausing = RecoveryBB.getBB().getSharedCounters().read(RecoveryBB.pausing);
    if (pausing > 0) { // controller has signaled to pause
      if ((Boolean) testInstance.threadIsPaused.get()) {
        // this thread has already paused, so don't increment the counter
        Log.getLogWriter().info("Thread has paused");
      } else {
        Log.getLogWriter().info("This thread is pausing");
        testInstance.threadIsPaused.set(Boolean.TRUE);
        RecoveryBB.getBB().getSharedCounters().incrementAndRead(RecoveryBB.pausing);
      }
      long writeSnapshot = RecoveryBB.getBB().getSharedCounters().read(RecoveryBB.writeSnapshot);
      if (writeSnapshot > 0) {
        long leader = RecoveryBB.getBB().getSharedCounters().incrementAndRead(RecoveryBB.leader);
        if (leader == 1) { // this is the thread to write the snapshot
          Log.getLogWriter().info("This thread is the leader; it will write the snapshot");
          testInstance.writeSnapshot(true); // include non-persistent regions
          long executionNumber =
              RecoveryBB.getBB().getSharedCounters().read(RecoveryBB.executionNumber);

          // Add numFilterObjects entries to each region (SnapshotFilter tests)
          if (SnapshotPrms.useFilterOnExport() || SnapshotPrms.useFilterOnImport()) {
            Map allRegionsSnapshot = new HashMap();
            int numToCreate = SnapshotPrms.numFilterObjects();
            for (Region aRegion : testInstance.allRegions) {
              for (int i = 1; i <= numToCreate; i++) {
                String key = "FilterObject_" + i;
                String value =
                    "object to be filtered via snapshot.save() or snapshot.load(): this should never be a value in the cache once snapshot restored";
                aRegion.put(key, value);
              }
            }
            Log.getLogWriter()
                .info("Wrote " + numToCreate + " FilterObject entries to each region");
          }

          CacheSnapshotService snapshot = CacheHelper.getCache().getSnapshotService();
          SnapshotOptions options = snapshot.createOptions();
          if (SnapshotPrms.useFilterOnExport()) {
            options.setFilter(getSnapshotFilter());
          }

          String currDirName = System.getProperty("user.dir");
          String snapshotDirName =
              currDirName + File.separator + "cacheSnapshotDir_" + executionNumber;
          Log.getLogWriter().info("Starting cacheSnapshot to " + snapshotDirName);
          snapshot.save(new File(snapshotDirName), SnapshotFormat.GEMFIRE, options);
          Log.getLogWriter().info("Completed cacheSnapshot to " + snapshotDirName);
          RecoveryBB.getBB().getSharedCounters().increment(RecoveryBB.snapshotWritten);
        }
      }
      MasterController.sleepForMs(5000);
    } else { // not pausing
      long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
      long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;
      testInstance.doOperations(minTaskGranularityMS);
    }
  }
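The inverse of the save above is a cache-wide load from the same directory; a hedged sketch (directory name illustrative), noting that the regions themselves must already exist before loading:

import java.io.File;
import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.snapshot.CacheSnapshotService;
import com.gemstone.gemfire.cache.snapshot.SnapshotOptions.SnapshotFormat;

// Sketch: re-import everything written by snapshot.save(...) above.
// load() restores entries only; it does not create the regions.
static void restoreSnapshot(Cache cache) throws Exception {
  CacheSnapshotService snapshot = cache.getSnapshotService();
  snapshot.load(new File("cacheSnapshotDir_1"), SnapshotFormat.GEMFIRE);
}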
 public Object call() throws Exception {
   createRegion(rName, false, 1, new InvalidatePRWriter());
   Region r = getCache().getRegion(rName);
   InvalidatePRListener l = new InvalidatePRListener();
   l.originRemote = originRemote;
   r.getAttributesMutator().addCacheListener(l);
   return null;
 }
  @Test
  public void testGetRegionFactoryWithIsGlobalScope() throws Exception {
    serverOptions = JSONFormatter.fromJSON("{ \"scope\": \"GLOBAL\" }");
    new ScopeOption(serverOptions).setOptionOnRegionFactory(regionFactory);

    Region region = regionFactory.create(getCurrentTestName());
    assertThat(region.getAttributes().getScope(), equalTo(Scope.GLOBAL));
  }
 /** destroy key-1 */
 public static void destroy() {
   try {
     Region region1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     region1.localDestroy("key-1");
   } catch (Exception e) {
     e.printStackTrace();
     fail("test failed due to exception in destroy ");
   }
 }
  /**
   * Prints the region entries, formatting both keys and values.
   *
   * @param region the region whose entries are printed
   * @param regionIterator iterator over the region's entries, advanced as rows are printed
   * @param startIndex index of the first entry to print
   * @param startRowNum row number assigned to the first printed row
   * @param rowCount maximum number of rows to print
   * @param keyList optional list of keys; may be null
   * @return the number of rows printed
   * @throws Exception if an entry cannot be read or printed
   */
  public static int printEntries(
      Region region,
      Iterator regionIterator,
      int startIndex,
      int startRowNum,
      int rowCount,
      List keyList)
      throws Exception {

    if (region == null || regionIterator == null) {
      PadoShell.printlnError("Path is null");
      return 0;
    }

    int endIndex = startIndex + rowCount; // exclusive
    if (endIndex >= region.size()) {
      endIndex = region.size();
    }

    if (startIndex == endIndex) {
      return 0;
    }

    HashSet keyNameSet = new HashSet();
    HashSet valueNameSet = new HashSet();
    Object key = null;
    Object value = null;

    // Print keys and values
    int row = startRowNum;
    int index = startIndex;
    for (Iterator itr = regionIterator; index < endIndex && itr.hasNext(); index++) {
      Region.Entry entry = (Region.Entry) itr.next();
      key = entry.getKey();
      value = entry.getValue();
      keyNameSet.add(key.getClass().getName());
      if (value != null) {
        valueNameSet.add(value.getClass().getName());
      }
      printObject(row, "Key", key, true);
      printObject(row, "Value", value, false);
      PadoShell.println("");
      row++;
    }

    PadoShell.println("");
    PadoShell.println(" Fetch size: " + rowCount);
    PadoShell.println("   Returned: " + (row - 1) + "/" + region.size());
    for (Object keyName : keyNameSet) {
      PadoShell.println("  Key Class: " + keyName);
    }
    for (Object valueName : valueNameSet) {
      PadoShell.println("Value Class: " + valueName);
    }
    return endIndex - startIndex;
  }
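A hedged usage sketch for printEntries(): page through a region ten rows at a time, reusing a single iterator across calls since positioning is sequential; passing null for keyList is an assumption about acceptable usage:

import java.util.Iterator;
import com.gemstone.gemfire.cache.Region;

// Sketch: drive printEntries() as a pager. The iterator is created once and
// advanced across calls; each call prints at most 10 rows and returns the
// number actually printed, so a return of 0 ends the loop.
static void printAllEntries(Region region) throws Exception {
  Iterator itr = region.entrySet().iterator();
  int startIndex = 0;
  int rowNum = 1;
  int printed;
  do {
    printed = printEntries(region, itr, startIndex, rowNum, 10, null);
    startIndex += printed;
    rowNum += printed;
  } while (printed > 0);
}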
 /** put value-2 on key-1 */
 public static void putValue2() {
   try {
     Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     r1.put("key-1", "value-2");
   } catch (Exception ex) {
     ex.printStackTrace();
     fail("failed while region.put()", ex);
   }
 }
 public static void assertGotAllValues() {
   Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
   assertNotNull(region);
   assertTrue(region.get(PUTALL_KEY1).equals(PUTALL_VALUE1));
   assertTrue(region.get(PUTALL_KEY2).equals(PUTALL_VALUE2));
   assertTrue(region.get(PUTALL_KEY3).equals(PUTALL_VALUE3));
   assertTrue(region.get(PUTALL_KEY4).equals(PUTALL_VALUE4));
   assertTrue(region.get(PUTALL_KEY5).equals(PUTALL_VALUE5));
 }
 @Before
 public void setUp() throws java.lang.Exception {
   CacheUtils.startCache();
   Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
   for (int i = 0; i < 4; i++) {
     region.put("" + i, new Portfolio(i));
     // CacheUtils.log(new Portfolio(i));
   }
 }
  public String[] listSubRegionPaths(boolean recursive) {
    SortedSet<String> subregionPaths = new TreeSet<String>();
    Set<Region<?, ?>> subregions = region.subregions(recursive);

    for (Region<?, ?> subregion : subregions) {
      subregionPaths.add(subregion.getFullPath());
    }
    return subregionPaths.toArray(new String[subregionPaths.size()]);
  }
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws IOException, InterruptedException {

    /*
     *  Check whether the port is open. The client pool is currently hard-coded to look for a
     *  server on 40404, the default. If the port is already taken this process would wait for a
     *  while, so force an immediate exit instead. There are better ways to handle this
     *  situation, but hey, this is sample code.
     */
    try {
      new ServerPortGenerator().bind(new ServerSocket(), 40404, 1);
    } catch (IOException e) {
      System.out.println(
          "Sorry port 40404 is in use. Do you have another cache server process already running?");
      System.exit(1);
    }

    ApplicationContext context = new ClassPathXmlApplicationContext("server/cache-config.xml");

    Region<Long, Order> region = context.getBean("Order", Region.class);

    /*
     * Create some customer orders
     */

    // String constructor avoids binary floating-point artifacts in the prices
    Product ipod =
        new Product(1L, "Apple iPod", new BigDecimal("99.99"), "An Apple portable music player");
    Product ipad =
        new Product(2L, "Apple iPad", new BigDecimal("499.99"), "An Apple tablet device");
    Product macbook =
        new Product(3L, "Apple macBook", new BigDecimal("899.99"), "An Apple notebook computer");
    macbook.setAttribute("warantee", "included");

    Order davesOrder = new Order(1L, 1L, new Address("Dave Street", "Matthews", "USA"));

    davesOrder.add(new LineItem(ipad, 2));
    davesOrder.add(new LineItem(macbook));

    Order aliciasFirstOrder = new Order(2L, 2L, new Address("Alicia Street", "Keys", "USA"));

    aliciasFirstOrder.add(new LineItem(ipod, 3));

    Order aliciasNextOrder = new Order(3L, 2L, new Address("Alicia Street", "Keys", "USA"));

    aliciasNextOrder.add(new LineItem(macbook, 4));
    aliciasNextOrder.add(new LineItem(ipad));

    System.out.println("Press <ENTER> to update cache");
    System.in.read();

    region.put(davesOrder.getId(), davesOrder);
    region.put(aliciasFirstOrder.getId(), aliciasFirstOrder);
    region.put(aliciasNextOrder.getId(), aliciasNextOrder);

    System.out.println("Press <ENTER> to terminate the cache server");
    System.in.read();
    System.exit(0);
  }
 public static void verifyNoDestroyEntryInSender() {
   try {
     Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r);
     assertNotNull(r.getEntry("key1"));
     assertNotNull(r.getEntry("key2"));
   } catch (Exception ex) {
     fail("failed while verifyDestroyEntry in C1", ex);
   }
 }
 /** destroy entries key1 and key2 */
 public static void destroyEntriesK1andK2() {
   try {
     Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r);
     r.destroy("key1");
     r.destroy("key2");
   } catch (Exception ex) {
     fail("failed while destroyEntry()", ex);
   }
 }