  @Test
  public void testBug40428_2() throws Exception {
    Object shortData1 =
        new Object() {
          public short shortField = 4;
        };
    Object shortData2 =
        new Object() {
          public short shortField = 5;
        };

    Region region = CacheUtils.createRegion("shortFieldTest", Object.class);
    region.put("0", shortData1);
    QueryService qs = CacheUtils.getQueryService();
    String qry = "select * from /shortFieldTest.entries sf where sf.value.shortField < 10 ";
    qs.createIndex(
        "shortIndex", IndexType.FUNCTIONAL, "value.shortField", "/shortFieldTest.entries");
    region.put("1", shortData2);
    Query query = null;
    Object result = null;

    query = qs.newQuery(qry);

    SelectResults rs = (SelectResults) query.execute();
    assertEquals(2, rs.size());
  }
 public void xtestNestQueryInWhereClause() throws Exception {
   Region region = CacheUtils.createRegion("Portfolios", Portfolio.class);
   region.put("0", new Portfolio(0));
   region.put("1", new Portfolio(1));
   region.put("2", new Portfolio(2));
   region.put("3", new Portfolio(3));
   Query query =
       CacheUtils.getQueryService()
           .newQuery(
               "SELECT DISTINCT * FROM /Portfolios WHERE NOT (SELECT DISTINCT * FROM positions.values p WHERE p.secId = 'IBM').isEmpty");
   Collection result = (Collection) query.execute();
   Portfolio p = (Portfolio) (result.iterator().next());
   if (!p.positions.containsKey("IBM")) fail(query.getQueryString());
   // query = CacheUtils.getQueryService().newQuery("SELECT DISTINCT * FROM
   // /Portfolios where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios p
   // where p.ID = 0).status");
   // result = (Collection)query.execute();
   // CacheUtils.log(result);
   // query = CacheUtils.getQueryService().newQuery("SELECT DISTINCT * FROM
   // /Portfolios x where status = ELEMENT(SELECT DISTINCT * FROM /Portfolios
   // p where p.ID = x.ID).status");
   // result = (Collection)query.execute();
   // SELECT DISTINCT * FROM /Portfolios where status = ELEMENT(SELECT
   // DISTINCT * FROM /Portfolios where ID = 0).status
   // SELECT DISTINCT * FROM /Portfolios x where status = ELEMENT(SELECT
   // DISTINCT * FROM /Portfolios p where p.ID = x.ID).status
 }
  @Test
  public void testAllIndexesOnCommitForPut() throws Exception {
    // create region
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(DataPolicy.REPLICATE);
    Region region = cache.createRegion("sample", af.create());

    // put data
    for (int i = 0; i < 10; i++) {
      region.put(i, new Portfolio(i));
    }

    String[] queries = {
      "select * from /sample where ID = 5",
      "select ID from /sample where ID < 5",
      "select ID from /sample where ID > 5",
      "select ID from /sample where ID != 5",
      "select status from /sample where status = 'active'",
      "select status from /sample where status > 'active'",
      "select status from /sample where status < 'active'",
      "select status from /sample where status != 'active'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId = 'IBM'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId < 'VMW'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId > 'IBM'",
      "select pos.secId from /sample p, p.positions.values pos where pos.secId != 'IBM'"
    };

    SelectResults[][] sr = new SelectResults[queries.length][2];

    // execute queries without indexes
    for (int i = 0; i < queries.length; i++) {
      sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
    }

    // create indexes
    qs.createKeyIndex("IDIndex", "ID", "/sample");
    qs.createIndex("statusIndex", "status", "/sample");
    qs.createIndex("secIdIndex", "pos.secId", "/sample p, p.positions.values pos");

    // begin transaction
    Context ctx = cache.getJNDIContext();
    UserTransaction utx = (UserTransaction) ctx.lookup("java:/UserTransaction");
    utx.begin();

    // update data
    for (int i = 0; i < 10; i++) {
      region.put(i, new Portfolio(i));
    }

    // execute queries with indexes during transaction
    for (int i = 0; i < queries.length; i++) {
      sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
    }

    // complete transaction
    utx.commit();

    // verify results
    com.gemstone.gemfire.cache.query.CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
  }
 @Test
 public void testBug37723() {
   Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
   region.put("0", new Portfolio(0));
   region.put("1", new Portfolio(1));
   region.put("2", new Portfolio(2));
   region.put("3", new Portfolio(3));
   QueryService qs = CacheUtils.getQueryService();
   String qry =
       "select distinct getID, status from /portfolios pf where getID < 10 order by getID desc";
   Query q = qs.newQuery(qry);
   try {
     SelectResults result = (SelectResults) q.execute();
     Iterator itr = result.iterator();
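     // order by getID desc over IDs 0-3: expect 3, 2, 1, 0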
     int j = 3;
     while (itr.hasNext()) {
       Struct struct = (Struct) itr.next();
       assertEquals(j--, ((Integer) struct.get("getID")).intValue());
     }
     qry = "select distinct getID, status from /portfolios pf where getID < 10 order by getID asc";
     q = qs.newQuery(qry);
     result = (SelectResults) q.execute();
     itr = result.iterator();
     j = 0;
     while (itr.hasNext()) {
       Struct struct = (Struct) itr.next();
       assertEquals(j++, ((Integer) struct.get("getID")).intValue());
     }
   } catch (Exception e) {
     e.printStackTrace();
     fail("Test failed because of exception=" + e);
   }
 }
  /** Check that remote persistent regions cause conflicts */
  public void testPersistentRestriction() throws Exception {
    final CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
    final String misConfigRegionName = getUniqueName();
    Region misConfigRgn = getCache().createRegion(misConfigRegionName, getDiskRegionAttributes());
    Invoke.invokeInEveryVM(
        new SerializableRunnable("testPersistentRestriction: Illegal Region Configuration") {
          public void run() {
            try {
              getCache().createRegion(misConfigRegionName, getDiskRegionAttributes());
              // rgn1.put("misConfigKey", "oldmisConfigVal");
            } catch (CacheException e) {
              Assert.fail("While creating region", e);
            }
          }
        });
    misConfigRgn.put("misConfigKey", "oldmisConfigVal");

    txMgr.begin();

    try {
      misConfigRgn.put("misConfigKey", "newmisConfigVal");
      fail("Expected an IllegalStateException with information about misconfigured regions");
    } catch (UnsupportedOperationException expected) {
      getSystem().getLogWriter().info("Expected exception: " + expected);
      txMgr.rollback();
    }
    misConfigRgn.destroyRegion();
  }
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws IOException, InterruptedException {

    /*
     * Check if the port is open. Currently the client pool is hard coded to look for a server on 40404, the
     * default. If that port is already taken, this process would otherwise wait for a while, so force an
     * immediate exit when the port is in use. There are better ways to handle this situation, but hey, this
     * is sample code.
     */
    try {
      new ServerPortGenerator().bind(new ServerSocket(), 40404, 1);
    } catch (IOException e) {
      System.out.println(
          "Sorry port 40404 is in use. Do you have another cache server process already running?");
      System.exit(1);
    }

    ApplicationContext context = new ClassPathXmlApplicationContext("server/cache-config.xml");

    Region<Long, Order> region = context.getBean("Order", Region.class);
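    // Look up the "Order" region bean defined in server/cache-config.xml.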

    /*
     * Create some customer orders
     */

    Product ipod =
        new Product(1L, "Apple iPod", new BigDecimal(99.99), "An Apple portable music player");
    Product ipad = new Product(2L, "Apple iPad", new BigDecimal(499.99), "An Apple tablet device");
    Product macbook =
        new Product(3L, "Apple macBook", new BigDecimal(899.99), "An Apple notebook computer");
    macbook.setAttribute("warantee", "included");

    Order davesOrder = new Order(1L, 1L, new Address("Dave Street", "Matthews", "USA"));

    davesOrder.add(new LineItem(ipad, 2));
    davesOrder.add(new LineItem(macbook));

    Order aliciasFirstOrder = new Order(2L, 2L, new Address("Alicia Street", "Keys", "USA"));

    aliciasFirstOrder.add(new LineItem(ipod, 3));

    Order aliciasNextOrder = new Order(3L, 2L, new Address("Alicia Street", "Keys", "USA"));

    aliciasNextOrder.add(new LineItem(macbook, 4));
    aliciasNextOrder.add(new LineItem(ipad));

    System.out.println("Press <ENTER> to update cache");
    System.in.read();

    region.put(davesOrder.getId(), davesOrder);
    region.put(aliciasFirstOrder.getId(), aliciasFirstOrder);
    region.put(aliciasNextOrder.getId(), aliciasNextOrder);

    System.out.println("Press <ENTER> to terminate the cache server");
    System.in.read();
    System.exit(0);
  }
  @Test
  public void testDisabledThresholds() throws Exception {
    final InternalResourceManager irm = this.cache.getResourceManager();
    final OffHeapMemoryMonitor monitor = irm.getOffHeapMonitor();

    final RegionFactory regionFactory = this.cache.createRegionFactory(RegionShortcut.LOCAL);
    regionFactory.setOffHeap(true);
    final EvictionAttributesImpl evictionAttrs = new EvictionAttributesImpl();
    evictionAttrs.setAlgorithm(EvictionAlgorithm.NONE);
    regionFactory.setEvictionAttributes(evictionAttrs);
    final Region region = regionFactory.create("testDefaultThresholdsRegion");
    TestMemoryThresholdListener listener = new TestMemoryThresholdListener();
    irm.addResourceListener(ResourceType.OFFHEAP_MEMORY, listener);

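    // With no thresholds configured yet, these puts should generate no eviction or critical events.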
    region.put("1", new Byte[550000]);
    region.put("2", new Byte[200000]);
    assertEquals(0, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(0, irm.getStats().getOffHeapEvictionStopEvents());
    assertEquals(0, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(0, irm.getStats().getOffHeapSafeEvents());
    assertEquals(0, listener.getEvictionThresholdCalls());
    assertEquals(0, listener.getCriticalThresholdCalls());

    // Enable eviction threshold and make sure event is generated
    monitor.setEvictionThreshold(50f);
    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(0, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(1, listener.getEvictionThresholdCalls());
    assertEquals(0, listener.getCriticalThresholdCalls());

    // Enable critical threshold and make sure event is generated
    region.put("3", new Byte[200000]);
    monitor.setCriticalThreshold(70f);
    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(1, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(2, listener.getEvictionThresholdCalls());
    assertEquals(1, listener.getCriticalThresholdCalls());

    // Disable thresholds and verify events
    monitor.setEvictionThreshold(0f);
    monitor.setCriticalThreshold(0f);

    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(1, irm.getStats().getOffHeapEvictionStopEvents());
    assertEquals(1, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(1, irm.getStats().getOffHeapSafeEvents());

    assertEquals(2, listener.getEvictionThresholdCalls());
    assertEquals(2, listener.getCriticalThresholdCalls());
    assertEquals(0, listener.getNormalCalls());
    assertEquals(2, listener.getEvictionDisabledCalls());
    assertEquals(2, listener.getCriticalDisabledCalls());
  }
  @Test
  public void testBug37119() throws Exception {
    Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
    region.put("0", new Portfolio(0));
    region.put("1", new Portfolio(1));
    region.put("2", new Portfolio(2));
    region.put("3", new Portfolio(3));
    region.put(Integer.MIN_VALUE + "", new Portfolio(Integer.MIN_VALUE));
    region.put("-1", new Portfolio(-1));
    QueryService qs = CacheUtils.getQueryService();

    String qStr = "Select distinct * from /portfolios pf where pf.getID() = " + Integer.MIN_VALUE;
    Query q = qs.newQuery(qStr);
    SelectResults result = (SelectResults) q.execute();
    assertEquals(1, result.size());
    Portfolio pf = (Portfolio) result.iterator().next();
    assertEquals(Integer.MIN_VALUE, pf.getID());
    qStr = "Select distinct * from /portfolios pf where pf.getID() = -1";
    q = qs.newQuery(qStr);
    result = (SelectResults) q.execute();
    assertEquals(1, result.size());
    pf = (Portfolio) result.iterator().next();
    assertEquals(-1, pf.getID());

    qStr =
        "Select distinct * from /portfolios pf where pf.getID() = 3 and pf.getLongMinValue() = "
            + Long.MIN_VALUE
            + 'l';
    q = qs.newQuery(qStr);
    result = (SelectResults) q.execute();
    assertEquals(1, result.size());
    pf = (Portfolio) result.iterator().next();
    assertEquals(3, pf.getID());

    qStr =
        "Select distinct * from /portfolios pf where pf.getID() = 3 and pf.getFloatMinValue() = "
            + Float.MIN_VALUE
            + 'f';
    q = qs.newQuery(qStr);
    result = (SelectResults) q.execute();
    assertEquals(1, result.size());
    pf = (Portfolio) result.iterator().next();
    assertEquals(3, pf.getID());

    qStr =
        "Select distinct * from /portfolios pf where pf.getID() = 3 and pf.getDoubleMinValue() = "
            + Double.MIN_VALUE;
    q = qs.newQuery(qStr);
    result = (SelectResults) q.execute();
    assertEquals(1, result.size());
    pf = (Portfolio) result.iterator().next();
    assertEquals(3, pf.getID());
  }
  protected void setUp() throws Exception {
    CacheUtils.startCache();
    cache = CacheUtils.getCache();
    AttributesFactory attributesFactory = new AttributesFactory();
    //    attributesFactory.setValueConstraint(Portfolio.class);
    RegionAttributes regionAttributes = attributesFactory.create();

    region = cache.createRegion("pos", regionAttributes);
    region.put("0", new Portfolio(0));
    region.put("1", new Portfolio(1));
    region.put("2", new Portfolio(2));
    region.put("3", new Portfolio(3));

    qs = cache.getQueryService();
  }
 public void xtestNestQueryInFromClause() throws Exception {
   Region region = CacheUtils.createRegion("Portfolios", Portfolio.class);
   region.put("0", new Portfolio(0));
   region.put("1", new Portfolio(1));
   region.put("2", new Portfolio(2));
   region.put("3", new Portfolio(3));
   Query query =
       CacheUtils.getQueryService()
           .newQuery(
               "SELECT DISTINCT * FROM (SELECT DISTINCT * FROM /Portfolios where status = 'active') p  where p.ID = 0");
   //    DebuggerSupport.waitForJavaDebugger(CacheUtils.getLogger());
   Collection result = (Collection) query.execute();
   Portfolio p = (Portfolio) (result.iterator().next());
   if (!p.status.equals("active") || p.getID() != 0) fail(query.getQueryString());
 }
  protected void publishMessages_nopool() {
    int count = 0;
    Level2Data level2Data;
    long rate;
    long endTime = 0;
    long startTime = System.currentTimeMillis();
    while (count < messagesToPublish) {
      level2Data = createLevel2Data();
      level2Data.setSeqNum(count);

      // Publish it into the region
      String key = getKey(getIntKey());
      level2Data.setSymbol(key);
      region.put(level2Data.getSymbol(), level2Data);

      count++;

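      // Log the publish rate every rateMessageCount messages.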
      if (count % rateMessageCount == 0) {
        endTime = System.currentTimeMillis();
        rate = 1000 * rateMessageCount / (endTime - startTime);
        System.out.println("Publish Rate: " + rate + " msg/sec");
        startTime = System.currentTimeMillis();
      }
    }
  }
  private void checkQueueIteration(List<KeyValue> expected, String... entries) throws Exception {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(1);

    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
    Region r1 = rf1.setPartitionAttributes(paf.create()).create("r1");

    // create the buckets
    r1.put("blah", "blah");

    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
    HDFSBucketRegionQueue brq =
        (HDFSBucketRegionQueue)
            ((PartitionedRegion) hopqueue.getRegion()).getDataStore().getLocalBucketById(0);

    int seq = 0;
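    // A "roll" token rolls over the bucket's skip list; any other entry is split on '-' into a
    // key-value event and enqueued.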
    for (String s : entries) {
      if (s.equals("roll")) {
        brq.rolloverSkipList();
      } else {
        String[] kv = s.split("-");
        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
        getSortedEventQueue(brq).rollover(true);
      }
    }

    Iterator<HDFSGatewayEventImpl> iter = brq.iterator(r1);
    List<KeyValue> actual = new ArrayList<KeyValue>();
    while (iter.hasNext()) {
      HDFSGatewayEventImpl evt = iter.next();
      actual.add(new KeyValue((String) evt.getKey(), (String) evt.getDeserializedValue()));
    }

    assertEquals(expected, actual);
  }
 /**
  * Update an existing key in region REGION_NAME. The keys to update are specified in keyIntervals.
  *
  * @return true if all keys to be updated have been completed.
  */
 protected boolean updateExistingKey() {
   long nextKey =
       CQUtilBB.getBB().getSharedCounters().incrementAndRead(CQUtilBB.LASTKEY_UPDATE_EXISTING_KEY);
   if (!keyIntervals.keyInRange(KeyIntervals.UPDATE_EXISTING_KEY, nextKey)) {
     Log.getLogWriter().info("All existing keys updated; returning from updateExistingKey");
     return true;
   }
   Object key = NameFactory.getObjectNameForCounter(nextKey);
   QueryObject existingValue = (QueryObject) aRegion.get(key);
   if (existingValue == null)
     throw new TestException("Get of key " + key + " returned unexpected " + existingValue);
   QueryObject newValue = existingValue.modifyWithNewInstance(QueryObject.NEGATE, 0, true);
   newValue.extra = key; // encode the key in the object for later validation
   if (existingValue.aPrimitiveLong < 0)
     throw new TestException(
         "Trying to update a key which was already updated: " + existingValue.toStringFull());
   Log.getLogWriter()
       .info("Updating existing key " + key + " with value " + TestHelper.toString(newValue));
   aRegion.put(key, newValue);
   Log.getLogWriter()
       .info(
           "Done updating existing key "
               + key
               + " with value "
               + TestHelper.toString(newValue)
               + ", num remaining: "
               + (keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY) - nextKey));
   return (nextKey >= keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY));
 }
  /**
   * Do operations and pause when directed by the snapshotController. Also writes the snapshot to
   * the blackboard and exports the snapshot to disk.
   */
  protected static void snapshotResponder(RecoveryTest testInstance) throws Exception {
    long pausing = RecoveryBB.getBB().getSharedCounters().read(RecoveryBB.pausing);
    if (pausing > 0) { // controller has signaled to pause
      if ((Boolean) testInstance.threadIsPaused.get()) {
        // this thread has already paused, so don't increment the counter
        Log.getLogWriter().info("Thread has paused");
      } else {
        Log.getLogWriter().info("This thread is pausing");
        testInstance.threadIsPaused.set(Boolean.TRUE);
        RecoveryBB.getBB().getSharedCounters().incrementAndRead(RecoveryBB.pausing);
      }
      long writeSnapshot = RecoveryBB.getBB().getSharedCounters().read(RecoveryBB.writeSnapshot);
      if (writeSnapshot > 0) {
        long leader = RecoveryBB.getBB().getSharedCounters().incrementAndRead(RecoveryBB.leader);
        if (leader == 1) { // this is the thread to write the snapshot
          Log.getLogWriter().info("This thread is the leader; it will write the snapshot");
          testInstance.writeSnapshot(true); // include non-persistent regions
          long executionNumber =
              RecoveryBB.getBB().getSharedCounters().read(RecoveryBB.executionNumber);

          // Add numFilterObjects entries to each region (SnapshotFilter tests)
          if (SnapshotPrms.useFilterOnExport() || SnapshotPrms.useFilterOnImport()) {
            Map allRegionsSnapshot = new HashMap();
            int numToCreate = SnapshotPrms.numFilterObjects();
            for (Region aRegion : testInstance.allRegions) {
              for (int i = 1; i <= numToCreate; i++) {
                String key = "FilterObject_" + i;
                String value =
                    "object to be filtered via snapshot.save() or snapshot.load(): this should never be a value in the cache once snapshot restored";
                aRegion.put(key, value);
              }
            }
            Log.getLogWriter()
                .info("Wrote " + numToCreate + " FilterObject entries to each region");
          }

          CacheSnapshotService snapshot = CacheHelper.getCache().getSnapshotService();
          SnapshotOptions options = snapshot.createOptions();
          if (SnapshotPrms.useFilterOnExport()) {
            options.setFilter(getSnapshotFilter());
          }

          String currDirName = System.getProperty("user.dir");
          String snapshotDirName =
              currDirName + File.separator + "cacheSnapshotDir_" + executionNumber;
          Log.getLogWriter().info("Starting cacheSnapshot to " + snapshotDirName);
          snapshot.save(new File(snapshotDirName), SnapshotFormat.GEMFIRE, options);
          Log.getLogWriter().info("Completed cacheSnapshot to " + snapshotDirName);
          RecoveryBB.getBB().getSharedCounters().increment(RecoveryBB.snapshotWritten);
        }
      }
      MasterController.sleepForMs(5000);
    } else { // not pausing
      long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
      long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;
      testInstance.doOperations(minTaskGranularityMS);
    }
  }
  @Test
  public void testMultipleOrderByClauses() {
    Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
    region.put("2", new Portfolio(2));
    region.put("3", new Portfolio(3));
    region.put("4", new Portfolio(4));
    region.put("5", new Portfolio(5));
    region.put("6", new Portfolio(6));
    region.put("7", new Portfolio(7));
    QueryService qs = CacheUtils.getQueryService();
    String qry =
        "select distinct status, getID from /portfolios pf where getID < 10 order by status asc, getID desc";
    Query q = qs.newQuery(qry);
    try {
      SelectResults result = (SelectResults) q.execute();
      Iterator itr = result.iterator();
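      // 'active' sorts before 'inactive'; within each status the IDs come back descending:
      // 6, 4, 2 and then 7, 5, 3.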
      int j = 6;
      while (itr.hasNext() && j > 0) {
        Struct struct = (Struct) itr.next();
        assertEquals("active", struct.get("status"));
        assertEquals(j, ((Integer) struct.get("getID")).intValue());

        j -= 2;
      }
      j = 7;
      while (itr.hasNext()) {
        Struct struct = (Struct) itr.next();
        assertEquals(j, ((Integer) struct.get("getID")).intValue());
        assertEquals("inactive", struct.get("status"));
        j -= 2;
      }
      /*
      qry = "select distinct getID, status from /portfolios pf where getID < 10 order by getID asc";
      q = qs.newQuery(qry);
      result = (SelectResults) q.execute();
      itr = result.iterator();
      j = 0;
      while ( itr.hasNext()) {
       Struct struct = (Struct)itr.next();
       assertEquals(j++, ((Integer)struct.get("getID")).intValue());
      }*/
    } catch (Exception e) {
      e.printStackTrace();
      fail("Test failed because of exception=" + e);
    }
  }
  @Test
  public void testBug() throws Exception {
    Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
    region.put("0", new Portfolio(0));
    region.put("1", new Portfolio(1));
    region.put("2", new Portfolio(2));
    region.put("3", new Portfolio(3));
    QueryService qs = CacheUtils.getQueryService();
    /*String qStr = "Select distinct structset.sos, structset.key " +
    "from /portfolios pfos, pfos.positions.values outerPos, " +
    "(SELECT DISTINCT key: key, sos: pos.sharesOutstanding "+
    "from /portfolios.entries pf, pf.value.positions.values pos " +
    "where outerPos.secId != 'IBM' AND " +
    "pf.key IN (select distinct * from pf.value.collectionHolderMap['0'].arr)) structset " +
     "where structset.sos > 2000";*/

    String qStr =
        "Select distinct * from /portfolios pf, pf.positions.values where status = 'active' and secId = 'IBM'";
    qs.createIndex("index1", IndexType.FUNCTIONAL, "status", "/portfolios pf");

    qs.createIndex(
        "index4",
        IndexType.FUNCTIONAL,
        "itr",
        "/portfolios pf, pf.collectionHolderMap chm, chm.value.arr itr");
    qs.createIndex(
        "index2", IndexType.FUNCTIONAL, "status", "/portfolios pf, positions.values pos");
    qs.createIndex("index3", IndexType.FUNCTIONAL, "secId", "/portfolios pf, positions.values pos");
    qs.createIndex(
        "index5",
        IndexType.FUNCTIONAL,
        "pos.secId",
        "/portfolios pf, pf.collectionHolderMap chm, chm.value.arr, pf.positions.values pos");
    qs.createIndex(
        "index6", IndexType.FUNCTIONAL, "status", "/portfolios pf, pf.collectionHolderMap chm");
    qs.createIndex(
        "index7",
        IndexType.FUNCTIONAL,
        "itr",
        "/portfolios pf, positions.values, pf.collectionHolderMap chm, chm.value.arr itr");
    Query q = qs.newQuery(qStr);
    SelectResults result = (SelectResults) q.execute();
    if (result.size() == 0) fail("Test failed as size is zero");
  }
 /** put value-2 on key-1 */
 public static void putValue2() {
   try {
     Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     r1.put("key-1", "value-2");
   } catch (Exception ex) {
     ex.printStackTrace();
     fail("failed while region.put()", ex);
   }
 }
 @Before
 public void setUp() throws java.lang.Exception {
   CacheUtils.startCache();
   Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
   for (int i = 0; i < 4; i++) {
     region.put("" + i, new Portfolio(i));
     // CacheUtils.log(new Portfolio(i));
   }
 }
  public void putJavaClientObject(Region region) {
    // Test cases with Person of type Serializable, DataSerializable
    // Create Person Object and set its fields.
    /*
    Person p = new Person(1l);
    p.setFirstName("Diya");
    p.setMiddleName("Sandip");
    p.setLastName("Patel");
    p.setGender(Gender.FEMALE);
    p.setBirthDate(DateTimeUtils.createDate(2009, Calendar.OCTOBER, 03));
    */

    // region.put("1", "value1");
    region.put("11", 1101);
    region.put("12", "value-12");
    region.put("13", 9001L);

    System.out.println("puts successful on string type keys 11, 12, 13");
  }
  @Before
  public void setup() {
    assertThat(users).isNotNull();

    if (users.isEmpty()) {
      for (User user : TEST_USERS) {
        users.put(getKey(user), user);
      }

      assertThat(users.isEmpty()).isFalse();
      assertThat(users.size()).isEqualTo(TEST_USERS.size());
    }
  }
  /** perform put operation */
  public static void doPuts(String key) throws Exception {
    Region region1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
    // the first put creates the entry; each subsequent put updates it, each in its own transaction

    for (int i = 1; i < 200; i++) {
      Object obj = "sup" + i;
      cache.getLogger().info("put happened value : " + obj);
      region1.getCache().getCacheTransactionManager().begin();
      region1.put(key, obj);
      region1.getCache().getCacheTransactionManager().commit();
    }
    cache.getLogger().info("put happened for key : " + key);
  }
 public void xtestVoidMethods() throws Exception {
   Region region = CacheUtils.createRegion("Data", Data.class);
   region.put("0", new Data());
   Query query =
       CacheUtils.getQueryService().newQuery("SELECT DISTINCT * FROM /Data where voidMethod");
   Collection result = (Collection) query.execute();
   if (result.size() != 0) fail(query.getQueryString());
   query =
       CacheUtils.getQueryService()
           .newQuery("SELECT DISTINCT * FROM /Data where voidMethod = null ");
   result = (Collection) query.execute();
   if (result.size() != 1) fail(query.getQueryString());
 }
  public static void main(String[] args) throws Exception {

    Properties props = new Properties();
    props.setProperty("name", "CqServer");
    props.setProperty("log-level", "warning");

    System.out.println("\nConnecting to the distributed system and creating the cache.");
    DistributedSystem ds = DistributedSystem.connect(props);
    Cache cache = CacheFactory.create(ds);

    // Create region.
    AttributesFactory factory = new AttributesFactory();
    factory.setDataPolicy(DataPolicy.REPLICATE);
    factory.setScope(Scope.DISTRIBUTED_ACK);
    Region testRegion = cache.createRegion("test-cq", factory.create());
    System.out.println("Test region, " + testRegion.getFullPath() + ", created in cache.");

    // Start Cache Server.
    CacheServer server = cache.addCacheServer();
    server.setPort(40404);
    server.setNotifyBySubscription(true);
    server.start();

    System.out.println("Waiting for signal");
    // wait for signal
    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
    bufferedReader.readLine();

    System.out.println("Received signal");

    testRegion.put("one", 1);
    testRegion.put("two", 2);
    testRegion.put("three", 3);

    System.out.println("Waiting for shutdown");
    bufferedReader.readLine();
  }
 public void xtestBug32763()
     throws FunctionDomainException, TypeMismatchException, NameResolutionException,
         QueryInvocationTargetException, TimeoutException, CacheWriterException {
   Region region = CacheUtils.createRegion("pos", Portfolio.class);
   region.put("0", new Portfolio(0));
   region.put("1", new Portfolio(1));
   region.put("2", new Portfolio(2));
   region.put("3", new Portfolio(3));
   QueryService qs = CacheUtils.getQueryService();
   String qStr =
       "SELECT DISTINCT key: key, iD: entry.value.iD, secId: posnVal.secId  FROM /pos.entries entry, entry.value.positions.values posnVal  WHERE entry.value.\"type\" = 'type0' AND posnVal.secId = 'YHOO'";
   Query q = qs.newQuery(qStr);
   SelectResults result = (SelectResults) q.execute();
   StructType type = (StructType) result.getCollectionType().getElementType();
   String[] names = type.getFieldNames();
   List list = result.asList();
   if (list.size() < 1) fail("Test failed as the resultset's size is zero");
   for (int i = 0; i < list.size(); ++i) {
     Struct stc = (Struct) list.get(i);
     if (!stc.get(names[2]).equals("YHOO")) {
       fail("Test failed as the SecID value is not YHOO");
     }
   }
 }
  protected void runClientCacheProducer() {
    ClientCache localClientCache = null;

    try {
      localClientCache = createClientCache();

      Region<String, Integer> example = localClientCache.getRegion(toRegionPath("Example"));

      assertRegion(example, "Example");

      example.put("four", 4);
      example.put("five", 5);
    } finally {
      closeClientCache(localClientCache);
    }
  }
  @Test
  public void testBug40441() throws Exception {
    CacheUtils.startCache();
    final Cache cache = CacheUtils.getCache();
    AttributesFactory attributesFactory = new AttributesFactory();
    RegionAttributes ra = attributesFactory.create();
    final Region region = cache.createRegion("new_pos", ra);
    String queryStr1 =
        " select distinct r.name, pVal, r.\"type\"  "
            + " from /new_pos r , r.positions.values pVal where "
            + " ( r.undefinedTestField.toString = UNDEFINED  OR false ) "; // AND pVal.mktValue =
                                                                           // 1.00";
    String queryStr2 =
        " select distinct r.name, pVal, r.\"type\"  "
            + " from /new_pos r , r.positions.values pVal where "
            + " ( r.undefinedTestField.toString = UNDEFINED  AND true ) AND pVal.mktValue = 1.00";
    final QueryService qs = CacheUtils.getQueryService();
    for (int i = 1; i < 100; ++i) {
      NewPortfolio pf = new NewPortfolio("name" + i, i);
      region.put("name" + i, pf);
    }

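    // Create several indexes, including one on undefinedTestField.toString, which the queries
    // above compare against UNDEFINED.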
    Index indx1 =
        qs.createIndex(
            "MarketValues",
            IndexType.FUNCTIONAL,
            "itr2.mktValue",
            "/new_pos itr1, itr1.positions.values itr2");
    Index indx2 = qs.createIndex("Name", IndexType.FUNCTIONAL, "itr1.name", "/new_pos itr1");
    Index indx3 = qs.createIndex("nameIndex", IndexType.PRIMARY_KEY, "name", "/new_pos");
    Index indx4 = qs.createIndex("idIndex", IndexType.FUNCTIONAL, "id", "/new_pos");
    Index indx5 = qs.createIndex("statusIndex", IndexType.FUNCTIONAL, "status", "/new_pos");
    Index indx6 =
        qs.createIndex(
            "undefinedFieldIndex", IndexType.FUNCTIONAL, "undefinedTestField.toString", "/new_pos");
    final Query q1 = qs.newQuery(queryStr1);
    final Query q2 = qs.newQuery(queryStr2);
    try {
      SelectResults sr1 = (SelectResults) q1.execute();
      SelectResults sr2 = (SelectResults) q2.execute();
    } catch (Throwable e) {
      e.printStackTrace();
      fail("Test failed due to = " + e.toString());
    }
  }
 /**
  * Load a region with keys and values. The number of keys and values is specified by the total
  * number of keys in keyIntervals. This can be invoked by several threads to accomplish the work.
  */
 public void loadRegion() {
   final long LOG_INTERVAL_MILLIS = 10000;
   int numKeysToCreate = keyIntervals.getNumKeys();
   long lastLogTime = System.currentTimeMillis();
   long startTime = System.currentTimeMillis();
   SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
   do {
     long shouldAddCount =
         CQUtilBB.getBB().getSharedCounters().incrementAndRead(CQUtilBB.SHOULD_ADD_COUNT);
     if (shouldAddCount > numKeysToCreate) {
       String aStr =
           "In loadRegion, shouldAddCount is "
               + shouldAddCount
               + ", numOriginalKeysCreated is "
               + sc.read(CQUtilBB.NUM_ORIGINAL_KEYS_CREATED)
               + ", numKeysToCreate is "
               + numKeysToCreate
               + ", region size is "
               + aRegion.size();
       Log.getLogWriter().info(aStr);
       NameBB.getBB().printSharedCounters();
       throw new StopSchedulingTaskOnClientOrder(aStr);
     }
     Object key = NameFactory.getNextPositiveObjectName();
     QueryObject value = getValueToAdd(key);
     value.extra = key;
     Log.getLogWriter().info("Creating with put, key " + key + ", value " + value.toStringFull());
     aRegion.put(key, value);
     sc.increment(CQUtilBB.NUM_ORIGINAL_KEYS_CREATED);
     if (System.currentTimeMillis() - lastLogTime > LOG_INTERVAL_MILLIS) {
       Log.getLogWriter()
           .info(
               "Added "
                   + NameFactory.getPositiveNameCounter()
                   + " out of "
                   + numKeysToCreate
                   + " entries into "
                   + TestHelper.regionToString(aRegion, false));
       lastLogTime = System.currentTimeMillis();
     }
   } while ((minTaskGranularitySec == -1)
       || (System.currentTimeMillis() - startTime < minTaskGranularityMS));
 }
 /**
  * Creates a new key/value in the given region by creating a new key within the range and a random
  * value.
  *
  * @param aRegion The region to create the new key in.
  * @param exists Not used in this overridden method; this test wants to use unique keys even on
  *     creates, so we don't do anything different here based on the value of exists.
  * @return An instance of Operation describing the create operation.
  */
 @Override
 public Operation createEntry(Region aRegion, boolean exists) {
   int lower = ((Integer) (lowerKeyRange.get())).intValue();
   int upper = ((Integer) (upperKeyRange.get())).intValue();
   long keyIndex = TestConfig.tab().getRandGen().nextInt(lower, upper);
   long startKeyIndex = keyIndex;
   Object key = NameFactory.getObjectNameForCounter(keyIndex);
   boolean containsKey = aRegion.containsKey(key);
   while (containsKey) { // looking for a key that does not exist
     keyIndex++; // go to the next key
     if (keyIndex > upper) keyIndex = lower;
     if (keyIndex == startKeyIndex) { // considered all keys
       return null;
     }
     key = NameFactory.getObjectNameForCounter(keyIndex);
     containsKey = aRegion.containsKey(key);
   }
   BaseValueHolder vh = new ValueHolder(key, randomValues, new Integer(modValInitializer++));
   try {
     Log.getLogWriter()
         .info(
             "createEntryKeyRange: putting key "
                 + key
                 + ", object "
                 + vh.toString()
                 + " in region "
                 + aRegion.getFullPath());
     aRegion.put(key, vh);
     Log.getLogWriter()
         .info(
             "createEntryKeyRange: done putting key "
                 + key
                 + ", object "
                 + vh.toString()
                 + " in region "
                 + aRegion.getFullPath());
   } catch (Exception e) {
     throw new TestException(TestHelper.getStackTrace(e));
   }
   return new Operation(aRegion.getFullPath(), key, Operation.ENTRY_CREATE, null, vh.modVal);
 }
 /**
  * Verify that queries on indexes reflect puts made within a transaction once it commits (bug
  * 40842).
  */
 @Test
 public void testIndexOnCommitForPut() throws Exception {
   AttributesFactory af = new AttributesFactory();
   af.setDataPolicy(DataPolicy.REPLICATE);
   Region region = cache.createRegion("sample", af.create());
   qs.createIndex("foo", IndexType.FUNCTIONAL, "age", "/sample");
   Context ctx = cache.getJNDIContext();
   UserTransaction utx = (UserTransaction) ctx.lookup("java:/UserTransaction");
   Integer x = new Integer(0);
   utx.begin();
   region.create(x, new Person("xyz", 45));
   utx.commit();
   Query q = qs.newQuery("select * from /sample where age < 50");
   assertEquals(1, ((SelectResults) q.execute()).size());
   Person dsample = (Person) CopyHelper.copy(region.get(x));
   dsample.setAge(55);
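   // update the entry in a new transaction; after commit, age 55 no longer matches "age < 50"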
   utx.begin();
   region.put(x, dsample);
   utx.commit();
   CacheUtils.log(((Person) region.get(x)));
   assertEquals(0, ((SelectResults) q.execute()).size());
 }
 /**
  * Add a new key to REGION_NAME.
  *
  * @return true if all new keys have been added (specified by CQUtilPrms.numNewKeys)
  */
 protected boolean addNewKey() {
   SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
   long numNewKeysCreated = sc.incrementAndRead(CQUtilBB.NUM_NEW_KEYS_CREATED);
   if (numNewKeysCreated > numNewKeys) {
     Log.getLogWriter().info("All new keys created; returning from addNewKey");
     return true;
   }
   Object key = NameFactory.getNextPositiveObjectName();
   checkContainsValueForKey(key, false, "before addNewKey");
   QueryObject value =
       new QueryObject(
           NameFactory.getCounterForName(key), QueryObject.EQUAL_VALUES, -1, queryDepth);
   value.extra = key; // encode the key in the value for later validation
   Log.getLogWriter().info("Adding new key " + key + " with put");
   aRegion.put(key, value);
   Log.getLogWriter()
       .info(
           "Done adding new key "
               + key
               + " with put, "
               + "num remaining: "
               + (numNewKeys - numNewKeysCreated));
   return (numNewKeysCreated >= numNewKeys);
 }