Example #1
 /**
  * Get a random key from the given region that is within this instance's range. If no keys qualify
  * in the region, return null.
  *
  * @param aRegion - The region to get the key from.
  * @param excludeKey - A key to exclude; it is never returned even if it is present in aRegion.
  * @returns A key from aRegion, or null.
  */
 public Object getRandomKey(Region aRegion, Object excludeKey) {
   long start = System.currentTimeMillis();
   int lower = ((Integer) (lowerKeyRange.get())).intValue();
   int upper = ((Integer) (upperKeyRange.get())).intValue();
   long randomKeyIndex = TestConfig.tab().getRandGen().nextLong(lower, upper);
   long startKeyIndex = randomKeyIndex;
   Object key = NameFactory.getObjectNameForCounter(randomKeyIndex);
   do {
     if (!key.equals(excludeKey) && aRegion.containsKey(key)) {
       break; // found a qualifying key
     }
     randomKeyIndex++; // go to the next key
     if (randomKeyIndex > upper) {
       randomKeyIndex = lower; // wrap around to the bottom of the range
     }
     if (randomKeyIndex == startKeyIndex) { // considered all keys without finding one
       key = null;
       break;
     }
     key = NameFactory.getObjectNameForCounter(randomKeyIndex);
   } while (true);
   long end = System.currentTimeMillis();
   Log.getLogWriter()
       .info(
           "Done in TxUtilKeyRange:getRandomKey, key is "
               + key
               + " "
               + aRegion.getFullPath()
               + " getRandomKey took "
               + (end - start)
               + " millis");
   return key;
 }
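A caller would typically use getRandomKey to pick an existing entry for a follow-up operation while avoiding the key it just worked on. A minimal usage sketch (the txUtil instance holding this method and the update value are hypothetical, not part of the snippet above):

   // Hypothetical usage: choose a key other than the one just created, then update it.
   Object newKey = NameFactory.getNextPositiveObjectName();
   Object otherKey = txUtil.getRandomKey(aRegion, newKey); // null if no key in range qualifies
   if (otherKey != null) {
     aRegion.put(otherKey, "updated-" + System.currentTimeMillis());
   }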
Example #3
 /**
  * Load a region with keys and values. The number of entries to create is the total number of
  * keys in keyIntervals. This can be invoked by several threads to divide up the work.
  */
 public void loadRegion() {
   final long LOG_INTERVAL_MILLIS = 10000;
   int numKeysToCreate = keyIntervals.getNumKeys();
   long lastLogTime = System.currentTimeMillis();
   long startTime = System.currentTimeMillis();
   SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
   do {
     long shouldAddCount =
         CQUtilBB.getBB().getSharedCounters().incrementAndRead(CQUtilBB.SHOULD_ADD_COUNT);
     if (shouldAddCount > numKeysToCreate) {
       String aStr =
           "In loadRegion, shouldAddCount is "
               + shouldAddCount
               + ", numOriginalKeysCreated is "
               + sc.read(CQUtilBB.NUM_ORIGINAL_KEYS_CREATED)
               + ", numKeysToCreate is "
               + numKeysToCreate
               + ", region size is "
               + aRegion.size();
       Log.getLogWriter().info(aStr);
       NameBB.getBB().printSharedCounters();
       throw new StopSchedulingTaskOnClientOrder(aStr);
     }
     Object key = NameFactory.getNextPositiveObjectName();
     QueryObject value = getValueToAdd(key);
     value.extra = key;
     Log.getLogWriter().info("Creating with put, key " + key + ", value " + value.toStringFull());
     aRegion.put(key, value);
     sc.increment(CQUtilBB.NUM_ORIGINAL_KEYS_CREATED);
     if (System.currentTimeMillis() - lastLogTime > LOG_INTERVAL_MILLIS) {
       Log.getLogWriter()
           .info(
               "Added "
                   + NameFactory.getPositiveNameCounter()
                   + " out of "
                   + numKeysToCreate
                   + " entries into "
                   + TestHelper.regionToString(aRegion, false));
       lastLogTime = System.currentTimeMillis();
     }
   } while ((minTaskGranularitySec == -1)
       || (System.currentTimeMillis() - startTime < minTaskGranularityMS));
 }
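The loop above batches work by time: it keeps creating entries until the configured task granularity elapses, and it signals overall completion by throwing StopSchedulingTaskOnClientOrder once SHOULD_ADD_COUNT exceeds the target. The granularity fields are presumably initialized from the test configuration, along the lines of the derivation used in verifyRegionContents in Example #8 (a sketch assuming the same TestHelperPrms key):

   // Hypothetical initialization of the batching granularity consulted by loadRegion
   long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
   long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;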
public abstract class BaseHoplogTestCase extends TestCase {
  public static final String HDFS_STORE_NAME = "hdfs";
  public static final Random rand = new Random(System.currentTimeMillis());
  protected Path testDataDir;
  protected Cache cache;

  protected HDFSRegionDirector director;
  protected HdfsRegionManager regionManager;
  protected HDFSStoreFactory hsf;
  protected HDFSStoreImpl hdfsStore;
  protected RegionFactory<Object, Object> regionfactory;
  protected Region<Object, Object> region;
  protected SortedOplogStatistics stats;
  protected HFileStoreStatistics storeStats;
  protected BlockCache blockCache;

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    System.setProperty(HDFSStoreImpl.ALLOW_STANDALONE_HDFS_FILESYSTEM_PROP, "true");

    // These exceptions are logged by HDFS when it is stopped.
    TestUtils.addExpectedException("sleep interrupted");
    TestUtils.addExpectedException("java.io.InterruptedIOException");

    testDataDir = new Path("test-case");

    cache = createCache();

    configureHdfsStoreFactory();
    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);

    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
    region = regionfactory.create(getName());

    // disable compaction by default and clear existing queues
    HDFSCompactionManager compactionManager = HDFSCompactionManager.getInstance(hdfsStore);
    compactionManager.reset();

    director = HDFSRegionDirector.getInstance();
    director.setCache(cache);
    regionManager = ((LocalRegion) region).getHdfsRegionManager();
    stats = director.getHdfsRegionStats("/" + getName());
    storeStats = hdfsStore.getStats();
    blockCache = hdfsStore.getBlockCache();
    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
  }

  protected void configureHdfsStoreFactory() throws Exception {
    hsf = this.cache.createHDFSStoreFactory();
    hsf.setHomeDir(testDataDir.toString());
    HDFSCompactionConfigFactory cc = hsf.createCompactionConfigFactory(null);
    cc.setAutoCompaction(false);
    cc.setAutoMajorCompaction(false);
    hsf.setHDFSCompactionConfig(cc.create());
  }

  protected Cache createCache() {
    CacheFactory cf = new CacheFactory().set("mcast-port", "0").set("log-level", "info");
    cache = cf.create();
    return cache;
  }

  @Override
  protected void tearDown() throws Exception {
    if (region != null) {
      region.destroyRegion();
    }

    if (hdfsStore != null) {
      hdfsStore.getFileSystem().delete(testDataDir, true);
      hdfsStore.destroy();
    }

    if (cache != null) {
      cache.close();
    }
    super.tearDown();
    TestUtils.removeExpectedException("sleep interrupted");
    TestUtils.removeExpectedException("java.io.InterruptedIOException");
  }

  /**
   * Creates a hoplog file with numKeys records. Keys follow the key-X pattern, where X ranges from
   * 0 to numKeys - 1 (shifted by an offset of 100000 when numKeys > 10); each value is derived
   * from System.nanoTime().
   *
   * @return the sorted map of inserted KVs
   */
  protected TreeMap<String, String> createHoplog(int numKeys, Hoplog oplog) throws IOException {
    int offset = (numKeys > 10 ? 100000 : 0);

    HoplogWriter writer = oplog.createWriter(numKeys);
    TreeMap<String, String> map = new TreeMap<String, String>();
    for (int i = offset; i < (numKeys + offset); i++) {
      String key = ("key-" + i);
      String value = ("value-" + System.nanoTime());
      writer.append(key.getBytes(), value.getBytes());
      map.put(key, value);
    }
    writer.close();
    return map;
  }

  protected FileStatus[] getBucketHoplogs(String regionAndBucket, final String type)
      throws IOException {
    return getBucketHoplogs(hdfsStore.getFileSystem(), regionAndBucket, type);
  }

  protected FileStatus[] getBucketHoplogs(FileSystem fs, String regionAndBucket, final String type)
      throws IOException {
    FileStatus[] hoplogs =
        fs.listStatus(
            new Path(testDataDir, regionAndBucket),
            new PathFilter() {
              @Override
              public boolean accept(Path file) {
                return file.getName().endsWith(type);
              }
            });
    return hoplogs;
  }

  protected String getRandomHoplogName() {
    String hoplogName = "hoplog-" + System.nanoTime() + "-" + rand.nextInt(10000) + ".hop";
    return hoplogName;
  }

  public static MiniDFSCluster initMiniCluster(int port, int numDN) throws Exception {
    HashMap<String, String> map = new HashMap<String, String>();
    map.put(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
    return initMiniCluster(port, numDN, map);
  }

  public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map)
      throws Exception {
    System.setProperty("test.build.data", "hdfs-test-cluster");
    Configuration hconf = new HdfsConfiguration();
    for (Entry<String, String> entry : map.entrySet()) {
      hconf.set(entry.getKey(), entry.getValue());
    }

    hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");

    Builder builder = new MiniDFSCluster.Builder(hconf);
    builder.numDataNodes(numDN);
    builder.nameNodePort(port);
    MiniDFSCluster cluster = builder.build();
    return cluster;
  }

  public static void setConfigFile(HDFSStoreFactory factory, File configFile, String config)
      throws Exception {
    BufferedWriter bw = new BufferedWriter(new FileWriter(configFile));
    bw.write(config);
    bw.close();
    factory.setHDFSClientConfigFile(configFile.getName());
  }

  public static void alterMajorCompaction(HDFSStoreImpl store, boolean enable) {
    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
    mutator.getCompactionConfigMutator().setAutoMajorCompaction(enable);
    store.alter(mutator);
  }

  public static void alterMinorCompaction(HDFSStoreImpl store, boolean enable) {
    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
    mutator.getCompactionConfigMutator().setAutoCompaction(enable);
    store.alter(mutator);
  }

  public void deleteMiniClusterDir() throws Exception {
    File clusterDir = new File("hdfs-test-cluster");
    if (clusterDir.exists()) {
      FileUtils.deleteDirectory(clusterDir);
    }
  }

  public static class TestEvent extends SortedHDFSQueuePersistedEvent {
    Object key;

    public TestEvent(String k, String v) throws Exception {
      this(k, v, Operation.PUT_IF_ABSENT);
    }

    public TestEvent(String k, String v, Operation op) throws Exception {
      super(v, op, (byte) 0x02, false, new DiskVersionTag(), BlobHelper.serializeToBlob(k), 0);
      this.key = k;
    }

    public Object getKey() {
      return key;
    }

    public Object getNewValue() {
      return valueObject;
    }

    public Operation getOperation() {
      return op;
    }

    public Region<Object, Object> getRegion() {
      return null;
    }

    public Object getCallbackArgument() {
      return null;
    }

    public boolean isCallbackArgumentAvailable() {
      return false;
    }

    public boolean isOriginRemote() {
      return false;
    }

    public DistributedMember getDistributedMember() {
      return null;
    }

    public boolean isExpiration() {
      return false;
    }

    public boolean isDistributed() {
      return false;
    }

    public Object getOldValue() {
      return null;
    }

    public SerializedCacheValue<Object> getSerializedOldValue() {
      return null;
    }

    public SerializedCacheValue<Object> getSerializedNewValue() {
      return null;
    }

    public boolean isLocalLoad() {
      return false;
    }

    public boolean isNetLoad() {
      return false;
    }

    public boolean isLoad() {
      return false;
    }

    public boolean isNetSearch() {
      return false;
    }

    public TransactionId getTransactionId() {
      return null;
    }

    public boolean isBridgeEvent() {
      return false;
    }

    public boolean hasClientOrigin() {
      return false;
    }

    public boolean isOldValueAvailable() {
      return false;
    }
  }

  public abstract class AbstractCompactor implements Compactor {
    @Override
    public HDFSStore getHdfsStore() {
      return hdfsStore;
    }

    public void suspend() {}

    public void resume() {}

    public boolean isBusy(boolean isMajor) {
      return false;
    }
  }
}
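BaseHoplogTestCase is designed to be extended: setUp() gives each concrete test a cache, an HDFS store, and a PARTITION_HDFS region named after the test, and tearDown() removes them again. A minimal sketch of a subclass (the class name, key/value strings, and assertion are illustrative only):

public class ExampleHdfsRegionTest extends BaseHoplogTestCase {
  // Illustrative test: write through the HDFS-backed region created by setUp()
  // and read the entries back through the region API.
  public void testPutAndGet() throws Exception {
    for (int i = 0; i < 100; i++) {
      region.put("key-" + i, "value-" + i);
    }
    assertEquals("value-42", region.get("key-42"));
  }
}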
Example #8
  /**
   * Verify the contents of the region, taking into account the keys that were destroyed,
   * invalidated, etc. (as specified in keyIntervals). Throw an error if any problems are detected.
   * This must be called repeatedly by the same thread until StopSchedulingTaskOnClientOrder is
   * thrown.
   */
  public void verifyRegionContents() {
    final long LOG_INTERVAL_MILLIS = 10000;
    // we already completed this check once; we can't do it again without reinitializing the
    // verify state variables
    if (verifyRegionContentsCompleted) {
      throw new TestException(
          "Test configuration problem; already verified region contents, "
              + "cannot call this task again without resetting batch variables");
    }

    // iterate keys
    long lastLogTime = System.currentTimeMillis();
    long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
    long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;
    long startTime = System.currentTimeMillis();
    long size = aRegion.size();
    boolean first = true;
    int numKeysToCheck = keyIntervals.getNumKeys() + numNewKeys;
    while (verifyRegionContentsIndex < numKeysToCheck) {
      verifyRegionContentsIndex++;
      if (first) {
        Log.getLogWriter()
            .info(
                "In verifyRegionContents, region has "
                    + size
                    + " keys; starting verify at verifyRegionContentsIndex "
                    + verifyRegionContentsIndex
                    + "; verifying key names with indexes through (and including) "
                    + numKeysToCheck);
        first = false;
      }

      // check region size the first time through the loop to avoid it being called
      // multiple times when this is batched
      if (verifyRegionContentsIndex == 1) {
        if (totalNumKeys != size) {
          String tmpStr = "Expected region size to be " + totalNumKeys + ", but it is size " + size;
          Log.getLogWriter().info(tmpStr);
          verifyRegionContentsErrStr.append(tmpStr + "\n");
        }
      }

      Object key = NameFactory.getObjectNameForCounter(verifyRegionContentsIndex);
      try {
        if (((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.NONE))
                && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.NONE)))
            || ((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.GET))
                && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.GET)))) {
          // this key was untouched after its creation
          checkContainsKey(key, true, "key was untouched");
          checkContainsValueForKey(key, true, "key was untouched");
          Object value = aRegion.get(key);
          checkValue(key, value);
        } else if ((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.INVALIDATE))
            && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.INVALIDATE))) {
          checkContainsKey(key, true, "key was invalidated");
          checkContainsValueForKey(key, false, "key was invalidated");
        } else if ((verifyRegionContentsIndex
                >= keyIntervals.getFirstKey(KeyIntervals.LOCAL_INVALIDATE))
            && (verifyRegionContentsIndex
                <= keyIntervals.getLastKey(KeyIntervals.LOCAL_INVALIDATE))) {
          // this key was locally invalidated
          checkContainsKey(key, true, "key was locally invalidated");
          checkContainsValueForKey(key, true, "key was locally invalidated");
          Object value = aRegion.get(key);
          checkValue(key, value);
        } else if ((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.DESTROY))
            && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.DESTROY))) {
          // this key was destroyed
          checkContainsKey(key, false, "key was destroyed");
          checkContainsValueForKey(key, false, "key was destroyed");
        } else if ((verifyRegionContentsIndex
                >= keyIntervals.getFirstKey(KeyIntervals.LOCAL_DESTROY))
            && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.LOCAL_DESTROY))) {
          // this key was locally destroyed
          checkContainsKey(key, true, "key was locally destroyed");
          checkContainsValueForKey(key, true, "key was locally destroyed");
          Object value = aRegion.get(key);
          checkValue(key, value);
        } else if ((verifyRegionContentsIndex
                >= keyIntervals.getFirstKey(KeyIntervals.UPDATE_EXISTING_KEY))
            && (verifyRegionContentsIndex
                <= keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY))) {
          // this key was updated
          checkContainsKey(key, true, "key was updated");
          checkContainsValueForKey(key, true, "key was updated");
          Object value = aRegion.get(key);
          checkUpdatedValue(key, value);
        } else if (verifyRegionContentsIndex > keyIntervals.getNumKeys()) {
          // key was newly added
          checkContainsKey(key, true, "key was new");
          checkContainsValueForKey(key, true, "key was new");
          Object value = aRegion.get(key);
          checkValue(key, value);
        }
      } catch (TestException e) {
        Log.getLogWriter().info(TestHelper.getStackTrace(e));
        verifyRegionContentsErrStr.append(e.getMessage() + "\n");
      }

      if (System.currentTimeMillis() - lastLogTime > LOG_INTERVAL_MILLIS) {
        Log.getLogWriter()
            .info("Verified key " + verifyRegionContentsIndex + " out of " + totalNumKeys);
        lastLogTime = System.currentTimeMillis();
      }

      if (System.currentTimeMillis() - startTime >= minTaskGranularityMS) {
        Log.getLogWriter()
            .info(
                "In HydraTask_verifyRegionContents, returning before completing verify "
                    + "because of task granularity (this task must be batched to complete); last key verified is "
                    + key);
        return; // task is batched; we are done with this batch
      }
    }
    verifyRegionContentsCompleted = true;
    if (verifyRegionContentsErrStr.length() > 0) {
      throw new TestException(verifyRegionContentsErrStr.toString());
    }
    String aStr =
        "In HydraTask_verifyRegionContents, verified " + verifyRegionContentsIndex + " keys/values";
    Log.getLogWriter().info(aStr);
    throw new StopSchedulingTaskOnClientOrder(aStr);
  }
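Because verifyRegionContents returns early whenever the task granularity is exceeded, the scheduling framework re-invokes it until StopSchedulingTaskOnClientOrder is thrown. Outside that framework, the same batching contract could be exercised with a plain driver loop (a sketch; the enclosing test instance is assumed to hold the verify state fields):

  // Hypothetical driver: keep invoking the batched task until it signals completion.
  try {
    while (true) {
      verifyRegionContents(); // returns early when one batch's time budget is used up
    }
  } catch (StopSchedulingTaskOnClientOrder done) {
    Log.getLogWriter().info("Region verification complete: " + done.getMessage());
  }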
  /**
   * Tests that we are in {@link GemFireHealth#OKAY_HEALTH okay} health if cache loads take too
   * long.
   *
   * @see CacheHealthEvaluator#checkLoadTime
   */
  public void testCheckLoadTime() throws CacheException {
    Cache cache = CacheFactory.create(this.system);
    CachePerfStats stats = ((GemFireCacheImpl) cache).getCachePerfStats();

    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setCacheLoader(
        new CacheLoader() {
          public Object load(LoaderHelper helper) throws CacheLoaderException {

            return "Loaded";
          }

          public void close() {}
        });

    RegionAttributes attrs = factory.create();
    Region region = cache.createRegion(this.getName(), attrs);

    GemFireHealthConfig config = new GemFireHealthConfigImpl(null);
    config.setMaxLoadTime(100);

    CacheHealthEvaluator eval =
        new CacheHealthEvaluator(config, this.system.getDistributionManager());
    for (int i = 0; i < 10; i++) {
      region.get("Test1 " + i);
    }
    long firstLoadTime = stats.getLoadTime();
    long firstLoadsCompleted = stats.getLoadsCompleted();
    assertTrue(firstLoadTime >= 0);
    assertTrue(firstLoadsCompleted > 0);

    // First time should always be empty
    List status = new ArrayList();
    eval.evaluate(status);
    assertEquals(0, status.size());

    config = new GemFireHealthConfigImpl(null);
    config.setMaxLoadTime(10);
    eval = new CacheHealthEvaluator(config, this.system.getDistributionManager());
    eval.evaluate(status);

    long start = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
      region.get("Test2 " + i);
    }
    assertTrue(System.currentTimeMillis() - start < 1000);
    long secondLoadTime = stats.getLoadTime();
    long secondLoadsCompleted = stats.getLoadsCompleted();
    assertTrue(
        "firstLoadTime=" + firstLoadTime + ", secondLoadTime=" + secondLoadTime,
        secondLoadTime >= firstLoadTime);
    assertTrue(secondLoadsCompleted > firstLoadsCompleted);

    // Average should be less than 10 milliseconds
    status = new ArrayList();
    eval.evaluate(status);
    assertEquals(0, status.size());

    region
        .getAttributesMutator()
        .setCacheLoader(
            new CacheLoader() {
              public Object load(LoaderHelper helper) throws CacheLoaderException {

                try {
                  Thread.sleep(20);

                } catch (InterruptedException ex) {
                  fail("Why was I interrupted?");
                }
                return "Loaded";
              }

              public void close() {}
            });

    for (int i = 0; i < 50; i++) {
      region.get("Test3 " + i);
    }

    long thirdLoadTime = stats.getLoadTime();
    long thirdLoadsCompleted = stats.getLoadsCompleted();
    assertTrue(thirdLoadTime > secondLoadTime);
    assertTrue(thirdLoadsCompleted > secondLoadsCompleted);

    status = new ArrayList();
    eval.evaluate(status);
    assertEquals(1, status.size());

    AbstractHealthEvaluator.HealthStatus ill = (AbstractHealthEvaluator.HealthStatus) status.get(0);
    assertEquals(GemFireHealth.OKAY_HEALTH, ill.getHealthCode());
    String s = "The average duration of a Cache load";
    assertTrue(ill.getDiagnosis().indexOf(s) != -1);
  }