  @Test
  public void testNegativeGroupCaching() throws Exception {
    final String user = "******";
    final String failMessage = "Did not throw IOException: ";
    conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 2);
    FakeTimer timer = new FakeTimer();
    Groups groups = new Groups(conf, timer);
    groups.cacheGroupsAdd(Arrays.asList(myGroups));
    groups.refresh();
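    // Blacklisted users get no groups from FakeGroupMapping, so lookups for
    // them are expected to fail with "No groups found for user".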
    FakeGroupMapping.addToBlackList(user);

    // In the first attempt, the user will be put in the negative cache.
    try {
      groups.getGroups(user);
      fail(failMessage + "Failed to obtain groups from FakeGroupMapping.");
    } catch (IOException e) {
      // The first call is expected to throw; as a side effect the user is
      // placed in the negative cache.
      GenericTestUtils.assertExceptionContains("No groups found for user", e);
    }

    // The second time, the user is in the negative cache.
    try {
      groups.getGroups(user);
      fail(failMessage + "The user is in the negative cache.");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("No groups found for user", e);
    }

    // Brings back the backend user-group mapping service.
    FakeGroupMapping.clearBlackList();

    // The lookup should still be answered from the negative cache and fail.
    try {
      groups.getGroups(user);
      fail(
          failMessage
              + "The user is still in the negative cache, even though "
              + "FakeGroupMapping has resumed.");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("No groups found for user", e);
    }

    // Let the elements in the negative cache expire.
    timer.advance(4 * 1000);

    // The negative cache entry for the user has expired, so a fresh copy of
    // the user's groups is fetched.
    assertEquals(Arrays.asList(myGroups), groups.getGroups(user));
  }
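
  /**
   * Verifies that negative cache entries expire independently: user1 and
   * user2 are added one second apart, and each disappears from the negative
   * cache once the 2-second negative-cache window has elapsed on the fake
   * timer.
   */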
  @Test
  public void testNegativeCacheEntriesExpire() throws Exception {
    conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 2);
    FakeTimer timer = new FakeTimer();
    // Ensure that stale entries are removed from negative cache every 2 seconds
    Groups groups = new Groups(conf, timer);
    groups.cacheGroupsAdd(Arrays.asList(myGroups));
    groups.refresh();
    // Add both users to the blacklist so that they end up in the
    // negative cache.
    FakeGroupMapping.addToBlackList("user1");
    FakeGroupMapping.addToBlackList("user2");

    // Put user1 in negative cache.
    try {
      groups.getGroups("user1");
      fail("Did not throw IOException : Failed to obtain groups" + " from FakeGroupMapping.");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("No groups found for user", e);
    }
    // Check if user1 exists in negative cache
    assertTrue(groups.getNegativeCache().contains("user1"));

    // Advance fake timer
    timer.advance(1000);
    // Put user2 in negative cache
    try {
      groups.getGroups("user2");
      fail("Did not throw IOException : Failed to obtain groups" + " from FakeGroupMapping.");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("No groups found for user", e);
    }
    // Check if user2 exists in negative cache
    assertTrue(groups.getNegativeCache().contains("user2"));

    // Advance timer. Only user2 should be present in negative cache.
    timer.advance(1100);
    assertFalse(groups.getNegativeCache().contains("user1"));
    assertTrue(groups.getNegativeCache().contains("user2"));

    // Advance timer again. Now even user2 should have expired from the
    // negative cache.
    timer.advance(1000);
    assertFalse(groups.getNegativeCache().contains("user2"));
  }
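
  /**
   * Verifies that when a cached entry has expired, many concurrent lookups
   * for the same user cause only one extra request to the backing group
   * mapping; the remaining callers are served the previously cached value.
   */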
  @Test
  public void testOnlyOneRequestWhenExpiredEntryExists() throws Exception {
    conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
    FakeTimer timer = new FakeTimer();
    final Groups groups = new Groups(conf, timer);
    groups.cacheGroupsAdd(Arrays.asList(myGroups));
    groups.refresh();
    FakeGroupMapping.clearBlackList();
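    // Slow down each backend lookup so that the concurrent threads below
    // overlap a single in-flight reload of the expired entry.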
    FakeGroupMapping.setGetGroupsDelayMs(100);

    // We make an initial request to populate the cache
    groups.getGroups("me");
    int startingRequestCount = FakeGroupMapping.getRequestCount();

    // Then expire that entry
    timer.advance(400 * 1000);
    Thread.sleep(100);

    ArrayList<Thread> threads = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      threads.add(
          new Thread() {
            @Override
            public void run() {
              try {
                assertEquals(2, groups.getGroups("me").size());
              } catch (IOException e) {
                fail("Should not happen");
              }
            }
          });
    }

    // We start a bunch of threads that should all see the cached value
    for (Thread t : threads) {
      t.start();
    }

    for (Thread t : threads) {
      t.join();
    }

    // Only one extra request is made
    assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
  }
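
  /**
   * Verifies that a positive cache entry expires after
   * HADOOP_SECURITY_GROUPS_CACHE_SECS, so the next lookup for the same user
   * results in a fresh fetch from the backing group mapping.
   */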
  @Test
  public void testCacheEntriesExpire() throws Exception {
    conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
    FakeTimer timer = new FakeTimer();
    final Groups groups = new Groups(conf, timer);
    groups.cacheGroupsAdd(Arrays.asList(myGroups));
    groups.refresh();
    FakeGroupMapping.clearBlackList();

    // We make an entry
    groups.getGroups("me");
    int startingRequestCount = FakeGroupMapping.getRequestCount();

    timer.advance(20 * 1000);

    // Cache entry has expired so it results in a new fetch
    groups.getGroups("me");
    assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
  }