 /**
  * Returns the bloom filter for {@code fieldName} on the given reader, building and caching it on
  * first access. When {@code asyncLoad} is {@code true} the filter is loaded on the cached thread
  * pool, in which case {@link BloomFilter#NONE} may be returned until the load completes.
  */
 @Override
 public BloomFilter filter(IndexReader reader, String fieldName, boolean asyncLoad) {
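   // short-circuit for readers that hold no documents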
   int currentNumDocs = reader.numDocs();
   if (currentNumDocs == 0) {
     return BloomFilter.EMPTY;
   }
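   // per-reader cache of field name -> bloom filter entry, created lazily with double-checked locking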
   ConcurrentMap<String, BloomFilterEntry> fieldCache = cache.get(reader.getFieldCacheKey());
   if (fieldCache == null) {
     synchronized (creationMutex) {
       fieldCache = cache.get(reader.getFieldCacheKey());
       if (fieldCache == null) {
         fieldCache = ConcurrentCollections.newConcurrentMap();
         cache.put(reader.getFieldCacheKey(), fieldCache);
       }
     }
   }
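   // get the entry for this field, creating it (and kicking off its load) under the per-reader map lock on a miss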
   BloomFilterEntry filter = fieldCache.get(fieldName);
   if (filter == null) {
     synchronized (fieldCache) {
       filter = fieldCache.get(fieldName);
       if (filter == null) {
          filter = new BloomFilterEntry(currentNumDocs, BloomFilter.NONE);
         filter.loading.set(true);
         fieldCache.put(fieldName, filter);
          // load the filter, asynchronously on the cached thread pool if requested, otherwise inline
         BloomFilterLoader loader = new BloomFilterLoader(reader, fieldName);
         if (asyncLoad) {
           threadPool.cached().execute(loader);
         } else {
           loader.run();
           filter = fieldCache.get(fieldName);
         }
       }
     }
   }
   // if too many docs have been deleted, reload the bloom filter so it stays effective
   if (filter.numDocs > 1000 && ((double) currentNumDocs / filter.numDocs) < 0.6) {
     if (filter.loading.compareAndSet(false, true)) {
        // reload the filter, asynchronously on the cached thread pool if requested, otherwise inline
       BloomFilterLoader loader = new BloomFilterLoader(reader, fieldName);
       if (asyncLoad) {
         threadPool.cached().execute(loader);
       } else {
         loader.run();
         filter = fieldCache.get(fieldName);
       }
     }
   }
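   // note: may still return BloomFilter.NONE while an asynchronous load is in flight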
   return filter.filter;
 }
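
 /**
  * Closes all shards of this index concurrently on the cached thread pool and blocks until every
  * shard has been processed. The {@code delete} flag controls whether shard data is removed as
  * part of the close.
  */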
 @Override
 public void close(final boolean delete) {
   try {
     Set<Integer> shardIds = shardIds();
     final CountDownLatch latch = new CountDownLatch(shardIds.size());
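      // close each shard on the cached thread pool; the latch lets us wait for all of them below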
     for (final int shardId : shardIds) {
       threadPool
           .cached()
           .execute(
               new Runnable() {
                 @Override
                 public void run() {
                   try {
                     deleteShard(shardId, delete, !delete, delete);
                   } catch (Exception e) {
                      logger.warn("failed to close shard [{}], delete [{}]", e, shardId, delete);
                   } finally {
                     latch.countDown();
                   }
                 }
               });
     }
     try {
       latch.await();
     } catch (InterruptedException e) {
        throw new ElasticSearchInterruptedException(
            "interrupted closing index [" + index().name() + "]", e);
     }
   } finally {
     indicesLifecycle.removeListener(cleanCacheOnIndicesLifecycleListener);
   }
 }
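
 /** Notifies all registered listeners of a node failure on a background (cached pool) thread. */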
 private void notifyNodeFailure(final DiscoveryNode node, final String reason) {
   threadPool
       .cached()
       .execute(
           new Runnable() {
             @Override
             public void run() {
               for (Listener listener : listeners) {
                 listener.onNodeFailure(node, reason);
               }
             }
           });
 }
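
 /**
  * Notifies listeners that the master node failed, at most once (guarded by {@code
  * notifiedMasterFailure}), and then stops this component with the failure reason.
  */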
 private void notifyMasterFailure(final DiscoveryNode masterNode, final String reason) {
   if (notifiedMasterFailure.compareAndSet(false, true)) {
     threadPool
         .cached()
         .execute(
             new Runnable() {
               @Override
               public void run() {
                 for (Listener listener : listeners) {
                   listener.onMasterFailure(masterNode, reason);
                 }
               }
             });
     stop("master failure, " + reason);
   }
 }