protected void reportStateSize() {
    // Let the terminal-values callback report its own state first
    _getTerminalValuesCallback.reportStateSize();
    if (!s_logger.isInfoEnabled()) {
      // Everything below exists purely to produce INFO level log lines
      return;
    }
    // Total the per-requirement task maps, holding each map's lock while reading its size
    int requirementTasks = 0;
    for (final Map<ResolveTask, ResolveTask> tasks : _requirements.values()) {
      synchronized (tasks) {
        requirementTasks += tasks.size();
      }
    }
    s_logger.info(
        "Requirements cache = {} tasks for {} requirements",
        requirementTasks,
        _requirements.size());
    // Total the per-specification producer maps in the same fashion
    int specificationTasks = 0;
    for (final MapEx<ResolveTask, ResolvedValueProducer> producers : _specifications.values()) {
      synchronized (producers) {
        specificationTasks += producers.size();
      }
    }
    s_logger.info(
        "Specifications cache = {} tasks for {} specifications",
        specificationTasks,
        _specifications.size());
    s_logger.info("Pending requirements = {}", _pendingRequirements.getValueRequirements().size());
    s_logger.info(
        "Run queue length = {}, deferred queue length = {}",
        _runQueue.size(),
        _deferredQueue.size());
  }
  /**
   * Declares that {@code task} is attempting to produce {@code valueSpecification}, registering
   * the supplied producer in the specifications cache unless an equivalent production already
   * exists.
   *
   * <p>The result is one of:
   *
   * <ul>
   *   <li>the supplied {@code producer} — when no equivalent task was registered, or when an
   *       earlier registration by this same task instance is replaced;
   *   <li>the producer registered by an equivalent task that is already doing the work; or
   *   <li>a fresh {@link SingleResolvedValueProducer} — when the specification has already been
   *       resolved.
   * </ul>
   *
   * <p>In every non-null case an additional reference is taken on the returned producer on the
   * caller's behalf; the caller must release it. Returns {@code null} only when no task map is
   * available for the specification and no previously resolved value exists.
   *
   * @param valueSpecification the specification being produced
   * @param task the task that will produce the value
   * @param producer the candidate producer; the caller holds an open reference on it
   * @return the producer to use, with an open reference held for the caller, or null
   */
  public ResolvedValueProducer declareTaskProducing(
      final ValueSpecification valueSpecification,
      final ResolveTask task,
      final ResolvedValueProducer producer) {
    do {
      final MapEx<ResolveTask, ResolvedValueProducer> tasks =
          getBuilder().getOrCreateTasks(valueSpecification);
      ResolvedValueProducer result = null;
      if (tasks != null) {
        ResolvedValueProducer discard = null;
        synchronized (tasks) {
          if (!tasks.isEmpty()) {
            // A null key marks a map that was concurrently retired from the cache (see
            // flushCachedStates); retry so that we pick up (or create) the replacement map
            if (tasks.containsKey(null)) {
              continue;
            }
            final Map.Entry<ResolveTask, ResolvedValueProducer> resolveTask =
                tasks.getHashEntry(task);
            if (resolveTask != null) {
              if (resolveTask.getKey() == task) {
                // Replace an earlier attempt from this task with the new producer
                discard = resolveTask.getValue();
                producer.addRef(); // The caller already holds an open reference
                resolveTask.setValue(producer);
                result = producer;
              } else {
                // An equivalent task is doing the work
                result = resolveTask.getValue();
              }
              result
                  .addRef(); // Either the caller holds an open reference on the producer, or we've
                             // got the task lock
            }
          }
          if (result == null) {
            // No equivalent task found; check for an already-resolved value
            final ResolvedValue value = getBuilder().getResolvedValue(valueSpecification);
            if (value != null) {
              result = new SingleResolvedValueProducer(task.getValueRequirement(), value);
            }
          }
          if (result == null) {
            // No matching tasks
            producer.addRef(); // Caller already holds open reference
            tasks.put(task, producer);
            result = producer;
            result.addRef(); // Caller already holds open reference (this is the producer)
          }
        }
        if (discard != null) {
          // NOTE(review): the replaced producer is released after dropping the map lock —
          // presumably so release callbacks don't run while holding it; confirm against release()
          discard.release(this);
        }
      } else {
        // No task map available; fall back to any previously resolved value
        final ResolvedValue value = getBuilder().getResolvedValue(valueSpecification);
        if (value != null) {
          result = new SingleResolvedValueProducer(task.getValueRequirement(), value);
        }
      }
      return result;
    } while (true);
  }
  /**
   * Returns the tasks currently registered as producing the given value specification, together
   * with their producers, as a pair of parallel arrays.
   *
   * <p>A reference is taken on each returned producer on the caller's behalf; the caller must
   * release them. The returned tasks are NOT ref-counted — they are only intended for comparison
   * purposes (see the inline comment).
   *
   * @param valueSpecification the specification to query
   * @return the producing tasks and their producers, or null if nothing is producing the
   *     specification
   */
  @SuppressWarnings("unchecked")
  public Pair<ResolveTask[], ResolvedValueProducer[]> getTasksProducing(
      final ValueSpecification valueSpecification) {
    do {
      final MapEx<ResolveTask, ResolvedValueProducer> tasks =
          getBuilder().getTasks(valueSpecification);
      if (tasks != null) {
        final ResolveTask[] resultTasks;
        final ResolvedValueProducer[] resultProducers;
        synchronized (tasks) {
          // A null key marks a map that was concurrently retired from the cache (see
          // flushCachedStates); retry so that we pick up the replacement map
          if (tasks.containsKey(null)) {
            continue;
          }
          if (tasks.isEmpty()) {
            return null;
          }
          resultTasks = new ResolveTask[tasks.size()];
          resultProducers = new ResolvedValueProducer[tasks.size()];
          int i = 0;
          for (final Map.Entry<ResolveTask, ResolvedValueProducer> task :
              (Set<Map.Entry<ResolveTask, ResolvedValueProducer>>) tasks.entrySet()) {
            // Don't ref-count the tasks; they're just used for parent comparisons
            resultTasks[i] = task.getKey();
            resultProducers[i++] = task.getValue();
            task.getValue().addRef(); // We're holding the task lock
          }
        }
        return Pair.of(resultTasks, resultProducers);
      } else {
        return null;
      }
    } while (true);
  }
 public void discardTaskProducing(
     final ValueSpecification valueSpecification, final ResolveTask task) {
   do {
     final MapEx<ResolveTask, ResolvedValueProducer> tasks =
         getBuilder().getTasks(valueSpecification);
     if (tasks != null) {
       final ResolvedValueProducer producer;
       synchronized (tasks) {
         if (tasks.containsKey(null)) {
           continue;
         }
         producer = (ResolvedValueProducer) tasks.remove(task);
         if (producer == null) {
           // Wasn't in the set
           return;
         }
       }
       producer.release(this);
     }
     return;
   } while (true);
 }
  /**
   * Scans the resolution caches for tasks that are members of resolution loops and cancels them.
   *
   * <p>Candidate tasks are gathered from both the specifications and requirements caches
   * (retired maps — marked with a null key — are skipped), each candidate is asked to cancel its
   * loop members, and any graph-building state accumulated during cancellation is merged back
   * into the main context.
   */
  @SuppressWarnings("unchecked")
  protected void abortLoops() {
    s_logger.debug("Checking for tasks to abort");
    // NOTE(review): s_abortLoops brackets this whole operation with begin()/end() — presumably a
    // profiling timer or re-entrance guard; confirm against its declaration
    s_abortLoops.begin();
    try {
      final Collection<ResolveTask> toCheck = new ArrayList<ResolveTask>();
      for (final MapEx<ResolveTask, ResolvedValueProducer> tasks : _specifications.values()) {
        synchronized (tasks) {
          if (!tasks.containsKey(null)) {
            toCheck.addAll((Collection<ResolveTask>) tasks.keySet());
          }
        }
      }
      for (final Map<ResolveTask, ResolveTask> tasks : _requirements.values()) {
        synchronized (tasks) {
          if (!tasks.containsKey(null)) {
            toCheck.addAll(tasks.keySet());
          }
        }
      }
      final GraphBuildingContext context = new GraphBuildingContext(this);
      int cancelled = 0;
      // Shared across all cancelLoopMembers calls — presumably so chains already examined for one
      // task aren't re-walked for another; confirm against ResolveTask.cancelLoopMembers
      final Map<Chain, Chain.LoopState> checked = Maps.newHashMapWithExpectedSize(toCheck.size());
      for (ResolveTask task : toCheck) {
        cancelled += task.cancelLoopMembers(context, checked);
      }
      // Fold any state the cancellations produced back into the main building context
      getContext().mergeThreadContext(context);
      if (s_logger.isInfoEnabled()) {
        if (cancelled > 0) {
          s_logger.info("Cancelled {} looped task(s)", cancelled);
        } else {
          s_logger.info("No looped tasks to cancel");
        }
      }
    } finally {
      s_abortLoops.end();
    }
  }
  /**
   * Flushes data that is unlikely to be needed again from the resolution caches. Anything discarded
   * will either never be needed again for any pending resolutions, or is a cached production that
   * can be recalculated if necessary. Discards can be a multiple stage process - repeated calls all
   * the while this function returns true must be used to flush all possible state and make as much
   * memory available as possible for the garbage collector.
   *
   * @return true if one or more states were discarded, false if there was nothing that can be
   *     discarded
   */
  @SuppressWarnings("unchecked")
  protected boolean flushCachedStates() {
    // TODO: use heuristics to throw data away more sensibly (e.g. LRU)
    int removed = 0;
    final List<ResolvedValueProducer> discards = new ArrayList<ResolvedValueProducer>();
    GraphBuildingContext context = null;
    final Iterator<MapEx<ResolveTask, ResolvedValueProducer>> itrSpecifications =
        _specifications.values().iterator();
    while (itrSpecifications.hasNext()) {
      final MapEx<ResolveTask, ResolvedValueProducer> producers = itrSpecifications.next();
      synchronized (producers) {
        // A null key means another thread already retired this map; skip it
        if (producers.containsKey(null)) {
          continue;
        }
        final Iterator<ResolvedValueProducer> itrProducer = producers.values().iterator();
        while (itrProducer.hasNext()) {
          final ResolvedValueProducer producer = itrProducer.next();
          // A ref-count of exactly 1 means only this cache still references the producer
          if (producer.getRefCount() == 1) {
            discards.add(producer);
            itrProducer.remove();
            removed++;
          }
        }
        if (producers.isEmpty()) {
          // Retire the now-empty map: remove it from the cache and mark it with the null-key
          // sentinel so concurrent users still holding it know to retry against the cache
          itrSpecifications.remove();
          producers.put(null, null);
        }
      }
      // Release the cache's references outside of the map lock
      if (!discards.isEmpty()) {
        if (context == null) {
          context = new GraphBuildingContext(this);
        }
        for (final ResolvedValueProducer discard : discards) {
          discard.release(context);
        }
        discards.clear();
      }
    }
    // Unfinished resolveTasks will be removed from the _requirements cache when their refCount hits
    // 1 (the cache only). Finished
    // ones are kept, but should be released when we are low on memory.
    final Iterator<Map<ResolveTask, ResolveTask>> itrRequirements =
        _requirements.values().iterator();
    while (itrRequirements.hasNext()) {
      final Map<ResolveTask, ResolveTask> tasks = itrRequirements.next();
      synchronized (tasks) {
        // Skip maps already retired by another thread (null-key sentinel)
        if (tasks.containsKey(null)) {
          continue;
        }
        final Iterator<ResolveTask> itrTask = tasks.keySet().iterator();
        while (itrTask.hasNext()) {
          final ResolveTask task = itrTask.next();
          // Only this cache references the task, so it can be discarded
          if (task.getRefCount() == 1) {
            discards.add(task);
            itrTask.remove();
            removed++;
          }
        }
        if (tasks.isEmpty()) {
          // Retire the empty map using the same null-key sentinel protocol as above
          itrRequirements.remove();
          tasks.put(null, null);
        }
      }
      // Release the cache's references outside of the map lock
      if (!discards.isEmpty()) {
        if (context == null) {
          context = new GraphBuildingContext(this);
        }
        for (final ResolvedValueProducer discard : discards) {
          discard.release(context);
        }
        discards.clear();
      }
    }
    if (context != null) {
      // Fold any state produced by the releases back into the main building context
      getContext().mergeThreadContext(context);
    }
    if (s_logger.isInfoEnabled()) {
      if (removed > 0) {
        s_logger.info("Discarded {} production task(s)", removed);
      } else {
        s_logger.info("No production tasks to discard");
      }
    }
    return removed > 0;
  }