/**
 * Do a hash join of the buffered solutions with the access path.
 * <p>
 * The pipeline solutions have already been accepted into the hash index
 * ({@code state}); this method scans the access path and joins each chunk of
 * access-path solutions against that index, writing join results onto the
 * default {@code sink}. Depending on the join type, optional solutions or the
 * join set are additionally emitted (to {@code sink2} when one is configured,
 * otherwise to the default sink).
 * <p>
 * Side effects: increments {@code stats.accessPathCount}, adds the (fast,
 * non-exact) range count to {@code stats.accessPathRangeCount}, and flushes
 * the output sink(s).
 */
private void doHashJoin() {

    // Nothing was buffered into the hash index, so there is nothing to join.
    if (state.isEmpty())
        return;

    final IBindingSetAccessPath<?> accessPath = getAccessPath();

    if (log.isInfoEnabled())
        log.info("accessPath=" + accessPath);

    stats.accessPathCount.increment();

    // [exact := false] requests the fast (estimated) range count.
    stats.accessPathRangeCount.add(accessPath.rangeCount(false/* exact */));

    // Chunked output buffer feeding the default sink.
    final UnsyncLocalOutputBuffer<IBindingSet> unsyncBuffer =
            new UnsyncLocalOutputBuffer<IBindingSet>(op.getChunkCapacity(), sink);

    // Optional per-predicate limit on the number of solutions read from the
    // access path (used for cutoff joins); defaults when not annotated.
    final long cutoffLimit = pred.getProperty(
            IPredicate.Annotations.CUTOFF_LIMIT,
            IPredicate.Annotations.DEFAULT_CUTOFF_LIMIT);

    // Obtain the iterator for the current join dimension.
    // NOTE(review): this ICloseableIterator is never explicitly closed here;
    // presumably state.hashJoin() closes it internally — confirm.
    final ICloseableIterator<IBindingSet[]> itr = accessPath
            .solutions(cutoffLimit, stats);

    /*
     * Note: The [stats] are NOT passed in here since the chunksIn and
     * unitsIn were updated when the pipeline solutions were accepted
     * into the hash index. If we passed in stats here, they would be
     * double counted when we executed the hash join against the access
     * path.
     */
    state.hashJoin(
            itr, // left
            null, // stats
            unsyncBuffer // out
    );

    switch (state.getJoinType()) {
    case Normal:
        /*
         * Nothing to do.
         */
        break;
    case Optional:
    case NotExists: {
        /*
         * Output the optional solutions.
         */

        // where to write the optional solutions. When no alternate sink is
        // configured this aliases the default buffer, so the flush below is
        // then an early flush of [unsyncBuffer] (flushed again at the end).
        final AbstractUnsynchronizedArrayBuffer<IBindingSet> unsyncBuffer2 =
                sink2 == null ? unsyncBuffer
                        : new UnsyncLocalOutputBuffer<IBindingSet>(
                                op.getChunkCapacity(), sink2);

        state.outputOptionals(unsyncBuffer2);

        unsyncBuffer2.flush();
        if (sink2 != null)
            sink2.flush();

        break;
    }
    case Exists: {
        /*
         * Output the join set.
         */
        state.outputJoinSet(unsyncBuffer);
        break;
    }
    default:
        // Unknown join type: indicates a programming error, not user input.
        throw new AssertionError();
    }

    // Propagate any buffered solutions downstream.
    unsyncBuffer.flush();
    sink.flush();

}