@Override
public void close() throws IOException {
  mReader.close();
  Diagnostic.developerLog("There were " + mDoubleFetched + " tabixed records double-fetched due to overlapping blocks");
}
private void updateProgress() {
  if (mBasicProgress) {
    synchronized (mJobs) {
      mTotalJobsFinished++;
      final String message = mThreadPoolName + ": " + mTotalJobsFinished + "/" + mTotalJobs + " Jobs Finished";
      Diagnostic.progress(message);
      Diagnostic.developerLog(message);
    }
  }
}
/**
 * Constructor for a thread pool. Basic progress output enabled by default.
 *
 * @param numberThreads maximum number of threads that will be used
 * @param subname textual label to use in threads
 * @param logLifecycleEvents logs thread life cycle events
 */
public SimpleThreadPool(final int numberThreads, final String subname, boolean logLifecycleEvents) {
  mThreadPoolName = subname;
  assert numberThreads > 0;
  mMaxThreads = numberThreads;
  mQueueThread = new QueueThread("SimpleThreadPool-" + mThreadPoolName + "-Queue", subname, logLifecycleEvents);
  mQueueThread.setDaemon(true);
  mQueueThread.start();
  Diagnostic.developerLog(mThreadPoolName + ": Starting SimpleThreadPool with maximum " + numberThreads + " threads");
}
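// A minimal usage sketch, not part of the original source: it assumes the pool exposes
// execute(IORunnable) to queue a job and terminate() to wait for completion and rethrow any
// failure recorded by the queue thread. Those method names are assumptions for illustration.
private static void runJobsExample(final int threads, final IORunnable[] jobs) throws IOException {
  final SimpleThreadPool pool = new SimpleThreadPool(threads, "example", true);
  for (final IORunnable job : jobs) {
    pool.execute(job); // queued jobs are handed to idle WorkerThreads by the QueueThread
  }
  pool.terminate(); // blocks until the queue drains and all workers have finished
}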
/**
 * Load the names if they haven't already been loaded.
 *
 * @throws IOException if an I/O related error occurs
 */
private void loadNames() throws IOException {
  mNames = new PrereadNames(mDirectory, mRegion, false);
  if (mIndexFile.getVersion() >= IndexFile.SEPARATE_CHECKSUM_VERSION && mRegion.getStart() == 0 && mRegion.getEnd() == mIndexFile.getNumberSequences()) {
    if (mNames.calcChecksum() != mIndexFile.getNameChecksum()) {
      throw new CorruptSdfException("Sequence names failed checksum - SDF may be corrupt: \"" + mDirectory + "\"");
    } else {
      Diagnostic.developerLog("Sequence names passed checksum");
    }
  }
}
private void loadNameSuffixes(boolean attemptLoad, boolean suffixExists) throws IOException {
  mNameSuffixes = attemptLoad && suffixExists
      ? new PrereadNames(mDirectory, mRegion, true)
      : new EmptyStringPrereadNames(mEnd - mStart);
  if (attemptLoad && suffixExists) {
    if (mRegion.getStart() == 0 && mRegion.getEnd() == mIndexFile.getNumberSequences()) {
      if (mNameSuffixes.calcChecksum() != mIndexFile.getNameSuffixChecksum()) {
        throw new CorruptSdfException("Sequence name suffixes failed checksum - SDF may be corrupt: \"" + mDirectory + "\"");
      } else {
        Diagnostic.developerLog("Sequence name suffixes passed checksum");
      }
    }
  }
}
private void populateNext(boolean force) throws IOException {
  final int previousStart = mNextAlignmentStart;
  final int previousTemplateId = mNextTemplateId;
  mNextRecord = null;
  if (force) {
    advanceSubIterator();
  }
  while (mCurrentOffset <= mOffsets.size()) {
    if (!mBuffered && !mReader.hasNext()) { // Only happens when stream is exhausted, so effectively just closes things out
      advanceSubIterator();
    } else {
      if (mBuffered) {
        mBuffered = false;
      } else {
        mReader.next();
      }
      final String refName = mReader.getReferenceName();
      final Integer refId = mSequenceLookup.get(refName); // Note that we cannot rely on mReader.getReferenceId in this scenario, as that is built up incrementally
      if (refId == null) {
        throw new IOException("Tabixed input contained a sequence name not found in the corresponding index: " + refName);
      }
      if (refId > mCurrentTemplate) { // Current offset has exceeded the region and the block overlapped the next template
        mBuffered = true;
        advanceSubIterator();
      } else {
        if (refId < mCurrentTemplate) { // Current block may occasionally return records from the previous template if the block overlaps
          //Diagnostic.developerLog("Ignoring record from earlier template at " + mReader.getReferenceName() + ":" + (mReader.getStartPosition() + 1) + " (" + refId + "<" + mCurrentTemplate + ")");
          continue;
        }
        final int alignmentStart = mReader.getStartPosition();
        final int alignmentEnd = alignmentStart + mReader.getLengthOnReference();
        if (alignmentEnd <= mCurrentRegion.getStart()) { // Before the region
          //Diagnostic.developerLog("Ignoring record from earlier than start at " + mReader.getReferenceName() + ":" + (mReader.getStartPosition() + 1));
          continue;
        }
        if (alignmentStart <= mPreviousAlignmentStart) { // This record would have already been returned by an earlier region
          //Diagnostic.developerLog("Ignoring record from earlier block at " + mReader.getReferenceName() + ":" + (alignmentStart + 1));
          mDoubleFetched++;
          if (mDoubleFetched % 100000 == 0) {
            Diagnostic.developerLog("Many double-fetched records noticed at " + mReader.getReferenceName() + ":" + (alignmentStart + 1) + " in region " + mCurrentRegion + " (skipping through to " + mPreviousAlignmentStart + ")");
          }
          continue;
        }
        if (alignmentStart >= mCurrentRegion.getEnd()) { // Past the current region, advance the iterator and record the furthest we got
          if (previousStart != Integer.MIN_VALUE && previousTemplateId == mCurrentTemplate) {
            mPreviousAlignmentStart = previousStart;
          } else {
            mPreviousAlignmentStart = Integer.MIN_VALUE;
          }
          mBuffered = true;
          advanceSubIterator();
          continue;
        }
        mNextRecord = mReader.getRecord();
        mNextTemplateId = mCurrentTemplate;
        mNextAlignmentStart = alignmentStart;
        break;
      }
    }
  }
}
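// Illustrative sketch only (not from the original source): the pre-fetch pattern that
// populateNext(boolean) is written for. A wrapper typically reports the buffered record via
// hasNext() and, in next(), returns it before immediately pre-fetching the following in-region
// record. The String record type is an assumption based on mReader.getRecord() above.
public boolean hasNext() {
  return mNextRecord != null;
}

public String next() throws IOException {
  final String result = mNextRecord; // record left behind by the previous populateNext() call
  populateNext(false);               // pre-fetch the next record that falls inside the regions
  return result;
}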
@Override
public void run() {
  try {
    Diagnostic.developerLog(mSubName + ": Started");
    while (mProcessJobs) {
      boolean localBusy = false;
      synchronized (mJobs) {
        for (final WorkerThread t : mThreads) {
          if (!t.hasJob()) {
            if (!mJobs.isEmpty()) {
              t.enqueueJob(mJobs.remove());
              localBusy = true;
              if (mLogLifecycleEvents) {
                Diagnostic.developerLog(mSubName + ": New Job Started by thread: " + t.getName() + " - " + mJobs.size() + " Jobs Left Queued");
              }
            }
          } else {
            localBusy = true;
          }
        }
        while (!mJobs.isEmpty() && mThreads.size() < mMaxThreads) {
          final WorkerThread t = new WorkerThread(mSubName + "-" + mThreads.size(), mJobs);
          mThreads.add(t);
          if (mLogLifecycleEvents) {
            Diagnostic.developerLog(mSubName + ": Worker Thread Created - " + t.getName() + " - " + mThreads.size() + "/" + mMaxThreads + " Threads");
          }
          t.enqueueJob(mJobs.remove());
          t.start();
          localBusy = true;
          if (mLogLifecycleEvents) {
            Diagnostic.developerLog(mSubName + ": New Job Started by thread: " + t.getName() + " - " + mJobs.size() + " Jobs Left Queued");
          }
        }
        mBusy = localBusy;
        mJobs.notifyAll();
        try {
          if (mProcessJobs) {
            mJobs.wait(NOT_DONE_SLEEP_TIME);
          }
        } catch (final InterruptedException e) {
          // don't care
        }
      }
    }
  } catch (final Throwable t) {
    mThrown = t;
    mProcessJobs = false;
    ProgramState.setAbort();
  } finally {
    for (final WorkerThread t : mThreads) {
      t.die();
    }
    mBusy = false;
    synchronized (mJobs) {
      mJobs.clear();
    }
    Diagnostic.developerLog(mSubName + ": Finished");
    synchronized (this) {
      mQueueDone = true;
      notifyAll();
    }
  }
}
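// Sketch of the submission/shutdown side this run() loop pairs with (hypothetical method names;
// only mJobs, mQueueDone and the monitors used above are taken from the original). Jobs are
// added under the mJobs lock and the loop is woken via notifyAll(); shutdown waits on the
// QueueThread monitor until the finally block sets mQueueDone. The Runnable job type is assumed.
void enqueueJobExample(final Runnable job) {
  synchronized (mJobs) {
    mJobs.add(job);
    mJobs.notifyAll(); // wake the queue loop so it can dispatch to an idle WorkerThread
  }
}

void waitUntilQueueDoneExample() throws InterruptedException {
  synchronized (this) {
    while (!mQueueDone) {
      wait(); // released by the notifyAll() in the finally block of run() above
    }
  }
}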