private void checkIfNeedHeartBeat(
    LinkedList<BackendConnection> heartBeatCons,
    ConQueue queue,
    ConcurrentLinkedQueue<BackendConnection> checkLis,
    long hearBeatTime,
    long hearBeatTime2) {
  int MAX_CONS_IN_ONE_CHECK = 10;
  Iterator<BackendConnection> checkListItor = checkLis.iterator();
  while (checkListItor.hasNext()) {
    BackendConnection con = checkListItor.next();
    if (con.isClosed()) {
      checkListItor.remove();
      continue;
    }
    if (validSchema(con.getSchema())) {
      if (con.getLastTime() < hearBeatTime && heartBeatCons.size() < MAX_CONS_IN_ONE_CHECK) {
        checkListItor.remove();
        // heart beat check
        con.setBorrowed(true);
        heartBeatCons.add(con);
      }
    } else if (con.getLastTime() < hearBeatTime2) {
      // connections with an invalid schema should be closed once idle
      // for more than 2 * conHeartBeatPeriod
      checkListItor.remove();
      con.close("heart beat idle");
    }
  }
}
/**
 * Removes any tasks waiting to be run. Will not interrupt any tasks currently running if
 * {@link #tick(ExceptionHandlerInterface)} is being called, but will prevent additional tasks
 * from being run on the current {@link #tick(ExceptionHandlerInterface)} call.
 *
 * <p>If tasks are added concurrently during this invocation they may or may not be removed.
 *
 * @return List of runnables which were waiting in the task queue to be executed (and have now
 *     been removed)
 */
public List<Runnable> clearTasks() {
  List<TaskContainer> containers;
  synchronized (scheduledQueue.getModificationLock()) {
    containers = new ArrayList<TaskContainer>(executeQueue.size() + scheduledQueue.size());
    Iterator<? extends TaskContainer> it = executeQueue.iterator();
    while (it.hasNext()) {
      TaskContainer tc = it.next();
      /* We must use executeQueue.remove(Object) instead of it.remove()
       * to ensure the task is atomically removed (without executing). */
      if (!tc.running && executeQueue.remove(tc)) {
        int index = ListUtils.getInsertionEndIndex(containers, tc, true);
        containers.add(index, tc);
      }
    }
    it = scheduledQueue.iterator();
    while (it.hasNext()) {
      TaskContainer tc = it.next();
      if (!tc.running) {
        int index = ListUtils.getInsertionEndIndex(containers, tc, true);
        containers.add(index, tc);
      }
    }
    scheduledQueue.clear();
  }
  return ContainerHelper.getContainedRunnables(containers);
}
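// A minimal sketch of the "claim by remove(Object)" idiom relied on above:
// ConcurrentLinkedQueue.remove(Object) returns true for exactly one caller,
// so the winner can safely treat the task as removed before it ever executes.
// Class and variable names here are illustrative only, not from the project above.
import java.util.concurrent.ConcurrentLinkedQueue;

public class ClaimByRemoveDemo {
  public static void main(String[] args) {
    ConcurrentLinkedQueue<Runnable> pending = new ConcurrentLinkedQueue<>();
    Runnable task = () -> System.out.println("task ran");
    pending.add(task);

    if (pending.remove(task)) {
      // We won the race: the task was unlinked without being executed.
      System.out.println("task cancelled before execution");
    } else {
      // Someone else (e.g. the tick loop) already claimed it.
      System.out.println("task already claimed");
    }
  }
}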
private AbstractShard getReadMap(byte[] hash) throws IOException {
  Lock l = gcLock.readLock();
  l.lock();
  // long v = ct.incrementAndGet();
  try {
    if (!runningGC && !lbf.mightContain(hash)) {
      // SDFSLogger.getLog().info("not in bloom filter");
      return null;
    }
  } finally {
    l.unlock();
  }
  Iterator<ProgressiveFileByteArrayLongMap> iter = activeReadMaps.iterator();
  while (iter.hasNext()) {
    ProgressiveFileByteArrayLongMap _m = iter.next();
    if (_m.containsKey(hash)) return _m;
  }
  iter = maps.iterator();
  while (iter.hasNext()) {
    ProgressiveFileByteArrayLongMap _m = iter.next();
    if (!activeReadMaps.contains(_m) && _m.containsKey(hash)) {
      al.lock();
      try {
        // SDFSLogger.getLog().info("adding active " + _m.toString());
        if (activeReadMaps.remainingCapacity() == 0) {
          ProgressiveFileByteArrayLongMap obf = activeReadMaps.poll();
          // SDFSLogger.getLog().info("removed active " + obf.toString());
          if (obf != null) obf.stopRun();
        }
        /*
         * if (activeReadMaps.offer(_m))
         *   SDFSLogger.getLog().info("added active " + _m.toString());
         * else
         *   SDFSLogger.getLog().info("unable to add active " + _m.toString());
         */
        try {
          loadCacheExecutor.execute(_m);
        } catch (Exception e) {
          if (SDFSLogger.isDebug()) SDFSLogger.getLog().debug("unable to cache " + _m, e);
        }
      } finally {
        al.unlock();
      }
      return _m;
    }
  }
  /*
   * if (!runningGC) {
   *   long mv = mt.incrementAndGet();
   *   double pc = (double) mv / (double) v;
   *   SDFSLogger.getLog().info("might be in bloom filter " + runningGC + " pc=" + pc);
   * }
   */
  return null;
}
/**
 * Stops a task.
 *
 * @param url the task's download URL
 * @param remove whether to remove the task entirely instead of pausing it
 */
public void stopTask(String url, boolean remove) {
  // Stop the task if it is currently downloading
  removeDownloadingMap(url);
  Iterator<DownloadInfo> downloadingIt = mDownloadingTasks.iterator();
  while (downloadingIt.hasNext()) {
    DownloadInfo b = downloadingIt.next();
    if (TextUtils.equals(b.getUrl(), url)) {
      b.setState(DownloadInfo.PAUSE);
      downloadingIt.remove();
      if (!remove) {
        mPausingTasks.add(b); // move to the paused queue
      }
      break;
    }
  }
  Iterator<DownloadInfo> waitIt = mWaitTasks.iterator();
  while (waitIt.hasNext()) {
    DownloadInfo b = waitIt.next();
    if (TextUtils.equals(b.getUrl(), url)) {
      b.setState(DownloadInfo.PAUSE);
      waitIt.remove();
      if (!remove) {
        mPausingTasks.add(b); // move to the paused queue
      }
      break;
    }
  }
}
/** Stops all tasks. */
public void stopAllTask() {
  // Interrupt and remove everything from the running-task map
  for (Map.Entry<String, DownloadHttpTask> entry : mDownloadingTaskMap.entrySet()) {
    DownloadHttpTask task = entry.getValue();
    task.setInterrupt(true);
  }
  mDownloadingTaskMap.clear();
  Iterator<DownloadInfo> downloadingIt = mDownloadingTasks.iterator();
  while (downloadingIt.hasNext()) {
    DownloadInfo b = downloadingIt.next();
    b.setState(DownloadInfo.PAUSE);
    downloadingIt.remove();
    mPausingTasks.add(b); // move to the paused queue
  }
  mDownloadingTasks.clear();
  // Pause everything in the waiting queue as well
  Iterator<DownloadInfo> waitIt = mWaitTasks.iterator();
  while (waitIt.hasNext()) {
    DownloadInfo b = waitIt.next();
    b.setState(DownloadInfo.PAUSE);
    waitIt.remove();
    mPausingTasks.add(b); // move to the paused queue
  }
  mWaitTasks.clear();
}
public ResultSet dump() throws SQLException {
  CachedRowSetImpl rs = new CachedRowSetImpl();
  RowSetMetaDataImpl meta = new RowSetMetaDataImpl();
  meta.setColumnCount(2);
  meta.setColumnName(1, "QUEUE_TYPE");
  meta.setColumnType(1, Types.VARCHAR);
  meta.setColumnName(2, "REQUEST");
  meta.setColumnType(2, Types.VARCHAR);
  rs.setMetaData(meta);
  synchronized (queue) {
    Iterator<AbstractQueryManager> it = queue.iterator();
    while (it.hasNext()) {
      AbstractQueryManager s = it.next();
      rs.moveToInsertRow();
      rs.updateString(1, "QUEUE");
      rs.updateString(2, s.getSql());
      rs.insertRow();
      rs.moveToCurrentRow();
    }
    it = executingQueries.iterator();
    while (it.hasNext()) {
      AbstractQueryManager s = it.next();
      rs.moveToInsertRow();
      rs.updateString(1, "EXECUTING_QUERIES");
      rs.updateString(2, s.getSql());
      rs.insertRow();
      rs.moveToCurrentRow();
    }
  }
  return rs;
}
@Override
public void tick(Stock stock) {
  Iterator<AsyncContext> it = clients.iterator();
  while (it.hasNext()) {
    AsyncContext actx = it.next();
    writeStock(actx, stock);
  }
}
private void removeOldRequestTraces(ConcurrentLinkedQueue<HttpRequestTrace> httpRequestTraces) {
  for (Iterator<HttpRequestTrace> iterator = httpRequestTraces.iterator(); iterator.hasNext(); ) {
    HttpRequestTrace httpRequestTrace = iterator.next();
    final long timeInBuffer = System.currentTimeMillis() - httpRequestTrace.getTimestampEnd();
    if (timeInBuffer > MAX_REQUEST_TRACE_BUFFERING_TIME) {
      iterator.remove();
    }
  }
}
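// Most of these snippets rely on ConcurrentLinkedQueue's weakly consistent
// iterator: it never throws ConcurrentModificationException and its remove()
// unlinks an element without blocking concurrent producers. A minimal,
// self-contained sketch of the age-based eviction pattern above; the class
// name and time values are illustrative only.
import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedQueue;

public class ExpiringQueueDemo {
  public static void main(String[] args) {
    ConcurrentLinkedQueue<Long> timestamps = new ConcurrentLinkedQueue<>();
    long now = System.currentTimeMillis();
    timestamps.add(now - 60_000); // old entry, should be evicted
    timestamps.add(now);          // fresh entry, should survive

    long maxAgeMillis = 30_000;
    // iterator.remove() is supported and safe even while other threads
    // keep offering new elements to the queue.
    for (Iterator<Long> it = timestamps.iterator(); it.hasNext(); ) {
      long ts = it.next();
      if (System.currentTimeMillis() - ts > maxAgeMillis) {
        it.remove();
      }
    }
    System.out.println("remaining entries: " + timestamps.size()); // 1
  }
}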
@Override
public void cancelTasks(Plugin plugin) {
  Iterator<PipeTask> iterator = queuedTasks.iterator();
  while (iterator.hasNext()) {
    PipeTask task = iterator.next();
    if (task.getOwner().equals(plugin)) {
      task.cancel0();
      iterator.remove();
    }
  }
}
public Gamer unregisterGamer(Entity entity) {
  Iterator<Gamer> itel = gamers.iterator();
  while (itel.hasNext()) {
    Gamer g = itel.next();
    if (g.getPlayer() == entity) {
      itel.remove();
      return g;
    }
  }
  return null;
}
@Override
public void tick(Stock stock) {
  Iterator<AsyncContext> it = clients.iterator();
  while (it.hasNext()) {
    AsyncContext actx = it.next();
    try {
      writeStock(actx, stock);
    } catch (Exception e) {
      // Ignore. The async error handling will deal with this.
    }
  }
}
@Override
public void cancelTask(int taskId) {
  Iterator<PipeTask> iterator = queuedTasks.iterator();
  while (iterator.hasNext()) {
    PipeTask task = iterator.next();
    if (task.getTaskId() == taskId) {
      task.cancel0();
      iterator.remove();
      break;
    }
  }
}
protected void broadcastOnResume(AtmosphereResource<?, ?> r) {
  Iterator<Entry> i = broadcastOnResume.iterator();
  while (i.hasNext()) {
    Entry e = i.next();
    e.multipleAtmoResources = r;
    push(e);
  }
  if (resources.isEmpty()) {
    broadcastOnResume.clear();
  }
}
private long getPos(byte[] hash) throws IOException {
  long pos = -1;
  Lock l = gcLock.readLock();
  l.lock();
  try {
    if (!runningGC && !lbf.mightContain(hash)) return pos;
  } finally {
    l.unlock();
  }
  Iterator<ProgressiveFileByteArrayLongMap> iter = activeReadMaps.iterator();
  while (iter.hasNext()) {
    ProgressiveFileByteArrayLongMap m = iter.next();
    pos = m.get(hash);
    if (pos != -1) {
      return pos;
    }
  }
  if (pos == -1) {
    iter = maps.iterator();
    while (iter.hasNext()) {
      ProgressiveFileByteArrayLongMap m = iter.next();
      pos = m.get(hash);
      if (pos != -1) {
        al.lock();
        try {
          if (!activeReadMaps.contains(m)) {
            if (SDFSLogger.isDebug())
              SDFSLogger.getLog().debug("adding active " + m.toString());
            if (activeReadMaps.remainingCapacity() == 0) {
              ProgressiveFileByteArrayLongMap obf = activeReadMaps.poll();
              if (obf != null) obf.stopRun();
            }
            activeReadMaps.offer(m);
            try {
              loadCacheExecutor.execute(m);
            } catch (Exception e) {
              SDFSLogger.getLog().debug("unable to cache " + m, e);
            }
          }
        } finally {
          al.unlock();
        }
        return pos;
      }
    }
  }
  return pos;
}
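// A minimal sketch of the bounded "active set" eviction used in the two
// SDFS methods above, assuming a BlockingQueue with a fixed capacity
// (the real field may be a different queue type); names and capacities are
// illustrative only: when the queue is full, poll() evicts the oldest
// entry before the new one is offered.
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BoundedActiveSetDemo {
  public static void main(String[] args) {
    BlockingQueue<String> active = new ArrayBlockingQueue<>(2);
    add(active, "map-a");
    add(active, "map-b");
    add(active, "map-c"); // evicts "map-a"
    System.out.println(active); // [map-b, map-c]
  }

  static void add(BlockingQueue<String> active, String map) {
    if (active.remainingCapacity() == 0) {
      String evicted = active.poll(); // oldest entry leaves the active set
      if (evicted != null) {
        System.out.println("evicted " + evicted);
      }
    }
    active.offer(map);
  }
}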
private void preTick_native(float f) {
  // Drain the task queue, skipping cancelled tasks; stop when the queue is empty.
  AppTask task = pQueue.poll();
  while (task != null) {
    if (!task.isCancelled()) {
      try {
        task.invoke();
      } catch (Exception ex) {
        logger.log(Level.SEVERE, null, ex);
      }
    }
    task = pQueue.poll();
  }
  for (Iterator<PhysicsTickListener> it = tickListeners.iterator(); it.hasNext(); ) {
    PhysicsTickListener physicsTickCallback = it.next();
    physicsTickCallback.prePhysicsTick(this, f);
  }
}
public void doTick(int currentTick) {
  this.currentTick = currentTick;
  Iterator<PipeTask> iterator = queuedTasks.iterator();
  while (iterator.hasNext()) {
    PipeTask task = iterator.next();
    long period = task.getPeriod();
    if (period == -2) {
      iterator.remove();
      continue;
    }
    if (task.getNextRun() <= currentTick) {
      runningTasks.put(task.getTaskId(), task);
      if (period == -1) {
        iterator.remove();
      } else {
        task.setNextRun(currentTick + task.getPeriod());
      }
    }
  }
  for (PipeTask runningTask : runningTasks.values()) {
    if (runningTask.isSync()) {
      try {
        runningTask.run();
      } catch (final Throwable throwable) {
        runningTask
            .getOwner()
            .getLogger()
            .log(
                Level.WARNING,
                String.format(
                    "Task #%s for %s generated an exception",
                    runningTask.getTaskId(),
                    runningTask.getOwner().getDescription().getFullName()),
                throwable);
      }
    } else {
      executor.execute(runningTask);
    }
  }
  runningTasks.clear();
}
public ImageReferenceRow toRowEntry() {
  return new ImageReferenceRow(
      urlHash,
      urllength, // byte-length of complete URL
      urlcomps, // number of path components
      wordsintitle, // length of description/length (longer are better?)
      hitcount, // how often this word appears in the text
      wordsintext, // total number of words
      phrasesintext, // total number of phrases
      positions.iterator().next(), // position of word in all words
      posinphrase, // position of word in its phrase
      posofphrase, // number of the phrase where word appears
      lastModified, // last-modified time of the document where word appears
      System.currentTimeMillis(), // update time
      language, // (guessed) language of document
      type, // type of document
      llocal, // outlinks to same domain
      lother, // outlinks to other domain
      flags // attributes of the url and of the word according to the url
      );
}
public void handleNotification(Notification notification, Object handback) {
  String type = notification.getType();
  if (type.equals(NotificationType.requestReceived)) {
    RequestNotificationData data = (RequestNotificationData) notification.getUserData();
    String key = keysList.get(data.getMethodName());
    if (key != null) {
      ((MethodStatisticsAbstract) statistics.get(key))
          .notifyArrivalOfRequest(notification.getTimeStamp());
    }
  } else if (type.equals(NotificationType.servingStarted)) {
    RequestNotificationData data = (RequestNotificationData) notification.getUserData();
    String key = keysList.get(data.getMethodName());
    if (key != null) {
      ((MethodStatisticsAbstract) statistics.get(key))
          .notifyDepartureOfRequest(notification.getTimeStamp());
    }
  } else if (type.equals(NotificationType.replySent)) {
    RequestNotificationData data = (RequestNotificationData) notification.getUserData();
    String key = keysList.get(data.getMethodName());
    if (key != null) {
      ((MethodStatisticsAbstract) statistics.get(key))
          .notifyReplyOfRequestSent(notification.getTimeStamp());
    }
  } else if (type.equals(NotificationType.voidRequestServed)) {
    RequestNotificationData data = (RequestNotificationData) notification.getUserData();
    String key = keysList.get(data.getMethodName());
    if (key != null) {
      ((MethodStatisticsAbstract) statistics.get(key))
          .notifyReplyOfRequestSent(notification.getTimeStamp());
    }
  } else if (type.equals(NotificationType.setOfNotifications)) {
    @SuppressWarnings("unchecked")
    ConcurrentLinkedQueue<Notification> notificationsList =
        (ConcurrentLinkedQueue<Notification>) notification.getUserData();
    for (Iterator<Notification> iterator = notificationsList.iterator(); iterator.hasNext(); ) {
      handleNotification(iterator.next(), handback);
    }
  }
}
public Iterator<E> iterator() {
  return queue.iterator();
}
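// The iterator handed out by a delegate method like the one above is weakly
// consistent: callers can traverse it while other threads keep offering
// elements, without ever seeing ConcurrentModificationException. A minimal,
// self-contained sketch; the class name and values are illustrative only.
import java.util.concurrent.ConcurrentLinkedQueue;

public class WeaklyConsistentIterationDemo {
  public static void main(String[] args) throws InterruptedException {
    ConcurrentLinkedQueue<Integer> queue = new ConcurrentLinkedQueue<>();
    for (int i = 0; i < 5; i++) queue.add(i);

    Thread producer = new Thread(() -> {
      for (int i = 5; i < 10; i++) queue.offer(i);
    });
    producer.start();

    // Traversal never throws, but elements added during iteration
    // may or may not be observed.
    for (int value : queue) {
      System.out.println(value);
    }
    producer.join();
  }
}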
private void postTick_native(float f) {
  for (Iterator<PhysicsTickListener> it = tickListeners.iterator(); it.hasNext(); ) {
    PhysicsTickListener physicsTickCallback = it.next();
    physicsTickCallback.physicsTick(this, f);
  }
}
protected void runRefill() {
  boolean completedRound = (current != null && max != null && current > max);
  boolean needGetMinMax = (min == null || completedRound);
  if (needGetMinMax) {
    // get min & max
    fetchAll = false;
    if (startWhenIteration != 0) {
      if (logs.size() > maxLogsCount) {
        logs.remove(0);
      }
      logs.add(new Object[] {startWhenIteration, System.currentTimeMillis()});
    }
    startWhenIteration = System.currentTimeMillis();
    Object[] minMaxBounds = objectFetcher.getMinMaxBounds();
    if (minMaxBounds != null && minMaxBounds.length >= 2) {
      Object minBound = minMaxBounds[0];
      Object maxBound = minMaxBounds[1];
      if (minBound == null || maxBound == null) {
        min = null;
      } else {
        min = ((Number) minBound).longValue();
        max = ((Number) maxBound).longValue();
        if (minMaxBounds.length == 3) {
          Number count = (Number) minMaxBounds[2];
          if (count != null && count.longValue() <= minSize) {
            fetchAll = true;
          }
        }
        current = min;
        if (!fetchAll) {
          // try to autoScale
          try {
            long bulks = Math.abs(max - min) / fetchSize;
            if (maxBulksCount > 1 && bulks > maxBulksCount) {
              // guaranteed to be 1 or more by the condition above
              fetchSize = (int) (Math.abs(max - min) / maxBulksCount);
            }
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      }
    }
    if (completedRound && runThroughDataSize <= minSize) {
      // Running through the whole queue did not satisfy us;
      // in this case we need some delay before trying to fill it all again.
      runThroughNotFilled = true;
    }
    runThroughDataSize = 0;
  }
  if (!runThroughNotFilled && min != null) {
    try {
      boolean needEnd = false;
      HashMap<Integer, Integer> hashCodes =
          new HashMap<Integer, Integer>((int) (ROUGH_HASH_SIZE * queue.size()));
      Iterator it = queue.iterator();
      while (it.hasNext()) {
        Object queueElement = it.next();
        hashCodes.put(queueElement.hashCode(), queueElement.hashCode());
      }
      int tries = fillTries;
      while (!needEnd && tries-- > 0) {
        Collection collection = null;
        if (fetchAll) {
          // don't forget +1 here
          collection = objectFetcher.getRecordsCollection(min, max - min + 1);
        } else {
          collection = objectFetcher.getRecordsCollection(current, fetchSize);
        }
        if (collection != null && !collection.isEmpty()) {
          for (Iterator iterator = collection.iterator(); iterator.hasNext(); ) {
            Object o = iterator.next();
            try {
              if (o != null && !hashCodes.containsKey(o.hashCode())) {
                queue.offer(o);
                runThroughDataSize++;
                try {
                  hashCodes.put(o.hashCode(), o.hashCode());
                } catch (Exception e) {
                  e.printStackTrace();
                }
              }
            } catch (Exception e) {
              e.printStackTrace();
              queue.offer(o);
              runThroughDataSize++;
              try {
                hashCodes.put(o.hashCode(), o.hashCode());
              } catch (Exception e1) {
                e1.printStackTrace();
              }
            }
          }
        }
        current += fetchSize;
        needEnd = (queue.size() >= minSize) || current > max;
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
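// The dedup above keys a HashMap by hashCode(), so two distinct records
// whose hash codes collide are treated as the same record. A sketch of the
// same guard built on a Set of the elements themselves (equality-based);
// names and record values are illustrative only.
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;

public class RefillDedupDemo {
  public static void main(String[] args) {
    ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
    queue.offer("record-1");

    // Snapshot what is already queued, then only offer unseen records.
    Set<String> seen = new HashSet<>(queue);
    for (String fetched : new String[] {"record-1", "record-2"}) {
      if (seen.add(fetched)) { // add() returns false for duplicates
        queue.offer(fetched);
      }
    }
    System.out.println(queue); // [record-1, record-2]
  }
}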
@Override
public synchronized long claimRecords(SDFSEvent evt, LargeBloomFilter bf) throws IOException {
  if (this.isClosed()) throw new IOException("Hashtable " + this.fileName + " is closed");
  executor =
      new ThreadPoolExecutor(
          Main.writeThreads + 1,
          Main.writeThreads + 1,
          10,
          TimeUnit.SECONDS,
          worksQueue,
          new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY),
          executionHandler);
  csz = new AtomicLong(0);
  Lock l = this.gcLock.writeLock();
  l.lock();
  this.runningGC = true;
  lbf = null;
  lbf = new LargeBloomFilter(maxSz, .01);
  l.unlock();
  try {
    SDFSLogger.getLog()
        .info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
    SDFSEvent tEvt =
        SDFSEvent.claimInfoEvent(
            "Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
    tEvt.maxCt = this.maps.size();
    Iterator<ProgressiveFileByteArrayLongMap> iter = maps.iterator();
    while (iter.hasNext()) {
      tEvt.curCt++;
      ProgressiveFileByteArrayLongMap m = null;
      try {
        m = iter.next();
        executor.execute(new ClaimShard(m, bf, lbf, csz));
      } catch (Exception e) {
        tEvt.endEvent(
            "Unable to claim records for " + m + " because : [" + e.toString() + "]",
            SDFSEvent.ERROR);
        SDFSLogger.getLog().error("Unable to claim records for " + m, e);
        throw new IOException(e);
      }
    }
    executor.shutdown();
    try {
      while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
        SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
      }
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
    this.kSz.getAndAdd(-1 * csz.get());
    tEvt.endEvent("removed [" + csz.get() + "] records");
    SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
    iter = maps.iterator();
    while (iter.hasNext()) {
      ProgressiveFileByteArrayLongMap m = null;
      try {
        m = iter.next();
        if (m.isFull() && !m.isActive()) {
          double pf = (double) m.size() / (double) m.maxSize();
          // SDFSLogger.getLog().info("pfull=" + pf);
          if (pf < .4 || Double.isNaN(pf)) {
            // SDFSLogger.getLog().info("deleting " + m.toString());
            m.iterInit();
            KVPair p = m.nextKeyValue();
            while (p != null) {
              ProgressiveFileByteArrayLongMap _m = this.getWriteMap();
              try {
                _m.put(p.key, p.value);
              } catch (HashtableFullException e) {
                _m.setActive(false);
                _m = this.createWriteMap();
                _m.put(p.key, p.value);
              } finally {
                this.activeWriteMaps.offer(_m);
              }
              p = m.nextKeyValue();
            }
            int mapsz = maps.size();
            maps.remove(m);
            mapsz = mapsz - maps.size();
            // SDFSLogger.getLog().info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
            m.vanish();
            m = null;
          }
        }
      } catch (Exception e) {
        tEvt.endEvent(
            "Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
        SDFSLogger.getLog().error("Unable to compact " + m, e);
        throw new IOException(e);
      }
    }
    return csz.get();
  } finally {
    l.lock();
    this.runningGC = false;
    l.unlock();
    executor = null;
  }
}
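// Why the compaction threshold above checks Double.isNaN(pf) rather than
// pf == Double.NaN: NaN never compares equal to anything, including itself.
// A tiny, self-contained reminder (values are illustrative only).
public class NaNCheckDemo {
  public static void main(String[] args) {
    double pf = 0.0 / 0.0;                 // NaN, e.g. size() / maxSize() when maxSize() is 0
    System.out.println(pf == Double.NaN);  // false -- == never matches NaN
    System.out.println(Double.isNaN(pf));  // true
  }
}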
protected void push(Entry entry) {
  if (destroyed.get()) {
    return;
  }
  String prevMessage = entry.message.toString();
  if (!delayedBroadcast.isEmpty()) {
    Iterator<Entry> i = delayedBroadcast.iterator();
    StringBuilder b = new StringBuilder();
    while (i.hasNext()) {
      Entry e = i.next();
      e.future.cancel(true);
      try {
        // Append so we do a single flush
        if (e.message instanceof String && entry.message instanceof String) {
          b.append(e.message);
        } else {
          push(e);
        }
      } finally {
        i.remove();
      }
    }
    if (b.length() > 0) {
      entry.message = b.append(entry.message).toString();
    }
  }
  if (resources.isEmpty()) {
    trackBroadcastMessage(null, entry.message);
    if (entry.future != null) {
      entry.future.done();
    }
    return;
  }
  Object finalMsg = translate(entry.message);
  entry.message = finalMsg;
  try {
    if (entry.multipleAtmoResources == null) {
      for (AtmosphereResource<?, ?> r : resources) {
        finalMsg = perRequestFilter(r, entry);
        if (entry.writeLocally) {
          asyncWriteQueue.put(new AsyncWriteToken(r, finalMsg, entry.future));
        }
      }
    } else if (entry.multipleAtmoResources instanceof AtmosphereResource<?, ?>) {
      finalMsg = perRequestFilter((AtmosphereResource<?, ?>) entry.multipleAtmoResources, entry);
      if (entry.writeLocally) {
        asyncWriteQueue.put(
            new AsyncWriteToken(
                (AtmosphereResource<?, ?>) entry.multipleAtmoResources, finalMsg, entry.future));
      }
    } else if (entry.multipleAtmoResources instanceof Set) {
      Set<AtmosphereResource<?, ?>> sub =
          (Set<AtmosphereResource<?, ?>>) entry.multipleAtmoResources;
      for (AtmosphereResource<?, ?> r : sub) {
        finalMsg = perRequestFilter(r, entry);
        if (entry.writeLocally) {
          asyncWriteQueue.put(new AsyncWriteToken(r, finalMsg, entry.future));
        }
      }
    }
    entry.message = prevMessage;
  } catch (InterruptedException ex) {
    logger.debug(ex.getMessage(), ex);
  }
}
/**
 * Initializes the object set of this hash table.
 *
 * @return the number of entries loaded
 * @throws Exception if the table cannot be set up
 */
public long setUp() throws Exception {
  File _fs = new File(fileName);
  if (!_fs.getParentFile().exists()) {
    _fs.getParentFile().mkdirs();
  }
  SDFSLogger.getLog().info("Folder = " + _fs.getPath());
  SDFSLogger.getLog().info("Loading freebits bitset");
  long rsz = 0;
  long _tbs = maxSz / (256);
  int max = Integer.MAX_VALUE / ProgressiveFileByteArrayLongMap.EL;
  if (_tbs > max) {
    this.hashTblSz = max;
  } else if (_tbs > this.hashTblSz) {
    this.hashTblSz = (int) _tbs;
  }
  SDFSLogger.getLog()
      .info(
          "table setup max="
              + max
              + " maxsz="
              + this.maxSz
              + " _tbs="
              + _tbs
              + " hashTblSz="
              + this.hashTblSz);
  this.hashTblSz = NextPrime.getNextPrimeI((int) (this.hashTblSz));
  File[] files = _fs.getParentFile().listFiles(new DBFileFilter());
  if (files.length > 0) {
    CommandLineProgressBar bar =
        new CommandLineProgressBar("Loading Existing Hash Tables", files.length, System.out);
    this.loadEvent.maxCt = files.length + 128;
    for (int i = 0; i < files.length; i++) {
      this.loadEvent.curCt = this.loadEvent.curCt + 1;
      int sz = NextPrime.getNextPrimeI((int) (this.hashTblSz));
      // SDFSLogger.getLog().debug("will create byte array of size " + sz + " propsize was " + propsize);
      ProgressiveFileByteArrayLongMap m = null;
      String pth = files[i].getPath();
      String pfx = pth.substring(0, pth.length() - 5);
      m = new ProgressiveFileByteArrayLongMap(pfx, sz);
      long mep = m.setUp();
      if (mep > endPos) endPos = mep;
      maps.add(m);
      rsz = rsz + m.size();
      bar.update(i);
      if (!m.isFull() && this.activeWriteMaps.remainingCapacity() > 0) {
        m.setActive(true);
        this.activeWriteMaps.add(m);
        this.loadCacheExecutor.execute(m);
      } else {
        m.setActive(false);
        m.full = true;
      }
    }
    bar.finish();
  }
  this.loadEvent.shortMsg = "Loading BloomFilters";
  if (maps.size() == 0) lbf = new LargeBloomFilter(maxSz, .01);
  else {
    try {
      lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, true);
    } catch (Exception e) {
      SDFSLogger.getLog().warn("Recreating BloomFilters...");
      this.loadEvent.shortMsg = "Recreating BloomFilters";
      lbf = new LargeBloomFilter(maxSz, .01);
      executor =
          new ThreadPoolExecutor(
              Main.writeThreads,
              Main.writeThreads,
              10,
              TimeUnit.SECONDS,
              worksQueue,
              new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY),
              executionHandler);
      CommandLineProgressBar bar =
          new CommandLineProgressBar("ReCreating BloomFilters", maps.size(), System.out);
      Iterator<ProgressiveFileByteArrayLongMap> iter = maps.iterator();
      int i = 0;
      ArrayList<LBFReconstructThread> al = new ArrayList<LBFReconstructThread>();
      while (iter.hasNext()) {
        ProgressiveFileByteArrayLongMap m = iter.next();
        LBFReconstructThread th = new LBFReconstructThread(lbf, m);
        executor.execute(th);
        al.add(th);
        i++;
        bar.update(i);
      }
      executor.shutdown();
      bar.finish();
      try {
        System.out.print("Waiting for all BloomFilters creation threads to finish");
        while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
          SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
          System.out.print(".");
        }
        for (LBFReconstructThread th : al) {
          if (th.ex != null) throw th.ex;
        }
        System.out.println(" done");
      } catch (Exception e1) {
        throw new IOException(e1);
      }
    }
  }
  while (this.activeWriteMaps.remainingCapacity() > 0) {
    String guid = null;
    boolean written = false;
    while (!written) {
      guid = RandomGUID.getGuid();
      File f = new File(fileName + "-" + guid + ".keys");
      if (!f.exists()) {
        ProgressiveFileByteArrayLongMap activeWMap =
            new ProgressiveFileByteArrayLongMap(fileName + "-" + guid, this.hashTblSz);
        activeWMap.setUp();
        this.maps.add(activeWMap);
        written = true;
        activeWMap.setActive(true);
        this.activeWriteMaps.offer(activeWMap);
      }
    }
  }
  if (SDFSLogger.isDebug()) {
    long mem = MemoryMeasurer.measureBytes(lbf);
    long mmem = MemoryMeasurer.measureBytes(maps);
    SDFSLogger.getLog().debug("Large BloomFilter Size=" + StorageUnit.of(mem).format(mem));
    SDFSLogger.getLog().debug("Maps Size=" + StorageUnit.of(mmem).format(mmem));
  }
  this.loadEvent.endEvent("Loaded entries " + rsz);
  System.out.println("Loaded entries " + rsz);
  SDFSLogger.getLog().info("Active Maps " + this.activeWriteMaps.size());
  SDFSLogger.getLog().info("Loaded entries " + rsz);
  SDFSLogger.getLog().info("Loading BloomFilters " + rsz);
  this.kSz.set(rsz);
  this.closed = false;
  return size;
}