// Flushes every redo-log entry added since the last successful save to disk.
// Synchronized so concurrent savers observe a consistent lastSyncKey/pos pair.
synchronized void save() {
    Long lastKey = this.lastSyncKey;
    // A null lastSyncKey means nothing has been persisted yet, so take the whole
    // map; otherwise take only entries strictly after the last saved key
    // (tailMap with inclusive == false).
    Set<Entry<Long, RedoLogValue>> entrySet = lastKey == null ? skipListMap.entrySet()
            : skipListMap.tailMap(lastKey, false).entrySet();
    if (!entrySet.isEmpty()) {
        WriteBuffer buff = WriteBufferPool.poll();
        try {
            // Serialize each pending entry; lastKey tracks the highest key written.
            for (Entry<Long, RedoLogValue> e : entrySet) {
                lastKey = e.getKey();
                keyType.write(buff, lastKey);
                valueType.write(buff, e.getValue());
            }
            int chunkLength = buff.position();
            if (chunkLength > 0) {
                buff.limit(chunkLength);
                buff.position(0);
                // Append the serialized chunk at the current file offset, advance
                // the offset, and force the data to stable storage.
                fileStorage.writeFully(pos, buff.getBuffer());
                pos += chunkLength;
                fileStorage.sync();
            }
            // Reached only if write/sync did not throw: advance the sync watermark.
            this.lastSyncKey = lastKey;
        } finally {
            // Always return the pooled buffer, even when the write fails.
            WriteBufferPool.offer(buff);
        }
    }
}
public void fetch() throws IOException { long start = System.currentTimeMillis(); LOG.info("Processing " + file); int cnt = 0; try { BufferedReader data = new BufferedReader(new InputStreamReader(new FileInputStream(file))); String line; while ((line = data.readLine()) != null) { String[] arr = line.split("\t"); long id = Long.parseLong(arr[0]); String username = (arr.length > 1) ? arr[1] : "a"; String url = getUrl(id, username); connections.incrementAndGet(); crawlURL(url, new TweetFetcherHandler(id, username, url, 0, !this.noFollow)); cnt++; if (cnt % TWEET_BLOCK_SIZE == 0) { LOG.info(cnt + " requests submitted"); } } } catch (IOException e) { e.printStackTrace(); } // Wait for the last requests to complete. LOG.info("Waiting for remaining requests (" + connections.get() + ") to finish!"); while (connections.get() > 0) { try { Thread.sleep(1000); } catch (Exception e) { e.printStackTrace(); } } asyncHttpClient.close(); long end = System.currentTimeMillis(); long duration = end - start; LOG.info("Total request submitted: " + cnt); LOG.info(crawl.size() + " tweets fetched in " + duration + "ms"); LOG.info("Writing tweets..."); int written = 0; Configuration conf = new Configuration(); OutputStreamWriter out = new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(output))); for (Map.Entry<Long, String> entry : crawl.entrySet()) { written++; out.write(entry.getValue() + "\n"); } out.close(); LOG.info(written + " statuses written."); LOG.info("Done!"); }
protected void cleanupCalls(long rpcTimeout) { Iterator<Entry<Integer, Call>> itor = calls.entrySet().iterator(); while (itor.hasNext()) { Call c = itor.next().getValue(); long waitTime = System.currentTimeMillis() - c.getStartTime(); if (waitTime >= rpcTimeout) { if (this.closeException == null) { // There may be no exception in the case that there are many calls // being multiplexed over this connection and these are succeeding // fine while this Call object is taking a long time to finish // over on the server; e.g. I just asked the regionserver to bulk // open 3k regions or its a big fat multiput into a heavily-loaded // server (Perhaps this only happens at the extremes?) this.closeException = new CallTimeoutException( "Call id=" + c.id + ", waitTime=" + waitTime + ", rpcTimetout=" + rpcTimeout); } c.setException(this.closeException); synchronized (c) { c.notifyAll(); } itor.remove(); } else { break; } } try { if (!calls.isEmpty()) { Call firstCall = calls.get(calls.firstKey()); long maxWaitTime = System.currentTimeMillis() - firstCall.getStartTime(); if (maxWaitTime < rpcTimeout) { rpcTimeout -= maxWaitTime; } } if (!shouldCloseConnection.get()) { closeException = null; if (socket != null) { socket.setSoTimeout((int) rpcTimeout); } } } catch (SocketException e) { LOG.debug("Couldn't lower timeout, which may result in longer than expected calls"); } }
/**
 * Gathers every cached page that has a write-group slot occupied, records the
 * whole set in the write-ahead log via {@code logDirtyPages}, and returns it.
 * Returns an empty set immediately when no write-ahead log is configured.
 *
 * @return the set of dirty pages that was logged (possibly empty)
 * @throws IOException if the write-ahead log cannot record the pages
 */
public Set<ODirtyPage> logDirtyPagesTable() throws IOException {
    synchronized (syncObject) {
        if (writeAheadLog == null)
            return Collections.emptySet();

        // Each write group holds up to 16 page slots, hence the capacity hint.
        final Set<ODirtyPage> dirtyPages = new HashSet<ODirtyPage>(writeGroups.size() * 16);

        for (Map.Entry<GroupKey, WriteGroup> groupEntry : writeGroups.entrySet()) {
            final GroupKey key = groupEntry.getKey();
            final WriteGroup group = groupEntry.getValue();

            for (int slot = 0; slot < 16; slot++) {
                final OCachePointer pointer = group.pages[slot];
                if (pointer == null)
                    continue; // empty slot — nothing cached here

                final OLogSequenceNumber flushedLsn = pointer.getLastFlushedLsn();
                final String fileName = files.get(key.fileId).getName();
                // Global page index: 16 pages per group, plus the slot offset.
                final long pageIndex = (key.groupIndex << 4) + slot;
                dirtyPages.add(new ODirtyPage(fileName, pageIndex, flushedLsn));
            }
        }

        writeAheadLog.logDirtyPages(dirtyPages);
        return dirtyPages;
    }
}
/**
 * Exposes all redo-log entries currently held in the backing map.
 *
 * @return the entry set of the backing skip-list map
 */
Set<Entry<Long, RedoLogValue>> entrySet() {
    // Pure delegation to the backing map; no copying is performed.
    final Set<Entry<Long, RedoLogValue>> entries = skipListMap.entrySet();
    return entries;
}
/**
 * Returns an iterator over the redo-log entries starting at {@code from}.
 *
 * @param from first key to include, or {@code null} to iterate from the
 *             beginning of the map; note that single-argument {@code tailMap}
 *             is inclusive of {@code from} itself
 * @return an iterator over the selected entries
 */
Iterator<Entry<Long, RedoLogValue>> cursor(Long from) {
    if (from == null) {
        return skipListMap.entrySet().iterator();
    }
    return skipListMap.tailMap(from).entrySet().iterator();
}