private void doReadAheadLoads() {
  List to_submit = null;

  try {
    lock_mon.enter();

    while (loading_messages.size() + queued_messages.size() < request_read_ahead
        && !requests.isEmpty()
        && !destroyed) {

      DiskManagerReadRequest dmr = (DiskManagerReadRequest) requests.removeFirst();

      loading_messages.add(dmr);

      if (to_submit == null) to_submit = new ArrayList();

      to_submit.add(dmr);
    }
  } finally {
    lock_mon.exit();
  }

  /*
  if ( peer.getIp().equals( "64.71.5.2")){
    TimeFormatter.milliTrace(
        "obt read_ahead: -> " + (to_submit==null?0:to_submit.size()) +
        " [lo=" + loading_messages.size() + ",qm=" + queued_messages.size() +
        ",re=" + requests.size() + ",rl=" + request_read_ahead + "]");
  }
  */

  if (to_submit != null) {
    for (int i = 0; i < to_submit.size(); i++) {
      peer.getManager()
          .getAdapter()
          .enqueueReadRequest(peer, (DiskManagerReadRequest) to_submit.get(i), read_req_listener);
    }
  }
}
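/*
 * Read-ahead overview: a request appears to move through three collections here --
 * "requests" (pending, not yet loading), "loading_messages" (submitted to the disk
 * manager, data not yet available) and "queued_messages" (data loaded and queued for
 * sending). The loop above keeps roughly
 *
 *     loading_messages.size() + queued_messages.size() <= request_read_ahead
 *
 * entries in flight, and the enqueueReadRequest calls are made only after lock_mon.exit(),
 * presumably so the disk submissions don't happen while holding the lock.
 */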
public Message[] removeDecodedMessages() {
  if (messages_last_read.isEmpty()) return null;

  Message[] msgs = (Message[]) messages_last_read.toArray(new Message[messages_last_read.size()]);

  messages_last_read.clear();

  return msgs;
}
/**
 * Get the piece numbers currently being requested.
 *
 * @return array of requested piece numbers
 */
public int[] getRequestedPieceNumbers() {
  if (destroyed) return new int[0];

  // Cheap hack to reduce (but not remove all) the # of duplicate entries
  int iLastNumber = -1;
  int pos = 0;
  int[] pieceNumbers;

  try {
    lock_mon.enter();

    // allocate max size needed (we'll shrink it later)
    pieceNumbers = new int[queued_messages.size() + loading_messages.size() + requests.size()];

    for (Iterator iter = queued_messages.keySet().iterator(); iter.hasNext(); ) {
      BTPiece msg = (BTPiece) iter.next();
      if (iLastNumber != msg.getPieceNumber()) {
        iLastNumber = msg.getPieceNumber();
        pieceNumbers[pos++] = iLastNumber;
      }
    }

    for (Iterator iter = loading_messages.iterator(); iter.hasNext(); ) {
      DiskManagerReadRequest dmr = (DiskManagerReadRequest) iter.next();
      if (iLastNumber != dmr.getPieceNumber()) {
        iLastNumber = dmr.getPieceNumber();
        pieceNumbers[pos++] = iLastNumber;
      }
    }

    for (Iterator iter = requests.iterator(); iter.hasNext(); ) {
      DiskManagerReadRequest dmr = (DiskManagerReadRequest) iter.next();
      if (iLastNumber != dmr.getPieceNumber()) {
        iLastNumber = dmr.getPieceNumber();
        pieceNumbers[pos++] = iLastNumber;
      }
    }
  } finally {
    lock_mon.exit();
  }

  int[] trimmed = new int[pos];
  System.arraycopy(pieceNumbers, 0, trimmed, 0, pos);

  return trimmed;
}
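/*
 * Note that the iLastNumber check only filters *consecutive* duplicates, so the returned
 * array can still contain repeats when requests for the same piece are interleaved with
 * others. Illustrative example: requests for pieces [5, 5, 6, 5] come back as [5, 6, 5],
 * not [5, 6].
 */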
private void sendPendingHaves() {
  if (destroyed) {
    return;
  }

  try {
    pending_haves_mon.enter();

    int num_haves = pending_haves.size();

    if (num_haves == 0) {
      return;
    }

    // a single have, or an AZ have version that's too old -> send individual BT haves
    if (num_haves == 1 || az_have_version < BTMessageFactory.MESSAGE_VERSION_SUPPORTS_PADDING) {
      for (int i = 0; i < num_haves; i++) {
        Integer piece_num = (Integer) pending_haves.get(i);

        outgoing_message_q.addMessage(new BTHave(piece_num.intValue(), bt_have_version), true);
      }
    } else {
      int[] piece_numbers = new int[num_haves];

      for (int i = 0; i < num_haves; i++) {
        piece_numbers[i] = ((Integer) pending_haves.get(i)).intValue();
      }

      outgoing_message_q.addMessage(new AZHave(piece_numbers, az_have_version), true);
    }

    outgoing_message_q.doListenerNotifications();

    pending_haves.clear();
  } finally {
    pending_haves_mon.exit();
  }
}
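/*
 * Aggregation note: when more than one have is pending and az_have_version is high enough,
 * all pending piece numbers are folded into a single AZHave message rather than one BTHave
 * per piece, trading many small messages for one larger one. The exact savings depend on
 * the AZ message framing, so this is a per-message-overhead argument, not an exact byte count.
 */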
/**
 * Queue a new have message for aggregated sending.
 *
 * @param piece_number of the have message
 * @param force if true, send this and any other pending haves right away
 */
public void queueHaveMessage(int piece_number, boolean force) {
  if (destroyed) return;

  try {
    pending_haves_mon.enter();

    pending_haves.add(new Integer(piece_number));

    if (force) {
      sendPendingHaves();
    } else {
      int pending_bytes = pending_haves.size() * 9;

      if (pending_bytes >= outgoing_message_q.getMssSize()) {
        // System.out.println("enough pending haves for a full packet!");
        // there are enough pending bytes to fill a packet payload
        sendPendingHaves();
      }
    }
  } finally {
    pending_haves_mon.exit();
  }
}
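/*
 * Sizing note: the "* 9" appears to assume the BT HAVE wire size of 9 bytes per message
 * (4-byte length prefix + 1-byte message id + 4-byte piece number). Once the estimate
 * reaches the queue's MSS, the batch is flushed as roughly one full packet payload; e.g.
 * with a hypothetical 1400-byte MSS that's a flush after roughly 1400 / 9 = 156 queued
 * haves, unless a caller forces an earlier flush.
 */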
public ByteBuffer destroy() {
  if (destroyed) {
    Debug.out("Trying to redestroy message decoder, stack trace follows: " + this);
    Debug.outStackTrace();
  }

  is_paused = true;
  destroyed = true;

  // There's a concurrency issue with the decoder whereby it can be destroyed while still being
  // messed with. Don't have the energy to look into it properly atm so just try to ensure that
  // it doesn't bork too badly (parg: 29/04/2012)
  // only occasional but does have potential to generate direct buffer mem leak ;(

  int lbuff_read = 0;
  int pbuff_read = 0;

  length_buffer.limit(SS, 4);

  DirectByteBuffer plb = payload_buffer;

  if (reading_length_mode) {
    lbuff_read = length_buffer.position(SS);
  } else { // reading payload
    length_buffer.position(SS, 4);
    lbuff_read = 4;
    pbuff_read = plb == null ? 0 : plb.position(SS);
  }

  ByteBuffer unused = ByteBuffer.allocate(lbuff_read + pbuff_read); // TODO convert to direct?

  length_buffer.flip(SS);

  unused.put(length_buffer.getBuffer(SS));

  try {
    if (plb != null) {
      plb.flip(SS);
      unused.put(plb.getBuffer(SS)); // Got a buffer overflow exception here in the past - related to PEX?
    }
  } catch (RuntimeException e) {
    Debug.out("hit known threading issue");
  }

  unused.flip();

  length_buffer.returnToPool();

  if (plb != null) {
    plb.returnToPool();
    payload_buffer = null;
  }

  try {
    for (int i = 0; i < messages_last_read.size(); i++) {
      Message msg = (Message) messages_last_read.get(i);
      msg.destroy();
    }
  } catch (RuntimeException e) {
    // happens if messages modified by alt thread...
    Debug.out("hit known threading issue");
  }

  messages_last_read.clear();

  return unused;
}
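/*
 * The returned ByteBuffer holds whatever bytes had already been read into the length/payload
 * buffers but not yet decoded; presumably a caller that is swapping decoders can hand these
 * leftover bytes to the replacement decoder instead of losing them.
 */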
public boolean isStalledPendingLoad() {
  return queued_messages.size() == 0 && loading_messages.size() > 0;
}

public int getRequestCount() {
  return queued_messages.size() + loading_messages.size() + requests.size();
}