/**
   * Produces a bundle by copying the requested range of the payload block (preamble and payload
   * data) into the given buffer.
   *
   * @param bundle Bundle to produce
   * @param block Payload block of the bundle
   * @param buf Buffer to copy the data into at its current position
   * @param offset Offset into the block data; tracks how many bytes have already been produced
   * @param len Number of bytes to produce
   */
  public void produce(
      final Bundle bundle, final BlockInfo block, IByteBuffer buf, int offset, int len) {

    int old_position = buf.position();
    // First copy out the specified range of the preamble

    if (offset < block.data_offset()) {
      int tocopy = Math.min(len, block.data_offset() - offset);

      BufferHelper.copy_data(buf, buf.position(), block.contents(), offset, tocopy);

      buf.position(buf.position() + tocopy);
      offset += tocopy;
      len -= tocopy;
    }

    if (len == 0) {
      buf.position(old_position);
      return;
    }

    // Adjust offset to account for the preamble
    int payload_offset = offset - block.data_offset();

    int tocopy = (int) Math.min(len, bundle.payload().length() - payload_offset);
    bundle.payload().read_data(payload_offset, tocopy, buf);

    buf.position(old_position);

    return;
  }
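  /*
   * Illustrative sketch (not from the original source): one way the produce()
   * method above could be driven to serialize a payload block in fixed-size
   * chunks. The chunk size, the helper name, and the use of
   * SerializableByteBuffer as the IByteBuffer implementation are assumptions
   * for illustration only.
   */
  void produce_in_chunks_sketch(final Bundle bundle, final BlockInfo block) {
    final int chunk_size = 1024; // hypothetical chunk size
    // total block length is the preamble (up to data_offset) plus the payload
    int total = block.data_offset() + (int) bundle.payload().length();
    int produced = 0;

    while (produced < total) {
      int len = Math.min(chunk_size, total - produced);
      IByteBuffer chunk = new SerializableByteBuffer(len);
      // produce() copies bytes [produced, produced + len) of the block into
      // chunk and restores chunk's position afterwards
      produce(bundle, block, chunk, produced, len);
      produced += len;
      // hand the chunk off to the convergence layer here ...
    }
  }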
Example #2
  /** Sends and receives any pending data over the socket. */
  @Override
  void handle_poll_activity(int timeout) {

    if (!socket_.isConnected()) {
      Log.d(TAG, "Socket is not connected");
      break_contact(ContactEvent.reason_t.BROKEN);
    }

    // if we have something to send, send it first
    if (sendbuf_.position() > 0) send_data();

    // poll to receive and process data
    try {

      num_to_read_ = read_stream_.available();
      // check that there's something to read
      if (num_to_read_ > 0) {

        Log.d(TAG, "before reading position is " + recvbuf_.position());

        java.nio.ByteBuffer temp_java_nio_buf = java.nio.ByteBuffer.allocate(recvbuf_.remaining());
        read_channel_.read(temp_java_nio_buf);

        BufferHelper.copy_data(
            recvbuf_, recvbuf_.position(), temp_java_nio_buf, 0, temp_java_nio_buf.position());

        recvbuf_.position(recvbuf_.position() + temp_java_nio_buf.position());

        if (DTNService.is_test_data_logging())
          TestDataLogger.getInstance()
              .set_downloaded_size(
                  TestDataLogger.getInstance().downloaded_size() + temp_java_nio_buf.position());

        Log.d(TAG, "buffer position now is " + recvbuf_.position());

        process_data();

        if (recvbuf_.remaining() == 0) {
          Log.e(TAG, "after process_data left no space in recvbuf!!");
        }
      }

    } catch (IOException e) {
      Log.e(TAG, "IOException, in reading data from the read_stream_:" + e.getMessage());
    }

    // send a keepalive message if one is due
    if (contact_up_ && !contact_broken_) {
      check_keepalive();
    }

    if (!contact_broken_) check_timeout();
  }
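  /*
   * Illustrative sketch (not the project's BufferHelper): the copy_data()
   * calls above appear to perform an absolute copy between explicit offsets
   * that leaves both buffers' positions untouched, which is why the callers
   * advance the positions themselves afterwards. Only the
   * IByteBuffer-to-IByteBuffer overload is sketched here, and index-based
   * get/put on IByteBuffer is assumed for illustration only.
   */
  static void copy_data_sketch(
      IByteBuffer dst, int dst_offset, IByteBuffer src, int src_offset, int len) {
    for (int i = 0; i < len; i++) {
      // absolute get/put: neither buffer's position is modified
      dst.put(dst_offset + i, src.get(src_offset + i));
    }
  }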
Example #3
  @Override
  void send_data() {

    int last_position = sendbuf_.position();
    sendbuf_.rewind();
    try {

      Log.d(TAG, "Going to write " + last_position + " bytes to the stream");
      java.nio.ByteBuffer temp = java.nio.ByteBuffer.allocate(last_position);
      BufferHelper.copy_data(temp, 0, sendbuf_, 0, last_position);

      WriteSocketTimeoutTimer write_socket_timeout_timer =
          new WriteSocketTimeoutTimer(write_channel_);

      Log.d(TAG, "scheduling write_timeout_task in " + SOCKET_TIMEOUT + " seconds");
      try {
        // schedule the timer so a stalled write cannot block past the socket timeout
        write_socket_timeout_timer.schedule_in(SOCKET_TIMEOUT);

      } catch (IllegalStateException e) {
        Log.e(TAG, "write socket timer stop when it shouldn't be stopped");
      }
      write_channel_.write(temp);

      // cancel the timer; reaching this point means the write was successful
      write_socket_timeout_timer.cancel();
      write_socket_timeout_timer = null;

      // rewind the send buffer so the next write starts filling it from the beginning
      sendbuf_.rewind();

      if (DTNService.is_test_data_logging())
        TestDataLogger.getInstance()
            .set_uploaded_size(TestDataLogger.getInstance().uploaded_size() + last_position);

    } catch (AsynchronousCloseException e) {
      Log.e(TAG, "another thread close the channel because of the timeout");
      break_contact(reason_t.CL_ERROR);
      sendbuf_.position(last_position);
    } catch (IOException e) {

      Log.e(TAG, "writting broken pipe");
      break_contact(reason_t.CL_ERROR);
      sendbuf_.position(last_position);
    }
  }
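  /*
   * Illustrative sketch (not the project's WriteSocketTimeoutTimer): the
   * timeout pattern used around write_channel_.write() above. If the blocking
   * write does not finish in time, a timer task closes the channel from
   * another thread, which makes the pending write() throw
   * AsynchronousCloseException and lands send_data() in the catch block that
   * breaks the contact. The java.util.Timer base and the WritableByteChannel
   * type are assumptions for illustration only.
   */
  static class WriteTimeoutSketch extends java.util.TimerTask {
    private final java.nio.channels.WritableByteChannel channel_;
    private final java.util.Timer timer_ = new java.util.Timer();

    WriteTimeoutSketch(java.nio.channels.WritableByteChannel channel) {
      channel_ = channel;
    }

    // schedule the close after the given number of seconds; Timer.schedule()
    // throws IllegalStateException if the task was already scheduled or cancelled
    void schedule_in(int seconds) {
      timer_.schedule(this, seconds * 1000L);
    }

    @Override
    public void run() {
      try {
        channel_.close(); // interrupts the write still blocked on this channel
      } catch (java.io.IOException ignored) {
        // nothing useful to do if the close itself fails
      }
    }

    @Override
    public boolean cancel() {
      timer_.cancel(); // the write finished in time; stop the timer thread
      return super.cancel();
    }
  }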
  /**
   * Consumes the primary block of the bundle. This overrides the virtual method from
   * BlockProcessor.
   *
   * @param bundle Bundle to set data on after consuming
   * @param block Primary block to set data on after consuming
   * @param buffer Populated buffer holding the data to consume
   * @param len Number of bytes to consume
   * @return Number of bytes successfully consumed, or -1 in case of error
   */
  public int consume(Bundle bundle, BlockInfo block, IByteBuffer buffer, int len) {

    int consumed = buffer.position();

    PrimaryBlock primary = new PrimaryBlock();

    //        buf.position(0);
    assert (!block.complete()) : TAG + ": consume() block already complete";

    Dictionary dict = bundle.recv_blocks().dict();

    IByteBuffer byte_buffer_temp = new SerializableByteBuffer(len);

    //      	byte_buffer_temp = BufferHelper.reserve(byte_buffer_temp, len);
    block.set_contents(byte_buffer_temp);

    BufferHelper.copy_data(
        byte_buffer_temp, byte_buffer_temp.position(), buffer, buffer.position(), len);
    byte_buffer_temp.position(byte_buffer_temp.position() + len);

    IByteBuffer buf_block_content = block.contents();

    int primary_len = len = buf_block_content.capacity() - buf_block_content.remaining();
    buf_block_content.position(0);

    Log.d(TAG, " primary_len: " + primary_len + " : len:" + len);

    assert (primary_len == len) : TAG + ": consume() primary_len != len";

    primary.set_version(buf_block_content.get());

    if (primary.version() != BundleProtocol.CURRENT_VERSION) {
      Log.e(
          TAG,
          String.format(
              "protocol version mismatch %s != %s",
              primary.version(), BundleProtocol.CURRENT_VERSION));
      return -1;
    }
    len -= 1;

    try {
      // Grab the SDNVs representing the flags and the block length.
      len -= read_sdnv(buf_block_content, primary.processing_flags());
      len -= read_sdnv(buf_block_content, primary.block_length());

      Log.d(
          TAG,
          String.format(
              "parsed primary block: version %s length %s",
              primary.version(), block.data_length()));

      // Parse the flags.
      parse_bundle_flags(bundle, primary.processing_flags_value());
      parse_cos_flags(bundle, primary.processing_flags_value());
      parse_srr_flags(bundle, primary.processing_flags_value());

      // What remains in the buffer should now be equal to what the block-length
      // field advertised.
      assert (len == block.data_length()) : TAG + ": consume() data and block length not equal";

      // set data_offset

      block.set_data_offset(buf_block_content.position());
      block.set_data_length((int) primary.block_length_value());

      len -= read_sdnv(buf_block_content, primary.dest_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.dest_ssp_offset());
      len -= read_sdnv(buf_block_content, primary.source_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.source_ssp_offset());
      len -= read_sdnv(buf_block_content, primary.replyto_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.replyto_ssp_offset());
      len -= read_sdnv(buf_block_content, primary.custodian_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.custodian_ssp_offset());

      len -= read_sdnv(buf_block_content, primary.creation_time());
      if (primary.creation_time_value() > Integer.MAX_VALUE) {
        Log.e(
            TAG,
            String.format(
                "creation timestamp time is too large: %s", primary.creation_time_value()));
        return -1;
      }

      len -= read_sdnv(buf_block_content, primary.creation_sequence());
      if (primary.creation_sequence_value() > Integer.MAX_VALUE) {
        Log.e(
            TAG,
            String.format(
                "creation timestamp sequence is too large: %s", primary.creation_sequence()));
        return -1;
      }

      len -= read_sdnv(buf_block_content, primary.lifetime());
      if (primary.lifetime_value() > Integer.MAX_VALUE) {
        Log.e(TAG, String.format("lifetime is too large: %s", primary.lifetime));
        return -1;
      }

      len -= read_sdnv(buf_block_content, primary.dictionary_length());

      // Make sure that the creation timestamp parts and the lifetime fit into
      // a 32 bit integer.

      bundle.set_creation_ts(
          new BundleTimestamp(primary.creation_time_value(), primary.creation_sequence_value()));
      bundle.set_expiration((int) primary.lifetime_value());

      /*
       * Verify that we have the whole dictionary.
       */
      if (len < primary.dictionary_length_value()) {

        Log.e(
            TAG,
            String.format("primary block advertised incorrect length %s", block.data_length()));

        return -1;
      }

      /*
       * Make sure that the dictionary ends with a null byte.
       */
      if (buf_block_content.get(
              (int) (buf_block_content.position() + primary.dictionary_length_value() - 1))
          != '\0') {
        Log.e(TAG, "dictionary does not end with a NULL character! " + primary_len);
        return -1;
      }

      /*
       * Now use the dictionary buffer to parse out the various endpoint
       * identifiers, making sure that none of them peeks past the end
       * of the dictionary block.
       */
      IByteBuffer dictionary = buf_block_content;

      len -= primary.dictionary_length_value();

      Log.d(TAG, "Dict starting point :" + (primary_len - primary.dictionary_length_value()));
      //        dictionary.position((int)(primary_len-primary.dictionary_length_value()));

      dict.set_dict(dictionary, (int) primary.dictionary_length_value());

      Log.d(TAG, "Extract source :" + (primary_len - primary.dictionary_length_value()));

      if (!dict.extract_eid(
          bundle.source(), primary.source_scheme_offset(), primary.source_ssp_offset())) {
        Log.e(TAG, "Extract source fail:");
      } else {
        block.eid_list().add(bundle.source());
        Log.d(TAG, "Extract source :" + bundle.source().str());
      }

      if (!dict.extract_eid(
          bundle.dest(), primary.dest_scheme_offset(), primary.dest_ssp_offset())) {
        Log.e(TAG, "Extract dest fail:");
      } else {
        block.eid_list().add(bundle.dest());
        Log.d(TAG, "Extract dest :" + bundle.dest().str());
      }

      if (!dict.extract_eid(
          bundle.replyto(), primary.replyto_scheme_offset(), primary.replyto_ssp_offset())) {

        Log.e(TAG, "Extract reply fail :");
      } else {
        block.eid_list().add(bundle.replyto());
        Log.d(TAG, "Extract reply :" + bundle.replyto().str());
      }

      if (!dict.extract_eid(
          bundle.custodian(), primary.custodian_scheme_offset(), primary.custodian_ssp_offset())) {
        Log.e(TAG, "Extract custodian fail:");
      } else {
        block.eid_list().add(bundle.custodian());
        Log.d(TAG, "Extract custodian :" + bundle.custodian().str());
      }

      buf_block_content.position(
          (int) (buf_block_content.position() + primary.dictionary_length_value()));
      // If the bundle is a fragment, grab the fragment offset and original
      // bundle size (and make sure they fit in a 32 bit integer).
      if (bundle.is_fragment()) {

        int[] sdnv_buf = new int[1];
        sdnv_buf[0] = 0;

        len -= read_sdnv(buf_block_content, sdnv_buf);
        if (sdnv_buf[0] > Integer.MAX_VALUE) {
          Log.e(TAG, String.format("fragment offset is too large: %s", sdnv_buf[0]));
          return -1;
        }

        bundle.set_frag_offset(sdnv_buf[0]);
        sdnv_buf[0] = 0;

        len -= read_sdnv(buf_block_content, sdnv_buf);

        if (sdnv_buf[0] > Integer.MAX_VALUE) {
          Log.e(TAG, String.format("fragment original length is too large: %s", sdnv_buf));
          return -1;
        }

        bundle.set_orig_length(sdnv_buf[0]);

        Log.d(
            TAG,
            String.format(
                "parsed fragmentation info: offset %s orig_len %s",
                bundle.frag_offset(), bundle.orig_length()));
      }

      Log.d(TAG, "primary_len: " + primary_len + " : ln" + len + ": Consumed" + consumed);

      block.set_complete(true);

      return primary_len - len;

    } catch (BlockProcessorTooShortException e) {
      // revert position
      buf_block_content.position();
      return -1;
    }
  }
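  /*
   * Illustrative sketch (not the project's read_sdnv()): SDNV decoding in the
   * style of RFC 5050 / RFC 6256, which is what the primary-block parser above
   * relies on. Each byte carries 7 value bits and the high bit marks
   * continuation; the return value is the number of bytes consumed so the
   * caller can subtract it from len. The long[] value holder, the helper name,
   * and the no-argument exception constructor are assumptions for illustration
   * only.
   */
  static int read_sdnv_sketch(IByteBuffer buf, long[] value)
      throws BlockProcessorTooShortException {
    long result = 0;
    int bytes_read = 0;
    while (true) {
      if (buf.remaining() == 0) {
        // ran out of data in the middle of an SDNV: the block is not complete yet
        throw new BlockProcessorTooShortException();
      }
      int b = buf.get() & 0xff; // normalize to an unsigned byte value
      bytes_read++;
      result = (result << 7) | (b & 0x7f); // append the low 7 bits
      if ((b & 0x80) == 0) {
        break; // high bit clear: this was the last byte of the SDNV
      }
    }
    value[0] = result;
    return bytes_read;
  }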