  /**
   * This function consumes the primary block of the bundle. It overrides the virtual method
   * declared in BlockProcessor.
   *
   * @param bundle Bundle whose fields are populated while consuming
   * @param block Primary block whose data is set while consuming
   * @param buffer Populated buffer to read and consume data from
   * @param len Number of bytes to consume
   * @return the number of bytes successfully consumed, or -1 on error
   */
  public int consume(Bundle bundle, BlockInfo block, IByteBuffer buffer, int len) {

    int consumed = buffer.position();
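    // Remember the incoming buffer position; it is only used in the diagnostic log near the end.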

    PrimaryBlock primary = new PrimaryBlock();

    //        buf.position(0);
    assert (!block.complete()) : TAG + ": consume() block already complete";

    Dictionary dict = bundle.recv_blocks().dict();
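    // The receive-side dictionary will hold the scheme and SSP strings used to
    // reconstruct this bundle's endpoint IDs.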

    IByteBuffer byte_buffer_temp = new SerializableByteBuffer(len);

    //      	byte_buffer_temp = BufferHelper.reserve(byte_buffer_temp, len);
    block.set_contents(byte_buffer_temp);

    BufferHelper.copy_data(
        byte_buffer_temp, byte_buffer_temp.position(), buffer, buffer.position(), len);
    byte_buffer_temp.position(byte_buffer_temp.position() + len);
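    // The incoming bytes now live in the block's own contents buffer; the parsing
    // below works on that copy.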

    IByteBuffer buf_block_content = block.contents();

    len = buf_block_content.capacity() - buf_block_content.remaining();
    int primary_len = len;
    buf_block_content.position(0);

    Log.d(TAG, " primary_len: " + primary_len + " : len:" + len);

    assert (primary_len == len) : TAG + ": consume() primary_len != len";

    primary.set_version(buf_block_content.get());

    if (primary.version() != BundleProtocol.CURRENT_VERSION) {
      Log.e(
          TAG,
          String.format(
              "protocol version mismatch %s != %s",
              primary.version(), BundleProtocol.CURRENT_VERSION));
      return -1;
    }
    len -= 1;

    try {
      // Grab the SDNVs representing the flags and the block length.
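      // An SDNV (RFC 6256) encodes an unsigned value 7 bits per byte, most significant
      // group first, with the high bit set on every byte except the last; for example,
      // 300 (0x12C) is encoded as the two bytes 0x82 0x2C. read_sdnv() returns the
      // number of bytes it consumed, which is why len is decremented by its result.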
      len -= read_sdnv(buf_block_content, primary.processing_flags());
      len -= read_sdnv(buf_block_content, primary.block_length());

      Log.d(
          TAG,
          String.format(
              "parsed primary block: version %s length %s",
              primary.version(), primary.block_length_value()));

      // Parse the flags.
      parse_bundle_flags(bundle, primary.processing_flags_value());
      parse_cos_flags(bundle, primary.processing_flags_value());
      parse_srr_flags(bundle, primary.processing_flags_value());

      // What remains in the buffer should now be equal to what the block-length
      // field advertised.
      assert (len == primary.block_length_value())
          : TAG + ": consume() data and block length not equal";

      // Set the data offset and data length now that the fixed preamble has been parsed.

      block.set_data_offset(buf_block_content.position());
      block.set_data_length((int) primary.block_length_value());
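      // The next eight SDNVs are dictionary offsets for the destination, source,
      // reply-to and custodian EIDs (a scheme offset and an SSP offset for each).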

      len -= read_sdnv(buf_block_content, primary.dest_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.dest_ssp_offset());
      len -= read_sdnv(buf_block_content, primary.source_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.source_ssp_offset());
      len -= read_sdnv(buf_block_content, primary.replyto_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.replyto_ssp_offset());
      len -= read_sdnv(buf_block_content, primary.custodian_scheme_offset());
      len -= read_sdnv(buf_block_content, primary.custodian_ssp_offset());

      len -= read_sdnv(buf_block_content, primary.creation_time());
      if (primary.creation_time_value() > Integer.MAX_VALUE) {
        Log.e(
            TAG,
            String.format(
                "creation timestamp time is too large: %s", primary.creation_time_value()));
        return -1;
      }

      len -= read_sdnv(buf_block_content, primary.creation_sequence());
      if (primary.creation_sequence_value() > Integer.MAX_VALUE) {
        Log.e(
            TAG,
            String.format(
                "creation timestamp sequence is too large: %s",
                primary.creation_sequence_value()));
        return -1;
      }

      len -= read_sdnv(buf_block_content, primary.lifetime());
      if (primary.lifetime_value() > Integer.MAX_VALUE) {
        Log.e(TAG, String.format("lifetime is too large: %s", primary.lifetime_value()));
        return -1;
      }

      len -= read_sdnv(buf_block_content, primary.dictionary_length());

      // The creation timestamp parts and the lifetime were verified above to fit
      // into a 32 bit integer, so the conversions below are safe.

      bundle.set_creation_ts(
          new BundleTimestamp(primary.creation_time_value(), primary.creation_sequence_value()));
      bundle.set_expiration((int) primary.lifetime_value());

      /*
       * Verify that we have the whole dictionary.
       */
      if (len < primary.dictionary_length_value()) {

        Log.e(
            TAG,
            String.format("primary block advertised incorrect length %s", block.data_length()));

        return -1;
      }

      /*
       * Make sure that the dictionary ends with a null byte.
       */
      if (buf_block_content.get(
              (int) (buf_block_content.position() + primary.dictionary_length_value() - 1))
          != '\0') {
        Log.e(TAG, "dictionary does not end with a NULL character! " + primary_len);
        return -1;
      }

      /*
       * Now use the dictionary buffer to parse out the various endpoint
       * identifiers, making sure that none of them peeks past the end
       * of the dictionary block.
       */
      IByteBuffer dictionary = buf_block_content;

      len -= primary.dictionary_length_value();

      Log.d(TAG, "Dict starting point :" + (primary_len - primary.dictionary_length_value()));
      //        dictionary.position((int)(primary_len-primary.dictionary_length_value()));

      dict.set_dict(dictionary, (int) primary.dictionary_length_value());
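      // From here on, dict can resolve (scheme offset, SSP offset) pairs back into EID strings.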

      Log.d(TAG, "Extract source :" + (primary_len - primary.dictionary_length_value()));

      if (!dict.extract_eid(
          bundle.source(), primary.source_scheme_offset(), primary.source_ssp_offset())) {
        Log.e(TAG, "Extract source fail:");
      } else {
        block.eid_list().add(bundle.source());
        Log.d(TAG, "Extract source :" + bundle.source().str());
      }

      if (!dict.extract_eid(
          bundle.dest(), primary.dest_scheme_offset(), primary.dest_ssp_offset())) {
        Log.e(TAG, "Extract dest fail:");
      } else {
        block.eid_list().add(bundle.dest());
        Log.d(TAG, "Extract dest :" + bundle.dest().str());
      }

      if (!dict.extract_eid(
          bundle.replyto(), primary.replyto_scheme_offset(), primary.replyto_ssp_offset())) {

        Log.e(TAG, "Extract reply fail :");
      } else {
        block.eid_list().add(bundle.replyto());
        Log.d(TAG, "Extract reply :" + bundle.replyto().str());
      }

      if (!dict.extract_eid(
          bundle.custodian(), primary.custodian_scheme_offset(), primary.custodian_ssp_offset())) {
        Log.e(TAG, "Extract custodian fail:");
      } else {
        block.eid_list().add(bundle.custodian());
        Log.d(TAG, "Extract custodian :" + bundle.custodian().str());
      }

      buf_block_content.position(
          (int) (buf_block_content.position() + primary.dictionary_length_value()));
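      // Skip past the raw dictionary bytes; for a fragment, the fragment offset and
      // original length SDNVs follow.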
      // If the bundle is a fragment, grab the fragment offset and original
      // bundle size (and make sure they fit in a 32 bit integer).
      if (bundle.is_fragment()) {

        int[] sdnv_buf = new int[1];
        sdnv_buf[0] = 0;

        len -= read_sdnv(buf_block_content, sdnv_buf);
        if (sdnv_buf[0] > Integer.MAX_VALUE) {
          Log.e(TAG, String.format("fragment offset is too large: %s", sdnv_buf[0]));
          return -1;
        }

        bundle.set_frag_offset(sdnv_buf[0]);
        sdnv_buf[0] = 0;

        len -= read_sdnv(buf_block_content, sdnv_buf);

        if (sdnv_buf[0] > Integer.MAX_VALUE) {
          Log.e(TAG, String.format("fragment original length is too large: %s", sdnv_buf[0]));
          return -1;
        }

        bundle.set_orig_length(sdnv_buf[0]);

        Log.d(
            TAG,
            String.format(
                "parsed fragmentation info: offset %s orig_len %s",
                bundle.frag_offset(),
                bundle.orig_length()));
      }

      Log.d(TAG, "primary_len: " + primary_len + " : len: " + len + " : consumed: " + consumed);

      block.set_complete(true);
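      // primary_len - len is the number of bytes actually parsed out of the block contents.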

      return primary_len - len;

    } catch (BlockProcessorTooShortException e) {
      // revert the read position before reporting the error
      buf_block_content.position(0);
      return -1;
    }
  }
  /**
   * Internal function to compute the total length of the primary block to be written to the
   * buffer.
   *
   * @param bundle Bundle to generate
   * @param dict Dictionary used to look up the offsets of the endpoint EIDs
   * @param primary PrimaryBlock data structure object
   * @return Total number of bytes required to write the primary block
   */
  protected static int get_primary_len(final Bundle bundle, Dictionary dict, PrimaryBlock primary) {
    int primary_len = 0;
    int block_len = 0;
    primary.set_dictionary_length(0);
    primary.set_block_length(0);

    /*
     * We need to figure out the total length of the primary block,
     * except for the SDNVs used to encode flags and the length itself and
     * the one byte version field.
     *
     * First, we determine the size of the dictionary by first
     * figuring out all the unique strings, and in the process,
     * remembering their offsets and summing up their lengths
     * (including the null terminator for each).
     */

    dict.get_offsets(bundle.dest(), primary.dest_scheme_offset(), primary.dest_ssp_offset());

    block_len += SDNV.encoding_len(primary.dest_scheme_offset());
    block_len += SDNV.encoding_len(primary.dest_ssp_offset());

    dict.get_offsets(bundle.source(), primary.source_scheme_offset(), primary.source_ssp_offset());

    block_len += SDNV.encoding_len(primary.source_scheme_offset());
    block_len += SDNV.encoding_len(primary.source_ssp_offset());

    dict.get_offsets(
        bundle.replyto(), primary.replyto_scheme_offset(), primary.replyto_ssp_offset());

    block_len += SDNV.encoding_len(primary.replyto_scheme_offset());
    block_len += SDNV.encoding_len(primary.replyto_ssp_offset());

    dict.get_offsets(
        bundle.custodian(), primary.custodian_scheme_offset(), primary.custodian_ssp_offset());

    block_len += SDNV.encoding_len(primary.custodian_scheme_offset());
    block_len += SDNV.encoding_len(primary.custodian_ssp_offset());

    primary.set_dictionary_length(dict.dict_length());
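    // dict_length() covers every unique scheme and SSP string collected above,
    // including each string's null terminator.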

    block_len += SDNV.encoding_len(bundle.creation_ts().seconds());
    block_len += SDNV.encoding_len(bundle.creation_ts().seqno());
    block_len += SDNV.encoding_len(bundle.expiration());

    block_len += SDNV.encoding_len(primary.dictionary_length_value());
    block_len += primary.dictionary_length_value();

    /*
     * If the bundle is a fragment, we need to include space for the
     * fragment offset and the original payload length.
     *
     * Note: Any changes to this protocol must be reflected into the
     * FragmentManager since it depends on this length when
     * calculating fragment sizes.
     */
    if (bundle.is_fragment()) {
      block_len += SDNV.encoding_len(bundle.frag_offset());
      block_len += SDNV.encoding_len(bundle.orig_length());
    }

    // Format the processing flags.
    primary.set_processing_flags(format_bundle_flags(bundle));
    primary.set_processing_flags(primary.processing_flags_value() | format_cos_flags(bundle));
    primary.set_processing_flags(primary.processing_flags_value() | format_srr_flags(bundle));

    /*
     * Finally, add up the initial preamble and the variable
     * length part.
     */

    primary.set_block_length(block_len);

    primary_len =
        (int)
            (1
                + SDNV.encoding_len(primary.processing_flags())
                + SDNV.encoding_len(primary.block_length())
                + primary.block_length_value());
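    // i.e. primary_len = 1 (version byte) + SDNV length of the processing flags
    // + SDNV length of the block length + the block_len accumulated above.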

    Log.d(TAG, "get_primary_len: for bundleid = " + bundle.bundleid() + ": " + primary_len);
    // Fill in the remaining values of 'primary' just for the sake of returning
    // a complete data structure.
    primary.set_version(BundleProtocol.CURRENT_VERSION);
    primary.set_creation_time(bundle.creation_ts().seconds());
    primary.set_creation_sequence(bundle.creation_ts().seqno());
    primary.set_lifetime(bundle.expiration());
    return primary_len;
  }
  /**
   * Generate the primary block by encoding all of its metadata and copying it into the primary
   * block's writable buffer.
   *
   * @param bundle Bundle to generate
   * @param xmit_blocks Transmit blocks of the bundle
   * @param block Primary block of the bundle to generate and write into the writable buffer
   */
  public void generate_primary(final Bundle bundle, BlockInfoVec xmit_blocks, BlockInfo block) {

    // point at the local dictionary
    Dictionary dict = xmit_blocks.dict();
    int primary_len = 0; // total length of the primary block
    PrimaryBlock primary = new PrimaryBlock();

    primary_len = get_primary_len(bundle, dict, primary);

    block.set_contents(new SerializableByteBuffer(primary_len));

    block.set_data_length((int) primary.block_length_value());
    block.set_data_offset((int) (primary_len - primary.block_length_value()));
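    // The data offset is the size of the fixed preamble (version byte plus the flags and
    // block-length SDNVs); the data length covers everything after that preamble.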
    /*
     * Advance buf and decrement len as we go through the process.
     */
    IByteBuffer buf = block.writable_contents();
    int len = primary_len;

    Log.d(TAG, String.format("generating primary: length %s", primary_len));

    // Stick the version number in the first byte.
    buf.put((byte) BundleProtocol.CURRENT_VERSION);
    len -= 1;
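    // Emit the remaining fields as SDNVs in the same order that consume() parses them.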

    len -= write_sdnv(primary.processing_flags(), buf);
    len -= write_sdnv(primary.block_length(), buf);
    len -= write_sdnv(primary.dest_scheme_offset(), buf);
    len -= write_sdnv(primary.dest_ssp_offset(), buf);
    len -= write_sdnv(primary.source_scheme_offset(), buf);
    len -= write_sdnv(primary.source_ssp_offset(), buf);
    len -= write_sdnv(primary.replyto_scheme_offset(), buf);
    len -= write_sdnv(primary.replyto_ssp_offset(), buf);
    len -= write_sdnv(primary.custodian_scheme_offset(), buf);
    len -= write_sdnv(primary.custodian_ssp_offset(), buf);
    len -= write_sdnv(bundle.creation_ts().seconds(), buf);
    len -= write_sdnv(bundle.creation_ts().seqno(), buf);
    len -= write_sdnv(bundle.expiration(), buf);
    len -= write_sdnv(primary.dictionary_length(), buf);

    // Add the dictionary.
    Log.d(TAG, "Current Buf: " + buf.position());
    Log.d(TAG, "Dict length: " + dict.dict_length());
    buf.put(dict.dict());
    // memcpy(buf, dict->dict(), dict->length());
    //        buf += dict->length();
    len -= dict.dict_length();
    Log.d(TAG, "Preparing len:" + len);
    /*
     * If the bundle is a fragment, stuff in SDNVs for the fragment
     * offset and original length.
     */

    if (bundle.is_fragment()) {
      len -= write_sdnv(bundle.frag_offset(), buf);
      Log.d(TAG, "Preparing len:" + len);

      len -= write_sdnv(bundle.orig_length(), buf);
      Log.d(TAG, "Preparing len:" + len);
    }
    /*
     * Assuming that get_primary_len is written correctly, len should
     * now be zero since we initialized it to primary_len at the
     * beginning of the function.
     */
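    // Rewind the contents buffer so the finished block can be read from the start.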

    buf.position(0);
    assert (len == 0) : TAG + ": len not ==0";
    Log.d(TAG, "Current len: " + len);
  }