Example #1
 /** {@inheritDoc} */
 public void decodeHeader() {
   // XXX check signature?
   // SIGNATURE ("FLV"), let's just skip the three bytes
   fillBuffer(9);
   header = new FLVHeader();
   in.skip(3);
   header.setVersion(in.get());
   header.setTypeFlags(in.get());
   header.setDataOffset(in.getInt());
   if (log.isDebugEnabled()) {
     log.debug("Header: " + header.toString());
   }
 }
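The XXX note above asks whether the signature should be validated rather than skipped. A minimal sketch of such a check, reusing fillBuffer and in from the example; this is illustrative, not the project's actual validation code:

/** Illustrative only: one way to resolve the XXX above and validate the signature. */
private void checkSignature() {
  fillBuffer(3);
  byte[] signature = new byte[3];
  in.get(signature); // an FLV stream starts with the bytes 'F', 'L', 'V'
  if (signature[0] != 'F' || signature[1] != 'L' || signature[2] != 'V') {
    throw new RuntimeException("Not an FLV stream");
  }
}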
Example #2
  @Override
  public IMessage recognize(ByteBuffer buf) throws MessageParseException {
    // Not enough data yet; at least 4 readable bytes (the minimum message length) are required
    if (buf.remaining() < IMessage.MIN_MESSAGE_LENGTH) {
      return null;
    }

    // Read the first 4 bytes of the header
    int len = buf.getShort(); // expected total message length
    short type = buf.getShort();
    if (type == MessageType.FLASH_POLICY) {
      // Flash policy request
      boolean finished = false;
      // Look for the terminating '\0' byte
      while (buf.remaining() > 0) {
        byte b = buf.get();
        if (b == 0) {
          finished = true;
          break;
        }
      }
      if (finished) {
        // The complete policy request has been received
      } else {
        // Incomplete; try again on the next read
        return null;
      }
    } else {
      // Ordinary message; check that the full body has arrived
      if (buf.remaining() < len - IMessage.HEADER_SIZE) {
        return null;
      }
    }
    return createMessage(type);
  }
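Note that recognize() consumes the 4 header bytes before it can tell whether the whole message has arrived, so a caller must rewind the buffer on a null result. A minimal driver sketch, assuming only that null means "incomplete"; the Recognizer interface here is a hypothetical stand-in, not part of the original API:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Illustrative driver for a recognizer like the one above.
public final class RecognizeLoop {
  interface Recognizer {
    Object recognize(ByteBuffer buf);
  }

  static List<Object> drain(Recognizer r, ByteBuffer buf) {
    List<Object> out = new ArrayList<Object>();
    while (buf.hasRemaining()) {
      buf.mark();        // remember the start of the candidate message
      Object msg = r.recognize(buf);
      if (msg == null) {
        buf.reset();     // incomplete: rewind so the same bytes are re-read later
        break;           // wait for the next network read
      }
      out.add(msg);
    }
    return out;
  }
}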
Example #3
  /** Tests that a big message gets reassembled by the filter. */
  public void testBigReceive() {
    // Receive a 62KB message delivered in 1KB chunks
    int len = 62 * 1024;
    byte[] expected = new byte[len];
    sizeBuf.putInt(len).flip();
    harness.recv(sizeBuf.asReadOnlyBuffer());

    ByteBuffer buf = ByteBuffer.wrap(getByteSequence(1024)).asReadOnlyBuffer();

    // Deliver the 62 chunks one at a time
    for (int i = 0; i < 62; ++i) {
      assertEquals(0, sentMessages.size());
      assertEquals(0, receivedMessages.size());

      buf.rewind();
      buf.get(expected, i * 1024, 1024);
      buf.rewind();
      harness.recv(buf);
    }

    assertEquals(0, sentMessages.size());
    assertEquals(1, receivedMessages.size());

    byte[] actual = receivedMessages.get(0);

    assertTrue("Incorrect recv!", Arrays.equals(actual, expected));
  }
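getByteSequence is not shown in the example. Any deterministic pattern satisfies its use above; a hypothetical implementation, for completeness:

/** Hypothetical helper matching its use above: a deterministic test pattern. */
private static byte[] getByteSequence(int len) {
  byte[] bytes = new byte[len];
  for (int i = 0; i < len; i++) {
    bytes[i] = (byte) i; // wraps every 256 bytes; any fixed pattern would do
  }
  return bytes;
}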
Example #4
 /** Reads a length-prefixed string: a two-byte length followed by the encoded bytes. */
 protected static String readString(String charset, ByteBuffer buffer) {
   short len = buffer.getShort();
   byte[] bytes = new byte[len];
   buffer.get(bytes);
   try {
     return new String(bytes, charset);
   } catch (UnsupportedEncodingException e) {
     // unknown charset name; log the error and fall back to an empty string
     e.printStackTrace();
     return "";
   }
 }
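For symmetry, a hypothetical writer for the same layout; writeString is an assumed name and does not appear in the source:

/** Hypothetical counterpart to readString(): two-byte length prefix, then the encoded bytes. */
protected static void writeString(String charset, ByteBuffer buffer, String value) {
  try {
    byte[] bytes = value.getBytes(charset);
    buffer.putShort((short) bytes.length); // length prefix; assumes the length fits in a short
    buffer.put(bytes);
  } catch (UnsupportedEncodingException e) {
    throw new IllegalArgumentException("unknown charset: " + charset, e);
  }
}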
Example #5
 /**
  * Parses a PropertyBag from a ByteBuffer.
  *
  * @param buf a MINA ByteBuffer, used because it supports auto-expansion
  * @return the decoded PropertyBag
  */
 public static PropertyBag readBuffer(ByteBuffer buf) {
   PropertyBag bag = new PropertyBag();
   short fieldCount = buf.getShort();
   for (int i = 0; i < fieldCount; i++) {
     int property = buf.getInt();
     ValueType type = getTypeCode(buf.get());
     Object val = ProtocolUtils.readValueFromPkt(buf, type);
     bag.put(property, val);
   }
   return bag;
 }
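The wire layout read here is a two-byte field count followed by repeated entries of a 4-byte property id, a 1-byte type code, and a typed value. A hypothetical writer for the same layout; bag.entries(), ValueType.of, type.getCode(), and ProtocolUtils.writeValueToPkt are assumed names for illustration only:

// Hypothetical counterpart to readBuffer(); all helper names below are assumed.
public static void writeBuffer(ByteBuffer buf, PropertyBag bag) {
  buf.putShort((short) bag.size());          // field count
  for (Map.Entry<Integer, Object> e : bag.entries()) {
    buf.putInt(e.getKey());                  // 4-byte property id
    ValueType type = ValueType.of(e.getValue());
    buf.put(type.getCode());                 // 1-byte type code
    ProtocolUtils.writeValueToPkt(buf, type, e.getValue());
  }
}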
Example #6
  public int getAudioCodecId() {
    KeyFrameMeta meta = analyzeKeyFrames();
    if (meta == null) return -1;

    long old = getCurrentPosition();
    setCurrentPosition(firstAudioTag);
    readTagHeader();
    fillBuffer(1);
    byte frametype = in.get();
    setCurrentPosition(old);
    return frametype & MASK_SOUND_FORMAT;
  }
Example #7
  public int getVideoCodecId() {
    KeyFrameMeta meta = analyzeKeyFrames();
    if (meta == null) return -1;

    long old = getCurrentPosition();
    setCurrentPosition(firstVideoTag);
    readTagHeader();
    fillBuffer(1);
    byte frametype = in.get();
    setCurrentPosition(old);
    return frametype & MASK_VIDEO_CODEC;
  }
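Examples #6 and #7 differ only in the tag they seek to and the mask they apply. The shared save/seek/read/restore sequence could be factored out; a sketch using only names from the examples, illustrative rather than the project's actual code:

// Illustrative refactoring of the pattern shared by the two accessors above.
private byte readFirstBodyByte(long tagPosition) {
  long old = getCurrentPosition();
  try {
    setCurrentPosition(tagPosition);
    readTagHeader();         // skip over the tag header
    fillBuffer(1);
    return in.get();         // the first body byte carries the codec/format bits
  } finally {
    setCurrentPosition(old); // always restore the reader position
  }
}

getAudioCodecId() would then reduce to the null check plus readFirstBodyByte(firstAudioTag) & MASK_SOUND_FORMAT, and getVideoCodecId() likewise.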
Example #8
  /**
   * Create tag for metadata event.
   *
   * @return Metadata event tag
   */
  private ITag createFileMeta() {
    // Create tag for onMetaData event
    ByteBuffer buf = ByteBuffer.allocate(1024);
    buf.setAutoExpand(true);
    Output out = new Output(buf);

    // Event name, followed by the metadata properties
    out.writeString("onMetaData");
    Map<Object, Object> props = new HashMap<Object, Object>();
    props.put("duration", duration / 1000.0); // milliseconds -> seconds
    if (firstVideoTag != -1) {
      long old = getCurrentPosition();
      setCurrentPosition(firstVideoTag);
      readTagHeader();
      fillBuffer(1);
      byte frametype = in.get();
      // Video codec id
      props.put("videocodecid", frametype & MASK_VIDEO_CODEC);
      setCurrentPosition(old);
    }
    if (firstAudioTag != -1) {
      long old = getCurrentPosition();
      setCurrentPosition(firstAudioTag);
      readTagHeader();
      fillBuffer(1);
      byte frametype = in.get();
      // Audio codec id
      props.put("audiocodecid", (frametype & MASK_SOUND_FORMAT) >> 4);
      setCurrentPosition(old);
    }
    props.put("canSeekToEnd", true);
    out.writeMap(props, new Serializer());
    buf.flip();

    ITag result = new Tag(IoConstants.TYPE_METADATA, 0, buf.limit(), null, 0);
    result.setBody(buf);
    return result;
  }
Example #9
  /**
   * Serializes this object to a byte array, length included.
   *
   * @return the serialized bytes
   */
  public byte[] getByties() {
    int size = countSize();
    ByteBuffer buffer = ByteBuffer.allocate(size);
    writeBuffer(buffer);
    buffer.flip();

    byte[] ret = new byte[size];
    buffer.get(ret);
    return ret;
  }
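Because ByteBuffer.allocate(size) returns a heap buffer backed by an array of exactly size bytes, the flip-and-copy can be skipped when writeBuffer fills the buffer completely. An alternative sketch; illustrative, and note it hands out the backing array rather than a private copy:

// Illustrative zero-copy variant; safe only because allocate(size) yields a
// heap buffer of exactly countSize() bytes and writeBuffer() fills it fully.
public byte[] getBytiesNoCopy() {
  ByteBuffer buffer = ByteBuffer.allocate(countSize());
  writeBuffer(buffer);
  return buffer.array(); // the backing array itself, not a defensive copy
}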
Example #10
  /**
   * Read only header part of a tag.
   *
   * @return Tag header
   */
  private ITag readTagHeader() {
    // PREVIOUS TAG SIZE
    fillBuffer(15);
    int previousTagSize = in.getInt();

    // START OF FLV TAG
    byte dataType = in.get();

    // Body size and timestamp are unsigned 24-bit (3-byte) values; the
    // utility method reads three consecutive bytes into a regular int,
    // and a matching utility can write those three bytes back out.
    // Worth revisiting during optimization.
    int bodySize = IOUtils.readUnsignedMediumInt(in);
    int timestamp = IOUtils.readUnsignedMediumInt(in);
    // remaining 4 bytes: extended timestamp byte plus 3-byte stream id (unused here)
    in.getInt();

    return new Tag(dataType, timestamp, bodySize, null, previousTagSize);
  }
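IOUtils.readUnsignedMediumInt is the 3-byte helper the comment refers to. Its actual implementation is not shown; a 24-bit big-endian unsigned read typically looks like this, sketched against java.nio.ByteBuffer:

// Illustrative 24-bit big-endian unsigned read; the real IOUtils method may differ.
public static int readUnsignedMediumInt(ByteBuffer in) {
  int first = in.get() & 0xff;
  int second = in.get() & 0xff;
  int third = in.get() & 0xff;
  return (first << 16) | (second << 8) | third; // always non-negative, fits in an int
}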
Example #11
  /** Tests that the send filter correctly prepends the message length. */
  public void testSend() {
    int len = 1000;
    byte[] sendData = getByteSequence(len);
    ByteBuffer buf = ByteBuffer.allocate(len + 4, false);
    buf.putInt(len);
    buf.put(sendData);
    buf = buf.asReadOnlyBuffer();
    buf.flip();

    byte[] expected = new byte[buf.remaining()];
    buf.get(expected);

    assertEquals(0, sentMessages.size());
    assertEquals(0, receivedMessages.size());

    harness.send(sendData);

    assertEquals(1, sentMessages.size());
    assertEquals(0, receivedMessages.size());

    byte[] actual = sentMessages.get(0);

    assertTrue("Incorrect send!", Arrays.equals(actual, expected));
  }
Example #12
 @Override
 protected boolean doDecode(IoSession session, ByteBuffer in, ProtocolDecoderOutput out)
     throws Exception {
   final int originPos = in.position();
   if (in.remaining() < 16) {
     // incomplete header; keep the position at the packet start and wait for more data
     in.position(originPos);
     return false;
   }
   int magic = in.getInt();
   if (magic != Const.MAGIC) {
     throw new IOException("magic mismatch, expected: " + Const.MAGIC + ", but got " + magic);
   }
   int request = in.getInt();
   int pcode = in.getInt();
   int len = in.getInt();
   if (in.remaining() < len) {
     // body not fully arrived; rewind so the header is re-read on the next call
     in.position(originPos);
     return false;
   }
   byte[] data = new byte[len];
   in.get(data);
   BaseResponsePacket packet = PacketCode.createPacket(pcode, request, data);
   out.write(packet);
   return true;
 }
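The two in.position(originPos) calls implement the cumulative-decoder contract: returning false with the position at the packet start makes the framework buffer the bytes and call doDecode again once more data arrives. The same guard can be expressed with mark()/reset(); a sketch against java.nio.ByteBuffer, for illustration:

// Illustrative equivalent of the manual position bookkeeping above.
private static boolean headerAvailable(java.nio.ByteBuffer in) {
  in.mark();                  // remember the packet start
  if (in.remaining() < 16) {  // header = magic + request + pcode + len, four ints
    in.reset();               // rewind; the framework will retry with more data
    return false;
  }
  in.reset();                 // header present; caller re-reads from the start
  return true;
}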
Example #13
 /** Copies the buffer's remaining bytes into a new array and appends it to the list. */
 private void addToList(List<byte[]> list, ByteBuffer buf) {
   byte[] bytes = new byte[buf.remaining()];
   buf.get(bytes);
   list.add(bytes);
 }
Example #14
  /**
   * Key frames analysis may be used as a utility method so synchronize it.
   *
   * @return Keyframe metadata
   */
  public synchronized KeyFrameMeta analyzeKeyFrames() {
    if (keyframeMeta != null) {
      return keyframeMeta;
    }

    // check for cached keyframe information
    if (keyframeCache != null) {
      keyframeMeta = keyframeCache.loadKeyFrameMeta(file);
      if (keyframeMeta != null) {
        // Keyframe data loaded, create other mappings
        duration = keyframeMeta.duration;
        posTimeMap = new HashMap<Long, Long>();
        for (int i = 0; i < keyframeMeta.positions.length; i++) {
          posTimeMap.put(keyframeMeta.positions[i], (long) keyframeMeta.timestamps[i]);
        }
        // XXX: We currently lose pos -> tag mapping, but that isn't used anywhere, so that's okay
        // for now...
        posTagMap = new HashMap<Long, Integer>();
        posTagMap.put((long) 0, 0);
        return keyframeMeta;
      }
    }

    // Lists of video positions and timestamps
    List<Long> positionList = new ArrayList<Long>();
    List<Integer> timestampList = new ArrayList<Integer>();
    // Lists of audio positions and timestamps
    List<Long> audioPositionList = new ArrayList<Long>();
    List<Integer> audioTimestampList = new ArrayList<Integer>();
    long origPos = getCurrentPosition();
    // skip the 9-byte FLV header and point to the first tag
    setCurrentPosition(9);

    // Maps positions to tags
    posTagMap = new HashMap<Long, Integer>();
    int idx = 0;
    boolean audioOnly = true;
    while (this.hasMoreTags()) {
      long pos = getCurrentPosition();
      posTagMap.put(pos, idx++);
      // Read tag header and duration
      ITag tmpTag = this.readTagHeader();
      duration = tmpTag.getTimestamp();
      if (tmpTag.getDataType() == IoConstants.TYPE_VIDEO) {
        if (audioOnly) {
          audioOnly = false;
          audioPositionList.clear();
          audioTimestampList.clear();
        }
        if (firstVideoTag == -1) {
          firstVideoTag = pos;
        }

        // Grab Frame type
        fillBuffer(1);
        byte frametype = in.get();
        if (((frametype & MASK_VIDEO_FRAMETYPE) >> 4) == FLAG_FRAMETYPE_KEYFRAME) {
          positionList.add(pos);
          timestampList.add(tmpTag.getTimestamp());
        }

      } else if (tmpTag.getDataType() == IoConstants.TYPE_AUDIO) {
        if (firstAudioTag == -1) {
          firstAudioTag = pos;
        }
        if (audioOnly) {
          audioPositionList.add(pos);
          audioTimestampList.add(tmpTag.getTimestamp());
        }
      }
      // XXX Paul: this 'properly' handles damaged FLV files, as far as
      // duration/size is concerned
      // 15 = 4-byte previous tag size + 11-byte tag header
      long newPosition = pos + tmpTag.getBodySize() + 15;
      // log.debug("---->" + in.remaining() + " limit=" + in.limit() + "
      // new pos=" + newPosition);
      if (newPosition >= getTotalBytes()) {
        log.info("New position exceeds limit");
        if (log.isDebugEnabled()) {
          log.debug("-----");
          log.debug("Keyframe analysis");
          log.debug(" data type=" + tmpTag.getDataType() + " bodysize=" + tmpTag.getBodySize());
          log.debug(
              " remaining="
                  + getRemainingBytes()
                  + " limit="
                  + getTotalBytes()
                  + " new pos="
                  + newPosition);
          log.debug(" pos=" + pos);
          log.debug("-----");
        }
        break;
      } else {
        setCurrentPosition(newPosition);
      }
    }
    // restore the original read position
    setCurrentPosition(origPos);

    keyframeMeta = new KeyFrameMeta();
    keyframeMeta.duration = duration;
    posTimeMap = new HashMap<Long, Long>();
    if (audioOnly) {
      // The FLV contains only audio tags; use their lists
      // to support pause and seeking
      positionList = audioPositionList;
      timestampList = audioTimestampList;
    }
    keyframeMeta.audioOnly = audioOnly;
    keyframeMeta.positions = new long[positionList.size()];
    keyframeMeta.timestamps = new int[timestampList.size()];
    for (int i = 0; i < keyframeMeta.positions.length; i++) {
      keyframeMeta.positions[i] = positionList.get(i);
      keyframeMeta.timestamps[i] = timestampList.get(i);
      posTimeMap.put((long) positionList.get(i), (long) timestampList.get(i));
    }
    if (keyframeCache != null) {
      keyframeCache.saveKeyFrameMeta(file, keyframeMeta);
    }
    return keyframeMeta;
  }
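The positions and timestamps arrays built here are what make seeking cheap: both are sorted in tag order, so a binary search finds the last keyframe at or before a requested timestamp. A sketch of such a lookup; illustrative, since the project's actual seek code is not shown:

// Illustrative lookup over the KeyFrameMeta produced above.
public static long positionForTimestamp(KeyFrameMeta meta, int ts) {
  int idx = java.util.Arrays.binarySearch(meta.timestamps, ts);
  if (idx < 0) {
    idx = -idx - 2;  // binarySearch returns -(insertionPoint) - 1 on a miss;
                     // insertionPoint - 1 is the last keyframe at or before ts
  }
  if (idx < 0) {
    return 9;        // requested time precedes the first keyframe: start at the first tag
  }
  return meta.positions[idx];
}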