@Override
  public int read(ExtractorInput input, PositionHolder seekPosition)
      throws IOException, InterruptedException {

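    // On the first call, read the Vorbis identification and setup headers and publish the
    // track format.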
    if (vorbisSetup == null) {
      vorbisSetup = readSetupHeaders(input, scratch);
      ArrayList<byte[]> codecInitialisationData = new ArrayList<>(2);
      codecInitialisationData.add(vorbisSetup.idHeader.data);
      codecInitialisationData.add(vorbisSetup.setupHeaderData);

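      // Estimate the duration from the input length and the codec's approximate bitrate
      // (bytes -> bits -> microseconds); an unbounded input yields an unknown duration.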
      long duration =
          input.getLength() == C.LENGTH_UNBOUNDED
              ? C.UNKNOWN_TIME_US
              : input.getLength() * 8 * C.MICROS_PER_SECOND
                  / vorbisSetup.idHeader.getApproximateBitrate();
      trackOutput.format(
          MediaFormat.createAudioFormat(
              null, // trackId
              MimeTypes.AUDIO_VORBIS,
              vorbisSetup.idHeader.bitrateNominal,
              OGG_MAX_SEGMENT_SIZE * 255,
              duration,
              vorbisSetup.idHeader.channels,
              (int) vorbisSetup.idHeader.sampleRate,
              codecInitialisationData,
              null)); // language
    }
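    // Read the next packet from the Ogg stream into the scratch buffer, if one is available.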
    if (oggReader.readPacket(input, scratch)) {
      // if this is an audio packet (the LSB of the first byte is unset)...
      if ((scratch.data[0] & 0x01) != 1) {
        // ... we need to decode the block size
        int packetBlockSize = decodeBlockSize(scratch.data[0], vorbisSetup);
        // a packet contains samples produced from overlapping the previous and current frame data
        // (https://www.xiph.org/vorbis/doc/Vorbis_I_spec.html#x1-350001.3.2)
        int samplesInPacket =
            seenFirstAudioPacket ? (packetBlockSize + previousPacketBlockSize) / 4 : 0;
        // the codec expects the number of samples to be appended to the audio data
        appendNumberOfSamples(scratch, samplesInPacket);

        // calculate time and send audio data to codec
        long timeUs = elapsedSamples * C.MICROS_PER_SECOND / vorbisSetup.idHeader.sampleRate;
        trackOutput.sampleData(scratch, scratch.limit());
        trackOutput.sampleMetadata(timeUs, C.SAMPLE_FLAG_SYNC, scratch.limit(), 0, null);

        // update state in members for next iteration
        seenFirstAudioPacket = true;
        elapsedSamples += samplesInPacket;
        previousPacketBlockSize = packetBlockSize;
      }
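      // Clear the scratch buffer before the next packet is read into it.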
      scratch.reset();
      return RESULT_CONTINUE;
    }
    return RESULT_END_OF_INPUT;
  }
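
  /**
   * Initializes the media format and MP4 extractor wrapper for the given manifest track, if not
   * already done, and returns the format.
   */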
  private MediaFormat initManifestTrack(
      SmoothStreamingManifest manifest, int elementIndex, int trackIndex) {
    int manifestTrackKey = getManifestTrackKey(elementIndex, trackIndex);
    MediaFormat mediaFormat = mediaFormats.get(manifestTrackKey);
    if (mediaFormat != null) {
      // Already initialized.
      return mediaFormat;
    }

    // Build the media format.
    long durationUs = live ? C.UNKNOWN_TIME_US : manifest.durationUs;
    StreamElement element = manifest.streamElements[elementIndex];
    Format format = element.tracks[trackIndex].format;
    byte[][] csdArray = element.tracks[trackIndex].csd;
    int mp4TrackType;
    switch (element.type) {
      case StreamElement.TYPE_VIDEO:
        mediaFormat =
            MediaFormat.createVideoFormat(
                format.mimeType,
                format.bitrate,
                MediaFormat.NO_VALUE, // maxInputSize
                durationUs,
                format.width,
                format.height,
                0,
                Arrays.asList(csdArray));
        mp4TrackType = Track.TYPE_vide;
        break;
      case StreamElement.TYPE_AUDIO:
        List<byte[]> csd;
        if (csdArray != null) {
          csd = Arrays.asList(csdArray);
        } else {
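          // No codec-specific data in the manifest, so synthesize the AAC AudioSpecificConfig
          // from the sampling rate and channel count.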
          csd =
              Collections.singletonList(
                  CodecSpecificDataUtil.buildAacAudioSpecificConfig(
                      format.audioSamplingRate, format.audioChannels));
        }
        mediaFormat =
            MediaFormat.createAudioFormat(
                format.mimeType,
                format.bitrate,
                MediaFormat.NO_VALUE, // maxInputSize
                durationUs,
                format.audioChannels,
                format.audioSamplingRate,
                csd);
        mp4TrackType = Track.TYPE_soun;
        break;
      case StreamElement.TYPE_TEXT:
        mediaFormat =
            MediaFormat.createTextFormat(
                format.mimeType, format.bitrate, format.language, durationUs);
        mp4TrackType = Track.TYPE_text;
        break;
      default:
        throw new IllegalStateException("Invalid type: " + element.type);
    }

    // Build the extractor.
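    // Smooth Streaming fragments flag every video frame as a sync frame, so enable the
    // corresponding extractor workaround.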
    FragmentedMp4Extractor mp4Extractor =
        new FragmentedMp4Extractor(
            FragmentedMp4Extractor.WORKAROUND_EVERY_VIDEO_FRAME_IS_SYNC_FRAME);
    Track mp4Track =
        new Track(
            trackIndex,
            mp4TrackType,
            element.timescale,
            durationUs,
            mediaFormat,
            trackEncryptionBoxes,
            // 4-byte NAL unit length prefixes for video samples; -1 (unset) otherwise.
            mp4TrackType == Track.TYPE_vide ? 4 : -1);
    mp4Extractor.setTrack(mp4Track);

    // Store the format and a wrapper around the extractor.
    mediaFormats.put(manifestTrackKey, mediaFormat);
    extractorWrappers.put(manifestTrackKey, new ChunkExtractorWrapper(mp4Extractor));
    return mediaFormat;
  }