/** Disable a track. */
void disableTrack(ProcInfo pInfo, TrackInfo remove) {
    remove.tc.setEnabled(false);
    remove.disabled = true;

    // Shift all the stream indexes to match.
    TrackInfo ti;
    for (int type = AUDIO; type < MEDIA_TYPES; type++) {
        for (int j = 0; j < pInfo.numTracksByType[type]; j++) {
            ti = pInfo.tracksByType[type][j];
            if (ti.idx >= remove.idx) ti.idx--;
        }
    }
}
/** With the given processor info generated from matchTracks, build each of the processors. */
public boolean buildTracks(ProcInfo pInfo[]) {
    ContentDescriptor cd = new ContentDescriptor(ContentDescriptor.RAW);
    Processor p;

    for (int i = 0; i < pInfo.length; i++) {
        p = pInfo[i].p;
        p.setContentDescriptor(cd);

        // We are done programming the processor; now realize it.
        if (!waitForState(p, Controller.Realized)) {
            System.err.println("- Failed to realize the processor.");
            return false;
        }

        // Set the JPEG quality to 0.5.
        setJPEGQuality(p, 0.5f);

        PushBufferStream pbs[];
        TrackInfo tInfo;
        int trackID;

        // Cheating: we should really check the type of DataSource
        // returned before casting.
        pInfo[i].ds = (PushBufferDataSource) p.getDataOutput();
        pbs = pInfo[i].ds.getStreams();

        // Find the matching output stream for each track, by media type.
        for (int type = AUDIO; type < MEDIA_TYPES; type++) {
            for (trackID = 0; trackID < pInfo[i].numTracksByType[type]; trackID++) {
                tInfo = pInfo[i].tracksByType[type][trackID];
                tInfo.pbs = pbs[tInfo.idx];
            }
        }
    }
    return true;
}
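// NOTE: buildTracks and matchTracks rely on the helpers waitForState and
// setJPEGQuality, which are defined elsewhere in this class. For reference,
// here is a minimal sketch of waitForState following the usual JMF
// wait/notify pattern; the field names waitSync and stateTransitionOK are
// illustrative, not necessarily the original ones, and the usual
// javax.media.* imports are assumed.
private final Object waitSync = new Object();
private boolean stateTransitionOK = true;

/** Block until the given processor reaches the given state. */
boolean waitForState(Processor p, int state) {
    p.addControllerListener(new ControllerListener() {
        public void controllerUpdate(ControllerEvent evt) {
            if (evt instanceof ConfigureCompleteEvent
                    || evt instanceof RealizeCompleteEvent
                    || evt instanceof PrefetchCompleteEvent) {
                synchronized (waitSync) {
                    stateTransitionOK = true;
                    waitSync.notifyAll();
                }
            } else if (evt instanceof ResourceUnavailableEvent) {
                synchronized (waitSync) {
                    stateTransitionOK = false;
                    waitSync.notifyAll();
                }
            }
        }
    });

    // Kick off the state transition, then wait for the completion event.
    // This works because the JMF state constants increase in order.
    if (state == Processor.Configured) {
        p.configure();
    } else if (state == Controller.Realized) {
        p.realize();
    }
    synchronized (waitSync) {
        try {
            while (p.getState() < state && stateTransitionOK)
                waitSync.wait();
        } catch (InterruptedException e) {
            return false;
        }
    }
    return stateTransitionOK;
}

// And a sketch of setJPEGQuality in the style of the JMF sample code:
// find the QualityControl owned by the JPEG codec on the processor and
// set the quality value on it.
void setJPEGQuality(Player p, float val) {
    Control cs[] = p.getControls();
    QualityControl qc = null;
    VideoFormat jpegFmt = new VideoFormat(VideoFormat.JPEG);

    // Loop through the controls to find a QualityControl owned by a
    // codec that can output JPEG.
    for (int i = 0; i < cs.length; i++) {
        if (cs[i] instanceof QualityControl && cs[i] instanceof Owned) {
            Object owner = ((Owned) cs[i]).getOwner();
            if (owner instanceof Codec) {
                Format fmts[] = ((Codec) owner).getSupportedOutputFormats(null);
                for (int j = 0; j < fmts.length; j++) {
                    if (fmts[j].matches(jpegFmt)) {
                        qc = (QualityControl) cs[i];
                        qc.setQuality(val);
                        break;
                    }
                }
            }
            if (qc != null) break;
        }
    }
}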
@Override
public void read(Buffer buffer) throws IOException {
    pbs.read(buffer);

    // Remap the time stamps so they won't wrap around when we switch
    // to a new file.
    if (buffer.getTimeStamp() != Buffer.TIME_UNKNOWN) {
        long diff = buffer.getTimeStamp() - lastTS;
        lastTS = buffer.getTimeStamp();
        if (diff > 0) timeStamp += diff;
        buffer.setTimeStamp(timeStamp);
    }

    // If this track is used as the master time base, compute the
    // master time from this track.
    if (useAsMaster) {
        if (buffer.getFormat() instanceof AudioFormat) {
            // For audio, derive the time from the accumulated data
            // length; fall back to the buffer time stamp if the
            // duration cannot be computed.
            AudioFormat af = (AudioFormat) buffer.getFormat();
            masterAudioLen += buffer.getLength();
            long t = af.computeDuration(masterAudioLen);
            if (t > 0) {
                masterTime = t;
            } else {
                masterTime = buffer.getTimeStamp();
            }
        } else {
            masterTime = buffer.getTimeStamp();
        }
    }

    if (buffer.isEOM()) {
        tInfo.done = true;
        if (!ds.handleEOM(tInfo)) {
            // This is not the last processor to finish, so un-set the
            // EOM flag and discard the buffer instead of ending the stream.
            buffer.setEOM(false);
            buffer.setDiscard(true);
        }
    }
}
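// Illustration of the master-time computation above (a hypothetical,
// standalone example, not part of the original class): for linear PCM,
// AudioFormat.computeDuration converts an accumulated byte count into a
// duration in nanoseconds, which is why it is preferred over the raw
// buffer time stamps for audio.
AudioFormat af = new AudioFormat(AudioFormat.LINEAR, 44100, 16, 2);
// 44100 Hz * 2 channels * 2 bytes per sample = 176400 bytes per second:
long oneSecond = af.computeDuration(176400); // = 1,000,000,000 ns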
/**
 * Try to match all the tracks and find common formats to concatenate them. A database of
 * results will be generated.
 */
public boolean matchTracks(ProcInfo pInfo[], ContentDescriptor cd) {
    TrackControl tcs[];
    int aIdx, vIdx;
    int i, j, type;
    TrackInfo tInfo;

    // Build the ProcInfo data structure for each processor and
    // separate the audio tracks from the video tracks.
    for (i = 0; i < pInfo.length; i++) {
        if (!waitForState(pInfo[i].p, Processor.Configured)) {
            System.err.println("- Failed to configure the processor.");
            return false;
        }

        tcs = pInfo[i].p.getTrackControls();
        pInfo[i].tracksByType = new TrackInfo[MEDIA_TYPES][];
        for (type = AUDIO; type < MEDIA_TYPES; type++) {
            pInfo[i].tracksByType[type] = new TrackInfo[tcs.length];
        }
        pInfo[i].numTracksByType = new int[MEDIA_TYPES];
        aIdx = vIdx = 0;

        // Separate the audio and video tracks.
        for (j = 0; j < tcs.length; j++) {
            if (tcs[j].getFormat() instanceof AudioFormat) {
                tInfo = new TrackInfo();
                tInfo.idx = j;
                tInfo.tc = tcs[j];
                pInfo[i].tracksByType[AUDIO][aIdx++] = tInfo;
            } else if (tcs[j].getFormat() instanceof VideoFormat) {
                tInfo = new TrackInfo();
                tInfo.idx = j;
                tInfo.tc = tcs[j];
                pInfo[i].tracksByType[VIDEO][vIdx++] = tInfo;
            }
        }
        pInfo[i].numTracksByType[AUDIO] = aIdx;
        pInfo[i].numTracksByType[VIDEO] = vIdx;
        pInfo[i].p.setContentDescriptor(cd);
    }

    // Different movies may have different numbers of tracks. Obviously,
    // we cannot concatenate all the tracks of a 3-track movie with a
    // 2-track one, so we concatenate up to the smallest number of tracks
    // across all the movies and disable the unused tracks.
    int total[] = new int[MEDIA_TYPES];
    for (type = AUDIO; type < MEDIA_TYPES; type++) {
        total[type] = pInfo[0].numTracksByType[type];
    }
    for (i = 1; i < pInfo.length; i++) {
        for (type = AUDIO; type < MEDIA_TYPES; type++) {
            if (pInfo[i].numTracksByType[type] < total[type])
                total[type] = pInfo[i].numTracksByType[type];
        }
    }

    if (total[AUDIO] < 1 && total[VIDEO] < 1) {
        System.err.println("There are no audio or video tracks to concatenate.");
        return false;
    }

    totalTracks = 0;
    for (type = AUDIO; type < MEDIA_TYPES; type++)
        totalTracks += total[type];

    // Disable all the unused tracks.
    for (i = 0; i < pInfo.length; i++) {
        for (type = AUDIO; type < MEDIA_TYPES; type++) {
            for (j = total[type]; j < pInfo[i].numTracksByType[type]; j++) {
                tInfo = pInfo[i].tracksByType[type][j];
                disableTrack(pInfo[i], tInfo);
                System.err.println(
                    "- Disabling the following track since the other input media do not have a matching type:");
                System.err.println("  " + tInfo.tc.getFormat());
            }
            pInfo[i].numTracksByType[type] = total[type];
        }
    }

    // Try to find common formats to concatenate the tracks,
    // handling the tracks type by type.
    for (type = AUDIO; type < MEDIA_TYPES; type++) {
        for (i = 0; i < total[type]; i++) {
            if (!tryMatch(pInfo, type, i)) {
                System.err.println(
                    "- Cannot transcode the tracks to a common format for concatenation!");
                return false;
            }
        }
    }

    return true;
}
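// tryMatch (used above) is defined elsewhere in this class. One plausible
// sketch, under the assumption that it searches the first processor's
// supported formats for one that every other processor's corresponding
// track can also be transcoded to, and applies it to all of them (all
// names below are illustrative, not necessarily the original ones):
boolean tryMatch(ProcInfo pInfo[], int type, int trackIdx) {
    TrackControl first = pInfo[0].tracksByType[type][trackIdx].tc;
    Format candidates[] = first.getSupportedFormats();

    for (int c = 0; c < candidates.length; c++) {
        boolean ok = true;
        // Check that the matching track on every other processor
        // supports this candidate format.
        for (int i = 1; i < pInfo.length && ok; i++) {
            TrackControl tc = pInfo[i].tracksByType[type][trackIdx].tc;
            Format fmts[] = tc.getSupportedFormats();
            boolean supported = false;
            for (int j = 0; j < fmts.length; j++) {
                if (candidates[c].matches(fmts[j])) {
                    supported = true;
                    break;
                }
            }
            ok = supported;
        }
        if (ok) {
            // Set the common format on every processor's track.
            for (int i = 0; i < pInfo.length; i++)
                pInfo[i].tracksByType[type][trackIdx].tc.setFormat(candidates[c]);
            return true;
        }
    }
    return false;
}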