*/
IStreamCoder ic = null;
IStreamCoder oc = null;
IAudioResampler as = null;
IVideoResampler vs = null;
IVideoPicture inFrame = null;
IVideoPicture reFrame = null;
/**
 * Now, we've already opened the files in #setupStreams(CommandLine). We
 * just keep reading packets from the input container until the
 * IContainer returns a value < 0.
 */
while (mIContainer.readNextPacket(iPacket) == 0)
{
/**
* Find out which stream this packet belongs to.
*/
int i = iPacket.getStreamIndex();
int offset = 0;
/**
* Find out if this stream has a starting timestamp
*/
IStream stream = mIContainer.getStream(i);
long tsOffset = 0;
if (stream.getStartTime() != Global.NO_PTS && stream.getStartTime() > 0
&& stream.getTimeBase() != null)
{
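/**
 * Rescale the stream's start time from the stream's own time base into
 * 1/DEFAULT_PTS_PER_SECOND units -- the same units the decoded
 * timestamps below use -- so it can be subtracted from them.
 */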
IRational defTimeBase = IRational.make(1,
(int) Global.DEFAULT_PTS_PER_SECOND);
tsOffset = defTimeBase.rescale(stream.getStartTime(), stream
.getTimeBase());
}
/**
* And look up the appropriate objects that are working on that stream.
*/
ic = mICoders[i];
oc = mOCoders[i];
as = mASamplers[i];
vs = mVSamplers[i];
inFrame = mIVideoPictures[i];
reFrame = mOVideoPictures[i];
inSamples = mISamples[i];
reSamples = mOSamples[i];
if (oc == null)
// we didn't set up this coder; ignore the packet
continue;
/**
* Find out if the stream is audio or video.
*/
ICodec.Type cType = ic.getCodecType();
if (cType == ICodec.Type.CODEC_TYPE_AUDIO && mHasAudio)
{
/**
* Decoding audio works by taking the data in the packet, and eating
* chunks from it to create decoded raw data.
*
* However, there may be more data in a packet than is needed to get one
* set of samples (or less), so you need to iterate through the bytes to
* get all of that data.
*
* The following loop is the standard way of doing that.
*/
while (offset < iPacket.getSize())
{
retval = ic.decodeAudio(inSamples, iPacket, offset);
if (retval <= 0)
throw new RuntimeException("could not decode audio. stream: " + i);
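/**
 * If the decoded samples have a valid timestamp, shift it back by the
 * stream's start time so output timestamps begin near zero.
 */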
if (inSamples.getTimeStamp() != Global.NO_PTS)
inSamples.setTimeStamp(inSamples.getTimeStamp() - tsOffset);
log.trace("packet:{}; samples:{}; offset:{}", new Object[]
{
iPacket, inSamples, tsOffset
});
/**
 * If there was no error, decodeAudio returns the number of bytes it
 * consumed. We use that so the next time around the loop we decode
 * new data.
 */
offset += retval;
int numSamplesConsumed = 0;
/**
* If as is not null then we know a resample was needed, so we do that
* resample now.
*/
if (as != null && inSamples.getNumSamples() > 0)
{
retval = as.resample(reSamples, inSamples, inSamples
.getNumSamples());
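// reSamples now holds the audio converted to the output coder's
// sample rate and format.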
outSamples = reSamples;
}
else
{
outSamples = inSamples;
}
/**
 * Call a hook that derived classes can override to alter the audio
 * frame before it is encoded.
 */
outSamples = alterAudioFrame(outSamples);
/**
* Now that we've resampled, it's time to encode the audio.
*
* This workflow is similar to decoding; you may have more, fewer or
* just enough audio samples available to encode a packet, so you must
* iterate through them.
*
* Unfortunately (don't ask why) there is a slight difference between
* encodeAudio and decodeAudio; encodeAudio returns the number of
* samples consumed, NOT the number of bytes. This can be confusing,
* and we encourage you to read the IAudioSamples documentation to
* find out what the difference is.
*
* But in any case, the following loop encodes the samples we have
* into packets.
*/
while (numSamplesConsumed < outSamples.getNumSamples())
{
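/**
 * The third argument to encodeAudio tells it which sample in
 * outSamples to start consuming from.
 */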
retval = oc.encodeAudio(oPacket, outSamples, numSamplesConsumed);
if (retval <= 0)
throw new RuntimeException("Could not encode any audio: "
+ retval);
/**
* Increment the number of samples consumed, so that the next time
* through this loop we encode new audio
*/
numSamplesConsumed += retval;
log.trace("out packet:{}; samples:{}; offset:{}", new Object[]{
oPacket, outSamples, tsOffset
});
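/**
 * Hand the encoded packet to writePacket(), which writes it to the
 * output container.
 */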
writePacket(oPacket);
}
}
}
else if (cType == ICodec.Type.CODEC_TYPE_VIDEO && mHasVideo)
{
/**
 * This encoding workflow is pretty much the same as the one for the
 * audio above.
 *
 * The only major difference is that encodeVideo() will always consume
 * one frame (whereas encodeAudio() might only consume some samples in
 * an IAudioSamples buffer); it might not be able to output a packet
 * yet, but you can assume that it consumes the entire frame.
 */
IVideoPicture outFrame = null;
while (offset < iPacket.getSize())
{
retval = ic.decodeVideo(inFrame, iPacket, offset);
if (retval <= 0)
throw new RuntimeException("could not decode any video. stream: "
+ i);
log.trace("decoded vid ts: {}; pkts ts: {}", inFrame.getTimeStamp(),
iPacket.getTimeStamp());
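/**
 * As with audio, shift the decoded picture's timestamp back by the
 * stream's start time.
 */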
if (inFrame.getTimeStamp() != Global.NO_PTS)
inFrame.setTimeStamp(inFrame.getTimeStamp() - tsOffset);
offset += retval;
if (inFrame.isComplete())
{
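/**
 * If a video resampler was set up, the decoded picture needs to be
 * converted to the output coder's frame size and pixel format before
 * it can be encoded.
 */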
if (vs != null)
{
retval = vs.resample(reFrame, inFrame);
if (retval < 0)
throw new RuntimeException("could not resample video");
outFrame = reFrame;
}
else
{
outFrame = inFrame;
}
/**
 * Call a hook that derived classes can override to alter the video
 * frame before it is encoded.
 */
outFrame = alterVideoFrame(outFrame);
outFrame.setQuality(0);
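/**
 * The third argument to encodeVideo() is a suggested output buffer
 * size; passing 0 here lets the library choose it.
 */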
retval = oc.encodeVideo(oPacket, outFrame, 0);
if (retval < 0)
throw new RuntimeException("could not encode video");
writePacket(oPacket);
}