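/**
 * Decodes the audio track of {@code clip} between its start and end time,
 * re-encodes it with {@code audioEncoder}, and writes the encoded samples to
 * {@code mediaMuxer}, reporting progress through {@code listener}.
 */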
private void resampleAudio(MediaMuxer mediaMuxer, MediaCodec audioDecoder, MediaExtractor audioExtractor, SamplerClip clip, MergeProgressListener listener) throws IOException {
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
int inputChunk = 0;
int outputCount = 0;
long endTime = clip.getEndTime();
if (endTime == -1) {
endTime = clip.getVideoDuration();
}
boolean outputDoneNextTimeWeCheck = false;
boolean outputDone = false;
boolean inputDone = false;
boolean decoderDone = false;
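// Main transcode loop: feed compressed audio from the extractor into the
// decoder, copy the decoded PCM into the encoder, and write the encoder's
// output to the muxer until end of stream is reached.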
while (!outputDone) {
if (!inputDone) {
int inputBufIndex = audioDecoder.dequeueInputBuffer(TIMEOUT_USEC);
if (inputBufIndex >= 0) {
if (audioExtractor.getSampleTime() / 1000 >= endTime) {
// End of stream -- send empty frame with EOS flag set.
audioDecoder.queueInputBuffer(inputBufIndex, 0, 0, 0L, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
inputDone = true;
} else {
// Copy a chunk of compressed input to the decoder. Codec-specific config
// is normally supplied through the MediaFormat used to configure the
// decoder, so no BUFFER_FLAG_CODEC_CONFIG buffer is queued here.
ByteBuffer inputBuf;
inputBuf = audioDecoder.getInputBuffer(inputBufIndex);
assert inputBuf != null;
inputBuf.clear();
int sampleSize = audioExtractor.readSampleData(inputBuf, 0);
if (sampleSize < 0) {
// The extractor ran out of data before reaching endTime; signal end of stream.
audioDecoder.queueInputBuffer(inputBufIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
inputDone = true;
} else {
audioDecoder.queueInputBuffer(inputBufIndex, 0, sampleSize, audioExtractor.getSampleTime(), 0);
audioExtractor.advance();
inputChunk++;
}
}
} else {
if (VERBOSE)
Log.d(TAG, "input buffer not available");
}
}
// Assume both the decoder and the encoder have output available; loop until both report otherwise.
boolean decoderOutputAvailable = !decoderDone;
boolean encoderOutputAvailable = true;
while (decoderOutputAvailable || encoderOutputAvailable) {
// Start by draining any pending output from the encoder. It's important to
// do this before we try to stuff any more data in.
int encoderStatus = audioEncoder.dequeueOutputBuffer(info, TIMEOUT_USEC);
if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
encoderOutputAvailable = false;
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
if (VERBOSE)
Log.d(TAG, "encoder output buffers changed");
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// Ignored: outputAudioTrack is assumed to have been added to the muxer
// before this method runs, so the new output format is not needed here.
} else if (encoderStatus < 0) {
throw new RuntimeException("unexpected result from mEncoder.dequeueOutputBuffer: " + encoderStatus);
} else { // encoderStatus >= 0
ByteBuffer encodedData;
encodedData = audioEncoder.getOutputBuffer(encoderStatus);
if (encodedData == null) {
throw new RuntimeException("encoderOutputBuffer " + encoderStatus + " was null");
}
// Write the data to the output "file".
if (info.size != 0) {
encodedData.position(info.offset);
encodedData.limit(info.offset + info.size);
outputCount++;
mediaMuxer.writeSampleData(outputAudioTrack, encodedData, info);
}
outputDone = (info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0;
audioEncoder.releaseOutputBuffer(encoderStatus, false);
}
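// Note: no end-of-stream buffer is ever queued to the encoder in this
// method. Instead, once the decoder reports EOS (handled below), the
// outputDoneNextTimeWeCheck flag ends the outer loop on the following pass.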
if (outputDoneNextTimeWeCheck) {
outputDone = true;
}
if (encoderStatus != MediaCodec.INFO_TRY_AGAIN_LATER) {
// Continue attempts to drain output.
continue;
}
// Encoder is drained; check whether the decoder has produced a new chunk of
// decoded audio. (For audio the output arrives in a ByteBuffer, not a
// Surface, so it can be copied straight into the encoder's input.)
if (!decoderDone) {
int decoderStatus = audioDecoder.dequeueOutputBuffer(info, TIMEOUT_USEC);
if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
Log.d(TAG, "resampleAudio: decoder reported end of stream");
}
if (decoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
decoderOutputAvailable = false;
} else if (decoderStatus >= 0) {
// The decoder delivers raw PCM in a ByteBuffer; info.size tells us how
// many bytes of decoded audio are available.
boolean doRender = (info.size != 0);
// There is no Surface in the audio path, so the decoded buffer is copied
// directly into an encoder input buffer below; no SurfaceTexture hand-off
// or onFrameAvailable wait is involved.
if (doRender) {
ByteBuffer outputBuffer = audioDecoder.getOutputBuffer(decoderStatus);
int size = info.size;
int inIndexEncode = audioEncoder.dequeueInputBuffer(1000);
if (inIndexEncode >= 0) {
ByteBuffer inputBuffer = audioEncoder.getInputBuffer(inIndexEncode);
// Copy the decoded PCM into the encoder's input buffer. If no encoder
// input buffer becomes free within the timeout, this chunk is dropped.
assert inputBuffer != null;
assert outputBuffer != null;
inputBuffer.put(outputBuffer);
// Rebase the timestamp to the clip's start and keep the output timeline
// monotonically increasing across clips.
long sampleTimeUs = info.presentationTimeUs;
if (clip.getStartTime() != -1) {
sampleTimeUs = info.presentationTimeUs - clip.getStartTime();
}
sampleTimeUs = Math.max(0, sampleTimeUs);
mEncoderPresentationTimeUs2 += (sampleTimeUs - mLastSampleTime2);
mLastSampleTime2 = sampleTimeUs;
info.presentationTimeUs = mEncoderPresentationTimeUs2;
audioEncoder.queueInputBuffer(inIndexEncode, 0, size, info.presentationTimeUs, 0);
if (listener != null) {
listener.onProgress((((float) (mEncoderPresentationTimeUs / 1000) + (float) (mEncoderPresentationTimeUs2)) / 10 / (float) (2 * mDuration)));
}
}
}
audioDecoder.releaseOutputBuffer(decoderStatus, false); // no output Surface, so nothing to render
if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
outputDoneNextTimeWeCheck = true;
}
}
}
}
}
if (inputChunk != outputCount) {
throw new RuntimeException("frame lost: " + inputChunk + " in, " + outputCount + " out");
}
}
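// ---------------------------------------------------------------------------
// Context sketch (not part of the original snippet): resampleAudio() relies on
// fields and codec setup defined elsewhere in the class -- TAG, VERBOSE,
// TIMEOUT_USEC, audioEncoder, outputAudioTrack, mDuration,
// mEncoderPresentationTimeUs, mEncoderPresentationTimeUs2 and mLastSampleTime2.
// The declarations and helper below are only a plausible sketch of that setup;
// the names, the AAC output format, the bit rate and the track index are
// assumptions, not the original implementation.
// ---------------------------------------------------------------------------

private static final String TAG = "AudioResampler";   // assumed log tag
private static final boolean VERBOSE = false;
private static final int TIMEOUT_USEC = 10000;         // dequeue timeout, microseconds

private MediaCodec audioEncoder;          // consumes the decoded PCM produced above
private int outputAudioTrack;             // muxer track index; assumed to be added to
                                          // the muxer before resampleAudio() runs
private long mDuration;                   // total duration used for progress reporting
private long mEncoderPresentationTimeUs;  // running video timeline (progress only)
private long mEncoderPresentationTimeUs2; // running audio timeline across clips
private long mLastSampleTime2;            // last clip-relative audio timestamp seen

// Possible codec setup before resampleAudio() is called (hypothetical helper):
private MediaCodec prepareAudioCodecs(MediaExtractor audioExtractor, int audioTrackIndex)
        throws IOException {
    MediaFormat inputFormat = audioExtractor.getTrackFormat(audioTrackIndex);
    audioExtractor.selectTrack(audioTrackIndex);

    // Encode to AAC-LC at the source sample rate and channel count (assumed settings).
    MediaFormat outputFormat = MediaFormat.createAudioFormat(
            MediaFormat.MIMETYPE_AUDIO_AAC,
            inputFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE),
            inputFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT));
    outputFormat.setInteger(MediaFormat.KEY_BIT_RATE, 128000);
    outputFormat.setInteger(MediaFormat.KEY_AAC_PROFILE,
            MediaCodecInfo.CodecProfileLevel.AACObjectLC);

    audioEncoder = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC);
    audioEncoder.configure(outputFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    audioEncoder.start();

    // The decoder is configured straight from the extractor's track format, which
    // carries the codec-specific data, so no CODEC_CONFIG buffers are needed later.
    MediaCodec audioDecoder = MediaCodec.createDecoderByType(
            inputFormat.getString(MediaFormat.KEY_MIME));
    audioDecoder.configure(inputFormat, null, null, 0);
    audioDecoder.start();
    return audioDecoder;
}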