My current project requires me to mix a video's audio track with a separate audio file, and I've managed to do so with the following code:
while (mCopyAudio && !audioInternalDecoderDone && pendingInternalAudioDecoderOutputBufferIndex == -1 && (encoderOutputAudioFormat == null || muxing)) {
int decoderOutputBufferIndex = this.internalAudioDecoder.dequeueOutputBuffer(audioInternalDecoderOutputBufferInfo, TIMEOUT_USEC);
if (decoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
break;
}
if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
audioInternalDecoderOutputBuffers = this.internalAudioDecoder.getOutputBuffers();
break;
}
if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
decoderOutputAudioFormat = this.internalAudioDecoder.getOutputFormat();
iDecoderOutputChannelNum = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
iDecoderOutputAudioSampleRate = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE);
break;
}
if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
this.internalAudioDecoder.releaseOutputBuffer(decoderOutputBufferIndex, false);
break;
}
pendingInternalAudioDecoderOutputBufferIndex = decoderOutputBufferIndex;
audioDecodedFrameCount++;
break;
}
while (mCopyAudio && !audioExternalDecoderDone && pendingExternalAudioDecoderOutputBufferIndex == -1 && (encoderOutputAudioFormat == null || muxing)) {
int decoderOutputBufferIndex = this.externalAudioDecoder.dequeueOutputBuffer(audioExternalDecoderOutputBufferInfo, TIMEOUT_USEC);
if (decoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
break;
}
if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
audioExternalDecoderOutputBuffers = this.externalAudioDecoder.getOutputBuffers();
break;
}
if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
decoderOutputAudioFormat = this.externalAudioDecoder.getOutputFormat();
eDecoderOutputChannelNum = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
eDecoderOutputAudioSampleRate = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE);
break;
}
if ((audioExternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
this.externalAudioDecoder.releaseOutputBuffer(decoderOutputBufferIndex, false);
break;
}
pendingExternalAudioDecoderOutputBufferIndex = decoderOutputBufferIndex;
audioDecodedFrameCount++;
break;
}
while (mCopyAudio && pendingInternalAudioDecoderOutputBufferIndex != -1 && pendingExternalAudioDecoderOutputBufferIndex != -1) {
int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
break;
}
ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
int size = audioInternalDecoderOutputBufferInfo.size;
long presentationTime = audioInternalDecoderOutputBufferInfo.presentationTimeUs - musicStartUs;
if (size >= 0) {
ByteBuffer iDecoderOutputBuffer = audioInternalDecoderOutputBuffers[pendingInternalAudioDecoderOutputBufferIndex].duplicate();
ByteBuffer eDecoderOutputBuffer = audioExternalDecoderOutputBuffers[pendingExternalAudioDecoderOutputBufferIndex].duplicate();
byte[] initContents = new byte[audioInternalDecoderOutputBufferInfo.size];
byte[] eInitContents = new byte[audioExternalDecoderOutputBufferInfo.size];
iDecoderOutputBuffer.get(initContents, 0, audioInternalDecoderOutputBufferInfo.size);
eDecoderOutputBuffer.get(eInitContents, 0, audioExternalDecoderOutputBufferInfo.size);
/*
The following is my attempt at compensating for different buffer sizes and timestamps, for when the internal and external decoders' presentationTimeUs values don't line up with each other. It hasn't gone well.
if(audioExternalDecoderOutputBufferInfo.presentationTimeUs <= totalTime) {
if (eInitContents.length > initContents.length) {
SliceAndRemainder sar = sliceArray(eInitContents, initContents.length - remainderForNextBB.length);
Log.i("slice_and_remainder", sar.slice.length+" "+sar.remainder.length);
if(remainderForNextBB.length == initContents.length) {
eInitContents = remainderForNextBB;
remainderForNextBB = new byte[]{};
} else {
eInitContents = concatTwoArrays(remainderForNextBB, sar.slice);
remainderForNextBB = sar.remainder;
}
}else if(eInitContents.length < initContents.length) {
eInitContents = minorUpsamplingFrom44kTo48k(eInitContents);
}
}
For brevity's sake, this code is commented out, so assume the ideal condition that the timestamps in both decoders are synced up properly
*/
byte[] alteredIContents = scaleByteArrayByScalar(initContents, internalAudioGain);
byte[] alteredEContents = scaleByteArrayByScalar(eInitContents, externalAudioGain);
ByteBuffer endByteBuffer;
if(audioExternalDecoderOutputBufferInfo.presentationTimeUs <= totalTime) {
byte[] res = mixTwoByteArrays(alteredIContents, alteredEContents, alteredEContents.length);
Log.i("bytebuffer_mixed_len", res.length+"");
endByteBuffer = ByteBuffer.wrap(res);
} else {
endByteBuffer = ByteBuffer.wrap(alteredIContents);
}
iDecoderOutputBuffer.position(audioInternalDecoderOutputBufferInfo.offset);
iDecoderOutputBuffer.limit(audioInternalDecoderOutputBufferInfo.offset + size);
encoderInputBuffer.position(0);
encoderInputBuffer.put(endByteBuffer);
if((presentationTime < totalTime)) {
Log.i("presentation_time", presentationTime+" "+totalTime);
audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, size, presentationTime, audioInternalDecoderOutputBufferInfo.flags);
} else {
audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
}
}
this.internalAudioDecoder.releaseOutputBuffer(pendingInternalAudioDecoderOutputBufferIndex, false);
this.externalAudioDecoder.releaseOutputBuffer(pendingExternalAudioDecoderOutputBufferIndex, false);
pendingInternalAudioDecoderOutputBufferIndex = -1;
pendingExternalAudioDecoderOutputBufferIndex = -1;
if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
lastAudioDecoderFinalFrameTimestamp += temporaryAudioDecoderTimestamp + 33333;
temporaryAudioDecoderTimestamp = 0;
audioDecoderTimestampOffset = lastAudioDecoderFinalFrameTimestamp;
audioInternalDecoderDone = true;
audioExternalDecoderDone = true;
}
break;
}
Basically: create two extractor-decoder pairs and merge their output in a third while() block for mixing and processing, where mixTwoByteArrays() is:
private byte[] mixTwoByteArrays(byte[] src, byte[] with, int numOfMixSamples) {
final int length = Math.min(src.length, numOfMixSamples);
byte[] result = new byte[length];
for(int i = 0; i < length; i++) {
// Clamp to the signed-byte range; the original Math.min(0.999f, ...) truncated
// every positive sum to 0 when cast back to byte, and never clamped negatives.
int sum = src[i] + with[i];
result[i] = (byte) Math.max(Byte.MIN_VALUE, Math.min(Byte.MAX_VALUE, sum));
}
return result;
}
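Note that mixing byte-by-byte like this is only really safe for 8-bit PCM; MediaCodec audio decoders normally emit 16-bit little-endian PCM, where each sample spans two bytes. A minimal per-sample sketch under that assumption (my own illustration, not code from the question):
private byte[] mixTwoPcm16Arrays(byte[] src, byte[] with) {
    // Assumes both arrays hold ENCODING_PCM_16BIT little-endian data.
    int length = Math.min(src.length, with.length) & ~1; // whole 2-byte samples only
    byte[] result = new byte[length];
    for (int i = 0; i < length; i += 2) {
        int a = (short) ((src[i] & 0xFF) | (src[i + 1] << 8));
        int b = (short) ((with[i] & 0xFF) | (with[i + 1] << 8));
        int mixed = Math.max(Short.MIN_VALUE, Math.min(Short.MAX_VALUE, a + b));
        result[i] = (byte) mixed;          // low byte
        result[i + 1] = (byte) (mixed >> 8); // high byte
    }
    return result;
}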
As the commented-out code above notes, this works well when the two tracks' timestamps are in sync with each other. My problem is that in some cases, such as recently, the video's audio track has timestamps in multiples of 26666 µs while the audio file's timestamps come in multiples of roughly 27000 µs.
I have considered processing the audio tracks separately and then merging the result with the original video track, but that would hurt processing time, so I would rather do the mixing on the fly and keep that approach as a last resort.
Is there a way to do this on the fly?
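For context: a decoder buffer's timestamp step is just frames / sampleRate, so steps of 26666 µs versus roughly 27000 µs plausibly mean the two streams run at different sample rates (e.g. 48 kHz versus 44.1 kHz). The helper below only illustrates that arithmetic, with hypothetical buffer sizes; it is not code from the question:
static long bufferDurationUs(int sizeBytes, int channels, int sampleRate) {
    int frames = sizeBytes / (channels * 2); // 2 bytes per sample for 16-bit PCM
    return frames * 1_000_000L / sampleRate;
}
// bufferDurationUs(5120, 2, 48000) == 26666  -> the 26666-µs track
// bufferDurationUs(4760, 2, 44100) == 26984  -> the "roughly 27000" file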
Best Answer
It turned out to be as simple as keeping an ArrayList<Byte>() that every byte from the external decoder's samples gets appended to. On each pass, take the first 4096 bytes (or whatever the internal decoder's buffer info's size is) from that ArrayList to mix with the internal decoder's sample, then remove that many bytes from index 0 of the ArrayList.
private ArrayList<Byte> externalBytesArrayList = new ArrayList<Byte>();
//All the other stuff omitted
while (mCopyAudio && pendingInternalAudioDecoderOutputBufferIndex != -1 && pendingExternalAudioDecoderOutputBufferIndex != -1) {
int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
break;
}
ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
int size = audioInternalDecoderOutputBufferInfo.size;
long presentationTime = audioInternalDecoderOutputBufferInfo.presentationTimeUs - musicStartUs;
if (size >= 0) {
ByteBuffer iDecoderOutputBuffer = audioInternalDecoderOutputBuffers[pendingInternalAudioDecoderOutputBufferIndex].duplicate();
ByteBuffer eDecoderOutputBuffer = audioExternalDecoderOutputBuffers[pendingExternalAudioDecoderOutputBufferIndex].duplicate();
byte[] initContents = new byte[audioInternalDecoderOutputBufferInfo.size];
byte[] eInitContents = new byte[audioExternalDecoderOutputBufferInfo.size];
iDecoderOutputBuffer.get(initContents, 0, audioInternalDecoderOutputBufferInfo.size);
eDecoderOutputBuffer.get(eInitContents, 0, audioExternalDecoderOutputBufferInfo.size);
externalBytesArrayList.addAll(Bytes.asList(eInitContents)); // Bytes is Guava's com.google.common.primitives.Bytes
byte[] eContents;
//Here: take the first 4096 bytes from the external decoder's buffered samples, leave the rest in the ArrayList.
//4096 should really be audioInternalDecoderOutputBufferInfo.size - though complications might follow.
if (externalBytesArrayList.size() >= 4096) {
List<Byte> subset = externalBytesArrayList.subList(0, 4096);
eContents = Bytes.toArray(subset);
subset.clear(); // the subList is a view, so this removes those bytes from the backing list
} else {
eContents = new byte[audioInternalDecoderOutputBufferInfo.size]; // not enough buffered yet: mix with silence
}
byte[] alteredIContents = scaleByteArrayByScalar(initContents, internalAudioGain);
byte[] alteredEContents = scaleByteArrayByScalar(eContents, externalAudioGain);
ByteBuffer endByteBuffer;
byte[] res = mixTwoByteArrays(alteredIContents, alteredEContents, alteredEContents.length);
endByteBuffer = ByteBuffer.wrap(res);
iDecoderOutputBuffer.position(audioInternalDecoderOutputBufferInfo.offset);
iDecoderOutputBuffer.limit(audioInternalDecoderOutputBufferInfo.offset + size);
encoderInputBuffer.position(0);
encoderInputBuffer.put(endByteBuffer);
if((presentationTime < totalTime)) {
Log.i("presentation_time", presentationTime+" "+totalTime);
audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, size, presentationTime, audioInternalDecoderOutputBufferInfo.flags);
} else {
audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
}
}
this.internalAudioDecoder.releaseOutputBuffer(pendingInternalAudioDecoderOutputBufferIndex, false);
this.externalAudioDecoder.releaseOutputBuffer(pendingExternalAudioDecoderOutputBufferIndex, false);
pendingInternalAudioDecoderOutputBufferIndex = -1;
pendingExternalAudioDecoderOutputBufferIndex = -1;
if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
lastAudioDecoderFinalFrameTimestamp += temporaryAudioDecoderTimestamp + 33333;
temporaryAudioDecoderTimestamp = 0;
audioDecoderTimestampOffset = lastAudioDecoderFinalFrameTimestamp;
audioInternalDecoderDone = true;
audioExternalDecoderDone = true;
}
break;
}
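One caveat: an ArrayList<Byte> boxes every PCM byte, which gets expensive at audio data rates. A lighter-weight alternative is a primitive byte FIFO; the ByteFifo class below is my own hypothetical sketch, not part of the accepted answer:
static class ByteFifo {
    private byte[] buf = new byte[8192];
    private int size = 0;

    void push(byte[] data, int len) {
        if (size + len > buf.length) { // grow geometrically when full
            buf = java.util.Arrays.copyOf(buf, Math.max(buf.length * 2, size + len));
        }
        System.arraycopy(data, 0, buf, size, len);
        size += len;
    }

    // Fills out completely in FIFO order; returns false if not enough is buffered yet.
    boolean pop(byte[] out) {
        if (size < out.length) return false;
        System.arraycopy(buf, 0, out, 0, out.length);
        System.arraycopy(buf, out.length, buf, 0, size - out.length);
        size -= out.length;
        return true;
    }
}
With this, the addAll(Bytes.asList(...)) call becomes fifo.push(eInitContents, eInitContents.length), and the subList dance becomes a single fifo.pop(eContents).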
Regarding android - MediaCodec - how to synchronously mix audio samples from two decoder MediaCodec objects, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/50758960/