swift - AVAssetWriter Continuous Segments

Tags: swift, avfoundation, avassetwriter

I want to record a series of clips that play back seamlessly when concatenated, whether in a video player or via ffmpeg -f concat.

Right now, in either case, I get a very noticeable audio glitch at every segment join point.

My current strategy is to maintain two AVAssetWriter instances. At each cut-off point I start a new writer, wait until it is ready, and then begin feeding it samples. Once the video and audio samples for the outgoing writer are complete up to a specific point in time, I close it.

How can I modify this to get continuous clip recording, and what is the root cause of the problem?

import Foundation
import UIKit
import AVFoundation

class StreamController: UIViewController, AVCaptureAudioDataOutputSampleBufferDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var previewView: UIView!

    var closingVideoInput: AVAssetWriterInput?
    var closingAudioInput: AVAssetWriterInput?
    var closingAssetWriter: AVAssetWriter?

    var currentVideoInput: AVAssetWriterInput?
    var currentAudioInput: AVAssetWriterInput?
    var currentAssetWriter: AVAssetWriter?

    var nextVideoInput: AVAssetWriterInput?
    var nextAudioInput: AVAssetWriterInput?
    var nextAssetWriter: AVAssetWriter?

    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoHelper: VideoHelper?

    var startTime: NSTimeInterval = 0
    override func viewDidLoad() {
        super.viewDidLoad()
        startTime = NSDate().timeIntervalSince1970
        createSegmentWriter()
        videoHelper = VideoHelper()
        videoHelper!.delegate = self
        videoHelper!.startSession()
        NSTimer.scheduledTimerWithTimeInterval(5, target: self, selector: "createSegmentWriter", userInfo: nil, repeats: true)
    }

    func createSegmentWriter() {
        print("Creating segment writer at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
        nextAssetWriter = try! AVAssetWriter(URL: NSURL(fileURLWithPath: OutputFileNameHelper.instance.pathForOutput()), fileType: AVFileTypeMPEG4)
        nextAssetWriter!.shouldOptimizeForNetworkUse = true

        let videoSettings: [String:AnyObject] = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: 960, AVVideoHeightKey: 540]
        nextVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        nextVideoInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextVideoInput!)

        let audioSettings: [String:AnyObject] = [
                AVFormatIDKey: NSNumber(unsignedInt: kAudioFormatMPEG4AAC),
                AVSampleRateKey: 44100.0,
                AVNumberOfChannelsKey: 2,
        ]
        nextAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
        nextAudioInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextAudioInput!)

        nextAssetWriter!.startWriting()
    }

    override func viewDidAppear(animated: Bool) {
        super.viewDidAppear(animated)
        previewLayer = AVCaptureVideoPreviewLayer(session: videoHelper!.captureSession)
        previewLayer!.frame = self.previewView.bounds
        previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
        if previewLayer?.connection?.supportsVideoOrientation == true {
            previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
        }
        self.previewView.layer.addSublayer(previewLayer!)
    }

    func closeWriter() {
        if videoFinished && audioFinished {
            let outputFile = closingAssetWriter?.outputURL.pathComponents?.last
            closingAssetWriter?.finishWritingWithCompletionHandler() {
                let delta = NSDate().timeIntervalSince1970 - self.startTime
                print("segment \(outputFile) finished at t=\(delta)")
            }
            self.closingAudioInput = nil
            self.closingVideoInput = nil
            self.closingAssetWriter = nil
            audioFinished = false
            videoFinished = false
        }
    }

    func closingVideoFinished() {
        if closingVideoInput != nil {
            videoFinished = true
            closeWriter()
        }
    }

    func closingAudioFinished() {
        if closingAudioInput != nil {
            audioFinished = true
            closeWriter()
        }
    }

    var closingTime: CMTime = kCMTimeZero
    var audioFinished = false
    var videoFinished = false
    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBufferRef, fromConnection connection: AVCaptureConnection!) {
        let sampleTime: CMTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        if let nextWriter = nextAssetWriter {
            // startWriting() has taken effect once the status leaves .Unknown; switch writers.
            if nextWriter.status != .Unknown {
                print("Switching asset writers at t=\(NSDate().timeIntervalSince1970 - self.startTime)")

                closingAssetWriter = currentAssetWriter
                closingVideoInput = currentVideoInput
                closingAudioInput = currentAudioInput

                currentAssetWriter = nextAssetWriter
                currentVideoInput = nextVideoInput
                currentAudioInput = nextAudioInput

                nextAssetWriter = nil
                nextVideoInput = nil
                nextAudioInput = nil

                closingTime = sampleTime
                currentAssetWriter!.startSessionAtSourceTime(sampleTime)
            }
        }

        if currentAssetWriter != nil {
            if let _ = captureOutput as? AVCaptureVideoDataOutput {
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    if closingVideoInput?.readyForMoreMediaData == true {
                        closingVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingVideoFinished()
                    if currentVideoInput?.readyForMoreMediaData == true {
                        currentVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                }

            } else if let _ = captureOutput as? AVCaptureAudioDataOutput {
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    if closingAudioInput?.readyForMoreMediaData == true {
                        closingAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingAudioFinished()
                    if currentAudioInput?.readyForMoreMediaData == true {
                        currentAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                }
            }
        }
    }

    override func shouldAutorotate() -> Bool {
        return true
    }

    override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
        return [UIInterfaceOrientationMask.LandscapeRight]
    }
}

Best Answer

I think the root cause is that the video and audio CMSampleBuffers cover different time intervals. You need to split and join the audio CMSampleBuffers so that they slot seamlessly into your AVAssetWriter's timeline, which should probably be based on the video presentation timestamps.
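
A minimal sketch of the first step, assuming the cut point is the video presentation timestamp you pass to startSessionAtSourceTime (the question's closingTime); the helper name audioFramesBeforeCut is my own:

import Foundation
import CoreMedia

// Sketch: how many sample frames of an audio buffer fall before the cut time.
// Returns 0 if the buffer starts at or after the cut, the full count if it ends
// at or before it, and a prorated count when the buffer straddles the boundary.
func audioFramesBeforeCut(sampleBuffer: CMSampleBuffer, cutTime: CMTime) -> Int {
    let pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    let duration = CMSampleBufferGetDuration(sampleBuffer)
    let total = CMSampleBufferGetNumSamples(sampleBuffer)

    if CMTimeCompare(cutTime, pts) <= 0 { return 0 }
    if CMTimeCompare(cutTime, CMTimeAdd(pts, duration)) >= 0 { return total }

    // Straddling buffer: prorate by the time elapsed before the cut.
    let fraction = CMTimeGetSeconds(CMTimeSubtract(cutTime, pts)) / CMTimeGetSeconds(duration)
    return Int(round(fraction * Double(total)))
}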

Why does the audio have to change and not the video? It looks asymmetric, but I suppose it is because audio has the higher sample rate.

P.S. Actually creating the new split sample buffers looks intimidating, since CMSampleBufferCreate takes a huge number of arguments. CMSampleBufferCopySampleBufferForRange may be simpler and more efficient to use.
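
For example, here is a rough sketch of splitting one straddling audio buffer that way (Swift 2 style to match the question; splitAudioBuffer is a made-up name, and framesBeforeCut would come from a calculation like the one above):

import CoreMedia

// Sketch: split an audio CMSampleBuffer into a head (to append to the closing
// writer's audio input) and a tail (to append to the new writer's audio input).
func splitAudioBuffer(sampleBuffer: CMSampleBuffer, framesBeforeCut: Int)
        -> (head: CMSampleBuffer?, tail: CMSampleBuffer?) {
    let total = CMSampleBufferGetNumSamples(sampleBuffer)
    guard framesBeforeCut > 0 && framesBeforeCut < total else {
        // Nothing to split: the whole buffer belongs to one writer or the other.
        return (framesBeforeCut > 0 ? sampleBuffer : nil,
                framesBeforeCut > 0 ? nil : sampleBuffer)
    }

    var head: CMSampleBuffer?
    var tail: CMSampleBuffer?
    // Copy the sample ranges [0, framesBeforeCut) and [framesBeforeCut, total).
    let headStatus = CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault,
            sampleBuffer, CFRangeMake(0, framesBeforeCut), &head)
    let tailStatus = CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault,
            sampleBuffer, CFRangeMake(framesBeforeCut, total - framesBeforeCut), &tail)
    guard headStatus == 0 && tailStatus == 0 else { return (nil, nil) } // 0 == noErr
    return (head, tail)
}

In the question's captureOutput, the idea would then be to append head to closingAudioInput and tail to currentAudioInput, instead of sending the whole straddling buffer to one of them.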

About swift - AVAssetWriter Continuous Segments: we found a similar question on Stack Overflow: https://stackoverflow.com/questions/33829518/
