ios - AVCaptureVideoDataOutputSampleBufferDelegate dropping frames when filtering video with CIFilters

Tags: ios swift video avcapturesession cifilter

I have a very strange situation: AVCaptureVideoDataOutputSampleBufferDelegate drops frames if I use 13 different filter chains. Let me explain:

I have a CameraController set up, nothing special; here is my delegate method:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if !paused {

            if connection.output?.connection(with: .audio) == nil {
                //capture video

                // my try to avoid "Out of buffers error", no luck ;(
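                // (in hindsight: CMSampleBufferCreateCopy is a shallow copy - the copy
                // still references the same underlying CVPixelBuffer, so this returns
                // nothing to the capture buffer pool)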
                lastCapturedBuffer = nil
                let err = CMSampleBufferCreateCopy(allocator: kCFAllocatorDefault, sampleBuffer: sampleBuffer, sampleBufferOut: &lastCapturedBuffer)
                if err == noErr {

                }

                connection.videoOrientation = .portrait

                // getting image
                let pixelBuffer = CMSampleBufferGetImageBuffer(lastCapturedBuffer!)
                // remove if any
                CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))

                // captured - is just ciimage property
                captured = CIImage(cvPixelBuffer: pixelBuffer!)
                //remove if any
                CVPixelBufferUnlockBaseAddress(pixelBuffer!,CVPixelBufferLockFlags(rawValue: 0))
                //CVPixelBufferUnlockBaseAddress(pixelBuffer!, .readOnly)

                // transform image to target resolution
                let srcWidth = CGFloat(captured.extent.width)
                let srcHeight = CGFloat(captured.extent.height)

                let dstWidth: CGFloat = ConstantsManager.shared.k_video_width
                let dstHeight: CGFloat = ConstantsManager.shared.k_video_height

                let scaleX = dstWidth / srcWidth
                let scaleY = dstHeight / srcHeight

                let transform = CGAffineTransform(scaleX: scaleX, y: scaleY)
                captured = captured.transformed(by: transform).cropped(to: CGRect(x: 0, y: 0, width: dstWidth, height: dstHeight))
                // mirror for front camera
                if front {
                    var t = CGAffineTransform.init(scaleX: -1, y: 1)
                    t = t.translatedBy(x: -ConstantsManager.shared.k_video_width, y: 0)
                    captured = captured.transformed(by: t)
                }

                // video capture logic
                let writable = canWrite()

                if writable,
                    sessionAtSourceTime == nil {
                    sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(lastCapturedBuffer!)
                    videoWriter.startSession(atSourceTime: sessionAtSourceTime!)
                }

                if writable, (videoWriterInput.isReadyForMoreMediaData) {
                    videoWriterInput.append(lastCapturedBuffer!)
                }

                // apply effect in realtime <- here is the problem. If I comment out the next line, the drops stop, but then no effect is applied
                captured = FilterManager.shared.applyFilterForCamera(inputImage: captured)

                // current frame in case user wants to save image as photo
                self.capturedPhoto = captured

                // sent frame to Camcoder view controller
                self.delegate?.didCapturedFrame(frame: captured)
            } else {
                // capture sound
                let writable = canWrite()
                if writable, (audioWriterInput.isReadyForMoreMediaData) {
                    //print("write audio buffer")
                    audioWriterInput?.append(lastCapturedBuffer!)
                }
            }
        } else {
            // paused
        }
    }

I also implemented the didDrop delegate method, and here is how I found out why it drops frames:

func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("did drop")
        var mode: CMAttachmentMode = 0
        let reason = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_DroppedFrameReason, attachmentModeOut: &mode)
        print("reason \(String(describing: reason))") // Optional(OutOfBuffers)
    }
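For reference, CoreMedia defines only a handful of dropped-frame reasons, so the same callback can branch on them directly. A sketch (the constants are CoreMedia's; the wording of the log messages is my own interpretation):

func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        var mode: CMAttachmentMode = 0
        guard let reason = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_DroppedFrameReason, attachmentModeOut: &mode) else { return }

        if CFEqual(reason, kCMSampleBufferDroppedFrameReason_OutOfBuffers) {
            print("dropped: buffer pool exhausted - something is holding on to buffers")
        } else if CFEqual(reason, kCMSampleBufferDroppedFrameReason_FrameWasLate) {
            print("dropped: processing can't keep up with the frame rate")
        } else if CFEqual(reason, kCMSampleBufferDroppedFrameReason_Discontinuity) {
            print("dropped: discontinuity in the capture stream")
        }
    }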

So I did it like a pro and just commented out parts of the code to find the problem. Here it is:

captured = FilterManager.shared.applyFilterForCamera(inputImage: captured)

FilterManager is a singleton; here is the function being called:

func applyFilterForCamera(inputImage: CIImage) -> CIImage {
        return currentVsFilter!.apply(sourceImage: inputImage)
    }
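A side note: applyFilterForCamera force-unwraps currentVsFilter, so it would crash if no filter is selected. A defensive variant (my sketch, not the original code) falls back to the unfiltered image:

func applyFilterForCamera(inputImage: CIImage) -> CIImage {
        // sketch: return the source image unchanged when no filter is selected
        guard let filter = currentVsFilter else { return inputImage }
        return filter.apply(sourceImage: inputImage)
    }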

currentVsFilter is an object of type VSFilter - here is an example:

import Foundation
import AVKit

class TestFilter: CustomFilter {

    let _name = "Тестовый Фильтр"
    let _displayName = "Test Filter"

    var tempImage: CIImage?
    var final: CGImage?

    override func name() -> String {
        return _name
    }

    override func displayName() -> String {
        return _displayName
    }

    override init() {
        super.init()
        print("Test Filter init")

        // setup my custom kernel filter
        self.noise.type = GlitchFilter.GlitchType.allCases[2]
    }

    // this returns composition for playback using AVPlayer
    override func composition(asset: AVAsset) -> AVMutableVideoComposition {
        let composition = AVMutableVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
            let inputImage = request.sourceImage.cropped(to: request.sourceImage.extent)
            DispatchQueue.global(qos: .userInitiated).async {
                let output = self.apply(sourceImage: inputImage, forComposition: true)
                request.finish(with: output, context: nil)
            }
        })
        let size = FilterManager.shared.cropRectForOrientation().size

        composition.renderSize = size
        return composition
    }

    // this returns actual filtered CIImage, used for both AVPlayer composition and realtime camera
    override func apply(sourceImage: CIImage, forComposition: Bool = false) -> CIImage {

        // rendered text
        tempImage = FilterManager.shared.textRenderedImage()

        // some filters chained one by one
        self.screenBlend?.setValue(tempImage, forKey: kCIInputImageKey)
        self.screenBlend?.setValue(sourceImage, forKey: kCIInputBackgroundImageKey)

        self.noise.inputImage = self.screenBlend?.outputImage
        self.noise.inputAmount = CGFloat.random(in: 1.0...3.0)

        // result
        tempImage = self.noise.outputImage

        // correct crop
        let rect = forComposition ? FilterManager.shared.cropRectForOrientation() : FilterManager.shared.cropRect
        final = self.context.createCGImage(tempImage!, from: rect!)

        return CIImage(cgImage: final!)
    }

}
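A design note on apply: every VSFilter renders through its own self.context, and CIContext is expensive to create and meant to be reused. With 30 filter objects alive, sharing one context across all of them is the safer pattern; a minimal sketch (assuming nothing else in the CustomFilter base class depends on a per-filter context):

import CoreImage

// Sketch: one shared render context, created once and reused by every filter.
enum RenderContext {
    static let shared = CIContext(options: [.useSoftwareRenderer: false])
}

// inside apply(), instead of self.context:
// final = RenderContext.shared.createCGImage(tempImage!, from: rect!)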

Now, the strangest thing: I have 30 VSFilters, and when I reach a 13th one (switching them one by one via a UIButton), I get the "Out of Buffers" error, this one:

kCMSampleBufferDroppedFrameReason_OutOfBuffers

What I tested:

  • I changed the order of the vsFilters in the FilterManager singleton's filter array - same result
  • I tried switching from the 1st filter up to the 12th and back - that works, but as soon as I switch to a 13th one (any of the 30) - bug

It looks like it can only handle 12 VSFilter objects, as if something retains them, or maybe it's thread-related - I don't know.

The app is built for iOS devices and was tested on an iPhone X running iOS 13.3.1. It's a video editor app that applies different effects to the live camera stream and to video files from the camera roll.

Maybe someone has experience with this?

Have a nice day!

Best, Victor

Edit 1. If I reinitialize the cameraController (the AVCaptureSession input/output devices), it works, but it's an ugly option and adds lag when switching filters.

Best answer

Well, I finally won this battle. In case anyone else runs into this "OutOfBuffers" issue, here is my solution.

As I found out, CIFilter grabs the CVPixelBuffer and doesn't release it while filtering the image. I guess this more or less builds up a huge backlog of buffers. The strange thing is that it doesn't cause a memory leak, so I guess it doesn't take the buffer's contents but keeps a strong reference to it. And as rumor (well, me) has it, it can only handle 12 such references.
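This matches how CIImage(cvPixelBuffer:) behaves: the CIImage keeps the pixel buffer alive until it is actually rendered, and AVCaptureVideoDataOutput recycles its sample buffers from a small fixed pool, so a dozen live references are enough to starve it. Besides copying, another way out (a sketch with assumed dimensions and pixel format, not this project's code) is to render eagerly into a buffer you own, so the capture buffer is released right away:

import AVFoundation
import CoreImage

// Sketch: a pool of output buffers we own. Width/height/format here are
// assumptions for illustration, not values from the original project.
let outputPool: CVPixelBufferPool? = {
    var pool: CVPixelBufferPool?
    let attrs: [String: Any] = [
        kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
        kCVPixelBufferWidthKey as String: 1080,
        kCVPixelBufferHeightKey as String: 1920
    ]
    CVPixelBufferPoolCreate(kCFAllocatorDefault, nil, attrs as CFDictionary, &pool)
    return pool
}()

// Render the filtered image into one of our own buffers; once rendered, the
// CIImage no longer pins the capture buffer and the pool can recycle it.
func renderEagerly(_ image: CIImage, with context: CIContext) -> CVPixelBuffer? {
    guard let pool = outputPool else { return nil }
    var buffer: CVPixelBuffer?
    CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &buffer)
    guard let output = buffer else { return nil }
    context.render(image, to: output)
    return output
}

That said, simply copying the incoming buffer, as below, turned out to be the easier route.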

So my approach was to copy the CVPixelBuffer and then work with the copy instead of the buffer I got from the AVCaptureVideoDataOutputSampleBufferDelegate didOutput func.

Here is my new code:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

        if !paused {
            //print("camera controller \(id) got frame")

            if connection.output?.connection(with: .audio) == nil {
                //capture video

                connection.videoOrientation = .portrait

                // getting image
                guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

                // this works!
                let copyBuffer = pixelBuffer.copy()

                // captured - is just ciimage property
                captured = CIImage(cvPixelBuffer: copyBuffer)

                // transform image to target resolution
                let srcWidth = CGFloat(captured.extent.width)
                let srcHeight = CGFloat(captured.extent.height)

                let dstWidth: CGFloat = ConstantsManager.shared.k_video_width
                let dstHeight: CGFloat = ConstantsManager.shared.k_video_height

                let scaleX = dstWidth / srcWidth
                let scaleY = dstHeight / srcHeight

                let transform = CGAffineTransform(scaleX: scaleX, y: scaleY)
                captured = captured.transformed(by: transform).cropped(to: CGRect(x: 0, y: 0, width: dstWidth, height: dstHeight))
                // mirror for front camera
                if front {
                    var t = CGAffineTransform.init(scaleX: -1, y: 1)
                    t = t.translatedBy(x: -ConstantsManager.shared.k_video_width, y: 0)
                    captured = captured.transformed(by: t)
                }

                // video capture logic
                let writable = canWrite()

                if writable,
                    sessionAtSourceTime == nil {
                    sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
                    videoWriter.startSession(atSourceTime: sessionAtSourceTime!)
                }

                if writable, (videoWriterInput.isReadyForMoreMediaData) {
                    videoWriterInput.append(sampleBuffer)
                }

                self.captured = FilterManager.shared.applyFilterForCamera(inputImage: self.captured)

                // current frame in case user wants to save image as photo
                self.capturedPhoto = captured

                // sent frame to Camcoder view controller
                self.delegate?.didCapturedFrame(frame: captured)
            } else {
                // capture sound
                let writable = canWrite()
                if writable, (audioWriterInput.isReadyForMoreMediaData) {
                    //print("write audio buffer")
                    audioWriterInput?.append(sampleBuffer)
                }
            }
        } else {
            // paused
            //print("paused camera controller \(id)")
        }
    }

And here is the function that copies the buffer:

import CoreVideo

extension CVPixelBuffer {
    func copy() -> CVPixelBuffer {
        precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")

        var _copy: CVPixelBuffer?
        CVPixelBufferCreate(
            kCFAllocatorDefault,
            CVPixelBufferGetWidth(self),
            CVPixelBufferGetHeight(self),
            CVPixelBufferGetPixelFormatType(self),
            nil,
            &_copy)

        guard let copy = _copy else { fatalError() }

        CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags.readOnly)
        CVPixelBufferLockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))

        let copyBaseAddress = CVPixelBufferGetBaseAddress(copy)
        let currBaseAddress = CVPixelBufferGetBaseAddress(self)

        print("copy data size: \(CVPixelBufferGetDataSize(copy))")
        print("self data size: \(CVPixelBufferGetDataSize(self))")

        // copy the raw pixel data; assumes both buffers share the same layout
        memcpy(copyBaseAddress, currBaseAddress, CVPixelBufferGetDataSize(copy))

        CVPixelBufferUnlockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
        CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags.readOnly)

        return copy
    }
}

As you can see, I use it as an extension on CVPixelBuffer.
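One caveat about the memcpy above: it assumes the new buffer ends up with exactly the same memory layout (bytes per row, plane offsets) as the source, which CVPixelBufferCreate doesn't guarantee. The camera's default output is bi-planar 4:2:0, so a per-plane, per-row copy is the safer variant; a sketch of that (my code, not the original answer's):

import CoreVideo

extension CVPixelBuffer {
    // Sketch: copy plane by plane and row by row, honoring each buffer's own
    // bytes-per-row, so differing row padding can't corrupt the copy.
    func deepCopyPlanar() -> CVPixelBuffer? {
        var _copy: CVPixelBuffer?
        CVPixelBufferCreate(kCFAllocatorDefault,
                            CVPixelBufferGetWidth(self),
                            CVPixelBufferGetHeight(self),
                            CVPixelBufferGetPixelFormatType(self),
                            nil,
                            &_copy)
        guard let copy = _copy else { return nil }

        CVPixelBufferLockBaseAddress(self, .readOnly)
        CVPixelBufferLockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
        defer {
            CVPixelBufferUnlockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
            CVPixelBufferUnlockBaseAddress(self, .readOnly)
        }

        if CVPixelBufferIsPlanar(self) {
            for plane in 0..<CVPixelBufferGetPlaneCount(self) {
                guard let src = CVPixelBufferGetBaseAddressOfPlane(self, plane),
                      let dst = CVPixelBufferGetBaseAddressOfPlane(copy, plane) else { continue }
                let srcRowBytes = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
                let dstRowBytes = CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
                for row in 0..<CVPixelBufferGetHeightOfPlane(self, plane) {
                    memcpy(dst + row * dstRowBytes, src + row * srcRowBytes, min(srcRowBytes, dstRowBytes))
                }
            }
        } else if let src = CVPixelBufferGetBaseAddress(self),
                  let dst = CVPixelBufferGetBaseAddress(copy) {
            let srcRowBytes = CVPixelBufferGetBytesPerRow(self)
            let dstRowBytes = CVPixelBufferGetBytesPerRow(copy)
            for row in 0..<CVPixelBufferGetHeight(self) {
                memcpy(dst + row * dstRowBytes, src + row * srcRowBytes, min(srcRowBytes, dstRowBytes))
            }
        }
        return copy
    }
}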

Hope this helps anyone who runs into a similar issue.

Best, Victor

About "ios - AVCaptureVideoDataOutputSampleBufferDelegate dropping frames when filtering video with CIFilters": we found a similar question on Stack Overflow: https://stackoverflow.com/questions/61730352/
