ios - Applying visual effects to an image pixel by pixel in Swift

Tags: ios iphone camera swift core-image

I have a university assignment to create visual effects and apply them to video frames captured through the device camera. I can currently get the image and display it, but I can't change the pixel color values.

I convert the sample buffer to an imageRef variable, and if I turn it into a UIImage everything is fine.

But now I want to take that imageRef and change its color values pixel by pixel, in this example inverting them to a negative (I'll have to do more complex things later, so I can't just use CIFilters). However, when I run the commented-out section it crashes with a bad access error.

import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

  let captureSession = AVCaptureSession()
  var previewLayer : AVCaptureVideoPreviewLayer?

  var captureDevice : AVCaptureDevice?

  @IBOutlet weak var cameraView: UIImageView!

  override func viewDidLoad() {
    super.viewDidLoad()

    captureSession.sessionPreset = AVCaptureSessionPresetMedium

    let devices = AVCaptureDevice.devices()

    for device in devices {
      if device.hasMediaType(AVMediaTypeVideo) && device.position == AVCaptureDevicePosition.Back {
        if let device = device as? AVCaptureDevice {
          captureDevice = device
          beginSession()
          break
        }
      }
    }
  }

  func focusTo(value : Float) {
    if let device = captureDevice {
      if(device.lockForConfiguration(nil)) {
        device.setFocusModeLockedWithLensPosition(value) {
          (time) in
        }
        device.unlockForConfiguration()
      }
    }
  }

  override func touchesBegan(touches: NSSet!, withEvent event: UIEvent!) {
    var touchPercent = Float(touches.anyObject().locationInView(view).x / 320)
    focusTo(touchPercent)
  }

  override func touchesMoved(touches: NSSet!, withEvent event: UIEvent!) {
    var touchPercent = Float(touches.anyObject().locationInView(view).x / 320)
    focusTo(touchPercent)
  }

  func beginSession() {
    configureDevice()

    var error : NSError?
    captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &error))

    if error != nil {
      println("error: \(error?.localizedDescription)")
    }

    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)

    previewLayer?.frame = view.layer.frame
    //view.layer.addSublayer(previewLayer)

    let output = AVCaptureVideoDataOutput()
    let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
    output.setSampleBufferDelegate(self, queue: cameraQueue)
    output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]

    captureSession.addOutput(output)
    captureSession.startRunning()
  }

  func configureDevice() {
    if let device = captureDevice {
      device.lockForConfiguration(nil)
      device.focusMode = .Locked
      device.unlockForConfiguration()
    }
  }

  // MARK: - AVCaptureVideoDataOutputSampleBufferDelegate

  func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    CVPixelBufferLockBaseAddress(imageBuffer, 0)

    let baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
    let width = CVPixelBufferGetWidth(imageBuffer)
    let height = CVPixelBufferGetHeight(imageBuffer)
    let colorSpace = CGColorSpaceCreateDeviceRGB()

    var bitmapInfo = CGBitmapInfo.fromRaw(CGImageAlphaInfo.PremultipliedFirst.toRaw())! | CGBitmapInfo.ByteOrder32Little

    let context = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo)
    let imageRef = CGBitmapContextCreateImage(context)

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0)

    let data = CGDataProviderCopyData(CGImageGetDataProvider(imageRef)) as NSData
    let pixels = data.bytes

    var newPixels = UnsafeMutablePointer<UInt8>()

    //for index in stride(from: 0, to: data.length, by: 4) {

      /*newPixels[index] = 255 - pixels[index]
      newPixels[index + 1] = 255 - pixels[index + 1]
      newPixels[index + 2] = 255 - pixels[index + 2]
      newPixels[index + 3] = 255 - pixels[index + 3]*/
    //}

    bitmapInfo = CGImageGetBitmapInfo(imageRef)
    let provider = CGDataProviderCreateWithData(nil, newPixels, UInt(data.length), nil)

    let newImageRef = CGImageCreate(width, height, CGImageGetBitsPerComponent(imageRef), CGImageGetBitsPerPixel(imageRef), bytesPerRow, colorSpace, bitmapInfo, provider, nil, false, kCGRenderingIntentDefault)

    let image = UIImage(CGImage: newImageRef, scale: 1, orientation: .Right)
    dispatch_async(dispatch_get_main_queue()) {
      self.cameraView.image = image
    }
  }
}

Best answer

The bad access in your pixel-manipulation loop happens because the newPixels UnsafeMutablePointer is created with the default initializer, which points to 0x0000: as far as I can tell, that is unallocated memory you have no right to store data in.
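As a minimal sketch of the difference (using the Swift 1.x pointer API of the time; the byte count here is just a stand-in):

    // Crashes: the default initializer yields a null pointer, so the
    // first write lands in unmapped memory (EXC_BAD_ACCESS).
    var bad = UnsafeMutablePointer<UInt8>()
    //bad[0] = 255

    // Works: allocate backing storage first, and free it when done.
    let count = 640 * 480 * 4
    var good = UnsafeMutablePointer<UInt8>.alloc(count)
    good[0] = 255
    good.dealloc(count)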

For a longer explanation and a "solution", I made a few changes...

First, Swift has changed a bit since the OP was posted, so this line has to be rewritten in terms of rawValue (fromRaw/toRaw are gone):

    //var bitmapInfo = CGBitmapInfo.fromRaw(CGImageAlphaInfo.PremultipliedFirst.toRaw())! | CGBitmapInfo.ByteOrder32Little
    var bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue) | CGBitmapInfo.ByteOrder32Little

The pointers also need a few changes, so I'm posting all of them below (with the original lines kept in as comments).

    let data = CGDataProviderCopyData(CGImageGetDataProvider(imageRef)) as NSData

    //let pixels = data.bytes
    let pixels = UnsafePointer<UInt8>(data.bytes)

    let imageSize : Int = Int(width) * Int(height) * 4

    //var newPixels = UnsafeMutablePointer<UInt8>()

    var newPixelArray = [UInt8](count: imageSize, repeatedValue: 0)

    for index in stride(from: 0, to: data.length, by: 4) {
        newPixelArray[index] = 255 - pixels[index]
        newPixelArray[index + 1] = 255 - pixels[index + 1]
        newPixelArray[index + 2] = 255 - pixels[index + 2]
        newPixelArray[index + 3] = pixels[index + 3]
    }

    bitmapInfo = CGImageGetBitmapInfo(imageRef)
    //let provider = CGDataProviderCreateWithData(nil, newPixels, UInt(data.length), nil)
    let provider = CGDataProviderCreateWithData(nil, &newPixelArray, UInt(data.length), nil)

A few explanations: the old pixel bytes have to be read as UInt8, so pixels becomes an UnsafePointer<UInt8> instead of the raw data.bytes. Then I created an array for the new pixels, dropped the newPixels pointer, and used the array directly. Finally, a pointer to the new array is handed to the provider to create the image. I also stopped inverting the alpha byte.
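One caveat I'd add (my own note, not part of the original answer): CGDataProviderCreateWithData does not copy the bytes, so the pointer handed in has to stay valid for the provider's whole lifetime. Wrapping the processed bytes in an NSData, which the provider retains, avoids depending on the array's lifetime:

    //let provider = CGDataProviderCreateWithData(nil, &newPixelArray, UInt(data.length), nil)
    // Copy the processed bytes into an NSData that the provider retains.
    let newData = NSData(bytes: &newPixelArray, length: data.length)
    let provider = CGDataProviderCreateWithCFData(newData)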

After this I was able to get some negative images into my view, at very low performance: roughly one image every ten seconds (iPhone 5, run through Xcode). It also takes a long time before the first frame shows up in the image view.

I got a somewhat faster response by adding captureSession.stopRunning() at the beginning of the didOutputSampleBuffer function, then starting the session again with captureSession.startRunning() once processing was finished. With that I got close to 1 fps.
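A sketch of that throttling idea, assuming the same delegate method as in the question:

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        // Pause capture so new frames don't queue up behind this one.
        captureSession.stopRunning()

        // ... per-pixel processing and the main-queue UI update as above ...

        // Resume capture once the frame has been handled.
        captureSession.startRunning()
    }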

Thanks for the interesting challenge!

The original question, ios - Applying visual effects to an image pixel by pixel in Swift, is on Stack Overflow: https://stackoverflow.com/questions/25587792/
