ios - AVCapturePhotoOutput iOS 相机超暗

标签 ios swift image camera

我有一个应用程序设置,可以使用相机拍照(基于计时器)来检测脸部的存在。当我向应用程序提供已添加到 Assets 中的照片时,检测过程效果相当好。然而,当我尝试直接使用相机的输出或什至在将图像保存到文件后使用相机的输出时,生成的图像太暗,以至于面部识别完全不可靠。

如果我显示相机看到的图像,它看起来是正确的。我捕获了以下两张图像 - 一张来自实时查看的相机,另一张来自 AVCapturePhotoOutput 创建图像后的相同 View 。如果我只是在 ImageView 中显示捕获的图像,也会发生同样的黑暗。

请注意评论:“我在此处放置了断点并拍摄了屏幕截图”。然后,当代码完成时,我拍摄了第二个屏幕截图。这些是在高光下拍摄的。 enter image description here enter image description here 这是基本代码:

class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {

var sentBy : String?

//timers
var faceTimer : Timer?
var frvcTimer : Timer?

//capture
var captureSession = AVCaptureSession()
var settings = AVCapturePhotoSettings()
var backCamera : AVCaptureDevice?
var frontCamera : AVCaptureDevice?
var currentCamera : AVCaptureDevice?

var photoOutput : AVCapturePhotoOutput?
var cameraPreviewLayer : AVCaptureVideoPreviewLayer?

var image : UIImage?
var outputImage : UIImage?
@IBOutlet weak var imageView: UIImageView!

//MARK: - Setup

override func viewDidLoad() {
    super.viewDidLoad()
}//viewDidLoad

override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(true)
}//viewWillAppear

override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(true)

    //check for camera
    if (UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera)) {

        setupCaptureSession()
        setupDevices()
        setupInputOutput()
        setupPreviewLayer()

        startRunningCaptureSession()

        photoOutput?.capturePhoto(with:settings, delegate: self)

    } else {
        print("Camera not present")
    }

}//viewDidAppear

//MARK: - Video

@objc func showFaceRecognitionViewController() {
    //all this does is present the image in a new ViewController imageView
    performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
}//showThePhotoView

func setupCaptureSession() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
}//setupCaptureSession

func setupDevices() {

    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)

    let devices = deviceDiscoverySession.devices
    for device in devices {

        if device.position == AVCaptureDevice.Position.back {
            backCamera = device
        } else if device.position == AVCaptureDevice.Position.front {
            frontCamera = device
        }//if else

    }//for in

    currentCamera = frontCamera

}//setupDevices

func setupInputOutput() {

    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
        captureSession.addInput(captureDeviceInput)
        photoOutput = AVCapturePhotoOutput()
        photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: {(success, error) in
            print("in photoOutput completion handler")
        })
        captureSession.addOutput(photoOutput!)
    } catch {
        print("Error creating AVCaptureDeviceInput:", error)
    }//do catch

}//setupInputOutput

func setupPreviewLayer() {
    cameraPreviewLayer = AVCaptureVideoPreviewLayer(session : captureSession)
    cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
    cameraPreviewLayer?.frame = view.frame
    view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}//setupPreviewLayer


func startRunningCaptureSession() {
    captureSession.startRunning()
}//startRunningCaptureSession


//MARK: - Segue

override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
    if segue.identifier == "showSavedCameraPhoto" {
        let controller = segue.destination as! JustToSeeThePhotoViewController
        controller.inImage = outputImage
    }//if segue

}//prepare


//MARK: - Look for Faces

func findTheFaces() {
    let myView : UIView = self.view

    guard let outImage = outputImage else {return}

    let imageView = UIImageView(image: outImage)
    imageView.contentMode = .scaleAspectFit

    let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height

    imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
    imageView.backgroundColor = UIColor.blue

    myView.addSubview(imageView)

    let request = VNDetectFaceRectanglesRequest { (req, err) in

        if let err = err {
            print("VNDetectFaceRectanglesRequest failed to run:", err)
            return
        }//if let err

        print(req.results ?? "req.results is empty")

        req.results?.forEach({ (res) in

            DispatchQueue.main.async {

                guard let faceObservation = res as? VNFaceObservation else {return}

                let x = myView.frame.width * faceObservation.boundingBox.origin.x

                let width = myView.frame.width * faceObservation.boundingBox.width
                let height = scaledHeight * faceObservation.boundingBox.height

                let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height

                let redView = UIView()
                redView.backgroundColor = .red
                redView.alpha = 0.4
                redView.frame = CGRect(x: x, y: y, width: width, height: height)
                myView.addSubview(redView)

                print("faceObservation bounding box:")
                print(faceObservation.boundingBox)

                //if you get here, then you have a face bounding box

            }//main
        })//forEach block


    }//let request

    guard let cgImage = outImage.cgImage else {return}

    DispatchQueue.global(qos: .utility).async {
        let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])

        do {
            try handler.perform([request])

            print("handler request was successful")
            self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)

        } catch let reqErr {
            print("Failed to perform request:", reqErr)
        }
    }//DispatchQueue

}//findTheFaces

//MARK: - Memory

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
}//didReceiveMemoryWarning

}//class


extension CRSFaceRecognitionViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {

    if let imageData = photo.fileDataRepresentation() {

        print(imageData)
        outputImage = UIImage(data : imageData)

        //
        //I put breakpoint here and took a screen shot
        //

        if let outImage = outputImage?.updateImageOrientionUpSide() {
            self.outputImage = outImage
        }

        DispatchQueue.main.async {
            self.findTheFaces()
        }

    }//if let imageData
}//photoOutput

}//extension

extension UIImage {

//you need to do this to ensure that the image is in portrait mode
//the face recognition method will not work if the face is horizontal
func updateImageOrientionUpSide() -> UIImage? {
    if self.imageOrientation == .up {
        return self
    }

    UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
    self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
    if let normalizedImage:UIImage = UIGraphicsGetImageFromCurrentImageContext() {
        UIGraphicsEndImageContext()
        return normalizedImage
    }
    UIGraphicsEndImageContext()
    return nil
}//updateImageOrientionUpSide

}//image

我的相机捕捉一定是出了什么问题。任何帮助,将不胜感激。 Swift 4、iOS 11.2.5、Xcode 9.2

最佳答案

我会尝试在 startRunningCaptureSession()photoOutput?.capturePhoto(with:settings, delegate: self) 之间添加延迟

例如,

DispatchQueue.main.asyncAfter(截止日期: .now() + .seconds(4), 执行: { //拍个照 开始运行捕获 session () photoOutput?.capturePhoto(with:settings, delegate: self) })

关于ios - AVCapturePhotoOutput iOS 相机超暗,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/48478430/

相关文章:

ios - CGImageCreateWithMask() 不返回蒙版图像

ios - Swift:如何获取Parse的许多用户的姓名和照片

ios - 使用 MapKit 计算 3 个或更多地址之间的距离

Javascript:如何从页面获取图像字节(无需重新下载)

php - 签名 png 不显示

ios - 我应该选择哪个 iOS 3D 引擎来像 3D 模型一样显示 iBook Author?

注销和登录时 ios Facebook 集成错误

ios - 关于使用 iOS 和 swift 显示部分后 View 的想法

swift 从终端抛出 python 错误

java - 预览窗口(如 Windows 7 任务栏显示打开的应用程序)