我正在从 ARKit 捕获一帧并从中获取 CVPixelBuffer
/// ARSessionDelegate callback: claims at most one frame at a time for OCR.
///
/// `detectionFrame` acts as a busy flag — while a frame is being processed,
/// later frames are dropped so OCR work cannot queue up unboundedly.
/// NOTE(review): `detectionFrame` is presumably cleared elsewhere when
/// recognition finishes — confirm; holding ARFrames for long can starve
/// ARKit of capture buffers.
func session(_ session: ARSession, didUpdate frame: ARFrame) {
    // Drop this frame if a previous one is still being recognized.
    guard self.detectionFrame == nil else { return }
    self.detectionFrame = frame

    // Retain only the pixel buffer for the background OCR work.
    let pixelBuffer = frame.capturedImage

    // OCR is expensive — run it off the session's delegate queue.
    // Capture self weakly: the escaping closure must not keep the owner
    // alive (nor crash) if it is deallocated before the work runs.
    DispatchQueue.global(qos: .userInitiated).async { [weak self] in
        self?.recognizeText(from: pixelBuffer)
    }
}
在 recognizeText
中,我继续初始化 Tesseract 并在将图像转换为 UIImage
后传递图像。
/// Runs Tesseract OCR on a camera pixel buffer and prints the recognized text.
///
/// Fix for the EXC_BAD_ACCESS in `pixForImage:`: a `UIImage` created
/// directly with `UIImage(ciImage:)` has a nil backing `CGImage`, so
/// Tesseract's `image.CGImage` is null and `CGDataProviderCopyData`
/// dereferences it. Rendering the `CIImage` through a `CIContext` first
/// produces a real bitmap-backed `CGImage`.
///
/// - Parameter image: The captured camera frame (`ARFrame.capturedImage`).
func recognizeText(from image: CVPixelBuffer) {
    // 1 — Japanese horizontal + vertical text models.
    guard let tesseract = MGTesseract(language: "jpn+jpn_vert") else { return }

    // 2
    tesseract.engineMode = .tesseractCubeCombined
    // 3
    tesseract.pageSegmentationMode = .auto

    // 4 — Render the pixel buffer into a CGImage-backed UIImage.
    // UIImage(ciImage:) alone leaves `cgImage` nil, which crashes Tesseract.
    let ciImage = CIImage(cvPixelBuffer: image)
    let ciContext = CIContext(options: nil)
    let extent = CGRect(x: 0, y: 0,
                        width: CVPixelBufferGetWidth(image),
                        height: CVPixelBufferGetHeight(image))
    guard let cgImage = ciContext.createCGImage(ciImage, from: extent) else { return }
    tesseract.image = UIImage(cgImage: cgImage)

    // 5
    tesseract.recognize()
    // 6
    let text = tesseract.recognizedText
    print(text ?? "")
}
这个调用总是导致如下崩溃:
Thread 15: EXC_BAD_ACCESS (code=1, address=0x0)
崩溃发生在 TesseractOCR 内部的以下方法中:
- (Pix *)pixForImage:(UIImage *)image
{
int width = image.size.width;
int height = image.size.height;
CGImage *cgImage = image.CGImage;
CFDataRef imageData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
const UInt8 *pixels = CFDataGetBytePtr(imageData); <<< EXC_BAD_ACCESS
size_t bitsPerPixel = CGImageGetBitsPerPixel(cgImage);
size_t bytesPerPixel = bitsPerPixel / 8;
size_t bytesPerRow = CGImageGetBytesPerRow(cgImage);
我做错了什么?
最佳答案
找到缺失的部分:不能直接用 CIImage 构造 UIImage——那样得到的 UIImage 没有底层的 CGImage(其 `cgImage` 为 nil,这正是崩溃的原因)。需要先通过 CIContext 按缓冲区的实际尺寸渲染出 CGImage,再用它创建 UIImage:
// Render the CIImage through a CIContext so the resulting UIImage is
// backed by a real CGImage bitmap. (A UIImage built straight from a
// CIImage has a nil `cgImage`, which is what crashed Tesseract's
// pixForImage:.)
let ciImage = CIImage(cvPixelBuffer: pixBuffer)
let ciContext = CIContext(options: nil)
// Use the pixel buffer's own dimensions as the render rectangle.
if let videoImage = ciContext.createCGImage(ciImage, from: CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixBuffer), height: CVPixelBufferGetHeight(pixBuffer))) {
// NOTE(review): `prcessedImage` looks like a typo for `processedImage`,
// but it is a property declared elsewhere — rename it at its declaration,
// not here.
self.prcessedImage = UIImage(cgImage: videoImage )
tesseract.image = self.prcessedImage
// 5 — run recognition on the now-CGImage-backed image.
tesseract.recognize()
// 6 — `recognizedText` is optional; print empty string when nil.
let text = tesseract.recognizedText
print(text ?? "")
}
关于ios - TesseractOCR BAD_ACCESS,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/56246799/