我正在创建一个检测矩形并使用相机捕捉图像的 iPhone 应用程序。我在检测到的最大矩形上创建了一个叠加层,捕获后我有 4 个 CGPoints
使用 CIRectangleFeature
和一个图像。
CIRectangleFeature
中的所有 4 个点都是横向的,而我的应用是纵向的。
当我在下一个 Controller 的 UIImageView
中显示图像时,它们的坐标会受到干扰。 ImageView 处于 AspectFit 模式。我搜索并找到了一些解决方案,一个是
extension CGPoint {
    /// Maps a point from image coordinates into an aspect-fit view's
    /// coordinates: scales by `min(ƒ_x, ƒ_y)` and then re-centers by half
    /// the letterbox slack on each axis.
    /// - Parameters:
    ///   - ƒ_x: view-width / image-width ratio.
    ///   - ƒ_y: view-height / image-height ratio.
    ///   - viewWidth: size of the containing view (despite the name, a CGSize).
    ///   - imageWidth: size of the displayed image (despite the name, a CGSize).
    func scalePointByCeficient(ƒ_x: CGFloat, ƒ_y: CGFloat, viewWidth: CGSize, imageWidth: CGSize) -> CGPoint {
        // Aspect-fit keeps the whole image visible, so the smaller ratio wins.
        let fit = min(ƒ_x, ƒ_y)
        // Half of the unused (letterboxed) space along each axis.
        let offsetX = (viewWidth.width - imageWidth.width * fit) / 2.0
        let offsetY = (viewWidth.height - imageWidth.height * fit) / 2.0
        return CGPoint(x: x * fit + offsetX, y: y * fit + offsetY)
    }

    /// Returns the point with its x and y coordinates swapped.
    func reversePointCoordinates() -> CGPoint {
        return CGPoint(x: y, y: x)
    }

    /// Component-wise sum of `self` and `add`.
    func sumPointCoordinates(add: CGPoint) -> CGPoint {
        return CGPoint(x: x + add.x, y: y + add.y)
    }

    /// Component-wise difference of `self` and `sub`.
    func substractPointCoordinates(sub: CGPoint) -> CGPoint {
        return CGPoint(x: x - sub.x, y: y - sub.y)
    }
}
/// Mutable wrapper around a `CIRectangleFeature` that lets the four corner
/// points be scaled, rotated and re-paired so they can be drawn over a
/// `UIImageView` showing the captured image.
class ObyRectangleFeature : NSObject {
    // The four corners, initially in the detector's (landscape) image space.
    public var topLeft: CGPoint
    public var topRight: CGPoint
    public var bottomLeft: CGPoint
    public var bottomRight: CGPoint
    // Backing feature; its bounds() supplies the center in `centerPoint`.
    var myRect: CIRectangleFeature?
    // Size of the destination view (despite the name, a full CGSize).
    public var viewWidth: CGSize
    // Size of the displayed image (despite the name, a full CGSize).
    public var imageWidth: CGSize

    /// Deprecated alias of `centerPoint`. The two properties previously had
    /// byte-identical bodies; this one now simply delegates.
    var centerPoint_OLD : CGPoint{
        get {
            return self.centerPoint
        }
    }

    /// Center of the rectangle, recomputed from the current corner values.
    var centerPoint : CGPoint{
        get {
            // Write the (possibly transformed) corners back into the feature
            // so that bounds() reflects the current state.
            myRect?.topLeft = self.topLeft
            myRect?.topRight = self.topRight
            myRect?.bottomLeft = self.bottomLeft
            myRect?.bottomRight = self.bottomRight
            // NOTE(review): these force-unwraps crash when `myRect` is nil,
            // i.e. when the object was created via the plain init().
            let superCenter: CGPoint = CGPoint(x: (myRect?.bounds().midX)!, y: (myRect?.bounds().midY)!)
            return superCenter
        }
    }

    /// Wraps an existing detector result, copying its four corners.
    convenience init(rectObj rectangleFeature: CIRectangleFeature) {
        self.init()
        myRect = rectangleFeature
        topLeft = rectangleFeature.topLeft
        topRight = rectangleFeature.topRight
        bottomLeft = rectangleFeature.bottomLeft
        bottomRight = rectangleFeature.bottomRight
    }

    /// Zero-initializes all corners and sizes; `myRect` stays nil.
    override init() {
        self.topLeft = CGPoint.zero
        self.topRight = CGPoint.zero
        self.bottomLeft = CGPoint.zero
        self.bottomRight = CGPoint.zero
        self.viewWidth = CGSize.zero
        self.imageWidth = CGSize.zero
        super.init()
    }

    /// Intended to rotate the four corners 90° about the rectangle's center.
    /// NOTE(review): with cos(90)=0, sin(90)=1 a true rotation is
    /// x' = cx - (y - cy), y' = cy + (x - cx); the formula below uses
    /// x' = cx + (y - cy), which is a reflection across the diagonal, not a
    /// rotation — a likely cause of the misplaced overlay. Left unchanged
    /// because `correctOriginPoints()` re-pairs corners assuming this exact
    /// transform; fix both together if changing.
    public func rotate90Degree() -> Void {
        let centerPoint = self.centerPoint
        // /rotate cos(90)=0, sin(90)=1
        topLeft = CGPoint(x: centerPoint.x + (topLeft.y - centerPoint.y), y: centerPoint.y + (topLeft.x - centerPoint.x))
        topRight = CGPoint(x: centerPoint.x + (topRight.y - centerPoint.y), y: centerPoint.y + (topRight.x - centerPoint.x))
        bottomLeft = CGPoint(x: centerPoint.x + (bottomLeft.y - centerPoint.y), y: centerPoint.y + (bottomLeft.x - centerPoint.x))
        bottomRight = CGPoint(x: centerPoint.x + (bottomRight.y - centerPoint.y), y: centerPoint.y + (bottomRight.x - centerPoint.x))
        print(self.centerPoint)
    }

    /// Maps every corner from image coordinates into the aspect-fit view's
    /// coordinates using `viewWidth`/`imageWidth` (see CGPoint extension).
    public func scaleRectWithCoeficient(ƒ_x: CGFloat, ƒ_y: CGFloat) -> Void {
        topLeft = topLeft.scalePointByCeficient(ƒ_x: ƒ_x, ƒ_y: ƒ_y, viewWidth: self.viewWidth, imageWidth: self.imageWidth)
        topRight = topRight.scalePointByCeficient(ƒ_x: ƒ_x, ƒ_y: ƒ_y, viewWidth: self.viewWidth, imageWidth: self.imageWidth)
        bottomLeft = bottomLeft.scalePointByCeficient(ƒ_x: ƒ_x, ƒ_y: ƒ_y, viewWidth: self.viewWidth, imageWidth: self.imageWidth)
        bottomRight = bottomRight.scalePointByCeficient(ƒ_x: ƒ_x, ƒ_y: ƒ_y, viewWidth: self.viewWidth, imageWidth: self.imageWidth)
    }

    /// Re-labels the corners after the landscape→portrait transform and
    /// shifts them by the delta between the transposed and original centers.
    public func correctOriginPoints() -> Void {
        // Offset introduced by swapping the center's x/y coordinates.
        let deltaCenter = self.centerPoint.reversePointCoordinates().substractPointCoordinates(sub: self.centerPoint)
        let TL = topLeft
        let TR = topRight
        let BL = bottomLeft
        let BR = bottomRight
        // Cyclic re-pairing: BL→TL, TL→TR, BR→BL, TR→BR (90° relabeling).
        topLeft = BL.sumPointCoordinates(add: deltaCenter)
        topRight = TL.sumPointCoordinates(add: deltaCenter)
        bottomLeft = BR.sumPointCoordinates(add: deltaCenter)
        bottomRight = TR.sumPointCoordinates(add: deltaCenter)
        print(self.centerPoint)
    }
}
它的调用方式如下:
// Example call site (Objective-C): wrap the detected CIRectangleFeature,
// compute image→view scale ratios, then scale, rotate and re-pair the corners.
ObyRectangleFeature *scaledRect = [[ObyRectangleFeature alloc] initWithRectObj:(id)rect_rect];
// Per-axis ratios of the image view's frame to the displayed image's size.
float f_x = _sourceImageView.frame.size.width / _sourceImageView.image.size.width;
float f_y = _sourceImageView.frame.size.height / _sourceImageView.image.size.height;
[scaledRect setViewWidth:_sourceImageView.bounds.size];
[scaledRect setImageWidth:_sourceImageView.image.size];
// NOTE(review): f_y is passed as ƒ_x and f_x as ƒ_y — the arguments appear
// swapped (perhaps to pre-compensate for the landscape feature coordinates);
// confirm this is intentional.
[scaledRect scaleRectWithCoeficientWithƒ_x:f_y ƒ_y:f_x];
[scaledRect rotate90Degree];
[scaledRect correctOriginPoints];
基本上它会检查比例因子并将点转换为 UIImageView
坐标,然后根据需要考虑横向(landscape)方向这一因素,将其旋转 90 度或更多。但是我得到的结果有点问题
如您所见,制作的矩形移到了卡片下方。关于如何解决这个问题的任何想法?
最佳答案
问题是 UIImageView 的 contentMode 改变了图像在 View 中的放置位置。看来您正在使用 AspectFit(宽高比适应)模式,因此您必须计算 UIImageView 中有多少是黑边,然后将图像大小与 imageView 大小相除,再根据黑边添加偏移量。这真的不难,但数学可能很棘手。我建议使用 https://github.com/nubbel/UIImageView-GeometryConversion ,它对所有不同的内容模式都做了相应的数学换算。
关于ios - 将 CGPoints (CIRectangleFeature) 从 Image 转换为 ImageView,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/44158447/