c++ - Active Contour Models in OpenCV 3.0

Tags: c++ matlab opencv contour

I am trying to implement the Active Contour Model algorithm with OpenCV 3.0 in C++. The algorithm is based on a script I wrote for MATLAB, but it is not working as expected. The two images below show the results of running the two algorithms.

The result from the MATLAB script:

And from the OpenCV one:

In both of them I used the same values for all the ACM parameters, so they should return the same thing, the white circular contour. I suspect the problem is in my image energy function, since the gradient operations in OpenCV and MATLAB are not the same. The MATLAB script for the image energy is:

function [Eext] = get_eext(wl, we, wt, image)

%External Energy
[row,col] = size(image);
eline = image; %eline is simply the image intensities

[grady,gradx] = gradient(image);
eedge = -1 *(gradx .* gradx + grady .* grady);



%masks for taking various derivatives
m1 = [-1 1];
m2 = [-1;1];
m3 = [1 -2 1];
m4 = [1;-2;1];
m5 = [1 -1;-1 1];

cx = conv2(image,m1,'same');
cy = conv2(image,m2,'same');
cxx = conv2(image,m3,'same');
cyy = conv2(image,m4,'same');
cxy = conv2(image,m5,'same');

eterm = zeros(row, col);

for i = 1:row;
    for j= 1:col;
        % eterm as defined in Kass et al Snakes paper
        eterm(i,j) = (cyy(i,j)*cx(i,j)*cx(i,j) -2 *cxy(i,j)*cx(i,j)...
            *cy(i,j) + cxx(i,j)*cy(i,j)*cy(i,j))/((1+cx(i,j)*cx(i,j)...
            + cy(i,j)*cy(i,j))^1.5);
    end;
end;

Eext = (wl*eline + we*eedge + wt*eterm);
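
For reference, the eterm computed in the double loop above is the termination (curvature) energy from the Kass et al. snakes paper, which per pixel reads:

eterm = (cyy*cx^2 - 2*cxy*cx*cy + cxx*cy^2) / (1 + cx^2 + cy^2)^(3/2)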

In C++ my function is like this:

Mat get_eext(float wl, float we, float wt, Mat image){

Mat eline, gradx, grady, img_gray, eedge;

//bitdepth defined as CV_32F
image.convertTo(img_gray, bitdepth);

//Convolution Kernels
Mat m1, m2, m3, m4, m5;
m1 = (Mat_<float>(1, 2) << -1, 1);
m2 = (Mat_<float>(2, 1) << -1, 1);
m3 = (Mat_<float>(1, 3) << 1, -2, 1);
m4 = (Mat_<float>(3, 1) << 1, -2, 1);
m5 = (Mat_<float>(2, 2) << 1, -1, -1, 1);

//cvtColor(image, img_gray, CV_BGR2GRAY); <- Not required since image already in grayscale
img_gray.copyTo(eline);

Mat kernelx = (Mat_<float>(1, 3) << -0.5, 0, 0.5);
Mat kernely = (Mat_<float>(3, 1) << -0.5, 0, 0.5);

filter2D(img_gray, gradx, -1, kernelx);
filter2D(img_gray, grady, -1, kernely);

//Edge Energy
eedge = -1 * (gradx.mul(gradx) + grady.mul(grady));

//Termination Energy Convolution
Mat cx, cy, cxx, cyy, cxy, eterm, cxm1, den, cxcx, cxcxm1, cxcxcy, cxcycxy, cycycxx;
filter2D(img_gray, cx, bitdepth, m1);
filter2D(img_gray, cy, bitdepth, m2);
filter2D(img_gray, cxx, bitdepth, m3);
filter2D(img_gray, cyy, bitdepth, m4);
filter2D(img_gray, cxy, bitdepth, m5);

//element wise operations to find Eterm
cxcx = cx.mul(cx);
cxcx.convertTo(cxcxm1, -1, 1, 1);
den = cxcxm1 + cy.mul(cy);
cv::pow(den, 1.5, den);
cxcxcy = cxcx.mul(cy);
cxcycxy = cx.mul(cy);
cxcycxy = cxcycxy.mul(cxy);
cycycxx = cy.mul(cy);
cycycxx = cycycxx.mul(cxx);
eterm = (cxcxcy - 2 * cxcycxy + cycycxx);
cv::divide(eterm,den,eterm,-1);

//Image energy
Mat eext;
eext = wl*eline + we*eedge + wt*eterm;
return eext;
}
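
One concrete difference worth keeping in mind here (an aside; the snippet below is only an illustrative sketch, not part of my original code): MATLAB's conv2 performs true convolution, i.e. it flips the kernel, while OpenCV's filter2D computes correlation and does not flip it. For symmetric kernels like m3 and m4 this makes no difference, but for asymmetric ones like m1 = [-1 1] the sign of the result changes unless the kernel is flipped by hand:

// filter2D computes correlation, so flip the kernel to reproduce a true
// convolution as conv2 does (border/anchor handling for the even-sized
// kernels may still differ slightly between the two functions)
Mat m1_flipped;
cv::flip(m1, m1_flipped, -1);   // flip in both dimensions
filter2D(img_gray, cx, bitdepth, m1_flipped);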

Does anyone know what might be going wrong?

Best answer

As requested by David Doria, here is the final version of the function get_eext after some corrections. This version worked fine for me.

Mat config_eext(float wl, float we, float wt, Mat image)
{
Mat eline, gradx, grady, img_gray, eedge;

//bitdepth defined as CV_32F
image.convertTo(img_gray, bitdepth);

//Convolution Kernels
Mat m1, m2, m3, m4, m5;
m1 = (Mat_<float>(1, 2) << 1, -1);
m2 = (Mat_<float>(2, 1) << 1, -1);
m3 = (Mat_<float>(1, 3) << 1, -2, 1);
m4 = (Mat_<float>(3, 1) << 1, -2, 1);
m5 = (Mat_<float>(2, 2) << 1, -1, -1, 1);

img_gray.copyTo(eline);

//Gradient kernels
Mat kernelx = (Mat_<float>(1, 3) << -1, 0, 1);
Mat kernely = (Mat_<float>(3, 1) << -1, 0, 1);

//Gradient in x and in y
filter2D(img_gray, gradx, -1, kernelx);
filter2D(img_gray, grady, -1, kernely);

//Edge energy as defined by Kass
eedge = -1 * (gradx.mul(gradx) + grady.mul(grady));

//Termination Energy Convolution
Mat cx, cy, cxx, cyy, cxy, eterm(img_gray.rows, img_gray.cols, bitdepth), cxm1, den, cxcx, cxcxm1, cxcxcy, cxcycxy, cycycxx;
filter2D(img_gray, cx, bitdepth, m1);
filter2D(img_gray, cy, bitdepth, m2);
filter2D(img_gray, cxx, bitdepth, m3);
filter2D(img_gray, cyy, bitdepth, m4);
filter2D(img_gray, cxy, bitdepth, m5);

//element wise operations to find Eterm
cxcx = cx.mul(cx);
cxcx.convertTo(cxcxm1, -1, 1, 1);
den = cxcxm1 + cy.mul(cy);
cv::pow(den, 1.5, den);
cxcxcy = cxcx.mul(cy);
cxcycxy = cx.mul(cy);
cxcycxy = cxcycxy.mul(cxy);
cycycxx = cy.mul(cy);
cycycxx = cycycxx.mul(cxx);
eterm = (cxcxcy - 2 * cxcycxy + cycycxx);
cv::divide(eterm, den, eterm, -1);

//Image energy
Mat eext;
eext = wl*eline + we*eedge + wt*eterm;
return eext;
}
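
For context, a minimal way to call the function might look like the sketch below. The weight values and the file name are assumptions, not from the original post, and it presumes config_eext and the bitdepth constant from above are defined in the same translation unit.

#include <opencv2/opencv.hpp>
using namespace cv;

// assumes, as in the comments above, that bitdepth is defined as CV_32F
// and that config_eext from this answer is declared before main()

int main()
{
    // load the input directly as a single-channel grayscale image
    Mat image = imread("input.png", IMREAD_GRAYSCALE);   // file name is just an example

    // example weights for the line, edge and termination terms (assumed values)
    Mat eext = config_eext(0.04f, 2.0f, 0.01f, image);

    // normalize to [0, 255] only to visualize the energy map
    Mat vis;
    normalize(eext, vis, 0, 255, NORM_MINMAX);
    vis.convertTo(vis, CV_8U);
    imshow("Eext", vis);
    waitKey(0);
    return 0;
}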

Hope it helps!

Original question on Stack Overflow: https://stackoverflow.com/questions/32894542/
