When doing feature extraction with OpenCV you frequently need the SURF and SIFT descriptors. They live in the opencv_contrib repository, so opencv_contrib has to be built against the matching OpenCV version before they can be used. The feature APIs also differ between OpenCV 2.x and 3.x; the code in this post follows the newer 3.x API.
In OpenCV 3.2, SurfFeatureDetector, SurfDescriptorExtractor and BruteForceMatcher are no longer used the way they were in 2.4 and earlier. The new usage looks like this:
Ptr<SURF> detector = SURF::create(minHessian);            // create a SURF detector with the given Hessian threshold
detector->detect(img_1, keypoints_1);                     // detect keypoints
Ptr<SURF> extractor = SURF::create();
extractor->compute(img_1, keypoints_1, descriptors_1);    // compute descriptors for those keypoints
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce"); // name of the matching strategy to use
matcher->match(descriptors_1, descriptors_2, matches);
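SIFT is exposed in the same xfeatures2d module and follows the same pattern in 3.x. A minimal sketch, assuming the opencv2/xfeatures2d/nonfree.hpp header used later in this post and an already-loaded img_1 (the variable names are placeholders):
Ptr<xfeatures2d::SIFT> sift = xfeatures2d::SIFT::create();
vector<KeyPoint> sift_keypoints;
Mat sift_descriptors;
// detect keypoints and compute their descriptors in one call (no mask)
sift->detectAndCompute(img_1, noArray(), sift_keypoints, sift_descriptors);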
Image registration is the process of matching and overlaying two or more images acquired at different times, with different sensors (imaging devices), or under different conditions (weather, illumination, camera position and angle, and so on). It is widely used in remote-sensing data analysis, computer vision and image processing.
The basic image-registration pipeline:
1. Take two pictures that share a common region, keeping the image sizes consistent.
2. Extract feature points from each image (if image quality is poor, some preprocessing may be needed first).
3. Match the feature points between the two images.
4. Filter the matches down to the good ones (see the ratio-test sketch after this list for one common way).
5. Estimate the perspective transformation (homography) matrix from those good matches.
6. Warp one image with the estimated matrix to register it against the other.
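For step 4, besides sorting by distance or thresholding against the minimum distance as the code below does, Lowe's ratio test over the two nearest neighbours is a common alternative. A minimal sketch, assuming descriptors desc1 and desc2 have already been computed:
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
vector<vector<DMatch>> knnMatches;
matcher->knnMatch(desc1, desc2, knnMatches, 2);   // two nearest neighbours per query descriptor
vector<DMatch> goodMatches;
for (size_t i = 0; i < knnMatches.size(); i++)
{
    // keep a match only if it is clearly better than the second-best candidate
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.7f * knnMatches[i][1].distance)
        goodMatches.push_back(knnMatches[i][0]);
}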
Here is the OpenCV 2.x version of the code (see the reference link):
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc,char *argv[])
{
Mat image01=imread(argv[1]);
Mat image02=imread(argv[2]);
imshow("原始测试图像",image01);
imshow("基准图像",image02);
//灰度图转换
Mat image1,image2;
cvtColor(image01,image1,CV_BGR2GRAY);   // imread loads images as BGR
cvtColor(image02,image2,CV_BGR2GRAY);
//detect feature points
SurfFeatureDetector surfDetector(800);  // Hessian threshold
vector<KeyPoint> keyPoint1,keyPoint2;
surfDetector.detect(image1,keyPoint1);
surfDetector.detect(image2,keyPoint2);
//describe the feature points, in preparation for the matching below
SurfDescriptorExtractor SurfDescriptor;
Mat imageDesc1,imageDesc2;
SurfDescriptor.compute(image1,keyPoint1,imageDesc1);
SurfDescriptor.compute(image2,keyPoint2,imageDesc2);
//match the descriptors and keep the best pairs
FlannBasedMatcher matcher;
vector<DMatch> matchePoints;
matcher.match(imageDesc1,imageDesc2,matchePoints,Mat());
sort(matchePoints.begin(),matchePoints.end()); //sort matches by distance (DMatch compares by distance)
//take the top N best matches
vector<Point2f> imagePoints1,imagePoints2;
for(int i=0;i<10;i++)
{
imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
}
//estimate the 3x3 projective (homography) matrix mapping image 1 onto image 2
Mat homo=findHomography(imagePoints1,imagePoints2,CV_RANSAC);
//getPerspectiveTransform could also be used to obtain the perspective matrix, but it takes exactly 4 point pairs and usually gives a slightly worse result
//Mat homo=getPerspectiveTransform(imagePoints1,imagePoints2);
cout<<"变换矩阵为:\n"<<homo<<endl<<endl; //输出映射矩阵
//图像配准
Mat imageTransform1,imageTransform2;
warpPerspective(image01,imageTransform1,homo,Size(image02.cols,image02.rows));
imshow("经过透视矩阵变换后",imageTransform1);
waitKey();
return 0;
}
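If you want to use exactly four point pairs, the getPerspectiveTransform alternative mentioned in the comment above looks roughly like this. A minimal sketch, assuming imagePoints1 and imagePoints2 from the code above hold at least four points:
// getPerspectiveTransform needs exactly 4 source and 4 destination points
vector<Point2f> srcQuad(imagePoints1.begin(), imagePoints1.begin() + 4);
vector<Point2f> dstQuad(imagePoints2.begin(), imagePoints2.begin() + 4);
Mat homo4 = getPerspectiveTransform(srcQuad, dstQuad);  // 3x3 perspective matrix
cout << homo4 << endl;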
The OpenCV 3.x version used in this post is as follows:
#include<opencv2/opencv.hpp>
#include<opencv2/xfeatures2d/nonfree.hpp>
//with OpenCV 3.x, using the SurfFeatureDetector class requires this extra header and the xfeatures2d namespace
#include <iostream>
using namespace cv;
using namespace std;
using namespace xfeatures2d;
int main(int argc, char *argv[])
{
Ptr<SurfFeatureDetector> detector = SurfFeatureDetector::create(800);
Mat image01 = imread("1.png");
Mat image02 = imread("2.png");
imshow("原始测试图像", image01);
imshow("基准图像", image02);
//灰度图转换
Mat srcImage1, srcImage2;
cvtColor(image01, srcImage1, COLOR_BGR2GRAY);  // imread loads BGR; use the 3.x constant
cvtColor(image02, srcImage2, COLOR_BGR2GRAY);
vector<cv::KeyPoint> key_points_1, key_points_2;
Mat dstImage1, dstImage2;
detector->detectAndCompute(srcImage1, Mat(), key_points_1, dstImage1);
detector->detectAndCompute(srcImage2, Mat(), key_points_2, dstImage2);//detection and description can also be done as separate detect() and compute() calls
Mat img_keypoints_1, img_keypoints_2;
drawKeypoints(srcImage1, key_points_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
drawKeypoints(srcImage2, key_points_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
vector<DMatch> matches;
matcher->match(dstImage1, dstImage2, matches);
sort(matches.begin(), matches.end()); //sort matches by distance (DMatch compares by distance)
double Max_dist = 0;
double Min_dist = 100;
for (size_t i = 0; i < matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < Min_dist)Min_dist = dist;
if (dist > Max_dist)Max_dist = dist;
}
cout << "min distance: " << Min_dist << endl;
cout << "max distance: " << Max_dist << endl;
vector<DMatch> goodMatches;
for (size_t i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 2 * Min_dist)
goodMatches.push_back(matches[i]);
}
Mat img_matches;
drawMatches(srcImage1, key_points_1, srcImage2, key_points_2, goodMatches, img_matches);
//take the 10 best matches for the homography estimation
vector<Point2f> imagePoints1, imagePoints2;
for (int i = 0; i < 10; i++)
{
imagePoints1.push_back(key_points_1[matches[i].queryIdx].pt);
imagePoints2.push_back(key_points_2[matches[i].trainIdx].pt);
}
Mat homo = findHomography(imagePoints1, imagePoints2, RANSAC);  // use the 3.x constant RANSAC
//getPerspectiveTransform could also be used to obtain the perspective matrix, but it takes exactly 4 point pairs and usually gives a slightly worse result
//Mat homo=getPerspectiveTransform(imagePoints1,imagePoints2);
cout << "The transformation matrix is:\n" << homo << endl << endl; //print the homography
//image registration
Mat imageTransform1, imageTransform2;
warpPerspective(image01, imageTransform1, homo, Size(image02.cols, image02.rows));
imshow("经过透视矩阵变换后", imageTransform1);
waitKey();
return 0;
}
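To visually check the registration result, the warped image can be blended with the reference image. A minimal sketch, assuming imageTransform1 and image02 from the code above (they already have the same size):
// overlay the registered image on the reference image with equal weights
Mat blended;
addWeighted(imageTransform1, 0.5, image02, 0.5, 0.0, blended);
imshow("registration overlay", blended);
waitKey();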