I had a hard time following the three coordinate-system transforms in camera calibration (my linear algebra has gone rusty). After three weeks of on-and-off effort, I finally got the program to run today, so here is a short write-up.
1. Understanding the coordinate systems
It is worth understanding how these coordinate systems relate to one another. There are four of them: the world coordinate system, the pixel coordinate system, the image (physical) coordinate system, and the camera coordinate system.
First, be clear about what each of them is (see the figure below):
uv is the pixel coordinate system and xy is the image (physical) coordinate system. The two are related in two ways: the origin of xy sits at (u0, v0) in pixel coordinates, and the physical pixel size (dx, dy) gives the scaling, i.e. u = x/dx + u0 and v = y/dy + v0.
XwYwZw is the world coordinate system and XcYcZc is the camera coordinate system (think of it as attached to the camera lens; it captures the camera's position and orientation relative to the world coordinate system).
OK, so why do we need all these coordinate systems? Because for a given photo we know the marker points (the chessboard corners) in both the world coordinate system and the pixel coordinate system. The two sets of coordinates are linked by a matrix transform, and solving for that transform from the correspondences yields the camera's intrinsic matrix (plus the distortion coefficients), which can then be used for distortion correction.
Only then will lengths measured from our photos come out reasonably accurate.
So what does this matrix actually look like? (Working this out finally cured my years of neck pain.)
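For reference, here is the textbook pinhole-camera relation that ties world and pixel coordinates together; it is the standard model written out as a sketch, not anything specific to the program below. fx, fy, u0, v0 form the intrinsic matrix K that the calibration estimates (with fx = f/dx and fy = f/dy, which is where the pixel-size relation above comes in), R and t are the per-view extrinsics, and s is a scale factor:

s * [u, v, 1]^T = K * [R | t] * [Xw, Yw, Zw, 1]^T,   where   K = [ fx   0  u0 ]
                                                                 [  0  fy  v0 ]
                                                                 [  0   0   1 ]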
2. Running the calibration
1. Print a chessboard pattern.
2. Take 7 photos of it from different angles.
3. Measure the actual side length of one small square in mm, then measure the overall size of the board (these measurements map onto the parameters at the top of the code, as noted right below).
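One detail that is easy to get wrong: board_size in the code counts inner corners, not printed squares, so a board of 14 x 14 squares has 13 x 13 inner corners, and with 13 mm squares that matches the 182 mm board size measured here. A tiny standalone sanity check along those lines (the variable names mirror the program below; nothing here is required for the calibration itself):

#include <iostream>
int main(){
    int cube_length = 13;      // measured side of one square, in mm
    int cam_Dx = 182;          // measured board width, in mm
    int corners_per_row = 13;  // inner corners per row (14x14 printed squares -> 13x13 inner corners)
    // the board width should equal (inner corners + 1) * square size: 14 * 13 = 182 mm here
    std::cout << "expected board width: " << (corners_per_row + 1) * cube_length
              << " mm, measured: " << cam_Dx << " mm\n";
    return 0;
}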
The code is below (one day I hope to be able to write something this solid from scratch):
#include <opencv2/opencv.hpp>   // this program uses the legacy C API (IplImage/CvMat), available in OpenCV 2.x
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[]){
int cube_length = 13;     // side length of one chessboard square, in mm
int cam_Dx = 182;         // measured width of the whole board, in mm (14 squares x 13 mm)
int cam_Dy = 182;         // measured height of the whole board, in mm
int number_image = 7;     // number of calibration photos
int a = 1;
int number_image_copy = 7;
CvSize board_size = cvSize(13, 13);   // inner corners per row and per column
int board_width = board_size.width;
int board_height = board_size.height;
int total_per_image = board_width*board_height;
// buffer for the corners detected in one image
CvPoint2D32f * image_points_buf = new CvPoint2D32f[total_per_image];
// matrices filled in while processing the images
CvMat * image_points = cvCreateMat(number_image*total_per_image, 2, CV_32FC1);   // pixel coordinates of the corners
CvMat * object_points = cvCreateMat(number_image*total_per_image, 3, CV_32FC1);  // world coordinates of the corners
CvMat * point_counts = cvCreateMat(number_image, 1, CV_32SC1);                   // number of corners found per image
CvMat * intrinsic_matrix = cvCreateMat(3, 3, CV_32FC1);                          // intrinsic camera matrix
CvMat * distortion_coeffs = cvCreateMat(4, 1, CV_32FC1);                         // distortion coefficients (k1, k2, p1, p2)
char picName[7][10] = { "1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg", "7.jpg" };
IplImage * show;
int count;
int found;
int step;
int successes = 0;
while (a <= number_image_copy){
show = cvLoadImage(picName[a - 1], -1);
// find the chessboard corners (adaptive thresholding plus extra filtering of false quads)
found = cvFindChessboardCorners(show, board_size, image_points_buf, &count,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
if (found == 0){
cout << "第" << a << "帧图片无法找到棋盘格所有角点!\n\n";
cvNamedWindow("RePlay", 1);
cvShowImage("RePlay", show);
cvWaitKey(0);
}
else{
cout << "第" << a << "帧图像成功获得" << count << "个角点...\n";
IplImage * gray_image = cvCreateImage(cvGetSize(show), 8, 1);
cvCvtColor(show, gray_image, CV_BGR2GRAY);
cout << "获取源图像灰度图过程完成...\n";
//要先灰度化才可以,使用FindCornerSubPix
//这一步是为了更加精确确定像素坐标,像素坐标放在了image_points_buf,十分关键
cvFindCornerSubPix(gray_image, image_points_buf, count, cvSize(11, 11), cvSize(-1, -1),
cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
cout << "灰度图亚像素化过程完成...\n";
cvDrawChessboardCorners(show, board_size, image_points_buf, count, found);
cout << "在源图像上绘制角点过程完成...\n\n";
}
if (total_per_image == count){
step = successes*total_per_image;
for (int i = step, j = 0; j<total_per_image; ++i, ++j){
// store the pixel coordinates of corner j
CV_MAT_ELEM(*image_points, float, i, 0) = image_points_buf[j].x;
CV_MAT_ELEM(*image_points, float, i, 1) = image_points_buf[j].y;
// store the world coordinates: corner (row, col) lies at (row*cube_length, col*cube_length, 0) mm
// on the board plane (cam_Dx is just cube_length times the number of squares across,
// so only the square size is needed here)
CV_MAT_ELEM(*object_points, float, i, 0) = (float)((j / board_width) * cube_length);
CV_MAT_ELEM(*object_points, float, i, 1) = (float)((j % board_width) * cube_length);
CV_MAT_ELEM(*object_points, float, i, 2) = 0.0f;
}
CV_MAT_ELEM(*point_counts, int, successes, 0) = total_per_image;
successes++;
}
a++;
}
cout << "*********************************************\n";
cout << "Out of " << number_image << " images, " << successes << " were calibrated successfully...\n";
cout << "Out of " << number_image << " images, " << number_image - successes << " failed...\n\n";
cout << "*********************************************\n\n";
IplImage * show_colie;
show_colie = show;
CvMat * object_points2 = cvCreateMat(successes*total_per_image, 3, CV_32FC1);
CvMat * image_points2 = cvCreateMat(successes*total_per_image, 2, CV_32FC1);
CvMat * point_counts2 = cvCreateMat(successes, 1, CV_32SC1);
for (int i = 0; i<successes*total_per_image; ++i){
CV_MAT_ELEM(*image_points2, float, i, 0) = CV_MAT_ELEM(*image_points, float, i, 0);   // keep only the corners from images where extraction succeeded
CV_MAT_ELEM(*image_points2, float, i, 1) = CV_MAT_ELEM(*image_points, float, i, 1);
CV_MAT_ELEM(*object_points2, float, i, 0) = CV_MAT_ELEM(*object_points, float, i, 0);
CV_MAT_ELEM(*object_points2, float, i, 1) = CV_MAT_ELEM(*object_points, float, i, 1);
CV_MAT_ELEM(*object_points2, float, i, 2) = CV_MAT_ELEM(*object_points, float, i, 2);
}
for (int i = 0; i<successes; ++i){
CV_MAT_ELEM(*point_counts2, int, i, 0) = CV_MAT_ELEM(*point_counts, int, i, 0);
}
cvReleaseMat(&object_points);
cvReleaseMat(&image_points);
cvReleaseMat(&point_counts);
// seed fx and fy in the intrinsic matrix (with flags = 0 below the initial guess is not used, but it does no harm)
CV_MAT_ELEM(*intrinsic_matrix, float, 0, 0) = 1.0f;
CV_MAT_ELEM(*intrinsic_matrix, float, 1, 1) = 1.0f;
// this call estimates the intrinsic matrix and the distortion coefficients
cvCalibrateCamera2(object_points2, image_points2, point_counts2, cvGetSize(show_colie),
intrinsic_matrix, distortion_coeffs, NULL, NULL, 0);
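// passing NULL for the rotation and translation outputs simply discards the per-view extrinsics;
// only the intrinsics and the distortion coefficients are kept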
// save the results to XML files
cvSave("Intrinsics.xml", intrinsic_matrix);
cvSave("Distortion.xml", distortion_coeffs);
cout << "摄像机矩阵、畸变系数向量已经分别存储在名为Intrinsics.xml、Distortion.xml文档中\n\n";
// load the saved results back into matrices
CvMat * intrinsic = (CvMat *)cvLoad("Intrinsics.xml");
CvMat * distortion = (CvMat *)cvLoad("Distortion.xml");
// the undistortion maps are stored as single-channel floating-point images
IplImage * mapx = cvCreateImage(cvGetSize(show_colie), IPL_DEPTH_32F, 1);
IplImage * mapy = cvCreateImage(cvGetSize(show_colie), IPL_DEPTH_32F, 1);
// mapx/mapy give, for each pixel of the corrected image, where to sample in the distorted one
cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
cvNamedWindow("原始图像", 1);
cvNamedWindow("非畸变图像", 1);
// take one more photo yourself and name it 8.jpg
show_colie = cvLoadImage("8.jpg");
IplImage * clone = cvCloneImage(show_colie);
cvShowImage("原始图像", show_colie);
//这个就是用来校准的函数
cvRemap(clone, show_colie, mapx, mapy);
cvReleaseImage(&clone);
cvShowImage("非畸变图像", show_colie);
cvWaitKey(0);
return 0;
}
OK! At the end of the program we have the two maps mapx and mapy; these are the correction parameters for our photos, and from now on we can simply reuse them (or the saved Intrinsics.xml / Distortion.xml).
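A minimal sketch of that reuse, assuming the same legacy C API and that Intrinsics.xml / Distortion.xml sit next to the executable; the file name some_photo.jpg is only a placeholder:

#include <opencv2/opencv.hpp>

int main(){
    // load the calibration results produced by the program above
    CvMat * intrinsic = (CvMat *)cvLoad("Intrinsics.xml");
    CvMat * distortion = (CvMat *)cvLoad("Distortion.xml");
    IplImage * img = cvLoadImage("some_photo.jpg");              // placeholder file name
    // rebuild the undistortion maps for this image size
    IplImage * mapx = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1);
    IplImage * mapy = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1);
    cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
    // apply the correction and save the result
    IplImage * undistorted = cvCloneImage(img);
    cvRemap(img, undistorted, mapx, mapy);
    cvSaveImage("some_photo_undistorted.jpg", undistorted);
    return 0;
}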
PS: I've realized my C++ fundamentals are pretty weak! I'm thinking about studying the language properly from the ground up. C and C++ really are not the same thing.