My advisor has been asking me to use the depth information we obtain to do the blurring. If the depth information were accurate, this really would not be hard; the problem is that I have not yet mastered that whole computational framework.
Suppose we already have an acquired depth image. I currently have two in hand: one from DFD and one from stereo (I will post the stereo one later). The DFD project is not something I can make public, sorry.
The original image we obtained is shown below:
This time I shot a different scene; the image was again captured with the liquid-crystal lens.
We also have a depth map.
Step 1:
We use the depth map to segment the image.
The code is as follows:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat SourceImage = imread("C:\\Users\\ltc\\Desktop\\data3\\taowamirror.jpg");
Mat SourceImage_ROI = SourceImage(cv::Rect(0,342,2592,1260));//cropped here purely so the display aspect ratio looks nicer
Mat Image_forward=imread("C:\\Users\\ltc\\Desktop\\data3\\taowaforward2.jpg");
Mat Image_forward_ROI = Image_forward(cv::Rect(0,342,2592,1260));//same as above
Mat Image_Middle = Mat::zeros(SourceImage_ROI.size(),CV_8UC3);
Mat Image_Back = Mat::zeros(SourceImage_ROI.size(),CV_8UC3);
Mat ImageSubtract;
ImageSubtract = SourceImage_ROI-Image_forward_ROI;
namedWindow("SourceImage_ROI",WINDOW_NORMAL);
imshow("SourceImage_ROI",SourceImage_ROI);
namedWindow("ImageSubtract",WINDOW_NORMAL);
imshow("ImageSubtract",ImageSubtract);
cv::Mat depthImage = cv::imread("C:\\Users\\ltc\\Desktop\\data3\\m_depth.bmp",0);
Mat depthImageBlur;
blur(depthImage,depthImageBlur,Size(11,11),Point(-1,-1));
namedWindow("depthImageBlur",WINDOW_NORMAL);
imshow("depthImageBlur",depthImageBlur);
imwrite("C:\\Users\\ltc\\Desktop\\data3\\depthImageBlur.bmp",depthImageBlur);
int n =11;
int nr = depthImageBlur.rows;
int nc = depthImageBlur.cols;
for(int i = (n - 1) / 2;i < nr-(n-1)/2;i++)
{
uchar* data = depthImageBlur.ptr<uchar>(i);
for (int j = (n-1)/2; j < nc-(n-1)/2; j++)
{
double sum = 0;
for (int k = i - (n - 1) / 2; k <= i + (n - 1) / 2; k++)
for (int l = j - (n - 1) / 2; l <= j + (n - 1) / 2; l++)
{
sum += depthImageBlur.ptr<uchar>(k)[l]; //sum over every row of the kernel, not only row i
}
sum = sum/(n*n);
if(sum <105)
{
/*if(data[j]>60 && data[j]<120)
{*/
for (int m = i - (n - 1) / 2; m <= i + (n - 1) / 2; m++)
for (int v = j - (n - 1) / 2; v <= j + (n - 1) / 2; v++)
{
Image_Middle.at<Vec3b>(m,v)[0] = ImageSubtract.at<Vec3b>(m,v)[0];
Image_Middle.at<Vec3b>(m,v)[1] = ImageSubtract.at<Vec3b>(m,v)[1];
Image_Middle.at<Vec3b>(m,v)[2] = ImageSubtract.at<Vec3b>(m,v)[2];
}
}
}
}
Image_Back = ImageSubtract-Image_Middle;
imwrite("C:\\Users\\ltc\\Desktop\\data3\\Image_Back.jpg",Image_Back);
imwrite("C:\\Users\\ltc\\Desktop\\data3\\Image_Middle.jpg",Image_Middle);
waitKey();
return 0;
}
Because my current depth map only separates cleanly between the three dolls in the middle and the background, and the decision is made on the mean value inside a kernel, this is enough to split the background from the three dolls.
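As a side note, if the per-pixel kernel mean is not strictly needed, roughly the same split can be produced with a plain threshold on the blurred depth map plus copyTo with a mask. This is only a sketch: it assumes the blurred depth map has the same size as the ROI (the loop above effectively assumes the same thing), it reuses the file names and the cut-off value 105 from the code above, and the _alt output names are made up.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat SourceImage = imread("C:\\Users\\ltc\\Desktop\\data3\\taowamirror.jpg");
    Mat Image_forward = imread("C:\\Users\\ltc\\Desktop\\data3\\taowaforward2.jpg");
    Mat sub = SourceImage(Rect(0,342,2592,1260)) - Image_forward(Rect(0,342,2592,1260));
    Mat depthBlur = imread("C:\\Users\\ltc\\Desktop\\data3\\depthImageBlur.bmp", 0); //saved by the code above
    Mat mask;
    threshold(depthBlur, mask, 105, 255, THRESH_BINARY_INV); //blurred depth below 105 -> the dolls
    Mat Image_Middle = Mat::zeros(sub.size(), CV_8UC3);
    sub.copyTo(Image_Middle, mask);      //keep only the doll region
    Mat Image_Back = sub - Image_Middle; //the rest is background
    imwrite("C:\\Users\\ltc\\Desktop\\data3\\Image_Middle_alt.jpg", Image_Middle);
    imwrite("C:\\Users\\ltc\\Desktop\\data3\\Image_Back_alt.jpg", Image_Back);
    return 0;
}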
Image_forward_ROI is shown below:
m_depth is shown below:
This depth map has problems, but that does not stop us from moving on.
ImageSubtract is shown below:
Finally, the Image_Back we obtain looks like this:
Image_Middle is shown below:
How do we turn an image like this into a good mask?
Step 2:
First, binarize it.
I wrote a trackbar to control it:
//Trackbar-based binarization
//ltc - uncomment the code between the divider lines when you need it
//------------------------------------------------------------
#include<opencv2/opencv.hpp>
#include<iostream>
using namespace cv;
using namespace std;
Mat dst;
int value=20;
void onChange(int,void* param){
Mat src=*(Mat*)param;
threshold(src,dst,value,255,THRESH_BINARY);//binarization
//Canny(src,dst,value,255);//Canny edge detection
imshow("TrackBar",dst);
//imwrite("C:\\Users\\ltc\\Desktop\\data3\\mission2\\taowaforward2_hbROI.jpg",dst);
}
int main(){
Mat src=imread("C:\\Users\\ltc\\Desktop\\data3\\mission2\\taowamirror.jpg",0);//read as a grayscale image
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
namedWindow("TrackBar",CV_WINDOW_NORMAL);
createTrackbar("Threshold","TrackBar",&value,255,onChange,&src);
threshold(src,dst,value,255,THRESH_BINARY);
//Canny(src,dst,value,255);
imshow("TrackBar",dst);// 21/22/23这三行代码如果去掉的话,在未调整滑动条的情况下,不会有画面显示
waitKey(0);
}
Of course, you can also do this automatically with Otsu's method and the like:
threshold(frame_gray, result, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);//Otsu's method
This is already wrapped up in OpenCV.
You can of course implement it yourself too; I can't find my old version at the moment.
Below is a self-written version; explanations of the principle are easy to find online.
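For reference, what the code below searches for is the threshold T that maximizes the between-class variance (the variable g in the code):

\sigma_b^2(T) = w_0(T)\,w_1(T)\,\bigl(\mu_0(T)-\mu_1(T)\bigr)^2, \qquad T^{*} = \arg\max_{0 \le T < 256} \sigma_b^2(T)

where w_0 and w_1 are the fractions of pixels at or below and above T, and \mu_0, \mu_1 are the corresponding mean gray levels.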
#include <opencv2/opencv.hpp>
#include <cv.h>
#include <highgui.h>
#include <cxcore.h>
using namespace std;
using namespace cv;
Mat otsuGray(const Mat src) {
Mat img = src;
int c = img.cols; //number of columns
int r = img.rows; //number of rows
int T = 0; //threshold
uchar* data = img.data; //data pointer
int ftNum = 0; //number of foreground pixels
int bgNum = 0; //number of background pixels
int N = c*r; //total number of pixels
int ftSum = 0; //total gray value of the foreground
int bgSum = 0; //total gray value of the background
int graySum = 0;
double w0 = 0; //fraction of foreground pixels
double w1 = 0; //fraction of background pixels
double u0 = 0; //mean gray level of the foreground
double u1 = 0; //mean gray level of the background
double Histogram[256] = {0}; //gray-level histogram
double temp = 0; //best between-class variance so far
double g = 0; //between-class variance
//gray-level histogram
for(int i = 0; i < r ; i ++) {
for(int j = 0; j <c; j ++) {
Histogram[img.at<uchar>(i,j)]++;
}
}
//total gray value of the image
for(int i = 0; i < 256; i ++) {
graySum += Histogram[i]*i;
}
for(int i = 0; i < 256; i ++) {
ftNum += Histogram[i]; //number of foreground pixels when the threshold is i
bgNum = N - ftNum; //number of background pixels when the threshold is i
w0 = (double)ftNum/N; //fraction of foreground pixels
w1 = (double)bgNum/N; //fraction of background pixels
if(ftNum == 0) continue;
if(bgNum == 0) break;
//mean gray level of the foreground
ftSum += i*Histogram[i];
u0 = (double)ftSum/ftNum; //cast to avoid integer division
//mean gray level of the background
bgSum = graySum - ftSum;
u1 = (double)bgSum/bgNum; //same here
g = w0*w1*(u0-u1)*(u0-u1);
if(g > temp) {
temp = g;
T = i;
}
}
for(int i=0; i<img.rows; i++)
{
for(int j=0; j<img.cols; j++)
{
if((int)img.at<uchar>(i,j)>T)
img.at<uchar>(i,j) = 255;
else
img.at<uchar>(i,j) = 0;
}
}
return img;
}
OK, the final result looks like this:
Step 3:
The next thing to do is to fill the holes in the binary image and remove the small regions.
For a binary image, filling holes actually uses the same method as removing small regions, so the two can share a single function.
Both can be implemented with region growing. Note that when removing small regions you should probe with an 8-connected neighborhood to preserve useful information, while a 4-connected neighborhood is enough when filling holes; otherwise the growth leaks easily and holes near the edges may be left unfilled.
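As a side note, if your OpenCV build is 3.0 or newer, connectedComponentsWithStats can do both jobs in a few lines. The sketch below is only an illustration under that assumption; it reuses the area limit of 12000 from my code, and the output file name cc_result.jpg is made up. The full region-growing version I actually used follows right after.
#include <opencv2/opencv.hpp>
using namespace cv;

//erase every white blob whose area is below areaLimit
static void removeSmallBlobs(Mat& bin, int areaLimit, int connectivity)
{
    Mat labels, stats, centroids;
    int n = connectedComponentsWithStats(bin, labels, stats, centroids, connectivity);
    for (int lab = 1; lab < n; ++lab)                   //label 0 is the background
        if (stats.at<int>(lab, CC_STAT_AREA) < areaLimit)
            bin.setTo(0, labels == lab);                //wipe the small blob
}

int main()
{
    Mat bin = imread("C:\\Users\\ltc\\Desktop\\data3\\Image_Middle.jpg", 0);
    threshold(bin, bin, 10, 255, THRESH_BINARY);
    removeSmallBlobs(bin, 12000, 8);   //remove small white regions, 8-connected
    bitwise_not(bin, bin);             //holes become white blobs
    removeSmallBlobs(bin, 12000, 4);   //fill small holes, 4-connected
    bitwise_not(bin, bin);
    imwrite("C:\\Users\\ltc\\Desktop\\data3\\cc_result.jpg", bin);
    return 0;
}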
The complete code for this step is as follows:
#include <cv.h>
#include <highgui.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
void RemoveSmallRegion(Mat& Src, Mat& Dst, int AreaLimit=50, int CheckMode=1, int NeihborMode=0);
int main()
{
double t = (double)getTickCount();
const char* imagePath = "C:\\Users\\ltc\\Desktop\\data3\\Image_Middle.jpg";
const char* OutPath = "C:\\Users\\ltc\\Desktop\\data3\\处理结果.jpg";
Mat Src = imread(imagePath, CV_LOAD_IMAGE_GRAYSCALE);
Mat Dst = Mat::zeros(Src.size(), CV_8UC1);
//binarization
for(int i = 0; i < Src.rows; ++i)
{
uchar* iData = Src.ptr<uchar>(i);
for(int j = 0; j < Src.cols; ++j)
{
if(iData[j] == 0 || iData[j]==255) continue;
else if (iData[j] < 10)
{
iData[j] = 0;
//cout<<'#';
}
else if (iData[j] > 10)
{
iData[j] = 255;
//cout<<'!';
}
}
}
cout<<"Image Binary processed."<<endl;
RemoveSmallRegion(Src, Dst, 12000, 1, 1);
RemoveSmallRegion(Dst, Dst, 12000, 0, 0);
cout<<"Done!"<<endl;
imwrite(OutPath, Dst);
t = ((double)getTickCount() - t)/getTickFrequency();
cout<<"Time cost: "<<t<<" sec."<<endl;
return 0;
}
//CheckMode: 0 = remove black regions, 1 = remove white regions; NeihborMode: 0 = 4-connected, 1 = 8-connected
void RemoveSmallRegion(Mat& Src, Mat& Dst, int AreaLimit, int CheckMode, int NeihborMode)
{
int RemoveCount=0; //number of removed regions
//label recording each pixel's check state: 0 = not checked, 1 = being checked, 2 = failed the check (color needs flipping), 3 = passed the check or no check needed
Mat Pointlabel = Mat::zeros( Src.size(), CV_8UC1 );
if(CheckMode==1)
{
cout<<"Mode: 去除小区域. ";
for(int i = 0; i < Src.rows; ++i)
{
uchar* iData = Src.ptr<uchar>(i);
uchar* iLabel = Pointlabel.ptr<uchar>(i);
for(int j = 0; j < Src.cols; ++j)
{
if (iData[j] < 10)
{
iLabel[j] = 3;
}
}
}
}
else
{
cout<<"Mode: 去除孔洞. ";
for(int i = 0; i < Src.rows; ++i)
{
uchar* iData = Src.ptr<uchar>(i);
uchar* iLabel = Pointlabel.ptr<uchar>(i);
for(int j = 0; j < Src.cols; ++j)
{
if (iData[j] > 10)
{
iLabel[j] = 3;
}
}
}
}
vector<Point2i> NeihborPos; //offsets of the neighborhood points
NeihborPos.push_back(Point2i(-1, 0));
NeihborPos.push_back(Point2i(1, 0));
NeihborPos.push_back(Point2i(0, -1));
NeihborPos.push_back(Point2i(0, 1));
if (NeihborMode==1)
{
cout<<"Neighbor mode: 8邻域."<<endl;
NeihborPos.push_back(Point2i(-1, -1));
NeihborPos.push_back(Point2i(-1, 1));
NeihborPos.push_back(Point2i(1, -1));
NeihborPos.push_back(Point2i(1, 1));
}
else cout<<"Neighbor mode: 4邻域."<<endl;
int NeihborCount=4+4*NeihborMode;
int CurrX=0, CurrY=0;
//start the scan
for(int i = 0; i < Src.rows; ++i)
{
uchar* iLabel = Pointlabel.ptr<uchar>(i);
for(int j = 0; j < Src.cols; ++j)
{
if (iLabel[j] == 0)
{
//********start the check at this seed point**********
vector<Point2i> GrowBuffer; //stack holding the grown points
GrowBuffer.push_back( Point2i(j, i) );
Pointlabel.at<uchar>(i, j)=1;
int CheckResult=0; //result flag: stays 0 while growing, later 1 = region too small (remove), 2 = region large enough (keep)
for ( int z=0; z<GrowBuffer.size(); z++ )
{
for (int q=0; q<NeihborCount; q++) //check every neighborhood point
{
CurrX=GrowBuffer.at(z).x+NeihborPos.at(q).x;
CurrY=GrowBuffer.at(z).y+NeihborPos.at(q).y;
if (CurrX>=0&&CurrX<Src.cols&&CurrY>=0&&CurrY<Src.rows) //guard against going out of bounds
{
if ( Pointlabel.at<uchar>(CurrY, CurrX)==0 )
{
GrowBuffer.push_back( Point2i(CurrX, CurrY) ); //add the neighbor to the buffer
Pointlabel.at<uchar>(CurrY, CurrX)=1; //mark the neighbor as being checked so it is not visited twice
}
}
}
}
if (GrowBuffer.size()>AreaLimit) CheckResult=2; //judge the region size: 2 = larger than AreaLimit, 1 = not larger
else {CheckResult=1; RemoveCount++;}
for (int z=0; z<GrowBuffer.size(); z++) //update the labels of the grown region
{
CurrX=GrowBuffer.at(z).x;
CurrY=GrowBuffer.at(z).y;
Pointlabel.at<uchar>(CurrY, CurrX) += CheckResult;
}
//********end of the check at this seed point**********
}
}
}
CheckMode=255*(1-CheckMode);
//flip the regions whose area was too small (CheckMode is now the fill color)
for(int i = 0; i < Src.rows; ++i)
{
uchar* iData = Src.ptr<uchar>(i);
uchar* iDstData = Dst.ptr<uchar>(i);
uchar* iLabel = Pointlabel.ptr<uchar>(i);
for(int j = 0; j < Src.cols; ++j)
{
if (iLabel[j] == 2)
{
iDstData[j] = CheckMode;
}
else if(iLabel[j] == 3)
{
iDstData[j] = iData[j];
}
}
}
cout<<RemoveCount<<" objects removed."<<endl;
}
The result:
It looks OK; the parts that are connected to the border are hard to deal with.
Step 4:
I still smoothed the edges and removed the protrusions.
The complete code for this step is as follows:
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>
#include <cv.h>
#include <highgui.h>
using namespace cv;
using namespace std;
//Remove protrusions along the edge of a binary image
//uthreshold and vthreshold are the width and height thresholds of a protrusion
//type is the color of the protrusion: 0 means black, 1 means white
void delete_jut(Mat& src, Mat& dst, int uthreshold, int vthreshold, int type)
{
int threshold;
src.copyTo(dst);
int height = dst.rows;
int width = dst.cols;
int k; //loop counter, reused outside the inner loops
for (int i = 0; i < height - 1; i++)
{
uchar* p = dst.ptr<uchar>(i);
for (int j = 0; j < width - 1; j++)
{
if (type == 0)
{
//row-wise elimination
if (p[j] == 255 && p[j + 1] == 0)
{
if (j + uthreshold >= width)
{
for (int k = j + 1; k < width; k++)
p[k] = 255;
}
else
{
for (k = j + 2; k <= j + uthreshold; k++)
{
if (p[k] == 255) break;
}
if (p[k] == 255)
{
for (int h = j + 1; h < k; h++)
p[h] = 255;
}
}
}
//column-wise elimination
if (p[j] == 255 && p[j + width] == 0)
{
if (i + vthreshold >= height)
{
for (k = j + width; k < j + (height - i)*width; k += width)
p[k] = 255;
}
else
{
for (k = j + 2 * width; k <= j + vthreshold*width; k += width)
{
if (p[k] == 255) break;
}
if (p[k] == 255)
{
for (int h = j + width; h < k; h += width)
p[h] = 255;
}
}
}
}
else //type = 1
{
//row-wise elimination
if (p[j] == 0 && p[j + 1] == 255)
{
if (j + uthreshold >= width)
{
for (int k = j + 1; k < width; k++)
p[k] = 0;
}
else
{
for (k = j + 2; k <= j + uthreshold; k++)
{
if (p[k] == 0) break;
}
if (p[k] == 0)
{
for (int h = j + 1; h < k; h++)
p[h] = 0;
}
}
}
//column-wise elimination
if (p[j] == 0 && p[j + width] == 255)
{
if (i + vthreshold >= height)
{
for (k = j + width; k < j + (height - i)*width; k += width)
p[k] = 0;
}
else
{
for (k = j + 2 * width; k <= j + vthreshold*width; k += width)
{
if (p[k] == 0) break;
}
if (p[k] == 0)
{
for (int h = j + width; h < k; h += width)
p[h] = 0;
}
}
}
}
}
}
}
//Smooth the edges of the image
//size is the window size used for the mean filter, threshold is the value used to re-binarize the blurred image
void imageblur(Mat& src, Mat& dst, Size size, int threshold)
{
int height = src.rows;
int width = src.cols;
blur(src, dst, size);
namedWindow("dst",WINDOW_NORMAL);
imshow("dst",dst);
imwrite("C:\\Users\\ltc\\Desktop\\data3\\BinaryImageSmooth.jpg",dst);
/*for (int i = 0; i < height; i++)
{
uchar* p = dst.ptr<uchar>(i);
for (int j = 0; j < width; j++)
{
if (p[j] < threshold)
p[j] = 0;
else p[j] = 255;
}
}
namedWindow("result",WINDOW_NORMAL);
imshow("result", dst); */
}
int main()
{
Mat sourceImage = imread("C:\\Users\\ltc\\Desktop\\data3\\处理结果.jpg",0);//read as a single channel: delete_jut and imageblur treat the data as one byte per pixel
if(sourceImage.empty())
{
cout<<"where is your picture"<<endl;
return 0;
}
Mat BinaryImgVerse;
bitwise_not(sourceImage,BinaryImgVerse);
imwrite("C:\\Users\\ltc\\Desktop\\data3\\BinaryImgVerse.jpg",BinaryImgVerse);
namedWindow("sourceImage",WINDOW_NORMAL);
imshow("sourceImage",sourceImage);
namedWindow("BinaryImgVerse",WINDOW_NORMAL);
imshow("BinaryImgVerse",BinaryImgVerse);
Mat BinaryImageCut;
//Remove protrusions along the edge of the binary image
//uthreshold and vthreshold are the width and height thresholds of a protrusion
//type is the color of the protrusion: 0 means black, 1 means white
int uthresholdvalue = 100;
int vthresholdvalue = 100;
int typevalue = 0;
delete_jut(BinaryImgVerse, BinaryImageCut, uthresholdvalue, vthresholdvalue,typevalue);
namedWindow("BinaryImageCut",WINDOW_NORMAL);
imshow("BinaryImageCut",BinaryImageCut);
Mat BinaryImageSmooth;
//Smooth the image edges
//size is the mean-filter window size, threshold is the value for re-binarizing the blurred image
int t = 11;
int thresholdvalue = 5;
imageblur(BinaryImageCut, BinaryImageSmooth, Size(2*t+1, 2*t+1), thresholdvalue);
//imwrite("C:\\Users\\ltc\\Desktop\\data3\\BinaryImageSmooth.jpg",BinaryImageSmooth);
Mat imgmeihua = imread("C:\\Users\\ltc\\Desktop\\data3\\处理结果美化.jpg");
Mat imgmeihuaverse;
bitwise_not(imgmeihua,imgmeihuaverse);
imwrite("C:\\Users\\ltc\\Desktop\\data3\\imgmeihuaverse.jpg",imgmeihuaverse);
int size = 31;
Mat imgmeihuaverseBlur;
medianBlur(imgmeihuaverse,imgmeihuaverseBlur,size);
namedWindow("imgmeihuaverseBlur",WINDOW_NORMAL);
imshow("imgmeihuaverseBlur",imgmeihuaverseBlur);
imwrite("C:\\Users\\ltc\\Desktop\\data3\\imgmeihuaverseBlur.jpg",imgmeihuaverseBlur);
waitKey();
return 0;
}
The final result of this step feels mediocre, so I removed the most glaring protrusion by hand.
The result:
Finally, do a NOT operation to invert the colors:
Then the edges can be smoothed with a median filter.
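As an aside, most of step 4 can also be approximated with morphological opening and closing. This is only a sketch: the elliptical 15x15 kernel and the median window are guesses to be tuned against the mask resolution, and morph_smooth.jpg is a made-up output name.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat mask = imread("C:\\Users\\ltc\\Desktop\\data3\\处理结果.jpg", 0);
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(15, 15));
    Mat smoothed;
    morphologyEx(mask, smoothed, MORPH_OPEN, kernel);      //opening removes thin protrusions and small white specks
    morphologyEx(smoothed, smoothed, MORPH_CLOSE, kernel); //closing fills small gaps along the contour
    medianBlur(smoothed, smoothed, 11);                    //a final median filter to smooth the contour
    imwrite("C:\\Users\\ltc\\Desktop\\data3\\morph_smooth.jpg", smoothed);
    return 0;
}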
Step 5:
We AND the mask we obtained with the original image to get:
Then apply a Gaussian blur; all of this code is in the previous post and I am sure everyone can write it (a rough sketch is also given after the final result below). The result:
The three dolls in the middle come out like this:
Apply a light, low-level blur to them:
Finally, adding the three layers together gives:
The final result is as follows:
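For completeness, here is a minimal sketch of how step 5 can be wired together. It only combines the doll layer and the background layer (the forward layer from step 1 is handled the same way), it assumes the mask has the same size as the ROI, and the mask polarity, the kernel sizes and the output file name are my assumptions; the code I actually used is in the previous post.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    //original shot, cropped to the same ROI as in step 1
    Mat src = imread("C:\\Users\\ltc\\Desktop\\data3\\taowamirror.jpg");
    Mat roi = src(Rect(0, 342, 2592, 1260)).clone();

    //final mask; the one saved above is inverted (dolls black), so flip it back so that white marks the dolls
    Mat mask = imread("C:\\Users\\ltc\\Desktop\\data3\\imgmeihuaverseBlur.jpg", 0);
    threshold(mask, mask, 128, 255, THRESH_BINARY);
    bitwise_not(mask, mask);

    //layer 1: the dolls, cut out with the mask
    Mat dolls = Mat::zeros(roi.size(), roi.type());
    roi.copyTo(dolls, mask);

    //layer 2: everything else, blurred heavily
    Mat background = roi - dolls;
    Mat backgroundBlur;
    GaussianBlur(background, backgroundBlur, Size(31, 31), 0);

    //give the doll layer a light, low-level blur so the transition is not too harsh
    Mat dollsSoft;
    GaussianBlur(dolls, dollsSoft, Size(5, 5), 0);

    //add the layers back together
    Mat result = backgroundBlur + dollsSoft;
    imwrite("C:\\Users\\ltc\\Desktop\\data3\\final_sketch.jpg", result);
    return 0;
}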
Afterwards I still used ImageShop to retouch the protruding parts along the edges.
This is the final result:
Going forward I think the less manual work the better, and my advisor also thinks it is not good enough yet. More on that next time.