又到一学期期末课程设计周
计科小学生又来发布课程设计

本次是高级程序设计
用MFC构建界面,调用opencv动态链接库,再加上openmp实现并行计算。
看起来高大上很多。

提示

如果要使用 OpenMP,需要在 Visual Studio 项目属性中打开相应选项:C/C++ → 语言 → “OpenMP 支持”设为“是”。

opencv 合并相邻的线_openmp

业务流程大致如此:

opencv 合并相邻的线_opencv 合并相邻的线_02


高斯滤波:

opencv 合并相邻的线_openmp_03


伪色彩增强:

opencv 合并相邻的线_openmp_04


在进行线性增强和灰度处理时可以选择多线程处理来缩减响应时间

,分别以2线程和8线程展示:

opencv 合并相邻的线_opencv_05


opencv 合并相邻的线_openmp_06


代码部分细节:

点击打开图片按钮响应对应的代码细节展示:
点击“确定”按钮后获取文件路径到strPathName,并将文件路径显示到编辑框

// If the user confirmed the file-open dialog, grab the selected path
// and echo it into the edit control so the user can see what was chosen.
if (dlg.DoModal() == IDOK) 
	{
		strPathName = dlg.GetPathName();
		m_file.SetWindowText(strPathName); 
	}

使用StretchDIBits函数显示图片

StretchDIBits(GetDlgItem(IDC_STATIC)->GetDC()->GetSafeHdc()
			, 0, 0, std::min(rect.Width(), rect.Height()), std::min(rect.Width(), rect.Height())
			, 0, 0, cvImgTmp.cols, cvImgTmp.rows,
			cvImgTmp.data, &bitmapInfo, DIB_RGB_COLORS, SRCCOPY);

声明图片大小

// Fill in the BITMAPINFOHEADER describing the cv::Mat buffer for StretchDIBits.
bmiHeader = &bitmapInfo.bmiHeader;
		bmiHeader->biSize = sizeof(BITMAPINFOHEADER);
		bmiHeader->biWidth = cvImgTmp.cols;
		// Negative height marks a top-down DIB, matching OpenCV's
		// top-left pixel origin (a positive height would flip the image).
		bmiHeader->biHeight = -cvImgTmp.rows;
		bmiHeader->biPlanes = 1;
		bmiHeader->biBitCount = 24;        // 24-bit BGR, matching a CV_8UC3 Mat
		bmiHeader->biCompression = BI_RGB;
		bmiHeader->biSizeImage = 0;        // may be 0 for BI_RGB bitmaps
		// FIX: these fields mean pixels-per-METER; GetSystemMetrics(SM_CXSCREEN)
		// returns the screen width in pixels, which is semantically wrong here.
		// 0 is the conventional "unspecified" value and is ignored by StretchDIBits.
		bmiHeader->biXPelsPerMeter = 0;
		bmiHeader->biYPelsPerMeter = 0;
		bmiHeader->biClrUsed = 0;          // no palette for 24-bit
		bmiHeader->biClrImportant = 0;

使用IsIconic函数判断窗口当前是否处于最小化状态。
这是MFC向导生成的OnPaint标准写法:若窗口被最小化,
则用CPaintDC类获取绘图设备上下文,擦除图标背景并在客户区中央重绘应用程序图标;否则交给框架默认绘制。

if (IsIconic())// IsIconic() reports whether the window is currently minimized
	{
		CPaintDC dc(this); // device context for painting

		SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);// ask the frame to erase the icon background

		// Center icon in client rectangle
		int cxIcon = GetSystemMetrics(SM_CXICON);// standard icon width/height in pixels
		int cyIcon = GetSystemMetrics(SM_CYICON);
		CRect rect;
		GetClientRect(&rect);// client-area size of this window
		int x = (rect.Width() - cxIcon + 1) / 2;
		int y = (rect.Height() - cyIcon + 1) / 2;

		// Draw the icon
		dc.DrawIcon(x, y, m_hIcon);
	}

对图片进行高斯滤波处理

// Gaussian blur: require an opened image, filter it, save the result.
if (str == "")
	{
		AfxMessageBox(_T("请先打开图片!"));
		return;
	}
	Mat image = imread(str, 1);
	Mat out;
	// 9x9 kernel, sigmaX = 10; sigmaY = 0 means it is derived from sigmaX.
	GaussianBlur(image, out, Size(9, 9), 10, 0);
	// BUG FIX: the original saved `image` (the untouched input);
	// the filtered result lives in `out`.
	imwrite("高斯滤波.jpg", out);
	waitKey();

点击伪色彩增强

// Pseudo-color enhancement: map gray intensities through the JET colormap.
if (str == "")
	{
		AfxMessageBox(_T("请先打开图片!"));
		return;
	}


	// FIX: load as grayscale (flag 0). The variable is named im_gray and the
	// point of pseudo-color enhancement is to recolor single-channel
	// intensities; flag 1 loaded a 3-channel BGR image instead.
	Mat im_gray = imread(str, 0);
	Mat im_color;
	applyColorMap(im_gray, im_color, COLORMAP_JET);
	imwrite("伪彩色增强.jpg", im_color);
	waitKey(0);

对图像进行腐蚀操作

// Morphological erosion with a 15x15 rectangular structuring element;
// the eroded image is written to disk.
if (str == "")
	{
		AfxMessageBox(_T("请先打开图片!"));
		return;
	}
	Mat input = imread(str, 1);
	Mat kernel = getStructuringElement(MORPH_RECT, Size(15, 15));
	Mat eroded;
	erode(input, eroded, kernel);
	imwrite("腐蚀操作.jpg", eroded);
	waitKey(0);

对图片进行线性增强
分别读取图像三个通道的RGB色彩信息,对每个通道做 v*alpha+beta 的线性变换,并用saturate_cast函数把结果截断到[0,255];如果图片为灰度图片,则只处理单个通道的灰度值。

if (str == "")
	{
		AfxMessageBox(_T("请先打开图片!"));
		return;
	}
	Mat src1, dst;
	src1 = imread(str,1);
	double alpha = 1.2, beta = 50;
	dst = Mat::zeros(src1.size(), src1.type());
	Mat src2 = src1, dst1 = dst;
	float begintime = omp_get_wtime();
	for (int row = 0; row < src1.rows; row++)
	{
		for (int col = 0; col < src1.cols; col++)
		{
			if (src1.channels() == 3)
			{
				int b = src1.at<Vec3b>(row, col)[0];
				int g = src1.at<Vec3b>(row, col)[1];
				int r = src1.at<Vec3b>(row, col)[2];

				dst.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(b*alpha + beta);
				dst.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(g*alpha + beta);
				dst.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(r*alpha + beta);
			}
			else if (src1.channels() == 1)
			{
				float v = src1.at<uchar>(row, col);
				dst.at<uchar>(row, col) = saturate_cast<uchar>(v*alpha + beta);
			}
		}
	}
	float endtime = omp_get_wtime();
	float before = endtime - begintime;

使用多线程加速处理。
使用omp parallel for num_threads(n)语句来进行多线程加速,n为输入的线程数,最后减去初始的时间得到多线程加速后节约的时间。

// Second pass: identical linear enhancement, but the row loop is split across
// n threads with OpenMP.  src2/dst1 alias src1/dst from the serial pass, so
// the same buffers are rewritten; each thread handles disjoint rows, so the
// writes do not race.
begintime = omp_get_wtime();
#pragma omp parallel for num_threads(n)
	for (int row = 0; row < src2.rows; row++)
	{
		for (int col = 0; col < src2.cols; col++)
		{
			if (src2.channels() == 3)
			{
				// Read B, G, R components of the source pixel.
				int b = src2.at<Vec3b>(row, col)[0];
				int g = src2.at<Vec3b>(row, col)[1];
				int r = src2.at<Vec3b>(row, col)[2];

				// v*alpha + beta, clamped to [0,255] by saturate_cast.
				dst1.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(b*alpha + beta);
				dst1.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(g*alpha + beta);
				dst1.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(r*alpha + beta);
			}
			else if (src2.channels() == 1)
			{
				// Single-channel (grayscale) path.
				float v = src2.at<uchar>(row, col);
				dst1.at<uchar>(row, col) = saturate_cast<uchar>(v*alpha + beta);
			}
		}
	}
	endtime = omp_get_wtime();
	float after = endtime - begintime;
	// Time saved by the parallel version relative to the serial baseline.
	float gap = before - after;

对图片进行灰度处理
使用max函数取每个像素RGB三个分量中的最大值,并把该最大值同时写回三个通道,从而得到灰度效果(图像仍是三通道)

// Grayscale by channel maximum (serial baseline): every output channel gets
// max(R,G,B), producing a gray-looking 3-channel image; timed for comparison
// with the OpenMP pass below.
dst.create(src.size(), src.type());
	// Shallow copies sharing pixel data; the OpenMP pass below uses them.
	Mat src1 = src, dst1 = dst;
	// FIX: omp_get_wtime() returns double; float lost precision.
	double begintime = omp_get_wtime();
	for (int row = 0; row < src.rows; row++)
	{
		for (int col = 0; col < src.cols; col++)
		{
			const Vec3b& pix = src.at<Vec3b>(row, col);
			int b = pix[0];
			int g = pix[1];
			int r = pix[2];
			// FIX: compute the channel maximum once instead of three times.
			const uchar m = saturate_cast<uchar>(max(r, max(g, b)));
			dst.at<Vec3b>(row, col) = Vec3b(m, m, m);
		}
	}
	double endtime = omp_get_wtime();
	float before = endtime - begintime;

使用omp parallel for num_threads(n)进行多线程加速处理

// Second pass: the same max-channel grayscale, with the row loop parallelized
// across n threads by OpenMP.  src1/dst1 alias src/dst from the serial pass;
// each thread writes disjoint rows, so the writes do not race.
begintime = omp_get_wtime();
#pragma omp parallel for num_threads(n)
	for (int row = 0; row < src1.rows; row++)

	{
		for (int col = 0; col < src1.cols; col++)

		{

			int b = src1.at<Vec3b>(row, col)[0];

			int g = src1.at<Vec3b>(row, col)[1];

			int r = src1.at<Vec3b>(row, col)[2];

			// Every channel receives the maximum, yielding a gray pixel.
			dst1.at<Vec3b>(row, col)[0] = max(r, max(g, b));

			dst1.at<Vec3b>(row, col)[1] = max(r, max(g, b));

			dst1.at<Vec3b>(row, col)[2] = max(r, max(g, b));
		}
	}
	endtime = omp_get_wtime();
	float after = endtime - begintime;
	// Time saved versus the serial baseline.
	float gap = before - after;

以上就是整体框架了