<2> Classic Examples
There is no way I could write these examples from scratch by myself just yet, so this time the main goal is simply to get a feel for some of OpenCV's fun features. (sneaks away)
(1) Color object tracking: CamShift
① The CamShift algorithm: tracks a target in the video read from the camera, based on the hue distribution of the region framed with the mouse.
② Code and a demo usually make the idea much easier to grasp
Note: the code comes from the official OpenCV samples that ship with every install; the file here is camshiftdemo.cpp.
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
Mat image;
bool backprojMode = false;
bool selectObject = false;
int trackObject = 0;
bool showHist = true;
Point origin;
Rect selection;
int vmin = 10, vmax = 256, smin = 30;
// User draws box around object to track. This triggers CAMShift to start tracking
static void onMouse(int event, int x, int y, int, void*)
{
if (selectObject)
{
selection.x = MIN(x, origin.x);
selection.y = MIN(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);
selection &= Rect(0, 0, image.cols, image.rows);
}
switch (event)
{
case EVENT_LBUTTONDOWN:
origin = Point(x, y);
selection = Rect(x, y, 0, 0);
selectObject = true;
break;
case EVENT_LBUTTONUP:
selectObject = false;
if (selection.width > 0 && selection.height > 0)
trackObject = -1; // Set up CAMShift properties in main() loop
break;
}
}
string hot_keys =
"\n\nHot keys: \n"
"\tESC - quit the program\n"
"\tc - stop the tracking\n"
"\tb - switch to/from backprojection view\n"
"\th - show/hide object histogram\n"
"\tp - pause video\n"
"To initialize tracking, select the object with mouse\n";
static void help()
{
cout << "\nThis is a demo that shows mean-shift based tracking\n"
"You select a color objects such as your face and it tracks it.\n"
"This reads from video camera (0 by default, or the camera number the user enters\n"
"Usage: \n"
" ./camshiftdemo [camera number]\n";
cout << hot_keys;
}
const char* keys =
{
"{help h | | show help message}{@camera_number| 0 | camera number}"
};
int main(int argc, const char** argv)
{
VideoCapture cap;
Rect trackWindow;
int hsize = 16;
float hranges[] = { 0,180 };
const float* phranges = hranges;
CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
help();
return 0;
}
int camNum = parser.get<int>(0);
cap.open(camNum);
if (!cap.isOpened())
{
help();
cout << "***Could not initialize capturing...***\n";
cout << "Current parameter's value: \n";
parser.printMessage();
return -1;
}
cout << hot_keys;
namedWindow("Histogram", 0);
namedWindow("CamShift Demo", 0);
setMouseCallback("CamShift Demo", onMouse, 0);
createTrackbar("Vmin", "CamShift Demo", &vmin, 256, 0);
createTrackbar("Vmax", "CamShift Demo", &vmax, 256, 0);
createTrackbar("Smin", "CamShift Demo", &smin, 256, 0);
Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
bool paused = false;
for (;;)
{
if (!paused)
{
cap >> frame;
if (frame.empty())
break;
}
frame.copyTo(image);
if (!paused)
{
cvtColor(image, hsv, COLOR_BGR2HSV);
if (trackObject)
{
int _vmin = vmin, _vmax = vmax;
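// Mask out pixels whose saturation or value falls outside the trackbar
// ranges: hue is unreliable for nearly gray or very dark pixels.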
inRange(hsv, Scalar(0, smin, MIN(_vmin, _vmax)),
Scalar(180, 256, MAX(_vmin, _vmax)), mask);
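// Extract the hue plane (channel 0 of the HSV image) into its own matrix;
// the histogram that CamShift tracks is built from this channel.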
int ch[] = { 0, 0 };
hue.create(hsv.size(), hsv.depth());
mixChannels(&hsv, 1, &hue, 1, ch, 1);
if (trackObject < 0)
{
// Object has been selected by user, set up CAMShift search properties once
Mat roi(hue, selection), maskroi(mask, selection);
calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
normalize(hist, hist, 0, 255, NORM_MINMAX);
trackWindow = selection;
trackObject = 1; // Don't set up again, unless user selects new ROI
histimg = Scalar::all(0);
int binW = histimg.cols / hsize;
Mat buf(1, hsize, CV_8UC3);
for (int i = 0; i < hsize; i++)
buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180. / hsize), 255, 255);
cvtColor(buf, buf, COLOR_HSV2BGR);
for (int i = 0; i < hsize; i++)
{
int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows / 255);
rectangle(histimg, Point(i*binW, histimg.rows),
Point((i + 1)*binW, histimg.rows - val),
Scalar(buf.at<Vec3b>(i)), -1, 8);
}
}
// Perform CAMShift
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
backproj &= mask;
RotatedRect trackBox = CamShift(backproj, trackWindow,
TermCriteria(TermCriteria::EPS | TermCriteria::COUNT, 10, 1));
if (trackWindow.area() <= 1)
{
int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5) / 6;
trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
trackWindow.x + r, trackWindow.y + r) &
Rect(0, 0, cols, rows);
}
if (backprojMode)
cvtColor(backproj, image, COLOR_GRAY2BGR);
ellipse(image, trackBox, Scalar(0, 0, 255), 3, LINE_AA);
}
}
else if (trackObject < 0)
paused = false;
if (selectObject && selection.width > 0 && selection.height > 0)
{
Mat roi(image, selection);
bitwise_not(roi, roi);
}
imshow("CamShift Demo", image);
imshow("Histogram", histimg);
char c = (char)waitKey(10);
if (c == 27)
break;
switch (c)
{
case 'b':
backprojMode = !backprojMode;
break;
case 'c':
trackObject = 0;
histimg = Scalar::all(0);
break;
case 'h':
showHist = !showHist;
if (!showHist)
destroyWindow("Histogram");
else
namedWindow("Histogram", 1);
break;
case 'p':
paused = !paused;
break;
default:
;
}
}
return 0;
}
③ Conclusions
1. From playing with it, I got a rough idea of what CamShift does: frame a region with the mouse, and it then circles the matching-colored region in the whole camera view. I also noticed that only one ellipse is drawn per frame, and that if the framed region contains several colors, the tracked area grows a lot and the histogram shows more color bars.
2. The meaning of the three parameters of the generated box: I had not fully worked this out at first, but judging from the code the box is a RotatedRect, whose three parameters are a center point, a size, and a rotation angle (see the sketch after this list).
3. There is a blog post that is quite good for understanding the CamShift algorithm; worth a read when I have time. (If quoting it causes any offense or infringement, please let me know; this is purely for study.)
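To make point 2 concrete, here is a minimal sketch I put together (not part of the official demo) that just prints the three parameters of a RotatedRect like the trackBox above; the example values are made up:
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;
int main()
{
    // A made-up tracking result of the kind CamShift returns:
    // centered at (160, 120), 80x40 pixels, rotated 30 degrees.
    RotatedRect trackBox(Point2f(160.f, 120.f), Size2f(80.f, 40.f), 30.f);
    std::cout << "center: " << trackBox.center << std::endl; // where the object is
    std::cout << "size:   " << trackBox.size   << std::endl; // extent of the fitted box
    std::cout << "angle:  " << trackBox.angle  << std::endl; // rotation in degrees
    // The four corners can be recovered from just these three parameters:
    Point2f corners[4];
    trackBox.points(corners);
    for (int i = 0; i < 4; i++)
        std::cout << "corner " << i << ": " << corners[i] << std::endl;
    return 0;
}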
(2) Optical flow
① Meaning: when an object moves, the bright patterns on the image move with it. This apparent motion of the image brightness pattern is the optical flow, and it carries information about how the object moves in the image.
② Code and demo
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp> // cvtColor, goodFeaturesToTrack, drawing
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
void duan_OpticalFlow(Mat &frame, Mat & result);
bool addNewPoints();
bool acceptTrackedPoint(int i);
Mat curgray; // current frame (grayscale)
Mat pregray; // previous frame (grayscale)
vector<Point2f> point[2]; // point[0]: previous positions of the features, point[1]: their new positions
vector<Point2f> initPoint; // initial positions of the tracked points
vector<Point2f> features; // detected features
int maxCount = 500; // maximum number of features to detect
double qLevel = 0.01; // quality level for feature detection
double minDist = 10.0; // minimum distance between two features
vector<uchar> status; // tracking status: 1 if the flow for a feature was found, 0 otherwise
vector<float> err;
int main()
{
Mat matSrc;
Mat matRst;
VideoCapture cap(0);
//int totalFrameNumber = cap.get(CV_CAP_PROP_FRAME_COUNT);
// perform the tracking process
printf("Start the tracking process, press ESC to quit.\n");
while (cap.isOpened())
{
// get frame from the video
cap >> matSrc;
if (!matSrc.empty())
{
duan_OpticalFlow(matSrc, matRst);
}
else
{
cout << "Error : Get picture is empty!" << endl;
}
if (waitKey(1) == 27) break;
}
waitKey(0);
return 0;
}
void duan_OpticalFlow(Mat &frame, Mat & result)
{
cvtColor(frame, curgray, COLOR_BGR2GRAY); // COLOR_BGR2GRAY (CV_BGR2GRAY is the legacy C constant)
frame.copyTo(result);
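// When too few points are left, detect new Shi-Tomasi corners
// and append them to the list of tracked points.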
if (addNewPoints())
{
goodFeaturesToTrack(curgray, features, maxCount, qLevel, minDist);
point[0].insert(point[0].end(), features.begin(), features.end());
initPoint.insert(initPoint.end(), features.begin(), features.end());
}
if (pregray.empty())
{
curgray.copyTo(pregray);
}
calcOpticalFlowPyrLK(pregray, curgray, point[0], point[1], status, err);
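// Keep only the points whose flow was found and that actually moved.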
int k = 0;
for (size_t i = 0; i < point[1].size(); i++)
{
if (acceptTrackedPoint(i))
{
initPoint[k] = initPoint[i];
point[1][k++] = point[1][i];
}
}
point[1].resize(k);
initPoint.resize(k);
for (size_t i = 0; i < point[1].size(); i++)
{
line(result, initPoint[i], point[1][i], Scalar(0, 0, 255));
circle(result, point[1][i], 3, Scalar(0, 255, 0), -1);
}
swap(point[1], point[0]);
swap(pregray, curgray);
imshow("Optical Flow Demo", result);
//waitKey(50);
}
bool addNewPoints()
{
return point[0].size() <= 10;
}
bool acceptTrackedPoint(int i)
{
return status[i] && ((abs(point[0][i].x - point[1][i].x) + abs(point[0][i].y - point[1][i].y)) > 2); // keep points whose flow was found and that moved more than 2 pixels
}
③ The calcOpticalFlowPyrLK() function: it computes sparse optical flow for a set of feature points with the iterative Lucas-Kanade method, using image pyramids to handle larger motions. Something to dig into properly later; a minimal sketch follows.
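A self-contained sketch of how I understand the call so far (the synthetic frames and all names here are my own, not from the demos above): two frames, one shifted square, and the function reports where each corner moved.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <iostream>
using namespace cv;
int main()
{
    // Frame 1: a white square on black; frame 2: the same square shifted by (5, 3).
    Mat prevImg = Mat::zeros(240, 320, CV_8UC1);
    Mat nextImg = Mat::zeros(240, 320, CV_8UC1);
    rectangle(prevImg, Rect(100, 100, 40, 40), Scalar(255), FILLED);
    rectangle(nextImg, Rect(105, 103, 40, 40), Scalar(255), FILLED);
    // Pick corners of the square in the first frame ...
    std::vector<Point2f> prevPts, nextPts;
    goodFeaturesToTrack(prevImg, prevPts, 10, 0.01, 10);
    // ... and track them into the second frame. maxLevel = 3 means up to
    // three extra pyramid levels, which lets larger motions converge.
    std::vector<uchar> status;
    std::vector<float> err;
    calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err,
                         Size(21, 21), 3);
    for (size_t i = 0; i < prevPts.size(); i++)
        if (status[i])
            std::cout << prevPts[i] << " -> " << nextPts[i] << std::endl;
    return 0;
}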
(3) Point tracking
① Meaning: click the point you want to track with the mouse; as the object the point sits on moves, the point's motion is detected and followed.
② Code and demo
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
static void help()
{
// print a welcome message, and the OpenCV version
cout << "\nThis is a demo of Lukas-Kanade optical flow lkdemo(),\n"
"Using OpenCV version " << CV_VERSION << endl;
cout << "\nIt uses camera by default, but you can provide a path to video as an argument.\n";
cout << "\nHot keys: \n"
"\tESC - quit the program\n"
"\tr - auto-initialize tracking\n"
"\tc - delete all the points\n"
"\tn - switch the \"night\" mode on/off\n"
"To add/remove a feature point click it\n" << endl;
}
Point2f point;
bool addRemovePt = false;
static void onMouse(int event, int x, int y, int /*flags*/, void* /*param*/)
{
if (event == EVENT_LBUTTONDOWN)
{
point = Point2f((float)x, (float)y);
addRemovePt = true;
}
}
int main(int argc, char** argv)
{
VideoCapture cap;
TermCriteria termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.03);
Size subPixWinSize(10, 10), winSize(31, 31);
const int MAX_COUNT = 500;
bool needToInit = false;
bool nightMode = false;
help();
cv::CommandLineParser parser(argc, argv, "{@input|0|}");
string input = parser.get<string>("@input");
if (input.size() == 1 && isdigit(input[0]))
cap.open(input[0] - '0');
else
cap.open(input);
if (!cap.isOpened())
{
cout << "Could not initialize capturing...\n";
return 0;
}
namedWindow("LK Demo", 1);
setMouseCallback("LK Demo", onMouse, 0);
Mat gray, prevGray, image, frame;
vector<Point2f> points[2];
for (;;)
{
cap >> frame;
if (frame.empty())
break;
frame.copyTo(image);
cvtColor(image, gray, COLOR_BGR2GRAY);
if (nightMode)
image = Scalar::all(0);
if (needToInit)
{
// automatic initialization
goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 3, 0, 0.04);
cornerSubPix(gray, points[1], subPixWinSize, Size(-1, -1), termcrit);
addRemovePt = false;
}
else if (!points[0].empty())
{
vector<uchar> status;
vector<float> err;
if (prevGray.empty())
gray.copyTo(prevGray);
calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize,
3, termcrit, 0, 0.001);
size_t i, k;
for (i = k = 0; i < points[1].size(); i++)
{
if (addRemovePt)
{
if (norm(point - points[1][i]) <= 5)
{
addRemovePt = false;
continue;
}
}
if (!status[i])
continue;
points[1][k++] = points[1][i];
circle(image, points[1][i], 3, Scalar(0, 255, 0), -1, 8);
}
points[1].resize(k);
}
if (addRemovePt && points[1].size() < (size_t)MAX_COUNT)
{
vector<Point2f> tmp;
tmp.push_back(point);
cornerSubPix(gray, tmp, winSize, Size(-1, -1), termcrit);
points[1].push_back(tmp[0]);
addRemovePt = false;
}
needToInit = false;
imshow("LK Demo", image);
char c = (char)waitKey(10);
if (c == 27)
break;
switch (c)
{
case 'r':
needToInit = true;
break;
case 'c':
points[0].clear();
points[1].clear();
break;
case 'n':
nightMode = !nightMode;
break;
}
std::swap(points[1], points[0]);
cv::swap(prevGray, gray);
}
return 0;
}
(4) Face recognition
① Meaning: recognizing faces.
② Code and demo
The code ships with OpenCV, but since my PDB file configuration is still missing, I could not get the face detection running today. Strictly speaking, this sample only does face detection rather than recognition; I will upload a demo some other day (?)
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name, eyes_cascade_name;
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";
/** @function main */
int main( int argc, const char** argv )
{
CommandLineParser parser(argc, argv,
"{help h||}"
"{face_cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{eyes_cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}");
parser.about( "\nThis program demonstrates using the cv::CascadeClassifier class to detect objects (Face + eyes) in a video stream.\n"
"You can use Haar or LBP features.\n\n" );
parser.printMessage();
face_cascade_name = parser.get<String>("face_cascade");
eyes_cascade_name = parser.get<String>("eyes_cascade");
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture.open( 0 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
if( waitKey(10) == 27 ) { break; } // escape
}
return 0;
}
/** @function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(60, 60) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}
(5) Introduction to support vector machines (??? confused face). I have not studied this yet; a tiny first taste is sketched below.
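OpenCV exposes SVMs through the ml module's cv::ml::SVM class. The sketch below trains a linear SVM on four made-up 2-D points and classifies a new one; it is modeled on the idea of the official introduction_to_svm tutorial, so treat it as a sketch rather than the tutorial itself:
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <iostream>
using namespace cv;
int main()
{
    // Four made-up training samples (x, y) with labels +1 / -1.
    float samples[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
    int labels[4] = { 1, -1, -1, -1 };
    Mat trainData(4, 2, CV_32F, samples);
    Mat labelsMat(4, 1, CV_32S, labels);
    // A C-support vector classifier with a linear kernel.
    Ptr<ml::SVM> svm = ml::SVM::create();
    svm->setType(ml::SVM::C_SVC);
    svm->setKernel(ml::SVM::LINEAR);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
    svm->train(trainData, ml::ROW_SAMPLE, labelsMat);
    // Classify a new point: prints +1 or -1.
    Mat sample = (Mat_<float>(1, 2) << 400.f, 50.f);
    std::cout << "predicted label: " << svm->predict(sample) << std::endl;
    return 0;
}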