OpenCV Notes 03
- SIFT (Scale Invariant Feature Transform): algorithm principles
- Characteristics
- Problems addressed
- Steps
- Concepts
- Drawbacks
- Speed
- BF (Brute-Force) matching
- ORB (Oriented FAST and Rotated BRIEF) feature detector
- Example 1
- Example 2
- SIFT
- FLANN (Fast Library for Approximate Nearest Neighbors) matching
- SIFT
- Example 1
- Example 2
- Example 3
- SURF
SIFT (Scale Invariant Feature Transform): algorithm principles
Characteristics
- Invariant to rotation, scale, and brightness changes; reasonably stable under viewpoint changes, affine transformations, and noise.
- Well suited to fast, accurate matching against large feature databases.
- Even a handful of objects can produce a large number of SIFT feature vectors.
- An optimized SIFT matcher can even reach real-time speeds.
- SIFT vectors combine easily with other kinds of feature vectors.
Problems addressed
- Rotation, scaling, and translation of the target
- Affine/projective image transformations
- Illumination changes
- Partial occlusion of the target
- Cluttered scenes
- Noise
Steps
- Scale-space extrema detection
- Keypoint localization
- Orientation assignment
- Keypoint description
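The four steps above are exactly what detectAndCompute runs end to end. A minimal sketch that detects SIFT keypoints and visualizes their scale and orientation, assuming an opencv-contrib build and the same sources/gk1.jpg test image used in the examples below:
import cv2
img = cv2.imread("sources/gk1.jpg", 0)  # grayscale input
sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT_create() on OpenCV >= 4.4
kp, des = sift.detectAndCompute(img, None)  # runs all four steps
# each keypoint records the scale (kp[i].size) and orientation (kp[i].angle) from steps 1-3;
# des is an N x 128 float array of descriptors from step 4
vis = cv2.drawKeypoints(img, kp, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow("SIFT keypoints", vis)
cv2.waitKey(0)
cv2.destroyAllWindows()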
Concepts
- 2D Gaussian function
- 2D Gaussian blur of an image and its separability
- Scale space
- Gaussian pyramid
- Difference-of-Gaussians (DoG) pyramid
- Eliminating edge responses
- Differentiation by finite differences
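For reference, the first few concepts written out (these are the standard definitions from Lowe's SIFT paper, not anything OpenCV-specific). The 2D Gaussian at scale \sigma, the scale space of an image I, and the difference-of-Gaussians (DoG) pyramid are

G(x, y, \sigma) = \frac{1}{2\pi\sigma^2} \, e^{-(x^2 + y^2) / (2\sigma^2)}

L(x, y, \sigma) = G(x, y, \sigma) * I(x, y)

D(x, y, \sigma) = L(x, y, k\sigma) - L(x, y, \sigma)

Because G factors into two 1D Gaussians, the 2D blur separates into a horizontal pass followed by a vertical pass. Extrema of D over (x, y, \sigma) are the candidate keypoints; edge responses among them are rejected by thresholding the ratio \mathrm{tr}(H)^2 / \det(H) of the 2x2 Hessian H of D, in the spirit of the Harris corner test, and all derivatives are computed by finite differences on the sampled pyramid.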
Drawbacks
- Not fast enough for real-time use.
- Sometimes yields only a few feature points.
- Cannot reliably extract feature points from targets with smooth edges.
Speed
Rough rule of thumb: ORB ≈ 10 × SURF ≈ 100 × SIFT, i.e. ORB is about an order of magnitude faster than SURF and about two orders of magnitude faster than SIFT.
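A quick sanity check of this rule of thumb is easy to run; the sketch below assumes an opencv-contrib-python build (so cv2.xfeatures2d with SIFT/SURF is available) and reuses the test image from the examples below:
import time
import cv2
img = cv2.imread("sources/gk1.jpg", 0)
detectors = {
    "ORB": cv2.ORB_create(),
    "SURF": cv2.xfeatures2d.SURF_create(),
    "SIFT": cv2.xfeatures2d.SIFT_create(),
}
for name, det in detectors.items():
    t0 = time.perf_counter()
    kp, des = det.detectAndCompute(img, None)  # time detection + description together
    print("%s: %d keypoints in %.3f s" % (name, len(kp), time.perf_counter() - t0))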
BF (Brute-Force) matching
A brute-force matcher compares each descriptor from the first image against every descriptor from the second and keeps the closest one, so it is exact but costs O(N·M) in the number of descriptors.
ORB (Oriented FAST and Rotated BRIEF) feature detector
Example 1 (match with crossCheck)
# -*- coding: utf-8 -*-
import cv2
from matplotlib import pyplot as plt
template = cv2.imread("sources/gk1.jpg", 0)  # load both images as grayscale
target = cv2.imread("sources/gk2.jpg", 0)
orb = cv2.ORB_create()  # create the ORB feature detector
kp1, des1 = orb.detectAndCompute(template, None)  # keypoints and descriptors of template
kp2, des2 = orb.detectAndCompute(target, None)
# ORB descriptors are binary, so match with Hamming distance; crossCheck keeps only mutual best matches
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)  # match descriptors
matches = sorted(matches, key=lambda x: x.distance)  # sort by distance, best first
result = cv2.drawMatches(template, kp1, target, kp2, matches[:40], None, flags=2)  # draw the 40 best matches
plt.imshow(result)
plt.show()  # display with matplotlib
Example 2 (knnMatch with Lowe's ratio test instead of crossCheck)
import cv2
imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
orb = cv2.ORB_create()
img1 = cv2.imread(imgname1)
kp1, des1 = orb.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # Hamming norm for ORB's binary descriptors (the default NORM_L2 is wrong here)
matches = bf.knnMatch(des1, des2, k=2)  # two best candidates per descriptor
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:  # ratio test: keep m only if clearly better than the runner-up
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("ORB", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
SIFT
import cv2
imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT_create() on OpenCV >= 4.4
img1 = cv2.imread(imgname1)
kp1, des1 = sift.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv2.BFMatcher()  # the default NORM_L2 suits SIFT's float descriptors
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:  # Lowe's ratio test
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("BFmatch", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
FLANN (Fast Library for Approximate Nearest Neighbors) matching
FLANN builds an index (e.g. randomized KD-trees) over the descriptors and answers nearest-neighbor queries approximately, which is much faster than brute force on large descriptor sets at the cost of occasionally missing the true nearest neighbor.
SIFT
Example 1 (ratio-test mask drawn over all knn matches)
# -*- coding: utf-8 -*-
import cv2
from matplotlib import pyplot as plt
queryImage = cv2.imread("sources/gk1.jpg", 0)
trainingImage = cv2.imread("sources/gk2.jpg", 0)
sift = cv2.xfeatures2d.SIFT_create()  # create the SIFT detector
kp1, des1 = sift.detectAndCompute(queryImage, None)
kp2, des2 = sift.detectAndCompute(trainingImage, None)
FLANN_INDEX_KDTREE = 1  # FLANN parameters; the KD-tree index is 1 (0 is the linear index)
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
matchesMask = [[0, 0] for _ in range(len(matches))]  # mask: draw no match lines by default
for i, (m, n) in enumerate(matches):
    if m.distance < 0.5 * n.distance:  # ratio test: keep only matches well ahead of the runner-up
        matchesMask[i] = [1, 0]
# colors for the match lines and the single keypoints
drawParams = dict(matchColor=(0, 0, 255), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)
resultImage = cv2.drawMatchesKnn(queryImage, kp1, trainingImage, kp2, matches, None, **drawParams)
plt.imshow(resultImage)
plt.show()
Example 2 (homography estimation with RANSAC)
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 10  # require at least 10 good matches
template = cv2.imread('sources/gk1.jpg', 0)
target = cv2.imread('sources/gk2.jpg', 0)
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(template, None)
kp2, des2 = sift.detectAndCompute(target, None)
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:  # ratio test with threshold 0.7
        good.append(m)
if len(good) > MIN_MATCH_COUNT:
    # collect the matched keypoint coordinates in both images
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # estimate the homography with RANSAC; mask flags the inlier matches
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = template.shape
    # project the template's four corners into the target image and outline them
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    cv2.polylines(target, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None
draw_params = dict(matchColor=(0, 255, 0),  # inlier matches in green
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw inliers only
                   flags=2)
result = cv2.drawMatches(template, kp1, target, kp2, good, None, **draw_params)
plt.imshow(result, 'gray')
plt.show()
Example 3 (minimal FLANN + SIFT matching)
import cv2
imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
sift = cv2.xfeatures2d.SIFT_create()
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # parameter is trees, not tree
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
img1 = cv2.imread(imgname1)
kp1, des1 = sift.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = sift.detectAndCompute(img2, None)
matches = flann.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:  # ratio test
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("FLANN", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
SURF
import cv2
imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
surf = cv2.xfeatures2d.SURF_create()  # SURF is patented; needs a contrib build with nonfree support
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
img1 = cv2.imread(imgname1)
kp1, des1 = surf.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = surf.detectAndCompute(img2, None)
matches = flann.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:  # ratio test
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("SURF", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()