English tutorials for OpenCV's Python bindings are easy to find online. This post implements and annotates a selection of their examples, and patches the places in the original programs that raise errors with the cv2 module.
# The original code was drafted in a Jupyter notebook so that each cell could be run and tested separately; it is pasted here largely as-is.
import numpy as np
import cv2
# Read an image
img = cv2.imread('8.jpg')
# Scale by 1.6 along both axes, then translate by (-150, -240); keeping only the
# 400x600 output window makes this equivalent to cropping and zooming in
# (checked in the sketch below)
M_crop_elephant = np.array([
    [1.6, 0, -150],
    [0, 1.6, -240]
], dtype=np.float32)
img_elephant = cv2.warpAffine(img, M_crop_elephant, (400, 600))
cv2.imwrite('3_0.jpg', img_elephant)
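The crop-and-zoom equivalence can be checked explicitly. A minimal sketch (not in the original post, assuming the source photo is large enough): scaling the whole image by 1.6 and cutting out the 400x600 window at offset (150, 240) gives essentially the same result as the warpAffine call, up to interpolation differences.
scaled = cv2.resize(img, None, fx=1.6, fy=1.6)   # enlarge the whole photo by 1.6x
cropped = scaled[240:240 + 600, 150:150 + 400]   # rows 240..839, cols 150..549
# `cropped` should look (almost) identical to img_elephant above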
# Shear along the x axis by 10°
theta = 10 * np.pi / 180
M_shear = np.array([
    [1, np.tan(theta), 0],
    [0, 1, 0]
], dtype=np.float32)
img_sheared = cv2.warpAffine(img, M_shear, (400, 600))
cv2.imwrite('3_1.jpg', img_sheared)
# Rotate clockwise by 10° about the origin (the top-left corner)
M_rotate = np.array([
    [np.cos(theta), -np.sin(theta), 0],
    [np.sin(theta), np.cos(theta), 0]
], dtype=np.float32)
img_rotated = cv2.warpAffine(img, M_rotate, (400, 600))
cv2.imwrite('3_2.jpg', img_rotated)
# A general affine transform; its 2x2 linear part can be read as rotation + scaling + rotation,
# which SVD makes explicit (see the sketch after this block)
M = np.array([
    [1, 1.5, -400],
    [0.5, 2, -100]
], dtype=np.float32)
img_transformed = cv2.warpAffine(img, M, (400, 600))
cv2.imwrite('3_3.jpg', img_transformed)
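The rotation + scaling + rotation reading can be checked directly. A minimal sketch (not in the original post) that factors the 2x2 part of M with numpy's SVD: U and Vt are orthogonal (rotations/reflections) and the singular values are the two scale factors.
A = M[:, :2]                  # linear part of the affine transform above
U, S, Vt = np.linalg.svd(A)   # A = U @ diag(S) @ Vt
print('U =\n', U)             # rotation/reflection
print('scales =', S)          # the two scale factors
print('Vt =\n', Vt)           # rotation/reflection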
import cv2  # drawing primitives
import numpy as np
im = np.zeros((512, 512, 3), np.uint8)
cv2.line(im, (0, 0), (511, 511), (255, 255, 0), 100)  # canvas, start point, end point, color (BGR), thickness
cv2.namedWindow("Image")
cv2.imshow("Image", im)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Mouse interaction: double-click the left button to draw a filled circle
import cv2
import numpy as np
def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDBLCLK:
        cv2.circle(img, (x, y), 100, (255, 0, 0), -1)
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
while 1:
    cv2.imshow('image', img)
    if cv2.waitKey(20) & 0xFF == 27:  # Esc to quit
        break
cv2.destroyAllWindows()
# Blend two images
import cv2
import numpy as np
im1 = cv2.imread('7.jpg')
im2 = cv2.imread('10.jpg')
mix = cv2.addWeighted(im1, 0.7, im2, 0.3, 0)  # weighted sum; the two images must have the same size (a resize workaround is sketched after this block)
cv2.imshow('mix',mix)
cv2.waitKey(0)
cv2.destroyAllWindows()
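If the two photos are not the same size, addWeighted fails. A minimal workaround (filenames as above, not part of the original post) is to resize one image to the other's shape first:
im1 = cv2.imread('7.jpg')
im2 = cv2.imread('10.jpg')
im2 = cv2.resize(im2, (im1.shape[1], im1.shape[0]))  # cv2.resize expects (width, height)
mix = cv2.addWeighted(im1, 0.7, im2, 0.3, 0)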
# Keep only selected colors (HSV range filtering)
import cv2
import numpy as np
while 1:
    im1 = cv2.imread('10.jpg')
    hsv = cv2.cvtColor(im1, cv2.COLOR_BGR2HSV)
    blue_low = np.array([110, 50, 50])    # HSV bounds for each color can be found in lookup tables
    blue_high = np.array([130, 255, 255])
    green_low = np.array([0, 0, 221])     # note: this range is usually listed as white (low saturation, high value), not green
    green_high = np.array([180, 30, 255])
    mask_blue = cv2.inRange(hsv, blue_low, blue_high)
    mask_green = cv2.inRange(hsv, green_low, green_high)
    res = cv2.bitwise_and(im1, im1, mask=mask_green + mask_blue)
    cv2.imshow('im1', im1)
    cv2.imshow('mask', mask_green)
    cv2.imshow('res', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
# Image rotation with an auto-generated matrix. Rotation and translation are normally done by
# building the transform matrix by hand, as above; cv2 wraps this in cv2.getRotationMatrix2D
# (a manual equivalent is sketched after this block)
im1 = cv2.imread('10.jpg')
r, c, b = im1.shape
M = cv2.getRotationMatrix2D((c / 2, r / 2), 45, 0.6)  # rotate 45° about the image center, scale by 0.6
dst = cv2.warpAffine(im1, M, (2 * c, 2 * r))
while 1:
    cv2.imshow('imgr', dst)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
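For reference, a small sketch (following the formula in the OpenCV documentation, not part of the original post) of the 2x3 matrix that getRotationMatrix2D builds, i.e. rotation about a chosen center combined with uniform scaling:
import numpy as np

def rotation_matrix_2d(center, angle_deg, scale):
    # Same 2x3 matrix as cv2.getRotationMatrix2D(center, angle_deg, scale)
    a = np.deg2rad(angle_deg)
    alpha, beta = scale * np.cos(a), scale * np.sin(a)
    cx, cy = center
    return np.array([
        [alpha, beta, (1 - alpha) * cx - beta * cy],
        [-beta, alpha, beta * cx + (1 - alpha) * cy]
    ], dtype=np.float32)

# Should match (up to rounding) the M returned by cv2.getRotationMatrix2D above
print(rotation_matrix_2d((c / 2, r / 2), 45, 0.6))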
'''Image thresholding'''
import cv2
import numpy as np
from matplotlib import pyplot as plt
im = cv2.imread('1.jpg', 0)  # 0 = load as grayscale
img = cv2.medianBlur(im, 5)  # median filter to reduce noise
# Global threshold
ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
# Adaptive thresholds
th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
# Otsu's automatic threshold after Gaussian smoothing
blur = cv2.GaussianBlur(im, (5, 5), 0)
re, th4 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print(type(th2))
titles = ['Original', 'Global', 'Adaptive Mean', 'Adaptive Gaussian', 'Otsu']
images = [img, th1, th2, th3, th4]
for i in range(5):
    plt.subplot(2, 3, i + 1)
    plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([])
    plt.yticks([])
plt.show()
img = cv2.imread('4.jpg')
blur = cv2.bilateralFilter(img, 9, 75, 75)  # bilateral filter: smooths while preserving edges
while 1:
    cv2.imshow('imgr', blur)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
# Use filtering functions to take image derivatives
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('6.jpg', 0)
laplacian = cv2.Laplacian(img, cv2.CV_64F)          # second derivative
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # partial derivative in x (higher orders are also possible)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)  # partial derivative in y
titles = ['Laplacian', 'Sobel X', 'Sobel Y', 'Sobel X + Sobel Y']
al = sobelx + sobely  # simple sum of the two partial derivatives
images = [laplacian, sobelx, sobely, al]
for i in range(4):
    plt.subplot(2, 2, i + 1)
    plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([])
    plt.yticks([])
plt.show()
# Canny edge detection
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('4.jpg',0)
edg = cv2.Canny(img,100,200)
plt.subplot(121)
plt.imshow(img,cmap = 'gray')
plt.subplot(122)
plt.imshow(edg,cmap = 'gray')
plt.show()
# Drawing contours
# Image moments
# Note: the original cell assumes `imgr` and `contours` already exist; a hypothetical setup
# (placeholder filename) would be:
imgr = cv2.imread('1.jpg')
gray = cv2.cvtColor(imgr, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x returns three values here
cnt = contours[0]
M = cv2.moments(cnt)  # image moments
#cx = int(M['m10']/M['m00'])
#cy = int(M['m01']/M['m00'])  # centroid of the object (used in the sketch after this cell)
area = cv2.contourArea(cnt)           # contour area (= m00)
perimeter = cv2.arcLength(cnt, True)  # contour perimeter
epsilon = 0.1 * perimeter
approx = cv2.approxPolyDP(cnt, epsilon, True)  # polygonal approximation of the contour
print(approx)
cv2.drawContours(imgr, [approx], -1, (0, 255, 0), 5)
dst = cv2.pointPolygonTest(cnt, (50, 50), True)  # signed distance from (50, 50) to the contour
print(dst)
# Mask from the contour
mask = np.zeros(gray.shape, np.uint8)
cv2.drawContours(mask, [cnt], 0, 255, -1)
cv2.imshow('1', imgr)
cv2.waitKey(0)
cv2.destroyAllWindows()
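The commented-out centroid formula above can be used directly. A small sketch (reusing cnt and imgr from this cell) that marks the centre of mass:
M = cv2.moments(cnt)
if M['m00'] != 0:
    cx = int(M['m10'] / M['m00'])  # centroid x
    cy = int(M['m01'] / M['m00'])  # centroid y
    cv2.circle(imgr, (cx, cy), 5, (0, 0, 255), -1)  # mark the centroid in red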
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Histograms
im = cv2.imread('1.jpg', 0)
hist = cv2.calcHist([im], [0], None, [256], [0, 256])  # image, channel, mask, number of bins, pixel range
im = cv2.imread('11.jpg',0)
plt.hist(im.ravel(),256,[0,256])
plt.show()
# Per-channel histograms
import cv2
import numpy as np
from matplotlib import pyplot as plt
im = cv2.imread('7.jpg')
color = ('b', 'g', 'r')
for i, col in enumerate(color):
    hist = cv2.calcHist([im], [i], None, [256], [0, 256])
    plt.plot(hist, color=col)
plt.show()
# Increase contrast by histogram equalization
im = cv2.imread('16.png', 0)
equ = cv2.equalizeHist(im)
res = np.hstack((im, equ))
cv2.imshow('imgr', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
im = cv2.imread('14.png', 0)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))  # CLAHE: contrast-limited adaptive histogram equalization
cl1 = clahe.apply(im)  # improves local regions with low contrast
res = np.hstack((im,cl1))
cv2.imshow('imgr',res)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('3.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # 2D histogram over hue and saturation
hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
plt.imshow(hist, interpolation='nearest')
plt.show()
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('1.jpg',0)
dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20 * np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))
# Fourier transform: keep only a 60x60 window of low frequencies around the center (low-pass filter)
rows, cols = img.shape
crow, ccol = rows // 2, cols // 2  # integer center coordinates for indexing
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1
fshift = dft_shift * mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img_back, cmap='gray')
plt.title('Low Pass Filtered'), plt.xticks([]), plt.yticks([])
plt.show()
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Fourier transform with numpy: zero out the low frequencies (high-pass filter)
img = cv2.imread('1.jpg', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20 * np.log(np.abs(fshift))
rows, cols = img.shape
crow, ccol = rows // 2, cols // 2  # integer center coordinates for indexing
fshift[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.subplot(131),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(img_back, cmap = 'gray')
plt.title('Image after HPF'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_back)
plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
plt.show()
# Template matching
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('18.jpg', 0)  # source image
img2 = img.copy()
template = cv2.imread('19.jpg', 0)
w, h = template.shape[::-1]
# All the 6 methods for comparison in a list
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
for meth in methods:
    img = img2.copy()
    method = eval(meth)
    res = cv2.matchTemplate(img, template, method)  # response map of match scores
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # For SQDIFF methods the best match is the minimum; otherwise it is the maximum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 5)
    plt.subplot(121), plt.imshow(res, cmap='gray')
    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(img, cmap='gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
# Hough transform for line detection
import cv2
import numpy as np
img = cv2.imread('22.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
for line in lines:
    rho, theta = line[0]
    # Convert the (rho, theta) parameterisation into two far-apart points on the line
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('imgr',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Harris corner detection
import cv2
import numpy as np
filename = '3.jpg'
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
dst = np.uint8(dst)
#dst = cv2.dilate(dst,None)  # optional: dilate the response to make corners easier to see
img[dst > 0.01 * dst.max()] = [0, 0, 255]  # mark strong corners in red
cv2.imshow('dst', img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
import numpy as np  # improved Harris: Shi-Tomasi corners via goodFeaturesToTrack
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('4.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)  # best 25 corners, quality 0.01, min distance 10
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 5, 0, -1)
plt.imshow(img), plt.show()
# SIFT
import cv2
import numpy as np
img = cv2.imread('8.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT()  # OpenCV 2.4 API (a version for newer OpenCV follows this block)
kp = sift.detect(gray, None)  # detect keypoints
# Draw keypoints with their size and orientation
img = cv2.drawKeypoints(gray, kp, img, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('imgr', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
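cv2.SIFT() only exists in the old OpenCV 2.4 bindings. A minimal sketch of the same example for newer versions (assuming OpenCV >= 4.4, where SIFT is exposed as cv2.SIFT_create; on 3.x contrib builds the equivalent is cv2.xfeatures2d.SIFT_create):
import cv2
img = cv2.imread('8.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT_create()        # cv2.xfeatures2d.SIFT_create() on OpenCV 3.x contrib builds
kp = sift.detect(gray, None)    # detect keypoints
out = cv2.drawKeypoints(gray, kp, None)
cv2.imshow('sift', out)
cv2.waitKey(0)
cv2.destroyAllWindows()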
# SURF (can also be used as a blob detector)
img = cv2.imread('10.jpg', 0)
# Create SURF object. You can specify params here or later.
# Here I set Hessian Threshold to 400
surf = cv2.SURF(400)  # OpenCV 2.4 API; contrib builds of newer versions use cv2.xfeatures2d.SURF_create()
# Find keypoints and descriptors directly
print(surf.hessianThreshold)  # raising the threshold reduces the number of keypoints
surf.hessianThreshold = 1000
kp, des = surf.detectAndCompute(img, None)
#img2 = cv2.drawKeypoints(img,kp,None,(255,0,0),5)
img2 = cv2.drawKeypoints(img, kp, None, (255, 0, 0), 4)  # flag 4 = draw rich keypoints (size and orientation)
print(kp)
cv2.imshow('imgr', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
# FAST feature detection; it can be tuned with machine learning, and run with or without non-max suppression
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('6.jpg',0)
fast = cv2.FastFeatureDetector()  # OpenCV 2.4 API; newer versions use cv2.FastFeatureDetector_create()
# find and draw the keypoints
kp = fast.detect(img,None)
img2 = cv2.drawKeypoints(img, kp, None, color=(255,0,0))
# Print all default params
'''print( "Threshold: {}".format(fast.getInt('threshold')) )
print( "nonmaxSuppression:{}".format(fast.getBool('nonmaxSuppression')) )
print( "neighborhood: {}".format(fast.getInt('type')) )
print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) )
'''
# Disable nonmaxSuppression
fast.setBool('nonmaxSuppression',0)  # OpenCV 2.4 API; newer versions: fast.setNonmaxSuppression(False)
kp = fast.detect(img,None)
print( "Total Keypoints without nonmaxSuppression: {}".format(len(kp)) )
img3 = cv2.drawKeypoints(img, kp, None, color=(255,0,0))
im_all = np.hstack((img3,img2))
cv2.imshow('imgr',im_all)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Brute-force matching. Note: this cell raises an error on OpenCV 2, which has no drawMatches
# function; I did not find a clean fix (a possible manual fallback is sketched at the end).
import numpy as np
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread('1.jpg', 0)  # queryImage
img2 = cv2.imread('2.jpg', 0)  # trainImage
# Initiate ORB detector
orb = cv2.ORB()  # OpenCV 2.4 API; newer versions use cv2.ORB_create()
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1, des2)
# Sort them in the order of their distance.
matches = sorted(matches, key = lambda x:x.distance)
# Draw first 10 matches.
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], flags=2)
plt.imshow(img3),plt.show()
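For the missing drawMatches on OpenCV 2, one possible workaround (a rough sketch, not from the original tutorial) is to stack the two images side by side and draw the match lines by hand:
def draw_matches_fallback(img1, kp1, img2, kp2, matches):
    # Rough stand-in for cv2.drawMatches: place the two grayscale images side by side
    # and connect matched keypoints with lines
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
    canvas[:h1, :w1] = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    canvas[:h2, w1:w1 + w2] = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    for m in matches:
        x1, y1 = map(int, kp1[m.queryIdx].pt)
        x2, y2 = map(int, kp2[m.trainIdx].pt)
        cv2.line(canvas, (x1, y1), (x2 + w1, y2), (0, 255, 0), 1)
    return canvas

img3 = draw_matches_fallback(img1, kp1, img2, kp2, matches[:10])
plt.imshow(img3[:, :, ::-1]), plt.show()  # BGR -> RGB for matplotlib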