import cv2  # OpenCV loads images in BGR channel order
import numpy as np

def show(img):
    cv2.imshow("image", img)
    # alternatively plt.imshow(img), but note matplotlib expects RGB
    cv2.waitKey(0)
    cv2.destroyAllWindows()
image = cv2.imread("image.png"
                   , cv2.IMREAD_COLOR       # colour image (the default)
                   # , cv2.IMREAD_GRAYSCALE # grayscale image
                   )
# imread fails silently and returns None (e.g. wrong path or non-ASCII characters in the path)
cv2.imwrite("path.png", image)  # save an image
vc = cv2.VideoCapture("test.mp4")
if vc.isOpened():
    opened, frame = vc.read()
else:
    opened = False
while opened:
    ret, frame = vc.read()
    if frame is None:
        break
    if ret:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow("name", gray)
        if cv2.waitKey(20) & 0xFF == 27:  # ESC quits
            break
vc.release()
cv2.destroyAllWindows()
split_image = image[0:250, 0:250]  # crop a region of interest

only_R = image.copy()   # keep only the R channel
only_R[:, :, 0] = 0
only_R[:, :, 1] = 0
show(only_R)

only_G = image.copy()   # keep only the G channel
only_G[:, :, 0] = 0
only_G[:, :, 2] = 0
show(only_G)

only_B = image.copy()   # keep only the B channel
only_B[:, :, 1] = 0
only_B[:, :, 2] = 0
show(only_B)

image[:, :, 0] + image[:, :, 1]
# arrays must have the same shape (and dtype) to be added
# note: numpy "+" on uint8 wraps around on overflow (mod 256), while cv2.add saturates at 255
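A minimal sketch of the wrap-around versus saturation difference, using two one-pixel uint8 arrays whose values are chosen only for illustration:
a = np.uint8([250])
b = np.uint8([10])
print(a + b)          # [4]: numpy wraps around, 260 % 256
print(cv2.add(a, b))  # [[255]]: cv2.add saturates at 255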
B, G, R = cv2.split(image)    # split into single-channel images
image = cv2.merge((B, G, R))  # merge them back in BGR order (for plt.imshow the channels must be reordered, see below)
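Because OpenCV stores images as BGR while matplotlib expects RGB, a small sketch of displaying the same image correctly with plt.imshow (assumes matplotlib is installed):
import matplotlib.pyplot as plt
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # reorder channels for matplotlib
plt.imshow(rgb)
plt.axis("off")
plt.show()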
new_image = cv2.resize(image, (500, 500))  # the target size is (width, height)
dog_cat = cv2.addWeighted(dog_image, 0.5, cat_image, 0.5, 0)  # weighted blend; both images must have the same shape
all_images = np.hstack((image1, image2, image3))  # images must share the same height and dtype
show(all_images)
# cv2.copyMakeBorder(src, top, bottom, left, right, borderType)
cat1 = cv2.copyMakeBorder(image
                          , 50  # top
                          , 50  # bottom
                          , 50  # left
                          , 50  # right
                          , borderType=cv2.BORDER_REPLICATE
                          # with cv2.BORDER_CONSTANT an extra value= argument sets the fill colour
                          )
"""
BORDER_REPLICATE:复制法,也就是复制最边缘像素。
BORDER_REFLECT:反射法,对感兴趣的图像中的像素在两边进行复制例如:fedcba|abcdefgh|hgfedcb
BORDER_REFLECT_101:反射法,也就是以最边缘像素为轴,对称,gfedcb|abcdefgh|gfedcba
BORDER_WRAP:外包装法cdefgh|abcdefgh|abcdefg
BORDER_CONSTANT:常量法,常数值填充。
"""
# ret is the threshold value that was used
ret, new_image = cv2.threshold(image
                               , 127   # threshold
                               , 220   # maxval
                               , cv2.THRESH_BINARY
                               )
"""
cv2.THRESH_BINARY 超过阈值部分取maxval(最大值),否则取0
cv2.THRESH_BINARY_INV (THRESH_BINARY的反转)
cv2.THRESH_TRUNC 大于阈值部分设为阈值,否则不变
cv2.THRESH_TOZERO 大于阈值部分不改变,否则设为0
cv2.THRESH_TOZERO_INV THRESH_TOZERO的反转
"""
# median blur (the most common); 3 means a 3*3 neighbourhood
new_image4 = cv2.medianBlur(image, 3)
# mean (averaging) blur
new_image1 = cv2.blur(image, (3, 3))
# box filter: with normalize=True it equals the mean blur; with False the sum is not divided by 3*3 and may saturate
new_image2 = cv2.boxFilter(image, -1, (3, 3), normalize=True)
# Gaussian blur: the kernel weights follow a Gaussian
new_image3 = cv2.GaussianBlur(image, (3, 3), 1)
# morphological erosion (shrinks the white/foreground regions)
# iterations is the number of times the operation is applied
qiu = cv2.imread("qiu.png", cv2.IMREAD_GRAYSCALE)
N = np.ones((3, 3), np.uint8)  # structuring element (kernel)
new_qiu = cv2.erode(qiu, N, iterations=5)
show(new_qiu)
# morphological dilation (expands the white/foreground regions)
N = np.ones((3, 3), np.uint8)
new_qiu = cv2.dilate(qiu
                     , N
                     , iterations=4
                     )
show(new_qiu)
# opening: erosion followed by dilation (removes small white noise)
new_qiu = cv2.morphologyEx(qiu
                           , cv2.MORPH_OPEN
                           , N
                           , iterations=1
                           )
# closing: dilation followed by erosion (fills small holes in the foreground)
new_qiu = cv2.morphologyEx(qiu
                           , cv2.MORPH_CLOSE
                           , N
                           , iterations=1
                           )
# morphological gradient (dilation minus erosion), leaves the object outline
new_qiu = cv2.morphologyEx(qiu
                           , cv2.MORPH_GRADIENT
                           , N
                           , iterations=1
                           )
# top hat (original minus opening)
new_qiu = cv2.morphologyEx(qiu
                           , cv2.MORPH_TOPHAT
                           , N
                           , iterations=1
                           )
# black hat (closing minus original)
new_qiu = cv2.morphologyEx(qiu
                           , cv2.MORPH_BLACKHAT
                           , N
                           , iterations=1
                           )
# (figure: 1 is the original, 2 the eroded result, 3 the dilated result)
# Sobel: dx gives the horizontal gradient, dy the vertical one; ksize is the kernel size
# ddepth=cv2.CV_64F keeps negative gradient values that a uint8 output would clip to 0
# horizontal gradient
yuan=cv2.imread("yuan.png",cv2.IMREAD_GRAYSCALE)
x_new_yuan = cv2.Sobel(yuan
                       , cv2.CV_64F
                       , 1        # derivative order in x
                       , 0        # derivative order in y
                       , ksize=3  # kernel size
                       )
x_new_yuan = cv2.convertScaleAbs(x_new_yuan)
# convertScaleAbs takes the absolute value, then saturates and converts back to uint8
# vertical gradient
y_new_yuan = cv2.Sobel(yuan, cv2.CV_64F, 0, 1, ksize=3)
y_new_yuan = cv2.convertScaleAbs(y_new_yuan)
# computing dx and dy in a single call gives poor results
# instead compute them separately and blend the two results
xy_new_yuan = cv2.addWeighted(x_new_yuan, 0.5, y_new_yuan, 0.5, 0)
show(xy_new_yuan)
Operators such as Sobel have drawbacks: they do not make full use of the gradient direction at an edge, and the final binary image is produced with a single, simple threshold.
yuan=cv2.imread("yuan.png",cv2.IMREAD_GRAYSCALE)
x_new_yuan = cv2.Scharr(yuan
                        , cv2.CV_64F
                        , 1  # derivative order in x
                        , 0  # derivative order in y
                        )
x_new_yuan = cv2.convertScaleAbs(x_new_yuan)
# convertScaleAbs takes the absolute value, then saturates and converts back to uint8
# vertical gradient
y_new_yuan = cv2.Scharr(yuan, cv2.CV_64F, 0, 1)
y_new_yuan = cv2.convertScaleAbs(y_new_yuan)
# as with Sobel, compute the two directions separately and blend the results
xy_new_yuan = cv2.addWeighted(x_new_yuan, 0.5, y_new_yuan, 0.5, 0)
show(xy_new_yuan)
yuan=cv2.imread("yuan.png",cv2.IMREAD_GRAYSCALE)
x_new_yuan = cv2.Laplacian(yuan, cv2.CV_64F)
# the Laplacian result also contains negative values, so convert before displaying
x_new_yuan = cv2.convertScaleAbs(x_new_yuan)
show(x_new_yuan)
# comparison of the three operators
# Sobel operator
car = cv2.imread("car.png", cv2.IMREAD_GRAYSCALE)
x_new_car = cv2.Sobel(car, cv2.CV_64F, 1, 0, ksize=3)
x_new_car = cv2.convertScaleAbs(x_new_car)
y_new_car = cv2.Sobel(car, cv2.CV_64F, 0, 1, ksize=3)
y_new_car = cv2.convertScaleAbs(y_new_car)
xy_new_car1 = cv2.addWeighted(x_new_car, 0.5, y_new_car, 0.5, 0)
# Scharr operator
x_new_car = cv2.Scharr(car, cv2.CV_64F, 1, 0)
x_new_car = cv2.convertScaleAbs(x_new_car)
y_new_car = cv2.Scharr(car, cv2.CV_64F, 0, 1)
y_new_car = cv2.convertScaleAbs(y_new_car)
xy_new_car2 = cv2.addWeighted(x_new_car, 0.5, y_new_car, 0.5, 0)
# Laplacian operator
car = cv2.imread("car.png", cv2.IMREAD_GRAYSCALE)
xy_new_car3 = cv2.Laplacian(car, cv2.CV_64F)
xy_new_car3 = cv2.convertScaleAbs(xy_new_car3)  # convert to uint8 so it can be stacked with the others
# stack the three results side by side
all = np.hstack((xy_new_car1, xy_new_car2, xy_new_car3))
show(all)
Canny edge detection, internal steps:
1. Smooth the image with a Gaussian filter to remove noise.
2. Compute the gradient magnitude and direction at each pixel.
3. Apply non-maximum suppression to thin out spurious edge responses.
4. Apply double (hysteresis) thresholding to find real and potential edges.
5. Finish the detection by suppressing isolated weak edges.
car=cv2.imread("car.png",cv2.IMREAD_GRAYSCALE)
new_car = cv2.Canny(car, 80, 200)  # 80 and 200 are the lower/upper gradient thresholds for hysteresis
show(new_car)
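A quick sketch comparing two threshold pairs on the same image (the values 50/100 and 120/250 are chosen only for illustration): lower thresholds keep more weak edges, higher ones keep only strong edges.
loose = cv2.Canny(car, 50, 100)    # more (and noisier) edges survive
strict = cv2.Canny(car, 120, 250)  # only strong edges remain
show(np.hstack((loose, strict)))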
# Gaussian pyramid, downsampling (halves the size; fine detail is lost)
new_car = cv2.pyrDown(car)
# Gaussian pyramid, upsampling (doubles the size; the result is somewhat blurry)
new_car = cv2.pyrUp(car)
# applying both in sequence does not restore the original; the result is usually slightly blurred
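A small sketch that makes this visible: the difference between the original and its down-then-up resampled version is one level of a Laplacian pyramid (this assumes car has even width and height so the sizes match after pyrUp):
down = cv2.pyrDown(car)
up = cv2.pyrUp(down)                     # same size as car only when car's sides are even
laplacian_level = cv2.subtract(car, up)  # residual detail lost by the down/up round trip
show(np.hstack((car, up, laplacian_level)))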
import numpy as np
import matplotlib.pyplot as plt
han_han=cv2.imread("hanhan.jpg",cv2.IMREAD_GRAYSCALE)
equ=cv2.equalizeHist(han_han)
two=np.hstack((han_han,equ))
show(two)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.hist(han_han.ravel(), 255)  # histogram before equalization
ax2.hist(equ.ravel(), 255)      # histogram after equalization
fig.subplots_adjust(wspace=0)
plt.show()
Histogram equalization
Equalization spreads the pixel-value distribution across the full intensity range.
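A minimal numpy-only sketch of what equalizeHist does internally: the mapping is built from the cumulative histogram (variable names here are mine, and the result is only close to, not identical to, the OpenCV output):
hist, _ = np.histogram(han_han.ravel(), bins=256, range=(0, 256))
cdf = hist.cumsum()
cdf_norm = (cdf - cdf.min()) / (cdf.max() - cdf.min())  # normalized cumulative distribution
lut = np.round(cdf_norm * 255).astype(np.uint8)         # lookup table: old value -> new value
manual_equ = lut[han_han]                               # apply the mapping pixel by pixel
show(np.hstack((equ, manual_equ)))                      # compare with the cv2.equalizeHist result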
import numpy as np
import cv2  # OpenCV loads images in BGR order
import matplotlib.pyplot as plt

def show(img):
    cv2.imshow("image", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
image1 = cv2.imread("hanhan.jpg", 0)  # 0 = cv2.IMREAD_GRAYSCALE
image2 = image1.copy()
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# create a CLAHE (contrast-limited adaptive histogram equalization) object
res = c.apply(image1)
# apply it to the image
all=np.hstack((image1,res))
show(all)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.hist(image1.ravel(), 255)  # histogram before CLAHE
ax2.hist(res.ravel(), 255)     # histogram after CLAHE
fig.subplots_adjust(wspace=0)
plt.show()
import numpy as np
import cv2  # OpenCV loads images in BGR order

def show(img):
    cv2.imshow("image", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
image=cv2.imread("many_pictures.png")
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
#图像二极化
ret,thre=cv2.threshold(gray,100,255,cv2.THRESH_BINARY)
#contours为所有轮廓
contours,hierarchy=cv2.findContours(thre
,cv2.RETR_TREE#轮廓检索方法
,cv2.CHAIN_APPROX_NONE
#轮廓逼近方法(另有cv2.CHAIN_APPROX_SIMPLE)
)
image1 = image.copy()
new_image = cv2.drawContours(image1            # draw on a colour image; on a grayscale image the result is gray
                             , contours        # the contours to draw
                             , -1              # -1 draws every contour
                             , (100, 100, 55)  # contour colour in BGR
                             , 2               # contour line width
                             )
all = np.hstack((image, new_image))
show(all)
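A short sketch of common per-contour measurements on the contours found above (the index 0 is arbitrary, any contour works):
cnt = contours[0]
area = cv2.contourArea(cnt)           # enclosed area in pixels
perimeter = cv2.arcLength(cnt, True)  # True means the contour is closed
x, y, w, h = cv2.boundingRect(cnt)    # upright bounding rectangle
boxed = cv2.rectangle(image.copy(), (x, y), (x + w, y + h), (0, 255, 0), 2)
print(area, perimeter)
show(boxed)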
import cv2  # OpenCV loads images in BGR order

def show(img):
    cv2.imshow("image", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# contour detection
image = cv2.imread("many_pictures.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# binarize the image
ret, thre = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
# contours holds every detected contour
contours, hierarchy = cv2.findContours(thre
                                       , cv2.RETR_TREE          # contour retrieval mode
                                       , cv2.CHAIN_APPROX_NONE  # contour approximation method (cv2.CHAIN_APPROX_SIMPLE stores fewer points)
                                       )
image1 = image.copy()
cnt = contours[1]  # pick one contour by index
epsilon = 0.095 * cv2.arcLength(cnt, True)     # approximation tolerance, as a fraction of the perimeter
approx = cv2.approxPolyDP(cnt, epsilon, True)  # build the approximated polygon
new_image = cv2.drawContours(image1         # draw on a colour image; on a grayscale image the result is gray
                             , [approx]     # the (single) approximated contour
                             , -1           # -1 draws every contour in the list
                             , (0, 0, 255)  # contour colour in BGR
                             , 4            # contour line width
                             )
show(new_image)
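The size of epsilon controls how aggressively approxPolyDP simplifies the contour; a sketch drawing two tolerances side by side (the factors 0.01 and 0.1 are only illustrative):
for factor in (0.01, 0.1):
    eps = factor * cv2.arcLength(cnt, True)
    poly = cv2.approxPolyDP(cnt, eps, True)
    print(factor, len(poly))  # fewer points remain as the tolerance grows
    show(cv2.drawContours(image.copy(), [poly], -1, (0, 0, 255), 4))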
import matplotlib.pyplot as plt
import numpy as np
import cv2  # OpenCV loads images in BGR order
# template matching
image = cv2.imread("dog.png", 0)
template = cv2.imread("dogface.png", 0)
# record the template's height and width
height, width = template.shape[:2]
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
fig=plt.figure(figsize=(15,9))
for i, meth in enumerate(methods):
    img2 = image.copy()
    # turn the method name string into the actual OpenCV flag
    method = eval(meth)
    # res scores the template against every position in the image
    res = cv2.matchTemplate(image, template, method)
    # best/worst scores and their locations (their meaning depends on the method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # determine the top-left corner of the match rectangle
    # for TM_SQDIFF and TM_SQDIFF_NORMED the best match is the minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + width, top_left[1] + height)
    # draw the match rectangle
    cv2.rectangle(img2              # image to draw on
                  , top_left        # top-left corner
                  , bottom_right    # bottom-right corner
                  , (100, 100, 55)  # rectangle colour
                  , 2               # line width
                  )
    ax = fig.add_subplot(2, 3, i + 1)
    ax.imshow(img2, cmap="gray")
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_title(meth, fontsize=10)  # subplot title and font size
plt.show()
Prefer the normalized (*_NORMED) methods: normalization makes the match scores easier to compare. Note that for the two TM_SQDIFF variants the values returned by
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
are interpreted differently: the best match is at min_loc rather than max_loc.
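When the image contains several copies of the template, a single minMaxLoc is not enough; a common sketch is to threshold the whole score map instead (the 0.8 cut-off is only an example, and TM_CCOEFF_NORMED is assumed):
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(res >= 0.8)                     # every position whose score passes the cut-off
multi = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
for pt in zip(*loc[::-1]):                     # np.where gives (rows, cols); swap to (x, y)
    cv2.rectangle(multi, pt, (pt[0] + width, pt[1] + height), (0, 0, 255), 2)
show(multi)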