1. python代码:去掉完全相同的图片,重复的图片移动到另一文件夹保存
- import shutil
- import numpy as np
- from PIL import Image
- import os
-
-
def 比较图片大小(dir_image1, dir_image2):
    """Compare two files by size in bytes.

    Args:
        dir_image1: path of the first image file.
        dir_image2: path of the second image file.

    Returns:
        "大小相同" if both files have the same byte size, otherwise "大小不同".
    """
    # os.path.getsize reads only file metadata; the original opened and read
    # each whole file into memory just to take len() of the contents.
    size1 = os.path.getsize(dir_image1)
    size2 = os.path.getsize(dir_image2)
    return "大小相同" if size1 == size2 else "大小不同"
-
-
def 比较图片尺寸(dir_image1, dir_image2):
    """Return "尺寸相同" if the two images share width x height, else "尺寸不同"."""
    size_a = Image.open(dir_image1).size
    size_b = Image.open(dir_image2).size
    return "尺寸相同" if size_a == size_b else "尺寸不同"
-
-
def 比较图片内容(dir_image1, dir_image2):
    """Return "内容相同" when every pixel of the two images matches, else "内容不同"."""
    pixels_a = np.array(Image.open(dir_image1))
    pixels_b = np.array(Image.open(dir_image2))
    identical = np.array_equal(pixels_a, pixels_b)
    return "内容相同" if identical else "内容不同"
-
-
def 比较两张图片是否相同(dir_image1, dir_image2):
    """Decide whether two image files are identical.

    Checks run cheapest-first: byte size, then width/height, then the full
    pixel comparison.  If an earlier check differs the images cannot be
    identical, so we return early and skip the more expensive steps.

    Returns "两张图相同" or "两张图不同".
    """
    if 比较图片大小(dir_image1, dir_image2) != "大小相同":
        return "两张图不同"
    if 比较图片尺寸(dir_image1, dir_image2) != "尺寸相同":
        return "两张图不同"
    if 比较图片内容(dir_image1, dir_image2) != "内容相同":
        return "两张图不同"
    return "两张图相同"
-
-
if __name__ == '__main__':

    load_path = r'D:\data\imgs_dir'          # folder to deduplicate
    save_path = r'D:\data\imgs_dir_repeat'   # empty folder; duplicates are moved here
    os.makedirs(save_path, exist_ok=True)

    # Collect {file path: file size} for every file under load_path
    # (os.walk also descends into subdirectories).
    file_map = {}
    for parent, dirnames, filenames in os.walk(load_path):
        for filename in filenames:
            full_path = os.path.join(parent, filename)
            file_map[full_path] = os.path.getsize(full_path)

    # Sort by size so that byte-identical files end up adjacent: only
    # neighbouring entries then need to be compared.
    file_list = [name for name, _ in sorted(file_map.items(), key=lambda d: d[1])]

    # Compare each file with its successor.  range(len - 1) also covers the
    # 0- and 1-file folders, where the original indexing of
    # file_list[currIndex + 1] raised IndexError before the bounds check ran.
    file_repeat = []
    for i in range(len(file_list) - 1):
        dir_image1 = file_list[i]
        dir_image2 = file_list[i + 1]
        result = 比较两张图片是否相同(dir_image1, dir_image2)
        if result == "两张图相同":
            file_repeat.append(dir_image2)
            print("\n相同的图片:", dir_image1, dir_image2)
        else:
            print('\n不同的图片:', dir_image1, dir_image2)

    # Move the duplicates out of the source folder, deduplicating it in place.
    for image in file_repeat:
        shutil.move(image, save_path)
        print("正在移除重复照片:", image)
-
2. python 代码 去掉模糊图片
- import os
- import cv2
- import shutil
class item:
    """Record pairing an image file name with its sharpness score.

    The caller fills in both attributes after construction; ``val`` holds the
    Laplacian-variance value produced by getImageVar().
    """

    def __init__(self):
        self.val = 10    # sharpness score; placeholder until measured
        self.name = ''   # image file name
-
- #利用拉普拉斯 利用拉普拉斯算子计算图片的二阶导数,反映图片的边缘信息,同样事物的图片,清晰度高的,相对应的经过拉普拉斯算子滤波后的图片的方差也就越大
def getImageVar(imgPath):
    """Score image sharpness as the variance of its Laplacian response.

    Blurry images have weak edges, so the Laplacian (second derivative) of
    the grayscale image has low variance; sharper images of the same scene
    score higher.
    """
    grayscale = cv2.cvtColor(cv2.imread(imgPath), cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(grayscale, cv2.CV_64F).var()
-
-
- if __name__ == "__main__":
- src_img_dir = r"D:\data\img_dir"
- move_img_dir = r"D:\data\img_dir_blur"
- os.makedirs(move_img_dir, exist_ok=True)
- img_files = os.listdir(src_img_dir)
- print("len(img_files): ", len(img_files))
- a_list = []
- for i in range(len(img_files)):
- img_file_path = os.path.join(src_img_dir, img_files[i])
- imageVar = getImageVar(img_file_path)
- # print(imageVar)
- a = item()
- a.val = imageVar
- a.name = img_files[i]
- a_list.append(a)
- print("len(a_list): ", len(a_list))
-
- a_list.sort(key=lambda ita: ita.val, reverse=False) # 对 (图片, 图片清晰度) 结构体 列表 按照 图片清晰度排序, 模糊的放在列表头部, 清晰的放在列表尾部
-
- count = 0
- for i in range(int(len(a_list)*0.1)): # 移除最模糊的 %10 的图片
- print(a_list[i].name, a_list[i].val)
- src_path = os.path.join(src_img_dir, a_list[i].name)
- dest_path = os.path.join(move_img_dir, a_list[i].name)
- shutil.move(src_path, dest_path)
- count += 1
- # break
- print("count: ", count)
3. python代码,设置阈值,去掉结构性相似的图片。后续还要从每组结构性相似的图片,手动筛选一张图片放回原文件夹。
- # coding: utf-8
- import os
- import cv2
- # from skimage.measure import compare_ssim
- # from skimage.metrics import _structural_similarity
- from skimage.metrics import structural_similarity as ssim
- import shutil
-
- # def delete(filename1):
- # os.remove(filename1)
-
-
-
-
def list_all_files(root):
    """Recursively collect the paths of all files under *root*.

    Args:
        root: directory to scan.

    Returns:
        A flat list of file paths.  The original implementation appended the
        recursive result for each subdirectory as a nested list (``append``
        instead of ``extend``), so callers iterating the result received
        lists mixed in with path strings; ``extend`` keeps the result flat.
    """
    files = []
    # os.listdir returns bare names (alphabetically sorted); join with root
    # to obtain usable paths before the isdir/isfile checks.
    for entry in os.listdir(root):
        element = os.path.join(root, entry)
        if os.path.isdir(element):
            files.extend(list_all_files(element))  # bug fix: was append()
        elif os.path.isfile(element):
            files.append(element)
    return files
-
-
def ssim_compare(img_files):
    """Find structurally similar neighbouring images and move them aside.

    Args:
        img_files: flat list of image paths, ordered so likely duplicates are
            adjacent; each consecutive pair is scored with SSIM.

    Returns:
        The number of pairs whose SSIM exceeded the threshold.
    """
    # Hoisted out of the loop: the original assigned thresh inside the loop
    # body, so an empty img_files list made the save_dir line below raise
    # NameError.
    thresh = 0.9
    imgs_n = []
    count = 0
    # range(len - 1) visits every adjacent pair and is safe for 0- or
    # 1-element lists, where the original img_files[currIndex + 1] access
    # raised IndexError before its bounds check ran.
    for i in range(len(img_files) - 1):
        if not os.path.exists(img_files[i]):
            print('not exist', img_files[i])
            break
        img = cv2.imread(img_files[i])
        img1 = cv2.imread(img_files[i + 1])
        # NOTE(review): multichannel= is deprecated in newer scikit-image
        # releases; channel_axis=-1 is the modern spelling — confirm the
        # installed version before switching.
        ssim_value = ssim(img, img1, multichannel=True)
        if ssim_value > thresh:
            count += 1
            imgs_n.append(img_files[i + 1])
            imgs_n.append(img_files[i])
            print('big_ssim:', img_files[i], img_files[i + 1], ssim_value)

    save_dir = r"D:\data\img_dir_sim_" + str(thresh)
    os.makedirs(save_dir, exist_ok=True)

    # Deduplicate the collected paths so each file is moved exactly once.
    for file in set(imgs_n):
        shutil.move(file, os.path.join(save_dir, os.path.basename(file)))
    return count
-
-
if __name__ == '__main__':
    path = r'D:\data\img_dir'

    # Gather every file path under the folder, then report/relocate
    # structurally similar neighbours.
    all_files = list_all_files(path)
    print('len: ', len(all_files))
    count = ssim_compare(all_files)
    print(count)
-
-