# -*- coding: utf-8 -*-
import os
import hashlib
from collections import defaultdict
def file_hash(file_path, block_size=65536):
    # Compute the MD5 hash of a file, reading it in fixed-size blocks
    # so large files are not loaded into memory all at once
    hasher = hashlib.md5()
    with open(file_path, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b''):
            hasher.update(block)
    return hasher.hexdigest()
def find_duplicates(directory):
    file_sizes = defaultdict(list)
    duplicates = []
    # Walk the directory and its subdirectories, grouping files by size
    for root, dirs, files in os.walk(directory):
        for filename in files:
            file_path = os.path.join(root, filename)
            size = os.path.getsize(file_path)
            file_sizes[size].append(file_path)
    # For files that share a size, compare MD5 hashes to confirm real duplicates
    for size, paths in file_sizes.items():
        if len(paths) > 1:
            hash_dict = defaultdict(list)
            for file_path in paths:
                file_hash_val = file_hash(file_path)
                hash_dict[file_hash_val].append(file_path)
            # Any hash shared by more than one path marks a group of duplicates
            for hash_val, same_files in hash_dict.items():
                if len(same_files) > 1:
                    duplicates.append(same_files)
    return duplicates
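With the duplicate groups in hand, the deletion half of the task can be done with `os.remove`. The snippet below is a minimal usage sketch that keeps the first file in each group and removes the rest; the target directory, the keep-first policy, and the `__main__` guard are assumptions chosen for illustration rather than part of the original listing.

if __name__ == '__main__':
    # Assumed example directory; point this at the folder you want to scan
    target_dir = '.'
    duplicate_groups = find_duplicates(target_dir)
    for group in duplicate_groups:
        print('Duplicate group:', group)
        # Keep the first copy and delete the rest (assumed policy; adjust as needed)
        for extra_copy in group[1:]:
            os.remove(extra_copy)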