Mirror of https://github.com/arsenetar/dupeguru.git, synced 2026-01-22 14:41:39 +00:00
Add partial hashes optimization for big files
* Big files above the user-selected threshold can be partially hashed in 3 places.
* If the user is willing to take the risk, we consider files with identical md5samples as being identical.
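In other words, when both candidates exceed the size threshold and the user opts in, the scan compares the three sampled hashes (`md5samples`) instead of the full-file `md5`. The sketch below only illustrates that decision; `PARTIAL_HASH_THRESHOLD`, `big_file_partial_hashes` and `files_are_equal` are hypothetical names, not part of this commit.

    # Illustrative sketch only; the names below are hypothetical, not dupeGuru APIs.
    PARTIAL_HASH_THRESHOLD = 100 * 1024 * 1024  # e.g. a user-selected 100 MiB threshold

    def files_are_equal(f1, f2, big_file_partial_hashes=True):
        """Compare two File objects the way the commit message describes."""
        if big_file_partial_hashes and min(f1.size, f2.size) > PARTIAL_HASH_THRESHOLD:
            # Risk-accepting mode: three sampled 1 MiB chunks stand in for the whole file.
            return f1.md5samples == f2.md5samples
        # Default mode: full-content hash.
        return f1.md5 == f2.md5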
core/fs.py (67 lines changed)
@@ -12,6 +12,8 @@
 # and I'm doing it now.
 
 import hashlib
+from math import floor
+import inspect
 import logging
 
 from hscommon.util import nonone, get_file_ext
@@ -30,6 +32,11 @@ __all__ = [
 
 NOT_SET = object()
 
+# The goal here is to not run out of memory on really big files. However, the chunk
+# size has to be large enough so that the python loop isn't too costly in terms of
+# CPU.
+CHUNK_SIZE = 1024 * 1024  # 1 MiB
+
 
 class FSError(Exception):
     cls_message = "An error has occured on '{name}' in '{parent}'"
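As the comment notes, the chunk size trades memory against Python loop overhead: 1 MiB caps the read buffer while keeping the per-file iteration count low (a 4 GiB file takes 4096 iterations). A minimal, self-contained sketch of a streaming hash driven by such a constant, assuming any readable file path:

    import hashlib

    CHUNK_SIZE = 1024 * 1024  # 1 MiB, same memory/CPU tradeoff as above

    def stream_md5(path):
        """Hash a file of any size while holding at most ~1 MiB in memory."""
        md5 = hashlib.md5()
        with open(path, "rb") as fp:
            while chunk := fp.read(CHUNK_SIZE):
                md5.update(chunk)
        return md5.digest()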
@@ -78,6 +85,7 @@ class File:
         "mtime": 0,
         "md5": "",
         "md5partial": "",
+        "md5samples": []
     }
     # Slots for File make us save quite a bit of memory. In a memory test I've made with a lot of
     # files, I saved 35% memory usage with "unread" files (no _read_info() call) and gains become
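The memory figure in the comment comes from `__slots__` declared on `File`: slotted instances carry no per-instance `__dict__`, which matters when many mostly unread `File` objects are held at once. A small illustration of the mechanism (the 35% figure is the author's own measurement and is not reproduced here):

    import sys

    class WithDict:
        def __init__(self):
            self.size = 0

    class WithSlots:
        __slots__ = ("size",)

        def __init__(self):
            self.size = 0

    # The slotted instance has no __dict__ at all, so each object is smaller.
    print(sys.getsizeof(WithDict().__dict__))  # per-instance dict overhead
    print(sys.getsizeof(WithSlots()))          # fixed-size slotted instance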
@@ -96,6 +104,7 @@ class File:
         result = object.__getattribute__(self, attrname)
         if result is NOT_SET:
             try:
+                print(f"Try get attr for {self} {attrname}")
                 self._read_info(attrname)
             except Exception as e:
                 logging.warning(
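For context, `File` attributes are lazy: they start at the `NOT_SET` sentinel and the first access triggers `_read_info()` to fill in the real value; the print added above traces which attribute forced a read. A stripped-down sketch of the same pattern, independent of dupeGuru's classes:

    NOT_SET = object()  # sentinel distinguishing "never computed" from falsy values

    class LazyFile:
        def __init__(self, path):
            self.path = path
            self.md5samples = NOT_SET

        def __getattribute__(self, attrname):
            result = object.__getattribute__(self, attrname)
            if result is NOT_SET:
                self._read_info(attrname)  # compute on first access
                result = object.__getattribute__(self, attrname)
            return result

        def _read_info(self, field):
            if field == "md5samples":
                self.md5samples = ("stub",)  # the real class hashes three chunks here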
@@ -117,32 +126,50 @@ class File:
             self.size = nonone(stats.st_size, 0)
             self.mtime = nonone(stats.st_mtime, 0)
         elif field == "md5partial":
+            print(f"_read_info md5partial {self}")
             try:
-                fp = self.path.open("rb")
-                offset, size = self._get_md5partial_offset_and_size()
-                fp.seek(offset)
-                partialdata = fp.read(size)
-                md5 = hashlib.md5(partialdata)
-                self.md5partial = md5.digest()
-                fp.close()
+                with self.path.open("rb") as fp:
+                    offset, size = self._get_md5partial_offset_and_size()
+                    fp.seek(offset)
+                    partialdata = fp.read(size)
+                    md5 = hashlib.md5(partialdata)
+                    self.md5partial = md5.digest()
             except Exception:
                 pass
         elif field == "md5":
             try:
-                fp = self.path.open("rb")
-                md5 = hashlib.md5()
-                # The goal here is to not run out of memory on really big files. However, the chunk
-                # size has to be large enough so that the python loop isn't too costly in terms of
-                # CPU.
-                CHUNK_SIZE = 1024 * 1024  # 1 mb
-                filedata = fp.read(CHUNK_SIZE)
-                while filedata:
-                    md5.update(filedata)
-                    filedata = fp.read(CHUNK_SIZE)
-                self.md5 = md5.digest()
-                fp.close()
+                with self.path.open("rb") as fp:
+                    md5 = hashlib.md5()
+                    while filedata := fp.read(CHUNK_SIZE):
+                        md5.update(filedata)
+                    self.md5 = md5.digest()
             except Exception:
                 pass
+        elif field == "md5samples":
+            print(f"computing md5chunks for {self}, caller: {inspect.stack()[1][3]}")
+            try:
+                with self.path.open("rb") as fp:
+                    md5chunks = []
+                    # Chunk at 25% of the file
+                    fp.seek(floor(self.size * 25 / 100), 0)
+                    filedata = fp.read(CHUNK_SIZE)
+                    md5chunks.append(hashlib.md5(filedata).hexdigest())
+
+                    # Chunk at 60% of the file
+                    fp.seek(floor(self.size * 60 / 100), 0)
+                    filedata = fp.read(CHUNK_SIZE)
+                    md5chunks.append(hashlib.md5(filedata).hexdigest())
+
+                    # Last chunk of the file
+                    fp.seek(-CHUNK_SIZE, 2)
+                    filedata = fp.read(CHUNK_SIZE)
+                    md5chunks.append(hashlib.md5(filedata).hexdigest())
+
+                    # Use setattr to avoid circular (de)reference
+                    setattr(self, field, tuple(md5chunks))
+            except Exception as e:
+                logging.error(f"Error computing md5samples: {e}")
+                pass
 
     def _read_all_info(self, attrnames=None):
         """Cache all possible info.
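The new md5samples branch hashes three 1 MiB windows: one starting at 25% of the file, one at 60%, and the file's final chunk. The function below restates the same sampling as standalone code for clarity; it is an illustration, not part of the commit (the real code stores the tuple on the `File` instance via `setattr`):

    import hashlib
    from math import floor

    CHUNK_SIZE = 1024 * 1024  # 1 MiB

    def md5_samples(path, size):
        """Return md5 hexdigests of three 1 MiB windows of the file at `path`."""
        chunks = []
        with open(path, "rb") as fp:
            for offset in (floor(size * 25 / 100), floor(size * 60 / 100)):
                fp.seek(offset, 0)
                chunks.append(hashlib.md5(fp.read(CHUNK_SIZE)).hexdigest())
            fp.seek(-CHUNK_SIZE, 2)  # last 1 MiB; raises OSError if size < CHUNK_SIZE
            chunks.append(hashlib.md5(fp.read(CHUNK_SIZE)).hexdigest())
        return tuple(chunks)

Note that the final seek(-CHUNK_SIZE, 2) only works for files of at least 1 MiB, which fits the commit's intent of taking this path only for big files above the user-selected threshold; in the committed code the broad except also swallows and logs that failure.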
@@ -221,6 +248,8 @@ class Folder(File):
             # What's sensitive here is that we must make sure that subfiles'
             # md5 are always added up in the same order, but we also want a
             # different md5 if a file gets moved in a different subdirectory.
+            print(f"Getting {field} of folder {self}...")
+
             def get_dir_md5_concat():
                 items = self._all_items()
                 items.sort(key=lambda f: f.path)
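The hunk ends mid-helper, but the surrounding comment states the two requirements: child hashes must always be concatenated in the same order, and moving a file to a different subdirectory should still change the folder's hash; sorting by path before concatenating addresses the ordering part. The continuation below is an assumed sketch of how such a helper is typically finished, not the truncated code itself:

    import hashlib

    # Assumed sketch; the commit's own continuation is not shown in this hunk.
    def folder_field_digest(folder, field="md5"):
        items = folder._all_items()
        items.sort(key=lambda f: f.path)  # stable, path-dependent order
        concatenated = b"".join(getattr(f, field) for f in items)
        return hashlib.md5(concatenated).digest()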