Merge pull request #908 from glubsy/hash_sample_optimization
Hash sample optimization
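Summary: for a Contents scan, hashing every byte of very large files is expensive. This change adds an `md5samples` hash computed from a few sampled chunks of a file, plus a user setting (`big_file_size_threshold`; 0 disables the optimization) above which the matcher compares `md5samples` instead of the full `md5`. The diff below updates the tests for the new attribute and setting. As a minimal sketch of the sampling idea (chunk size and offsets here are assumptions for illustration, not the commit's exact values):

```python
import hashlib

CHUNK_SIZE = 1024 * 1024  # sample size; illustrative, not necessarily the commit's constant


def md5samples(path, chunk_size=CHUNK_SIZE):
    """Hash a few fixed-size samples of a file instead of its full contents."""
    md5 = hashlib.md5()
    with open(path, "rb") as fp:
        fp.seek(0, 2)  # seek to the end to learn the file size
        size = fp.tell()
        # Sample the start, the middle and the end of the file.
        for offset in (0, size // 2, max(size - chunk_size, 0)):
            fp.seek(offset)
            md5.update(fp.read(chunk_size))
    return md5.digest()
```

Reading three fixed-size chunks is constant work regardless of file size, so a multi-gigabyte file no longer has to be read end to end just to be ruled out as a duplicate.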
@@ -88,6 +88,7 @@ class NamedObject:
        self.size = size
        self.md5partial = name
        self.md5 = name
        self.md5samples = name
        if with_words:
            self.words = getwords(name)
        self.is_ref = False

@@ -551,6 +551,28 @@ class TestCaseGetMatchesByContents:
        o1, o2 = no(size=0), no(size=0)
        assert not getmatches_by_contents([o1, o2])

    def test_big_file_partial_hashes(self):
        smallsize = 1
        bigsize = 100 * 1024 * 1024  # 100MiB
        f = [no("bigfoo", size=bigsize), no("bigbar", size=bigsize),
             no("smallfoo", size=smallsize), no("smallbar", size=smallsize)]
        f[0].md5 = f[0].md5partial = f[0].md5samples = "foobar"
        f[1].md5 = f[1].md5partial = f[1].md5samples = "foobar"
        f[2].md5 = f[2].md5partial = "bleh"
        f[3].md5 = f[3].md5partial = "bleh"
        r = getmatches_by_contents(f, bigsize=bigsize)
        eq_(len(r), 2)
        # The user disabled the optimization for big files; hashes are computed as usual
        r = getmatches_by_contents(f, bigsize=0)
        eq_(len(r), 2)
        # The other file is now slightly different; md5partial is still the same
        f[1].md5 = f[1].md5samples = "foobardiff"
        r = getmatches_by_contents(f, bigsize=bigsize)
        # It is successfully filtered out
        eq_(len(r), 1)
        r = getmatches_by_contents(f, bigsize=0)
        eq_(len(r), 1)


class TestCaseGroup:
    def test_empy(self):
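The `test_big_file_partial_hashes` test above pins down the selection rule the matcher is expected to follow. A plausible sketch of that rule (an illustrative helper, not the actual `getmatches_by_contents` internals):

```python
def contents_match(first, second, bigsize):
    # md5partial (a hash of the first part of the file) is always the cheap pre-filter.
    if first.md5partial != second.md5partial:
        return False
    if bigsize > 0 and first.size > bigsize:
        # Optimization enabled and the file is big: compare sample hashes only.
        return first.md5samples == second.md5samples
    # Small file, or optimization disabled (bigsize == 0): compare full hashes.
    return first.md5 == second.md5
```

Note that the test changes both `md5` and `md5samples` on `f[1]`, so the big pair is filtered out in both modes: via `md5samples` when `bigsize` is set, and via the full `md5` when it is 0.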
@@ -7,6 +7,7 @@
# http://www.gnu.org/licenses/gpl-3.0.html

import hashlib
from os import urandom

from hscommon.path import Path
from hscommon.testutil import eq_

@@ -15,6 +16,36 @@ from core.tests.directories_test import create_fake_fs
from .. import fs


def create_fake_fs_with_random_data(rootpath):
    rootpath = rootpath["fs"]
    rootpath.mkdir()
    rootpath["dir1"].mkdir()
    rootpath["dir2"].mkdir()
    rootpath["dir3"].mkdir()
    fp = rootpath["file1.test"].open("wb")
    data1 = urandom(200 * 1024)  # 200KiB
    data2 = urandom(1024 * 1024)  # 1MiB
    data3 = urandom(10 * 1024 * 1024)  # 10MiB
    fp.write(data1)
    fp.close()
    fp = rootpath["file2.test"].open("wb")
    fp.write(data2)
    fp.close()
    fp = rootpath["file3.test"].open("wb")
    fp.write(data3)
    fp.close()
    fp = rootpath["dir1"]["file1.test"].open("wb")
    fp.write(data1)
    fp.close()
    fp = rootpath["dir2"]["file2.test"].open("wb")
    fp.write(data2)
    fp.close()
    fp = rootpath["dir3"]["file3.test"].open("wb")
    fp.write(data3)
    fp.close()
    return rootpath


def test_size_aggregates_subfiles(tmpdir):
    p = create_fake_fs(Path(str(tmpdir)))
    b = fs.Folder(p)
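`create_fake_fs_with_random_data` writes each random buffer twice: once at the root and once inside a subdirectory. That invariant is what lets the aggregation tests below predict a folder's hash from its files' hashes. A quick illustrative check (assuming the `Path` objects used above can also be opened for reading; this snippet is not part of the diff):

```python
import hashlib


def file_md5(path):
    # Full-content hash of one file in the fake fs.
    with path.open("rb") as fp:
        return hashlib.md5(fp.read()).digest()


# file1.test at the root and dir1/file1.test were written from the same
# urandom buffer, so their full-content hashes must agree.
assert file_md5(rootpath["file1.test"]) == file_md5(rootpath["dir1"]["file1.test"])
```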
@@ -25,7 +56,7 @@ def test_md5_aggregate_subfiles_sorted(tmpdir):
    # dir.allfiles can return children in any order. Thus, bundle.md5 must aggregate
    # the md5s of all the files it contains, but it must make sure that it does so in
    # the same order every time.
    p = create_fake_fs(Path(str(tmpdir)))
    p = create_fake_fs_with_random_data(Path(str(tmpdir)))
    b = fs.Folder(p)
    md51 = fs.File(p["dir1"]["file1.test"]).md5
    md52 = fs.File(p["dir2"]["file2.test"]).md5

@@ -41,6 +72,36 @@ def test_md5_aggregate_subfiles_sorted(tmpdir):
    eq_(b.md5, md5.digest())


def test_partial_md5_aggregate_subfile_sorted(tmpdir):
    p = create_fake_fs_with_random_data(Path(str(tmpdir)))
    b = fs.Folder(p)
    md51 = fs.File(p["dir1"]["file1.test"]).md5partial
    md52 = fs.File(p["dir2"]["file2.test"]).md5partial
    md53 = fs.File(p["dir3"]["file3.test"]).md5partial
    md54 = fs.File(p["file1.test"]).md5partial
    md55 = fs.File(p["file2.test"]).md5partial
    md56 = fs.File(p["file3.test"]).md5partial
    # The expected md5 is the md5 of the folders' md5s plus the files' direct md5s
    folder_md51 = hashlib.md5(md51).digest()
    folder_md52 = hashlib.md5(md52).digest()
    folder_md53 = hashlib.md5(md53).digest()
    md5 = hashlib.md5(folder_md51 + folder_md52 + folder_md53 + md54 + md55 + md56)
    eq_(b.md5partial, md5.digest())

    md51 = fs.File(p["dir1"]["file1.test"]).md5samples
    md52 = fs.File(p["dir2"]["file2.test"]).md5samples
    md53 = fs.File(p["dir3"]["file3.test"]).md5samples
    md54 = fs.File(p["file1.test"]).md5samples
    md55 = fs.File(p["file2.test"]).md5samples
    md56 = fs.File(p["file3.test"]).md5samples
    # The expected md5 is the md5 of the folders' md5s plus the files' direct md5s
    folder_md51 = hashlib.md5(md51).digest()
    folder_md52 = hashlib.md5(md52).digest()
    folder_md53 = hashlib.md5(md53).digest()
    md5 = hashlib.md5(folder_md51 + folder_md52 + folder_md53 + md54 + md55 + md56)
    eq_(b.md5samples, md5.digest())


def test_has_file_attrs(tmpdir):
    # a Folder must behave like a file, so it must have mtime attributes
    b = fs.Folder(Path(str(tmpdir)))
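Both aggregate tests encode the same recursive rule: a file contributes its hash bytes directly, while a folder contributes the md5 of its children's contributions, visited in a stable sorted order so the result never depends on directory enumeration order. A sketch of that rule (the `is_folder`/`children`/`name` attribute names are assumptions for illustration):

```python
import hashlib


def aggregate_hash(node, attr):
    # attr is "md5", "md5partial" or "md5samples", as in the tests above.
    if not node.is_folder:
        return getattr(node, attr)  # a file contributes its hash bytes directly
    md5 = hashlib.md5()
    for child in sorted(node.children, key=lambda c: c.name):
        # a folder hashes the concatenation of its children's contributions
        md5.update(aggregate_hash(child, attr))
    return md5.digest()
```

With `dir1` holding a single file, its contribution is `hashlib.md5(md51).digest()`, which is exactly the `folder_md51` term the tests build by hand.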
@@ -56,6 +56,7 @@ def test_default_settings(fake_fileexists):
    eq_(s.mix_file_kind, True)
    eq_(s.word_weighting, False)
    eq_(s.match_similar_words, False)
    eq_(s.big_file_size_threshold, 0)


def test_simple_with_default_settings(fake_fileexists):

@@ -120,9 +121,9 @@ def test_content_scan(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents
    f = [no("foo"), no("bar"), no("bleh")]
    f[0].md5 = f[0].md5partial = "foobar"
    f[1].md5 = f[1].md5partial = "foobar"
    f[2].md5 = f[2].md5partial = "bleh"
    f[0].md5 = f[0].md5partial = f[0].md5samples = "foobar"
    f[1].md5 = f[1].md5partial = f[1].md5samples = "foobar"
    f[2].md5 = f[2].md5partial = f[2].md5samples = "bleh"
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)
    eq_(len(r[0]), 2)

@@ -141,13 +142,43 @@ def test_content_scan_compare_sizes_first(fake_fileexists):
    eq_(len(s.get_dupe_groups(f)), 0)


def test_big_file_partial_hashes(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents

    smallsize = 1
    bigsize = 100 * 1024 * 1024  # 100MiB
    s.big_file_size_threshold = bigsize

    f = [no("bigfoo", bigsize), no("bigbar", bigsize),
         no("smallfoo", smallsize), no("smallbar", smallsize)]
    f[0].md5 = f[0].md5partial = f[0].md5samples = "foobar"
    f[1].md5 = f[1].md5partial = f[1].md5samples = "foobar"
    f[2].md5 = f[2].md5partial = "bleh"
    f[3].md5 = f[3].md5partial = "bleh"
    r = s.get_dupe_groups(f)
    eq_(len(r), 2)

    # md5partial is still the same, but the file is actually different
    f[1].md5 = f[1].md5samples = "difffoobar"
    # here we compare the full md5s, as the user disabled the optimization
    s.big_file_size_threshold = 0
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)

    # here we should compare the md5samples, and see that they are different
    s.big_file_size_threshold = bigsize
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)


def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents
    f = [no("foo"), no("bar"), no("bleh")]
    f[0].md5 = f[0].md5partial = "foobar"
    f[1].md5 = f[1].md5partial = "foobar"
    f[2].md5 = f[2].md5partial = "bleh"
    f[0].md5 = f[0].md5partial = f[0].md5samples = "foobar"
    f[1].md5 = f[1].md5partial = f[1].md5samples = "foobar"
    f[2].md5 = f[2].md5partial = f[2].md5samples = "bleh"
    s.min_match_percentage = 101
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)
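For context, the setting exercised above would be used roughly like this (the import path is assumed from the repo layout; `files` stands in for a list of real `fs.File` objects):

```python
from core.scanner import Scanner, ScanType


def scan_with_sampling(files, threshold=100 * 1024 * 1024):
    # Files larger than `threshold` get matched via md5samples;
    # a threshold of 0 disables sampling and always compares full hashes.
    s = Scanner()
    s.scan_type = ScanType.Contents
    s.big_file_size_threshold = threshold
    return s.get_dupe_groups(files)
```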
@@ -162,13 +193,12 @@ def test_content_scan_doesnt_put_md5_in_words_at_the_end(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents
    f = [no("foo"), no("bar")]
    f[0].md5 = f[
        0
    ].md5partial = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    f[1].md5 = f[
        1
    ].md5partial = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    f[0].md5 = f[0].md5partial = f[0].md5samples =\
        "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    f[1].md5 = f[1].md5partial = f[1].md5samples =\
        "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    r = s.get_dupe_groups(f)
    # FIXME looks like we are missing something here?
    r[0]


@@ -514,21 +544,21 @@ def test_folder_scan_exclude_subfolder_matches(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Folders
    topf1 = no("top folder 1", size=42)
    topf1.md5 = topf1.md5partial = b"some_md5_1"
    topf1.md5 = topf1.md5partial = topf1.md5samples = b"some_md5_1"
    topf1.path = Path("/topf1")
    topf2 = no("top folder 2", size=42)
    topf2.md5 = topf2.md5partial = b"some_md5_1"
    topf2.md5 = topf2.md5partial = topf2.md5samples = b"some_md5_1"
    topf2.path = Path("/topf2")
    subf1 = no("sub folder 1", size=41)
    subf1.md5 = subf1.md5partial = b"some_md5_2"
    subf1.md5 = subf1.md5partial = subf1.md5samples = b"some_md5_2"
    subf1.path = Path("/topf1/sub")
    subf2 = no("sub folder 2", size=41)
    subf2.md5 = subf2.md5partial = b"some_md5_2"
    subf2.md5 = subf2.md5partial = subf2.md5samples = b"some_md5_2"
    subf2.path = Path("/topf2/sub")
    eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2])), 1)  # only top folders
    # however, if another folder matches a subfolder, keep it in the matches
    otherf = no("other folder", size=41)
    otherf.md5 = otherf.md5partial = b"some_md5_2"
    otherf.md5 = otherf.md5partial = otherf.md5samples = b"some_md5_2"
    otherf.path = Path("/otherfolder")
    eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2, otherf])), 2)

@@ -551,9 +581,9 @@ def test_dont_count_ref_files_as_discarded(fake_fileexists):
    o1 = no("foo", path="p1")
    o2 = no("foo", path="p2")
    o3 = no("foo", path="p3")
    o1.md5 = o1.md5partial = "foobar"
    o2.md5 = o2.md5partial = "foobar"
    o3.md5 = o3.md5partial = "foobar"
    o1.md5 = o1.md5partial = o1.md5samples = "foobar"
    o2.md5 = o2.md5partial = o2.md5samples = "foobar"
    o3.md5 = o3.md5partial = o3.md5samples = "foobar"
    o1.is_ref = True
    o2.is_ref = True
    eq_(len(s.get_dupe_groups([o1, o2, o3])), 1)