Mirror of https://github.com/arsenetar/dupeguru.git

Squashed commit of the following:

commit 5eb515f666bfa1ff06c2e96bdc351a4b7456580e
Author: Andrew Senetar <arsenetar@gmail.com>
Date:   Sun Mar 27 22:19:39 2022 -0500

    Add fallback to md5 if xxhash not available

    Mainly here for the case when distributions have not packaged python3-xxhash.
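
The fallback described above is the usual guarded-import pattern; a minimal sketch, assuming a module-level hasher alias (the exact identifier used in the dupeGuru source is not shown in this view):

    # Prefer xxhash when available, fall back to md5 otherwise.
    # "hasher" is an illustrative name, not necessarily the upstream one.
    try:
        import xxhash

        hasher = xxhash.xxh128  # fast, non-cryptographic
    except ImportError:
        # Distribution has not packaged python3-xxhash; hashlib.md5 still
        # works, it is just slower on large files.
        import hashlib

        hasher = hashlib.md5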

commit 51b18d4c84
Author: Andrew Senetar <arsenetar@gmail.com>
Date:   Sat Mar 19 15:25:46 2022 -0500

    Switch file hashing to xxhash instead of md5

    - Improves performance significantly in some cases
    - Add xxhash to requirements.txt and sort requirements
    - Rename md5 based members to digest
    - Update all tests to use new member names and hashing methods
    - Update hash db code to upgrade schema

    NOTE: May consider supporting multiple hashing algorithms in the future.
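
The renamed members hold the output of whichever hasher the guarded import selected; a rough sketch of a chunked full-file digest, where CHUNK_SIZE and compute_digest are illustrative assumptions (the partial/samples variants hash only a prefix or a few sampled regions of the file):

    # Illustrative chunked hashing; works with both xxhash.xxh128 and
    # hashlib.md5 because they share the update()/digest() interface.
    CHUNK_SIZE = 1024 * 1024  # 1 MiB, an assumed value

    def compute_digest(path, hasher):
        h = hasher()
        with open(path, "rb") as fp:
            while chunk := fp.read(CHUNK_SIZE):
                h.update(chunk)
        return h.digest()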
commit 9f40e4e786 (parent 86bf9b39d0)
Date: 2022-03-27 22:27:13 -05:00
10 changed files with 212 additions and 180 deletions


@@ -123,19 +123,19 @@ def test_content_scan(fake_fileexists):
     s = Scanner()
     s.scan_type = ScanType.CONTENTS
     f = [no("foo"), no("bar"), no("bleh")]
-    f[0].md5 = f[0].md5partial = f[0].md5samples = "foobar"
-    f[1].md5 = f[1].md5partial = f[1].md5samples = "foobar"
-    f[2].md5 = f[2].md5partial = f[1].md5samples = "bleh"
+    f[0].digest = f[0].digest_partial = f[0].digest_samples = "foobar"
+    f[1].digest = f[1].digest_partial = f[1].digest_samples = "foobar"
+    f[2].digest = f[2].digest_partial = f[1].digest_samples = "bleh"
     r = s.get_dupe_groups(f)
     eq_(len(r), 1)
     eq_(len(r[0]), 2)
-    eq_(s.discarded_file_count, 0)  # don't count the different md5 as discarded!
+    eq_(s.discarded_file_count, 0)  # don't count the different digest as discarded!


 def test_content_scan_compare_sizes_first(fake_fileexists):
     class MyFile(no):
         @property
-        def md5(self):
+        def digest(self):
             raise AssertionError()

     s = Scanner()
@@ -161,14 +161,14 @@ def test_ignore_file_size(fake_fileexists):
         no("largeignore1", large_size + 1),
         no("largeignore2", large_size + 1),
     ]
-    f[0].md5 = f[0].md5partial = f[0].md5samples = "smallignore"
-    f[1].md5 = f[1].md5partial = f[1].md5samples = "smallignore"
-    f[2].md5 = f[2].md5partial = f[2].md5samples = "small"
-    f[3].md5 = f[3].md5partial = f[3].md5samples = "small"
-    f[4].md5 = f[4].md5partial = f[4].md5samples = "large"
-    f[5].md5 = f[5].md5partial = f[5].md5samples = "large"
-    f[6].md5 = f[6].md5partial = f[6].md5samples = "largeignore"
-    f[7].md5 = f[7].md5partial = f[7].md5samples = "largeignore"
+    f[0].digest = f[0].digest_partial = f[0].digest_samples = "smallignore"
+    f[1].digest = f[1].digest_partial = f[1].digest_samples = "smallignore"
+    f[2].digest = f[2].digest_partial = f[2].digest_samples = "small"
+    f[3].digest = f[3].digest_partial = f[3].digest_samples = "small"
+    f[4].digest = f[4].digest_partial = f[4].digest_samples = "large"
+    f[5].digest = f[5].digest_partial = f[5].digest_samples = "large"
+    f[6].digest = f[6].digest_partial = f[6].digest_samples = "largeignore"
+    f[7].digest = f[7].digest_partial = f[7].digest_samples = "largeignore"
     r = s.get_dupe_groups(f)
     # No ignores
@@ -197,21 +197,21 @@ def test_big_file_partial_hashes(fake_fileexists):
     s.big_file_size_threshold = bigsize
     f = [no("bigfoo", bigsize), no("bigbar", bigsize), no("smallfoo", smallsize), no("smallbar", smallsize)]
-    f[0].md5 = f[0].md5partial = f[0].md5samples = "foobar"
-    f[1].md5 = f[1].md5partial = f[1].md5samples = "foobar"
-    f[2].md5 = f[2].md5partial = "bleh"
-    f[3].md5 = f[3].md5partial = "bleh"
+    f[0].digest = f[0].digest_partial = f[0].digest_samples = "foobar"
+    f[1].digest = f[1].digest_partial = f[1].digest_samples = "foobar"
+    f[2].digest = f[2].digest_partial = "bleh"
+    f[3].digest = f[3].digest_partial = "bleh"
     r = s.get_dupe_groups(f)
     eq_(len(r), 2)

-    # md5partial is still the same, but the file is actually different
-    f[1].md5 = f[1].md5samples = "difffoobar"
-    # here we compare the full md5s, as the user disabled the optimization
+    # digest_partial is still the same, but the file is actually different
+    f[1].digest = f[1].digest_samples = "difffoobar"
+    # here we compare the full digests, as the user disabled the optimization
     s.big_file_size_threshold = 0
     r = s.get_dupe_groups(f)
     eq_(len(r), 1)

-    # here we should compare the md5samples, and see they are different
+    # here we should compare the digest_samples, and see they are different
     s.big_file_size_threshold = bigsize
     r = s.get_dupe_groups(f)
     eq_(len(r), 1)
@@ -221,9 +221,9 @@ def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
     s = Scanner()
     s.scan_type = ScanType.CONTENTS
     f = [no("foo"), no("bar"), no("bleh")]
-    f[0].md5 = f[0].md5partial = f[0].md5samples = "foobar"
-    f[1].md5 = f[1].md5partial = f[1].md5samples = "foobar"
-    f[2].md5 = f[2].md5partial = f[2].md5samples = "bleh"
+    f[0].digest = f[0].digest_partial = f[0].digest_samples = "foobar"
+    f[1].digest = f[1].digest_partial = f[1].digest_samples = "foobar"
+    f[2].digest = f[2].digest_partial = f[2].digest_samples = "bleh"
     s.min_match_percentage = 101
     r = s.get_dupe_groups(f)
     eq_(len(r), 1)
@@ -234,12 +234,16 @@ def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
     eq_(len(r[0]), 2)


-def test_content_scan_doesnt_put_md5_in_words_at_the_end(fake_fileexists):
+def test_content_scan_doesnt_put_digest_in_words_at_the_end(fake_fileexists):
     s = Scanner()
     s.scan_type = ScanType.CONTENTS
     f = [no("foo"), no("bar")]
-    f[0].md5 = f[0].md5partial = f[0].md5samples = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-    f[1].md5 = f[1].md5partial = f[1].md5samples = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+    f[0].digest = f[0].digest_partial = f[
+        0
+    ].digest_samples = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+    f[1].digest = f[1].digest_partial = f[
+        1
+    ].digest_samples = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
     r = s.get_dupe_groups(f)
     # FIXME looks like we are missing something here?
     r[0]
@@ -587,21 +591,21 @@ def test_folder_scan_exclude_subfolder_matches(fake_fileexists):
     s = Scanner()
     s.scan_type = ScanType.FOLDERS
     topf1 = no("top folder 1", size=42)
-    topf1.md5 = topf1.md5partial = topf1.md5samples = b"some_md5_1"
+    topf1.digest = topf1.digest_partial = topf1.digest_samples = b"some_digest__1"
     topf1.path = Path("/topf1")
     topf2 = no("top folder 2", size=42)
-    topf2.md5 = topf2.md5partial = topf2.md5samples = b"some_md5_1"
+    topf2.digest = topf2.digest_partial = topf2.digest_samples = b"some_digest__1"
     topf2.path = Path("/topf2")
     subf1 = no("sub folder 1", size=41)
-    subf1.md5 = subf1.md5partial = subf1.md5samples = b"some_md5_2"
+    subf1.digest = subf1.digest_partial = subf1.digest_samples = b"some_digest__2"
     subf1.path = Path("/topf1/sub")
     subf2 = no("sub folder 2", size=41)
-    subf2.md5 = subf2.md5partial = subf2.md5samples = b"some_md5_2"
+    subf2.digest = subf2.digest_partial = subf2.digest_samples = b"some_digest__2"
     subf2.path = Path("/topf2/sub")
     eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2])), 1)  # only top folders
     # however, if another folder matches a subfolder, keep in in the matches
     otherf = no("other folder", size=41)
-    otherf.md5 = otherf.md5partial = otherf.md5samples = b"some_md5_2"
+    otherf.digest = otherf.digest_partial = otherf.digest_samples = b"some_digest__2"
     otherf.path = Path("/otherfolder")
     eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2, otherf])), 2)
@@ -624,9 +628,9 @@ def test_dont_count_ref_files_as_discarded(fake_fileexists):
     o1 = no("foo", path="p1")
     o2 = no("foo", path="p2")
     o3 = no("foo", path="p3")
-    o1.md5 = o1.md5partial = o1.md5samples = "foobar"
-    o2.md5 = o2.md5partial = o2.md5samples = "foobar"
-    o3.md5 = o3.md5partial = o3.md5samples = "foobar"
+    o1.digest = o1.digest_partial = o1.digest_samples = "foobar"
+    o2.digest = o2.digest_partial = o2.digest_samples = "foobar"
+    o3.digest = o3.digest_partial = o3.digest_samples = "foobar"
     o1.is_ref = True
     o2.is_ref = True
     eq_(len(s.get_dupe_groups([o1, o2, o3])), 1)