Mirror of https://github.com/arsenetar/dupeguru.git, synced 2026-01-22 14:41:39 +00:00
Squashed commit of the following:

commit 8b15fe9a502ebf4841c6529e7098cef03a6a5e6f
Author: Andrew Senetar <arsenetar@gmail.com>
Date:   Sun Mar 27 23:48:15 2022 -0500

    Finish up changes to copy_or_move

commit 21f6a32cf3186a400af8f30e67ad2743dc9a49bd
Author: Andrew Senetar <arsenetar@gmail.com>
Date:   Thu Mar 17 23:56:52 2022 -0500

    Migrate from hscommon.path to pathlib

    - Part one, this gets all hscommon and core tests passing
    - App appears to be able to load directories and complete scans, need further testing
    - app.py copy_or_move needs some additional work
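Editor's note on the API change described in the commit message: hscommon.path's Path built child paths with bracket indexing (visible in the removed lines below), while pathlib uses joinpath (or the / operator) and its open() handle works as a context manager. A minimal sketch of the idiom swap, using a hypothetical root path rather than anything from this commit:

    # Illustrative sketch of the hscommon.path -> pathlib migration; `root` is a
    # hypothetical path, not one used by dupeGuru.
    from pathlib import Path

    root = Path("/tmp/example")
    target = root.joinpath("dir1", "file1.test")      # was: root["dir1"]["file1.test"]
    target.parent.mkdir(parents=True, exist_ok=True)  # create the dir1 subdirectory
    with target.open("wb") as fp:                     # was: fp = ...open("wb"); ...; fp.close()
        fp.write(b"example data")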
@@ -17,7 +17,7 @@ except ImportError:
 
 from os import urandom
 
-from hscommon.path import Path
+from pathlib import Path
 from hscommon.testutil import eq_
 
 from core.tests.directories_test import create_fake_fs
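A note on the pytest fixture usage visible in the test hunks below (illustrative, not part of the diff): with hscommon.path gone, the tests wrap pytest's legacy tmpdir fixture in a standard pathlib.Path before handing it to the helpers, e.g.:

    # Hypothetical test showing the tmpdir -> pathlib.Path conversion that the
    # real tests below perform via Path(str(tmpdir)).
    from pathlib import Path

    def test_example(tmpdir):      # `tmpdir` is pytest's py.path.local fixture
        p = Path(str(tmpdir))      # same conversion used in the hunks below
        assert p.is_dir()          # the temporary directory already exists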
@@ -25,32 +25,26 @@ from .. import fs
 
 
 def create_fake_fs_with_random_data(rootpath):
-    rootpath = rootpath["fs"]
+    rootpath = rootpath.joinpath("fs")
     rootpath.mkdir()
-    rootpath["dir1"].mkdir()
-    rootpath["dir2"].mkdir()
-    rootpath["dir3"].mkdir()
-    fp = rootpath["file1.test"].open("wb")
+    rootpath.joinpath("dir1").mkdir()
+    rootpath.joinpath("dir2").mkdir()
+    rootpath.joinpath("dir3").mkdir()
     data1 = urandom(200 * 1024)  # 200KiB
     data2 = urandom(1024 * 1024)  # 1MiB
     data3 = urandom(10 * 1024 * 1024)  # 10MiB
-    fp.write(data1)
-    fp.close()
-    fp = rootpath["file2.test"].open("wb")
-    fp.write(data2)
-    fp.close()
-    fp = rootpath["file3.test"].open("wb")
-    fp.write(data3)
-    fp.close()
-    fp = rootpath["dir1"]["file1.test"].open("wb")
-    fp.write(data1)
-    fp.close()
-    fp = rootpath["dir2"]["file2.test"].open("wb")
-    fp.write(data2)
-    fp.close()
-    fp = rootpath["dir3"]["file3.test"].open("wb")
-    fp.write(data3)
-    fp.close()
+    with rootpath.joinpath("file1.test").open("wb") as fp:
+        fp.write(data1)
+    with rootpath.joinpath("file2.test").open("wb") as fp:
+        fp.write(data2)
+    with rootpath.joinpath("file3.test").open("wb") as fp:
+        fp.write(data3)
+    with rootpath.joinpath("dir1", "file1.test").open("wb") as fp:
+        fp.write(data1)
+    with rootpath.joinpath("dir2", "file2.test").open("wb") as fp:
+        fp.write(data2)
+    with rootpath.joinpath("dir3", "file3.test").open("wb") as fp:
+        fp.write(data3)
     return rootpath
 
 
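For readers who want the new helper without the diff interleaving, here is a condensed, behavior-equivalent sketch using pathlib's write_bytes; this is an editor's illustration only, the commit itself keeps the explicit with ... open("wb") blocks shown above:

    # Condensed sketch of create_fake_fs_with_random_data (illustrative; not the
    # commit's code). Creates fs/, dir1..dir3, and six random-data files.
    from os import urandom
    from pathlib import Path

    def create_fake_fs_with_random_data_sketch(rootpath: Path) -> Path:
        rootpath = rootpath.joinpath("fs")
        rootpath.mkdir()
        payloads = {
            "file1.test": urandom(200 * 1024),        # 200KiB
            "file2.test": urandom(1024 * 1024),       # 1MiB
            "file3.test": urandom(10 * 1024 * 1024),  # 10MiB
        }
        for i, (name, data) in enumerate(payloads.items(), start=1):
            subdir = rootpath.joinpath(f"dir{i}")
            subdir.mkdir()
            rootpath.joinpath(name).write_bytes(data)  # top-level copy
            subdir.joinpath(name).write_bytes(data)    # nested copy (dirN/fileN.test)
        return rootpath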
@@ -66,12 +60,12 @@ def test_digest_aggregate_subfiles_sorted(tmpdir):
     # same order everytime.
     p = create_fake_fs_with_random_data(Path(str(tmpdir)))
     b = fs.Folder(p)
-    digest1 = fs.File(p["dir1"]["file1.test"]).digest
-    digest2 = fs.File(p["dir2"]["file2.test"]).digest
-    digest3 = fs.File(p["dir3"]["file3.test"]).digest
-    digest4 = fs.File(p["file1.test"]).digest
-    digest5 = fs.File(p["file2.test"]).digest
-    digest6 = fs.File(p["file3.test"]).digest
+    digest1 = fs.File(p.joinpath("dir1", "file1.test")).digest
+    digest2 = fs.File(p.joinpath("dir2", "file2.test")).digest
+    digest3 = fs.File(p.joinpath("dir3", "file3.test")).digest
+    digest4 = fs.File(p.joinpath("file1.test")).digest
+    digest5 = fs.File(p.joinpath("file2.test")).digest
+    digest6 = fs.File(p.joinpath("file3.test")).digest
     # The expected digest is the hash of digests for folders and the direct digest for files
     folder_digest1 = hasher(digest1).digest()
     folder_digest2 = hasher(digest2).digest()
@@ -83,12 +77,12 @@ def test_partial_digest_aggregate_subfile_sorted(tmpdir):
 def test_partial_digest_aggregate_subfile_sorted(tmpdir):
     p = create_fake_fs_with_random_data(Path(str(tmpdir)))
     b = fs.Folder(p)
-    digest1 = fs.File(p["dir1"]["file1.test"]).digest_partial
-    digest2 = fs.File(p["dir2"]["file2.test"]).digest_partial
-    digest3 = fs.File(p["dir3"]["file3.test"]).digest_partial
-    digest4 = fs.File(p["file1.test"]).digest_partial
-    digest5 = fs.File(p["file2.test"]).digest_partial
-    digest6 = fs.File(p["file3.test"]).digest_partial
+    digest1 = fs.File(p.joinpath("dir1", "file1.test")).digest_partial
+    digest2 = fs.File(p.joinpath("dir2", "file2.test")).digest_partial
+    digest3 = fs.File(p.joinpath("dir3", "file3.test")).digest_partial
+    digest4 = fs.File(p.joinpath("file1.test")).digest_partial
+    digest5 = fs.File(p.joinpath("file2.test")).digest_partial
+    digest6 = fs.File(p.joinpath("file3.test")).digest_partial
     # The expected digest is the hash of digests for folders and the direct digest for files
     folder_digest1 = hasher(digest1).digest()
     folder_digest2 = hasher(digest2).digest()
@@ -96,12 +90,12 @@ def test_partial_digest_aggregate_subfile_sorted(tmpdir):
     digest = hasher(folder_digest1 + folder_digest2 + folder_digest3 + digest4 + digest5 + digest6).digest()
     eq_(b.digest_partial, digest)
 
-    digest1 = fs.File(p["dir1"]["file1.test"]).digest_samples
-    digest2 = fs.File(p["dir2"]["file2.test"]).digest_samples
-    digest3 = fs.File(p["dir3"]["file3.test"]).digest_samples
-    digest4 = fs.File(p["file1.test"]).digest_samples
-    digest5 = fs.File(p["file2.test"]).digest_samples
-    digest6 = fs.File(p["file3.test"]).digest_samples
+    digest1 = fs.File(p.joinpath("dir1", "file1.test")).digest_samples
+    digest2 = fs.File(p.joinpath("dir2", "file2.test")).digest_samples
+    digest3 = fs.File(p.joinpath("dir3", "file3.test")).digest_samples
+    digest4 = fs.File(p.joinpath("file1.test")).digest_samples
+    digest5 = fs.File(p.joinpath("file2.test")).digest_samples
+    digest6 = fs.File(p.joinpath("file3.test")).digest_samples
     # The expected digest is the digest of digests for folders and the direct digest for files
     folder_digest1 = hasher(digest1).digest()
     folder_digest2 = hasher(digest2).digest()
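To spell out the expectation the three test hunks above construct (a sketch, assuming `hasher` is whatever hash constructor this module selects in its try/except import; hashlib.md5 stands in here): each dirN folder holds a single file, so its folder digest is the hash of that file's digest, and the root's expected digest hashes the folder digests followed by the top-level file digests, as in hasher(folder_digest1 + folder_digest2 + folder_digest3 + digest4 + digest5 + digest6).digest():

    # Sketch of the aggregate-digest expectation used in the tests above.
    # Assumption: `hasher` mirrors the module-level hasher; hashlib.md5 is only a stand-in.
    import hashlib

    hasher = hashlib.md5

    def expected_root_digest(folder_child_digests, file_digests):
        # Each folder's digest is the hash of its (single) child's digest;
        # the root digest hashes folder digests followed by direct file digests.
        folder_digests = [hasher(d).digest() for d in folder_child_digests]
        return hasher(b"".join(folder_digests) + b"".join(file_digests)).digest()

    # Dummy 16-byte values in place of the real fs.File(...).digest results:
    d1, d2, d3, d4, d5, d6 = (bytes([i]) * 16 for i in range(1, 7))
    expected = expected_root_digest([d1, d2, d3], [d4, d5, d6])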