Mirror of https://github.com/arsenetar/dupeguru.git, synced 2024-11-19 05:19:03 +00:00
Format files with black
- Format all files with black
- Update tox.ini flake8 arguments to be compatible
- Add black to requirements-extra.txt
- Reduce ignored flake8 rules and fix a few violations
This commit is contained in:
parent 359d6498f7
commit 7ba8aa3514
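Black enforces an 88-column line length and a slice/operator style that flake8 rejects under its default settings, which is why the flake8 arguments in tox.ini had to be adjusted alongside the reformat. The exact values used in this commit are not visible on this page; a typical black-compatible flake8 section (the settings below are illustrative assumptions, not quoted from the commit) looks like this:

    # tox.ini -- illustrative black-compatible flake8 settings (assumed, not from this commit)
    [flake8]
    max-line-length = 88        # match black's default line length
    extend-ignore = E203, W503  # E203 (whitespace before ':') and W503 (line break
                                # before a binary operator) both conflict with black

The reformat itself is a single command run from the repository root: `black .`, which is what produces the bulk of this diff.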
143 build.py
@@ -13,129 +13,165 @@ from setuptools import setup, Extension

 from hscommon import sphinxgen
 from hscommon.build import (
-    add_to_pythonpath, print_and_do, move_all, fix_qt_resource_file,
+    add_to_pythonpath,
+    print_and_do,
+    move_all,
+    fix_qt_resource_file,
 )
 from hscommon import loc

+
 def parse_args():
     usage = "usage: %prog [options]"
     parser = OptionParser(usage=usage)
     parser.add_option(
-        '--clean', action='store_true', dest='clean',
-        help="Clean build folder before building"
+        "--clean",
+        action="store_true",
+        dest="clean",
+        help="Clean build folder before building",
     )
     parser.add_option(
-        '--doc', action='store_true', dest='doc',
-        help="Build only the help file"
+        "--doc", action="store_true", dest="doc", help="Build only the help file"
     )
     parser.add_option(
-        '--loc', action='store_true', dest='loc',
-        help="Build only localization"
+        "--loc", action="store_true", dest="loc", help="Build only localization"
     )
     parser.add_option(
-        '--updatepot', action='store_true', dest='updatepot',
-        help="Generate .pot files from source code."
+        "--updatepot",
+        action="store_true",
+        dest="updatepot",
+        help="Generate .pot files from source code.",
     )
     parser.add_option(
-        '--mergepot', action='store_true', dest='mergepot',
-        help="Update all .po files based on .pot files."
+        "--mergepot",
+        action="store_true",
+        dest="mergepot",
+        help="Update all .po files based on .pot files.",
    )
     parser.add_option(
-        '--normpo', action='store_true', dest='normpo',
-        help="Normalize all PO files (do this before commit)."
+        "--normpo",
+        action="store_true",
+        dest="normpo",
+        help="Normalize all PO files (do this before commit).",
     )
     (options, args) = parser.parse_args()
     return options

+
 def build_help():
     print("Generating Help")
-    current_path = op.abspath('.')
-    help_basepath = op.join(current_path, 'help', 'en')
-    help_destpath = op.join(current_path, 'build', 'help')
-    changelog_path = op.join(current_path, 'help', 'changelog')
+    current_path = op.abspath(".")
+    help_basepath = op.join(current_path, "help", "en")
+    help_destpath = op.join(current_path, "build", "help")
+    changelog_path = op.join(current_path, "help", "changelog")
     tixurl = "https://github.com/hsoft/dupeguru/issues/{}"
-    confrepl = {'language': 'en'}
-    changelogtmpl = op.join(current_path, 'help', 'changelog.tmpl')
-    conftmpl = op.join(current_path, 'help', 'conf.tmpl')
-    sphinxgen.gen(help_basepath, help_destpath, changelog_path, tixurl, confrepl, conftmpl, changelogtmpl)
+    confrepl = {"language": "en"}
+    changelogtmpl = op.join(current_path, "help", "changelog.tmpl")
+    conftmpl = op.join(current_path, "help", "conf.tmpl")
+    sphinxgen.gen(
+        help_basepath,
+        help_destpath,
+        changelog_path,
+        tixurl,
+        confrepl,
+        conftmpl,
+        changelogtmpl,
+    )

+
 def build_qt_localizations():
-    loc.compile_all_po(op.join('qtlib', 'locale'))
-    loc.merge_locale_dir(op.join('qtlib', 'locale'), 'locale')
+    loc.compile_all_po(op.join("qtlib", "locale"))
+    loc.merge_locale_dir(op.join("qtlib", "locale"), "locale")

+
 def build_localizations():
-    loc.compile_all_po('locale')
+    loc.compile_all_po("locale")
     build_qt_localizations()
-    locale_dest = op.join('build', 'locale')
+    locale_dest = op.join("build", "locale")
     if op.exists(locale_dest):
         shutil.rmtree(locale_dest)
-    shutil.copytree('locale', locale_dest, ignore=shutil.ignore_patterns('*.po', '*.pot'))
+    shutil.copytree(
+        "locale", locale_dest, ignore=shutil.ignore_patterns("*.po", "*.pot")
+    )

+
 def build_updatepot():
     print("Building .pot files from source files")
     print("Building core.pot")
-    loc.generate_pot(['core'], op.join('locale', 'core.pot'), ['tr'])
+    loc.generate_pot(["core"], op.join("locale", "core.pot"), ["tr"])
     print("Building columns.pot")
-    loc.generate_pot(['core'], op.join('locale', 'columns.pot'), ['coltr'])
+    loc.generate_pot(["core"], op.join("locale", "columns.pot"), ["coltr"])
     print("Building ui.pot")
     # When we're not under OS X, we don't want to overwrite ui.pot because it contains Cocoa locs
     # We want to merge the generated pot with the old pot in the most preserving way possible.
-    ui_packages = ['qt', op.join('cocoa', 'inter')]
-    loc.generate_pot(ui_packages, op.join('locale', 'ui.pot'), ['tr'], merge=True)
+    ui_packages = ["qt", op.join("cocoa", "inter")]
+    loc.generate_pot(ui_packages, op.join("locale", "ui.pot"), ["tr"], merge=True)
     print("Building qtlib.pot")
-    loc.generate_pot(['qtlib'], op.join('qtlib', 'locale', 'qtlib.pot'), ['tr'])
+    loc.generate_pot(["qtlib"], op.join("qtlib", "locale", "qtlib.pot"), ["tr"])

+
 def build_mergepot():
     print("Updating .po files using .pot files")
-    loc.merge_pots_into_pos('locale')
-    loc.merge_pots_into_pos(op.join('qtlib', 'locale'))
-    loc.merge_pots_into_pos(op.join('cocoalib', 'locale'))
+    loc.merge_pots_into_pos("locale")
+    loc.merge_pots_into_pos(op.join("qtlib", "locale"))
+    loc.merge_pots_into_pos(op.join("cocoalib", "locale"))

+
 def build_normpo():
-    loc.normalize_all_pos('locale')
-    loc.normalize_all_pos(op.join('qtlib', 'locale'))
-    loc.normalize_all_pos(op.join('cocoalib', 'locale'))
+    loc.normalize_all_pos("locale")
+    loc.normalize_all_pos(op.join("qtlib", "locale"))
+    loc.normalize_all_pos(op.join("cocoalib", "locale"))

+
 def build_pe_modules():
     print("Building PE Modules")
     exts = [
         Extension(
             "_block",
-            [op.join('core', 'pe', 'modules', 'block.c'), op.join('core', 'pe', 'modules', 'common.c')]
+            [
+                op.join("core", "pe", "modules", "block.c"),
+                op.join("core", "pe", "modules", "common.c"),
+            ],
         ),
         Extension(
             "_cache",
-            [op.join('core', 'pe', 'modules', 'cache.c'), op.join('core', 'pe', 'modules', 'common.c')]
+            [
+                op.join("core", "pe", "modules", "cache.c"),
+                op.join("core", "pe", "modules", "common.c"),
+            ],
         ),
     ]
-    exts.append(Extension("_block_qt", [op.join('qt', 'pe', 'modules', 'block.c')]))
+    exts.append(Extension("_block_qt", [op.join("qt", "pe", "modules", "block.c")]))
     setup(
-        script_args=['build_ext', '--inplace'],
-        ext_modules=exts,
+        script_args=["build_ext", "--inplace"], ext_modules=exts,
     )
-    move_all('_block_qt*', op.join('qt', 'pe'))
-    move_all('_block*', op.join('core', 'pe'))
-    move_all('_cache*', op.join('core', 'pe'))
+    move_all("_block_qt*", op.join("qt", "pe"))
+    move_all("_block*", op.join("core", "pe"))
+    move_all("_cache*", op.join("core", "pe"))

+
 def build_normal():
     print("Building dupeGuru with UI qt")
-    add_to_pythonpath('.')
+    add_to_pythonpath(".")
     print("Building dupeGuru")
     build_pe_modules()
     print("Building localizations")
     build_localizations()
     print("Building Qt stuff")
-    print_and_do("pyrcc5 {0} > {1}".format(op.join('qt', 'dg.qrc'), op.join('qt', 'dg_rc.py')))
-    fix_qt_resource_file(op.join('qt', 'dg_rc.py'))
+    print_and_do(
+        "pyrcc5 {0} > {1}".format(op.join("qt", "dg.qrc"), op.join("qt", "dg_rc.py"))
+    )
+    fix_qt_resource_file(op.join("qt", "dg_rc.py"))
     build_help()

+
 def main():
     options = parse_args()
     if options.clean:
-        if op.exists('build'):
-            shutil.rmtree('build')
-    if not op.exists('build'):
-        os.mkdir('build')
+        if op.exists("build"):
+            shutil.rmtree("build")
+    if not op.exists("build"):
+        os.mkdir("build")
     if options.doc:
         build_help()
     elif options.loc:
@@ -149,5 +185,6 @@ def main():
     else:
         build_normal()

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

core/__init__.py

@@ -1,3 +1,2 @@
-__version__ = '4.0.4'
-__appname__ = 'dupeGuru'
-
+__version__ = "4.0.4"
+__appname__ = "dupeGuru"
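The parse_args() hunks above are a compact tour of black's layout rule. Black first tries to fit a call on a single line within its 88-column limit; if that fails, it tries the arguments together on one indented line; only if that also overflows does it explode the call to one argument per line with a trailing comma. That is why --doc and --loc end up in the compact form while --clean, --updatepot, --mergepot and --normpo are fully exploded: the option names and help strings differ in length. Schematically, using lines from the diff itself:

    # Tier 1: the whole call fits in 88 columns -> one line.
    loc.compile_all_po("locale")

    # Tier 2: the call is too long, but the arguments fit together on one
    # indented line (the "--doc" and "--loc" cases above).
    parser.add_option(
        "--doc", action="store_true", dest="doc", help="Build only the help file"
    )

    # Tier 3: even that overflows -> one argument per line, trailing comma added
    # (the "--clean" case above).
    parser.add_option(
        "--clean",
        action="store_true",
        dest="clean",
        help="Clean build folder before building",
    )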
227 core/app.py
@@ -34,8 +34,8 @@ from .gui.ignore_list_dialog import IgnoreListDialog
 from .gui.problem_dialog import ProblemDialog
 from .gui.stats_label import StatsLabel

-HAD_FIRST_LAUNCH_PREFERENCE = 'HadFirstLaunch'
-DEBUG_MODE_PREFERENCE = 'DebugMode'
+HAD_FIRST_LAUNCH_PREFERENCE = "HadFirstLaunch"
+DEBUG_MODE_PREFERENCE = "DebugMode"

 MSG_NO_MARKED_DUPES = tr("There are no marked duplicates. Nothing has been done.")
 MSG_NO_SELECTED_DUPES = tr("There are no selected duplicates. Nothing has been done.")
@@ -44,23 +44,27 @@ MSG_MANY_FILES_TO_OPEN = tr(
     "files are opened with, doing so can create quite a mess. Continue?"
 )

+
 class DestType:
     Direct = 0
     Relative = 1
     Absolute = 2

+
 class JobType:
-    Scan = 'job_scan'
-    Load = 'job_load'
-    Move = 'job_move'
-    Copy = 'job_copy'
-    Delete = 'job_delete'
+    Scan = "job_scan"
+    Load = "job_load"
+    Move = "job_move"
+    Copy = "job_copy"
+    Delete = "job_delete"

+
 class AppMode:
     Standard = 0
     Music = 1
     Picture = 2

+
 JOBID2TITLE = {
     JobType.Scan: tr("Scanning for duplicates"),
     JobType.Load: tr("Loading"),
@@ -69,6 +73,7 @@ JOBID2TITLE = {
     JobType.Delete: tr("Sending to Trash"),
 }

+
 class DupeGuru(Broadcaster):
     """Holds everything together.

@@ -100,7 +105,8 @@ class DupeGuru(Broadcaster):

     Instance of :mod:`meta-gui <core.gui>` table listing the results from :attr:`results`
     """
-    #--- View interface
+
+    # --- View interface
     # get_default(key_name)
     # set_default(key_name, value)
     # show_message(msg)
@@ -116,7 +122,7 @@ class DupeGuru(Broadcaster):

     NAME = PROMPT_NAME = "dupeGuru"

-    PICTURE_CACHE_TYPE = 'sqlite' # set to 'shelve' for a ShelveCache
+    PICTURE_CACHE_TYPE = "sqlite"  # set to 'shelve' for a ShelveCache

     def __init__(self, view):
         if view.get_default(DEBUG_MODE_PREFERENCE):
@@ -124,7 +130,9 @@ class DupeGuru(Broadcaster):
             logging.debug("Debug mode enabled")
         Broadcaster.__init__(self)
         self.view = view
-        self.appdata = desktop.special_folder_path(desktop.SpecialFolder.AppData, appname=self.NAME)
+        self.appdata = desktop.special_folder_path(
+            desktop.SpecialFolder.AppData, appname=self.NAME
+        )
         if not op.exists(self.appdata):
             os.makedirs(self.appdata)
         self.app_mode = AppMode.Standard
@@ -136,11 +144,11 @@ class DupeGuru(Broadcaster):
         # sent to the scanner. They don't have default values because those defaults values are
         # defined in the scanner class.
         self.options = {
-            'escape_filter_regexp': True,
-            'clean_empty_dirs': False,
-            'ignore_hardlink_matches': False,
-            'copymove_dest_type': DestType.Relative,
-            'picture_cache_type': self.PICTURE_CACHE_TYPE
+            "escape_filter_regexp": True,
+            "clean_empty_dirs": False,
+            "ignore_hardlink_matches": False,
+            "copymove_dest_type": DestType.Relative,
+            "picture_cache_type": self.PICTURE_CACHE_TYPE,
         }
         self.selected_dupes = []
         self.details_panel = DetailsPanel(self)
@@ -155,7 +163,7 @@ class DupeGuru(Broadcaster):
         for child in children:
             child.connect()

-    #--- Private
+    # --- Private
     def _recreate_result_table(self):
         if self.result_table is not None:
             self.result_table.disconnect()
@@ -169,26 +177,30 @@ class DupeGuru(Broadcaster):
         self.view.create_results_window()

     def _get_picture_cache_path(self):
-        cache_type = self.options['picture_cache_type']
-        cache_name = 'cached_pictures.shelve' if cache_type == 'shelve' else 'cached_pictures.db'
+        cache_type = self.options["picture_cache_type"]
+        cache_name = (
+            "cached_pictures.shelve" if cache_type == "shelve" else "cached_pictures.db"
+        )
         return op.join(self.appdata, cache_name)

     def _get_dupe_sort_key(self, dupe, get_group, key, delta):
         if self.app_mode in (AppMode.Music, AppMode.Picture):
-            if key == 'folder_path':
-                dupe_folder_path = getattr(dupe, 'display_folder_path', dupe.folder_path)
+            if key == "folder_path":
+                dupe_folder_path = getattr(
+                    dupe, "display_folder_path", dupe.folder_path
+                )
                 return str(dupe_folder_path).lower()
         if self.app_mode == AppMode.Picture:
-            if delta and key == 'dimensions':
+            if delta and key == "dimensions":
                 r = cmp_value(dupe, key)
                 ref_value = cmp_value(get_group().ref, key)
                 return get_delta_dimensions(r, ref_value)
-        if key == 'marked':
+        if key == "marked":
             return self.results.is_marked(dupe)
-        if key == 'percentage':
+        if key == "percentage":
             m = get_group().get_match_of(dupe)
             return m.percentage
-        elif key == 'dupe_count':
+        elif key == "dupe_count":
             return 0
         else:
             result = cmp_value(dupe, key)
@@ -203,21 +215,25 @@ class DupeGuru(Broadcaster):

     def _get_group_sort_key(self, group, key):
         if self.app_mode in (AppMode.Music, AppMode.Picture):
-            if key == 'folder_path':
-                dupe_folder_path = getattr(group.ref, 'display_folder_path', group.ref.folder_path)
+            if key == "folder_path":
+                dupe_folder_path = getattr(
+                    group.ref, "display_folder_path", group.ref.folder_path
+                )
                 return str(dupe_folder_path).lower()
-        if key == 'percentage':
+        if key == "percentage":
             return group.percentage
-        if key == 'dupe_count':
+        if key == "dupe_count":
             return len(group)
-        if key == 'marked':
+        if key == "marked":
             return len([dupe for dupe in group.dupes if self.results.is_marked(dupe)])
         return cmp_value(group.ref, key)

     def _do_delete(self, j, link_deleted, use_hardlinks, direct_deletion):
         def op(dupe):
             j.add_progress()
-            return self._do_delete_dupe(dupe, link_deleted, use_hardlinks, direct_deletion)
+            return self._do_delete_dupe(
+                dupe, link_deleted, use_hardlinks, direct_deletion
+            )

         j.start_job(self.results.mark_count)
         self.results.perform_on_marked(op, True)
@@ -233,7 +249,7 @@ class DupeGuru(Broadcaster):
             else:
                 os.remove(str_path)
         else:
-            send2trash(str_path) # Raises OSError when there's a problem
+            send2trash(str_path)  # Raises OSError when there's a problem
         if link_deleted:
             group = self.results.get_group_of_duplicate(dupe)
             ref = group.ref
@@ -258,8 +274,9 @@ class DupeGuru(Broadcaster):

     def _get_export_data(self):
         columns = [
-            col for col in self.result_table.columns.ordered_columns
-            if col.visible and col.name != 'marked'
+            col
+            for col in self.result_table.columns.ordered_columns
+            if col.visible and col.name != "marked"
         ]
         colnames = [col.display for col in columns]
         rows = []
@@ -273,10 +290,11 @@ class DupeGuru(Broadcaster):

     def _results_changed(self):
         self.selected_dupes = [
-            d for d in self.selected_dupes
+            d
+            for d in self.selected_dupes
             if self.results.get_group_of_duplicate(d) is not None
         ]
-        self.notify('results_changed')
+        self.notify("results_changed")

     def _start_job(self, jobid, func, args=()):
         title = JOBID2TITLE[jobid]
@@ -310,7 +328,9 @@ class DupeGuru(Broadcaster):
             msg = {
                 JobType.Copy: tr("All marked files were copied successfully."),
                 JobType.Move: tr("All marked files were moved successfully."),
-                JobType.Delete: tr("All marked files were successfully sent to Trash."),
+                JobType.Delete: tr(
+                    "All marked files were successfully sent to Trash."
+                ),
             }[jobid]
             self.view.show_message(msg)

@@ -341,9 +361,9 @@ class DupeGuru(Broadcaster):
         if dupes == self.selected_dupes:
             return
         self.selected_dupes = dupes
-        self.notify('dupes_selected')
+        self.notify("dupes_selected")

-    #--- Protected
+    # --- Protected
     def _get_fileclasses(self):
         if self.app_mode == AppMode.Picture:
             return [pe.photo.PLAT_SPECIFIC_PHOTO_CLASS]
@@ -360,7 +380,7 @@ class DupeGuru(Broadcaster):
         else:
             return prioritize.all_categories()

-    #--- Public
+    # --- Public
     def add_directory(self, d):
         """Adds folder ``d`` to :attr:`directories`.

@@ -370,7 +390,7 @@ class DupeGuru(Broadcaster):
         """
         try:
             self.directories.add_path(Path(d))
-            self.notify('directories_changed')
+            self.notify("directories_changed")
         except directories.AlreadyThereError:
             self.view.show_message(tr("'{}' already is in the list.").format(d))
         except directories.InvalidPathError:
@@ -383,7 +403,9 @@ class DupeGuru(Broadcaster):
         if not dupes:
             self.view.show_message(MSG_NO_SELECTED_DUPES)
             return
-        msg = tr("All selected %d matches are going to be ignored in all subsequent scans. Continue?")
+        msg = tr(
+            "All selected %d matches are going to be ignored in all subsequent scans. Continue?"
+        )
         if not self.view.ask_yes_no(msg % len(dupes)):
             return
         for dupe in dupes:
@@ -400,22 +422,22 @@ class DupeGuru(Broadcaster):
         :param str filter: filter to apply
         """
         self.results.apply_filter(None)
-        if self.options['escape_filter_regexp']:
-            filter = escape(filter, set('()[]\\.|+?^'))
-            filter = escape(filter, '*', '.')
+        if self.options["escape_filter_regexp"]:
+            filter = escape(filter, set("()[]\\.|+?^"))
+            filter = escape(filter, "*", ".")
         self.results.apply_filter(filter)
         self._results_changed()

     def clean_empty_dirs(self, path):
-        if self.options['clean_empty_dirs']:
-            while delete_if_empty(path, ['.DS_Store']):
+        if self.options["clean_empty_dirs"]:
+            while delete_if_empty(path, [".DS_Store"]):
                 path = path.parent()

     def clear_picture_cache(self):
         try:
             os.remove(self._get_picture_cache_path())
         except FileNotFoundError:
-            pass # we don't care
+            pass  # we don't care

     def copy_or_move(self, dupe, copy: bool, destination: str, dest_type: DestType):
         source_path = dupe.path
@@ -444,6 +466,7 @@ class DupeGuru(Broadcaster):

         :param bool copy: If True, duplicates will be copied instead of moved
         """
+
         def do(j):
             def op(dupe):
                 j.add_progress()
@@ -459,7 +482,7 @@ class DupeGuru(Broadcaster):
         prompt = tr("Select a directory to {} marked files to").format(opname)
         destination = self.view.select_dest_folder(prompt)
         if destination:
-            desttype = self.options['copymove_dest_type']
+            desttype = self.options["copymove_dest_type"]
             jobid = JobType.Copy if copy else JobType.Move
             self._start_job(jobid, do)

@@ -472,8 +495,9 @@ class DupeGuru(Broadcaster):
         if not self.deletion_options.show(self.results.mark_count):
             return
         args = [
-            self.deletion_options.link_deleted, self.deletion_options.use_hardlinks,
-            self.deletion_options.direct
+            self.deletion_options.link_deleted,
+            self.deletion_options.use_hardlinks,
+            self.deletion_options.direct,
         ]
         logging.debug("Starting deletion job with args %r", args)
         self._start_job(JobType.Delete, self._do_delete, args=args)
@@ -495,7 +519,9 @@ class DupeGuru(Broadcaster):
         The columns and their order in the resulting CSV file is determined in the same way as in
         :meth:`export_to_xhtml`.
         """
-        dest_file = self.view.select_dest_file(tr("Select a destination for your exported CSV"), 'csv')
+        dest_file = self.view.select_dest_file(
+            tr("Select a destination for your exported CSV"), "csv"
+        )
         if dest_file:
             colnames, rows = self._get_export_data()
             try:
@@ -505,13 +531,16 @@ class DupeGuru(Broadcaster):

     def get_display_info(self, dupe, group, delta=False):
         def empty_data():
-            return {c.name: '---' for c in self.result_table.COLUMNS[1:]}
+            return {c.name: "---" for c in self.result_table.COLUMNS[1:]}
+
         if (dupe is None) or (group is None):
             return empty_data()
         try:
             return dupe.get_display_info(group, delta)
         except Exception as e:
-            logging.warning("Exception on GetDisplayInfo for %s: %s", str(dupe.path), str(e))
+            logging.warning(
+                "Exception on GetDisplayInfo for %s: %s", str(dupe.path), str(e)
+            )
             return empty_data()

     def invoke_custom_command(self):
@@ -521,9 +550,11 @@ class DupeGuru(Broadcaster):
         is replaced with that dupe's ref file. If there's no selection, the command is not invoked.
         If the dupe is a ref, ``%d`` and ``%r`` will be the same.
         """
-        cmd = self.view.get_default('CustomCommand')
+        cmd = self.view.get_default("CustomCommand")
         if not cmd:
-            msg = tr("You have no custom command set up. Set it up in your preferences.")
+            msg = tr(
+                "You have no custom command set up. Set it up in your preferences."
+            )
             self.view.show_message(msg)
             return
         if not self.selected_dupes:
@@ -531,8 +562,8 @@ class DupeGuru(Broadcaster):
         dupe = self.selected_dupes[0]
         group = self.results.get_group_of_duplicate(dupe)
         ref = group.ref
-        cmd = cmd.replace('%d', str(dupe.path))
-        cmd = cmd.replace('%r', str(ref.path))
+        cmd = cmd.replace("%d", str(dupe.path))
+        cmd = cmd.replace("%r", str(ref.path))
         match = re.match(r'"([^"]+)"(.*)', cmd)
         if match is not None:
             # This code here is because subprocess.Popen doesn't seem to accept, under Windows,
@@ -551,9 +582,9 @@ class DupeGuru(Broadcaster):
         is persistent data, is the same as when the last session was closed (when :meth:`save` was
         called).
         """
-        self.directories.load_from_file(op.join(self.appdata, 'last_directories.xml'))
-        self.notify('directories_changed')
-        p = op.join(self.appdata, 'ignore_list.xml')
+        self.directories.load_from_file(op.join(self.appdata, "last_directories.xml"))
+        self.notify("directories_changed")
+        p = op.join(self.appdata, "ignore_list.xml")
         self.ignore_list.load_from_xml(p)
         self.ignore_list_dialog.refresh()

@@ -562,8 +593,10 @@ class DupeGuru(Broadcaster):

         :param str filename: path of the XML file (created with :meth:`save_as`) to load
         """
+
         def do(j):
             self.results.load_from_xml(filename, self._get_file, j)
+
         self._start_job(JobType.Load, do)

     def make_selected_reference(self):
@@ -588,35 +621,36 @@ class DupeGuru(Broadcaster):
         if not self.result_table.power_marker:
             if changed_groups:
                 self.selected_dupes = [
-                    d for d in self.selected_dupes
+                    d
+                    for d in self.selected_dupes
                     if self.results.get_group_of_duplicate(d).ref is d
                 ]
-            self.notify('results_changed')
+            self.notify("results_changed")
         else:
             # If we're in "Dupes Only" mode (previously called Power Marker), things are a bit
             # different. The refs are not shown in the table, and if our operation is successful,
             # this means that there's no way to follow our dupe selection. Then, the best thing to
             # do is to keep our selection index-wise (different dupe selection, but same index
             # selection).
-            self.notify('results_changed_but_keep_selection')
+            self.notify("results_changed_but_keep_selection")

     def mark_all(self):
         """Set all dupes in the results as marked.
         """
         self.results.mark_all()
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def mark_none(self):
         """Set all dupes in the results as unmarked.
         """
         self.results.mark_none()
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def mark_invert(self):
         """Invert the marked state of all dupes in the results.
         """
         self.results.mark_invert()
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def mark_dupe(self, dupe, marked):
         """Change marked status of ``dupe``.
@@ -629,7 +663,7 @@ class DupeGuru(Broadcaster):
             self.results.mark(dupe)
         else:
             self.results.unmark(dupe)
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def open_selected(self):
         """Open :attr:`selected_dupes` with their associated application.
@@ -656,7 +690,7 @@ class DupeGuru(Broadcaster):
             indexes = sorted(indexes, reverse=True)
             for index in indexes:
                 del self.directories[index]
-            self.notify('directories_changed')
+            self.notify("directories_changed")
         except IndexError:
             pass

@@ -669,7 +703,7 @@ class DupeGuru(Broadcaster):
         :type duplicates: list of :class:`~core.fs.File`
         """
         self.results.remove_duplicates(self.without_ref(duplicates))
-        self.notify('results_changed_but_keep_selection')
+        self.notify("results_changed_but_keep_selection")

     def remove_marked(self):
         """Removed marked duplicates from the results (without touching the files themselves).
@@ -724,7 +758,9 @@ class DupeGuru(Broadcaster):
             if group.prioritize(key_func=sort_key):
                 count += 1
         self._results_changed()
-        msg = tr("{} duplicate groups were changed by the re-prioritization.").format(count)
+        msg = tr("{} duplicate groups were changed by the re-prioritization.").format(
+            count
+        )
         self.view.show_message(msg)

     def reveal_selected(self):
@@ -734,10 +770,10 @@ class DupeGuru(Broadcaster):
     def save(self):
         if not op.exists(self.appdata):
             os.makedirs(self.appdata)
-        self.directories.save_to_file(op.join(self.appdata, 'last_directories.xml'))
-        p = op.join(self.appdata, 'ignore_list.xml')
+        self.directories.save_to_file(op.join(self.appdata, "last_directories.xml"))
+        p = op.join(self.appdata, "ignore_list.xml")
         self.ignore_list.save_to_xml(p)
-        self.notify('save_session')
+        self.notify("save_session")

     def save_as(self, filename):
         """Save results in ``filename``.
@@ -756,7 +792,9 @@ class DupeGuru(Broadcaster):
         """
         scanner = self.SCANNER_CLASS()
         if not self.directories.has_any_file():
-            self.view.show_message(tr("The selected directories contain no scannable file."))
+            self.view.show_message(
+                tr("The selected directories contain no scannable file.")
+            )
             return
         # Send relevant options down to the scanner instance
         for k, v in self.options.items():
@@ -771,12 +809,16 @@ class DupeGuru(Broadcaster):
         def do(j):
             j.set_progress(0, tr("Collecting files to scan"))
             if scanner.scan_type == ScanType.Folders:
-                files = list(self.directories.get_folders(folderclass=se.fs.Folder, j=j))
+                files = list(
+                    self.directories.get_folders(folderclass=se.fs.Folder, j=j)
+                )
             else:
-                files = list(self.directories.get_files(fileclasses=self.fileclasses, j=j))
-            if self.options['ignore_hardlink_matches']:
+                files = list(
+                    self.directories.get_files(fileclasses=self.fileclasses, j=j)
+                )
+            if self.options["ignore_hardlink_matches"]:
                 files = self._remove_hardlink_dupes(files)
-            logging.info('Scanning %d files' % len(files))
+            logging.info("Scanning %d files" % len(files))
             self.results.groups = scanner.get_dupe_groups(files, self.ignore_list, j)
             self.discarded_file_count = scanner.discarded_file_count

@@ -792,12 +834,16 @@ class DupeGuru(Broadcaster):
         markfunc = self.results.mark
         for dupe in selected:
             markfunc(dupe)
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def without_ref(self, dupes):
         """Returns ``dupes`` with all reference elements removed.
         """
-        return [dupe for dupe in dupes if self.results.get_group_of_duplicate(dupe).ref is not dupe]
+        return [
+            dupe
+            for dupe in dupes
+            if self.results.get_group_of_duplicate(dupe).ref is not dupe
+        ]

     def get_default(self, key, fallback_value=None):
         result = nonone(self.view.get_default(key), fallback_value)
@@ -812,7 +858,7 @@ class DupeGuru(Broadcaster):
     def set_default(self, key, value):
         self.view.set_default(key, value)

-    #--- Properties
+    # --- Properties
     @property
     def stat_line(self):
         result = self.results.stat_line
@@ -836,12 +882,21 @@ class DupeGuru(Broadcaster):
     @property
     def METADATA_TO_READ(self):
         if self.app_mode == AppMode.Picture:
-            return ['size', 'mtime', 'dimensions', 'exif_timestamp']
+            return ["size", "mtime", "dimensions", "exif_timestamp"]
         elif self.app_mode == AppMode.Music:
             return [
-                'size', 'mtime', 'duration', 'bitrate', 'samplerate', 'title', 'artist',
-                'album', 'genre', 'year', 'track', 'comment'
+                "size",
+                "mtime",
+                "duration",
+                "bitrate",
+                "samplerate",
+                "title",
+                "artist",
+                "album",
+                "genre",
+                "year",
+                "track",
+                "comment",
             ]
         else:
-            return ['size', 'mtime']
-
+            return ["size", "mtime"]
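Most of the core/app.py hunks are black's string normalization at work: string literals are rewritten from single to double quotes, while comment and docstring contents are untouched (note how `# set to 'shelve' for a ShelveCache` keeps its inner single quotes). Black also skips the rewrite when switching quotes would force new escapes, which helps when scanning a diff like this one. A small sketch (hypothetical values, not from app.py):

    s = 'sqlite'        # rewritten by black to: s = "sqlite"
    t = 'it\'s here'    # rewritten to: t = "it's here" (fewer escapes than before)
    u = 'say "hi"'      # left alone: double quotes would force escaping the inner quotes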
core/directories.py

@@ -15,12 +15,13 @@ from hscommon.util import FileOrPath
 from . import fs

 __all__ = [
-    'Directories',
-    'DirectoryState',
-    'AlreadyThereError',
-    'InvalidPathError',
+    "Directories",
+    "DirectoryState",
+    "AlreadyThereError",
+    "InvalidPathError",
 ]

+
 class DirectoryState:
     """Enum describing how a folder should be considered.

@@ -28,16 +29,20 @@ class DirectoryState:
     * DirectoryState.Reference: Scan files, but make sure never to delete any of them
     * DirectoryState.Excluded: Don't scan this folder
     """
+
     Normal = 0
     Reference = 1
     Excluded = 2

+
 class AlreadyThereError(Exception):
     """The path being added is already in the directory list"""

+
 class InvalidPathError(Exception):
     """The path being added is invalid"""

+
 class Directories:
     """Holds user folder selection.

@@ -47,7 +52,8 @@ class Directories:
     Then, when the user starts the scan, :meth:`get_files` is called to retrieve all files (wrapped
     in :mod:`core.fs`) that have to be scanned according to the chosen folders/states.
     """
-    #---Override
+
+    # ---Override
    def __init__(self):
         self._dirs = []
         # {path: state}
@@ -68,10 +74,10 @@ class Directories:
     def __len__(self):
         return len(self._dirs)

-    #---Private
+    # ---Private
     def _default_state_for_path(self, path):
         # Override this in subclasses to specify the state of some special folders.
-        if path.name.startswith('.'): # hidden
+        if path.name.startswith("."):  # hidden
             return DirectoryState.Excluded

     def _get_files(self, from_path, fileclasses, j):
@@ -83,11 +89,13 @@ class Directories:
             # Recursively get files from folders with lots of subfolder is expensive. However, there
             # might be a subfolder in this path that is not excluded. What we want to do is to skim
             # through self.states and see if we must continue, or we can stop right here to save time
-            if not any(p[:len(root)] == root for p in self.states):
+            if not any(p[: len(root)] == root for p in self.states):
                 del dirs[:]
             try:
                 if state != DirectoryState.Excluded:
-                    found_files = [fs.get_file(root + f, fileclasses=fileclasses) for f in files]
+                    found_files = [
+                        fs.get_file(root + f, fileclasses=fileclasses) for f in files
+                    ]
                     found_files = [f for f in found_files if f is not None]
                     # In some cases, directories can be considered as files by dupeGuru, which is
                     # why we have this line below. In fact, there only one case: Bundle files under
@@ -97,7 +105,11 @@ class Directories:
                         if f is not None:
                             found_files.append(f)
                             dirs.remove(d)
-                logging.debug("Collected %d files in folder %s", len(found_files), str(from_path))
+                logging.debug(
+                    "Collected %d files in folder %s",
+                    len(found_files),
+                    str(from_path),
+                )
                 for file in found_files:
                     file.is_ref = state == DirectoryState.Reference
                     yield file
@@ -118,7 +130,7 @@ class Directories:
         except (EnvironmentError, fs.InvalidPath):
             pass

-    #---Public
+    # ---Public
     def add_path(self, path):
         """Adds ``path`` to self, if not already there.

@@ -212,21 +224,21 @@ class Directories:
             root = ET.parse(infile).getroot()
         except Exception:
             return
-        for rdn in root.getiterator('root_directory'):
+        for rdn in root.getiterator("root_directory"):
             attrib = rdn.attrib
-            if 'path' not in attrib:
+            if "path" not in attrib:
                 continue
-            path = attrib['path']
+            path = attrib["path"]
             try:
                 self.add_path(Path(path))
             except (AlreadyThereError, InvalidPathError):
                 pass
-        for sn in root.getiterator('state'):
+        for sn in root.getiterator("state"):
             attrib = sn.attrib
-            if not ('path' in attrib and 'value' in attrib):
+            if not ("path" in attrib and "value" in attrib):
                 continue
-            path = attrib['path']
-            state = attrib['value']
+            path = attrib["path"]
+            state = attrib["value"]
             self.states[Path(path)] = int(state)

     def save_to_file(self, outfile):
@@ -234,17 +246,17 @@ class Directories:

         :param file outfile: path or file pointer to XML file to save to.
         """
-        with FileOrPath(outfile, 'wb') as fp:
-            root = ET.Element('directories')
+        with FileOrPath(outfile, "wb") as fp:
+            root = ET.Element("directories")
             for root_path in self:
-                root_path_node = ET.SubElement(root, 'root_directory')
-                root_path_node.set('path', str(root_path))
+                root_path_node = ET.SubElement(root, "root_directory")
+                root_path_node.set("path", str(root_path))
             for path, state in self.states.items():
-                state_node = ET.SubElement(root, 'state')
-                state_node.set('path', str(path))
-                state_node.set('value', str(state))
+                state_node = ET.SubElement(root, "state")
+                state_node.set("path", str(path))
+                state_node.set("value", str(state))
             tree = ET.ElementTree(root)
-            tree.write(fp, encoding='utf-8')
+            tree.write(fp, encoding="utf-8")

     def set_state(self, path, state):
         """Set the state of folder at ``path``.
@@ -259,4 +271,3 @@ class Directories:
             if path.is_parent_of(iter_path):
                 del self.states[iter_path]
         self.states[path] = state
-
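The one change in this file that is not about quotes is `p[:len(root)]` becoming `p[: len(root)]`: when a slice bound is a compound expression, black treats the slice colon like a binary operator and pads it with spaces. This is precisely the style that flake8's E203 check flags, which is why E203 is commonly ignored in black-formatted projects (see the tox.ini note near the top of this page):

    p[:3]           # simple bound: the slice stays tight
    p[: len(root)]  # compound bound: black pads the colon; flake8 E203 would complain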
112 core/engine.py
@@ -17,25 +17,26 @@ from hscommon.util import flatten, multi_replace
 from hscommon.trans import tr
 from hscommon.jobprogress import job

-(
-    WEIGHT_WORDS,
-    MATCH_SIMILAR_WORDS,
-    NO_FIELD_ORDER,
-) = range(3)
+(WEIGHT_WORDS, MATCH_SIMILAR_WORDS, NO_FIELD_ORDER,) = range(3)

 JOB_REFRESH_RATE = 100

+
 def getwords(s):
     # We decompose the string so that ascii letters with accents can be part of the word.
-    s = normalize('NFD', s)
-    s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", ' ').lower()
-    s = ''.join(c for c in s if c in string.ascii_letters + string.digits + string.whitespace)
-    return [_f for _f in s.split(' ') if _f] # remove empty elements
+    s = normalize("NFD", s)
+    s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", " ").lower()
+    s = "".join(
+        c for c in s if c in string.ascii_letters + string.digits + string.whitespace
+    )
+    return [_f for _f in s.split(" ") if _f]  # remove empty elements

+
 def getfields(s):
-    fields = [getwords(field) for field in s.split(' - ')]
+    fields = [getwords(field) for field in s.split(" - ")]
     return [_f for _f in fields if _f]

+
 def unpack_fields(fields):
     result = []
     for field in fields:
@@ -45,6 +46,7 @@ def unpack_fields(fields):
             result.append(field)
     return result

+
 def compare(first, second, flags=()):
     """Returns the % of words that match between ``first`` and ``second``

@@ -55,11 +57,11 @@ def compare(first, second, flags=()):
         return 0
     if any(isinstance(element, list) for element in first):
         return compare_fields(first, second, flags)
-    second = second[:] #We must use a copy of second because we remove items from it
+    second = second[:]  # We must use a copy of second because we remove items from it
     match_similar = MATCH_SIMILAR_WORDS in flags
     weight_words = WEIGHT_WORDS in flags
     joined = first + second
-    total_count = (sum(len(word) for word in joined) if weight_words else len(joined))
+    total_count = sum(len(word) for word in joined) if weight_words else len(joined)
     match_count = 0
     in_order = True
     for word in first:
@@ -71,12 +73,13 @@ def compare(first, second, flags=()):
         if second[0] != word:
             in_order = False
         second.remove(word)
-        match_count += (len(word) if weight_words else 1)
+        match_count += len(word) if weight_words else 1
     result = round(((match_count * 2) / total_count) * 100)
     if (result == 100) and (not in_order):
-        result = 99 # We cannot consider a match exact unless the ordering is the same
+        result = 99  # We cannot consider a match exact unless the ordering is the same
     return result

+
 def compare_fields(first, second, flags=()):
     """Returns the score for the lowest matching :ref:`fields`.

@@ -87,7 +90,7 @@ def compare_fields(first, second, flags=()):
         return 0
     if NO_FIELD_ORDER in flags:
         results = []
-        #We don't want to remove field directly in the list. We must work on a copy.
+        # We don't want to remove field directly in the list. We must work on a copy.
         second = second[:]
         for field1 in first:
             max = 0
@@ -101,9 +104,12 @@ def compare_fields(first, second, flags=()):
             if matched_field:
                 second.remove(matched_field)
     else:
-        results = [compare(field1, field2, flags) for field1, field2 in zip(first, second)]
+        results = [
+            compare(field1, field2, flags) for field1, field2 in zip(first, second)
+        ]
     return min(results) if results else 0

+
 def build_word_dict(objects, j=job.nulljob):
     """Returns a dict of objects mapped by their words.

@@ -113,11 +119,14 @@ def build_word_dict(objects, j=job.nulljob):
     The result will be a dict with words as keys, lists of objects as values.
     """
     result = defaultdict(set)
-    for object in j.iter_with_progress(objects, 'Prepared %d/%d files', JOB_REFRESH_RATE):
+    for object in j.iter_with_progress(
+        objects, "Prepared %d/%d files", JOB_REFRESH_RATE
+    ):
         for word in unpack_fields(object.words):
             result[word].add(object)
     return result

+
 def merge_similar_words(word_dict):
     """Take all keys in ``word_dict`` that are similar, and merge them together.

@@ -126,7 +135,7 @@ def merge_similar_words(word_dict):
     a word equal to the other.
     """
     keys = list(word_dict.keys())
-    keys.sort(key=len)# we want the shortest word to stay
+    keys.sort(key=len)  # we want the shortest word to stay
     while keys:
         key = keys.pop(0)
         similars = difflib.get_close_matches(key, keys, 100, 0.8)
@@ -138,6 +147,7 @@ def merge_similar_words(word_dict):
             del word_dict[similar]
             keys.remove(similar)

+
 def reduce_common_words(word_dict, threshold):
     """Remove all objects from ``word_dict`` values where the object count >= ``threshold``

@@ -146,7 +156,9 @@ def reduce_common_words(word_dict, threshold):
     The exception to this removal are the objects where all the words of the object are common.
     Because if we remove them, we will miss some duplicates!
     """
-    uncommon_words = set(word for word, objects in word_dict.items() if len(objects) < threshold)
+    uncommon_words = set(
+        word for word, objects in word_dict.items() if len(objects) < threshold
+    )
     for word, objects in list(word_dict.items()):
         if len(objects) < threshold:
             continue
@@ -159,11 +171,13 @@ def reduce_common_words(word_dict, threshold):
         else:
             del word_dict[word]


 # Writing docstrings in a namedtuple is tricky. From Python 3.3, it's possible to set __doc__, but
 # some research allowed me to find a more elegant solution, which is what is done here. See
 # http://stackoverflow.com/questions/1606436/adding-docstrings-to-namedtuples-in-python

-class Match(namedtuple('Match', 'first second percentage')):
+
+class Match(namedtuple("Match", "first second percentage")):
     """Represents a match between two :class:`~core.fs.File`.

     Regarless of the matching method, when two files are determined to match, a Match pair is created,
@@ -182,16 +196,24 @@ class Match(namedtuple('Match', 'first second percentage')):
     their match level according to the scan method which found the match. int from 1 to 100. For
     exact scan methods, such as Contents scans, this will always be 100.
     """
+
     __slots__ = ()

+
 def get_match(first, second, flags=()):
-    #it is assumed here that first and second both have a "words" attribute
+    # it is assumed here that first and second both have a "words" attribute
     percentage = compare(first.words, second.words, flags)
     return Match(first, second, percentage)

+
 def getmatches(
-        objects, min_match_percentage=0, match_similar_words=False, weight_words=False,
-        no_field_order=False, j=job.nulljob):
+    objects,
+    min_match_percentage=0,
+    match_similar_words=False,
+    weight_words=False,
+    no_field_order=False,
+    j=job.nulljob,
+):
     """Returns a list of :class:`Match` within ``objects`` after fuzzily matching their words.

     :param objects: List of :class:`~core.fs.File` to match.
@@ -206,7 +228,7 @@ def getmatches(
     j = j.start_subjob(2)
     sj = j.start_subjob(2)
     for o in objects:
-        if not hasattr(o, 'words'):
+        if not hasattr(o, "words"):
             o.words = getwords(o.name)
     word_dict = build_word_dict(objects, sj)
     reduce_common_words(word_dict, COMMON_WORD_THRESHOLD)
@@ -241,11 +263,15 @@ def getmatches(
     except MemoryError:
         # This is the place where the memory usage is at its peak during the scan.
         # Just continue the process with an incomplete list of matches.
-        del compared # This should give us enough room to call logging.
-        logging.warning('Memory Overflow. Matches: %d. Word dict: %d' % (len(result), len(word_dict)))
+        del compared  # This should give us enough room to call logging.
+        logging.warning(
+            "Memory Overflow. Matches: %d. Word dict: %d"
+            % (len(result), len(word_dict))
+        )
         return result
     return result

+
 def getmatches_by_contents(files, j=job.nulljob):
     """Returns a list of :class:`Match` within ``files`` if their contents is the same.

@@ -263,13 +289,14 @@ def getmatches_by_contents(files, j=job.nulljob):
     for group in possible_matches:
         for first, second in itertools.combinations(group, 2):
             if first.is_ref and second.is_ref:
-                continue # Don't spend time comparing two ref pics together.
+                continue  # Don't spend time comparing two ref pics together.
             if first.md5partial == second.md5partial:
                 if first.md5 == second.md5:
                     result.append(Match(first, second, 100))
         j.add_progress(desc=tr("%d matches found") % len(result))
     return result

+
 class Group:
     """A group of :class:`~core.fs.File` that match together.

@@ -297,7 +324,8 @@ class Group:

     Average match percentage of match pairs containing :attr:`ref`.
     """
-    #---Override
+
+    # ---Override
     def __init__(self):
         self._clear()

@@ -313,7 +341,7 @@ class Group:
     def __len__(self):
         return len(self.ordered)

-    #---Private
+    # ---Private
     def _clear(self):
         self._percentage = None
         self._matches_for_ref = None
@@ -328,7 +356,7 @@ class Group:
         self._matches_for_ref = [match for match in self.matches if ref in match]
         return self._matches_for_ref

-    #---Public
+    # ---Public
     def add_match(self, match):
         """Adds ``match`` to internal match list and possibly add duplicates to the group.

@@ -339,6 +367,7 @@ class Group:

         :param tuple match: pair of :class:`~core.fs.File` to add
         """
+
         def add_candidate(item, match):
             matches = self.candidates[item]
             matches.add(match)
@@ -362,7 +391,11 @@ class Group:

         You can call this after the duplicate scanning process to free a bit of memory.
         """
-        discarded = set(m for m in self.matches if not all(obj in self.unordered for obj in [m.first, m.second]))
+        discarded = set(
+            m
+            for m in self.matches
+            if not all(obj in self.unordered for obj in [m.first, m.second])
+        )
         self.matches -= discarded
         self.candidates = defaultdict(set)
         return discarded
@@ -409,7 +442,9 @@ class Group:
         self.unordered.remove(item)
         self._percentage = None
         self._matches_for_ref = None
-        if (len(self) > 1) and any(not getattr(item, 'is_ref', False) for item in self):
+        if (len(self) > 1) and any(
+            not getattr(item, "is_ref", False) for item in self
+        ):
             if discard_matches:
                 self.matches = set(m for m in self.matches if item not in m)
             else:
@@ -438,7 +473,9 @@ class Group:
         if self._percentage is None:
             if self.dupes:
                 matches = self._get_matches_for_ref()
-                self._percentage = sum(match.percentage for match in matches) // len(matches)
+                self._percentage = sum(match.percentage for match in matches) // len(
+                    matches
+                )
             else:
                 self._percentage = 0
         return self._percentage
@@ -485,7 +522,7 @@ def get_groups(matches):
             del dupe2group
             del matches
             # should free enough memory to continue
-            logging.warning('Memory Overflow. Groups: {0}'.format(len(groups)))
+            logging.warning("Memory Overflow. Groups: {0}".format(len(groups)))
     # Now that we have a group, we have to discard groups' matches and see if there're any "orphan"
     # matches, that is, matches that were candidate in a group but that none of their 2 files were
     # accepted in the group. With these orphan groups, it's safe to build additional groups
@@ -493,9 +530,12 @@ def get_groups(matches):
     orphan_matches = []
     for group in groups:
         orphan_matches += {
-            m for m in group.discard_matches()
+            m
+            for m in group.discard_matches()
             if not any(obj in matched_files for obj in [m.first, m.second])
         }
     if groups and orphan_matches:
-        groups += get_groups(orphan_matches) # no job, as it isn't supposed to take a long time
+        groups += get_groups(
+            orphan_matches
+        )  # no job, as it isn't supposed to take a long time
     return groups
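Several core/engine.py hunks change nothing but comment spacing: black normalizes inline comments to at least two spaces before the `#` and one space after it, per PEP 8. For example, from the merge_similar_words() hunk above:

    # Before: no space around the comment marker.
    keys.sort(key=len)# we want the shortest word to stay
    # After black: two spaces before "#", one space after.
    keys.sort(key=len)  # we want the shortest word to stay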
core/export.py

@@ -114,36 +114,42 @@ ROW_TEMPLATE = """

 CELL_TEMPLATE = """<td>{value}</td>"""

+
 def export_to_xhtml(colnames, rows):
     # a row is a list of values with the first value being a flag indicating if the row should be indented
     if rows:
-        assert len(rows[0]) == len(colnames) + 1 # + 1 is for the "indented" flag
-    colheaders = ''.join(COLHEADERS_TEMPLATE.format(name=name) for name in colnames)
+        assert len(rows[0]) == len(colnames) + 1  # + 1 is for the "indented" flag
+    colheaders = "".join(COLHEADERS_TEMPLATE.format(name=name) for name in colnames)
     rendered_rows = []
     previous_group_id = None
     for row in rows:
         # [2:] is to remove the indented flag + filename
         if row[0] != previous_group_id:
             # We've just changed dupe group, which means that this dupe is a ref. We don't indent it.
-            indented = ''
+            indented = ""
         else:
-            indented = 'indented'
+            indented = "indented"
         filename = row[1]
-        cells = ''.join(CELL_TEMPLATE.format(value=value) for value in row[2:])
-        rendered_rows.append(ROW_TEMPLATE.format(indented=indented, filename=filename, cells=cells))
+        cells = "".join(CELL_TEMPLATE.format(value=value) for value in row[2:])
+        rendered_rows.append(
+            ROW_TEMPLATE.format(indented=indented, filename=filename, cells=cells)
+        )
         previous_group_id = row[0]
-    rendered_rows = ''.join(rendered_rows)
+    rendered_rows = "".join(rendered_rows)
     # The main template can't use format because the css code uses {}
-    content = MAIN_TEMPLATE.replace('$colheaders', colheaders).replace('$rows', rendered_rows)
+    content = MAIN_TEMPLATE.replace("$colheaders", colheaders).replace(
+        "$rows", rendered_rows
+    )
     folder = mkdtemp()
-    destpath = op.join(folder, 'export.htm')
-    fp = open(destpath, 'wt', encoding='utf-8')
+    destpath = op.join(folder, "export.htm")
+    fp = open(destpath, "wt", encoding="utf-8")
     fp.write(content)
     fp.close()
     return destpath

+
 def export_to_csv(dest, colnames, rows):
-    writer = csv.writer(open(dest, 'wt', encoding='utf-8'))
+    writer = csv.writer(open(dest, "wt", encoding="utf-8"))
     writer.writerow(["Group ID"] + colnames)
     for row in rows:
         writer.writerow(row)
80 core/fs.py
@@ -17,19 +17,20 @@ import logging
 from hscommon.util import nonone, get_file_ext

 __all__ = [
-    'File',
-    'Folder',
-    'get_file',
-    'get_files',
-    'FSError',
-    'AlreadyExistsError',
-    'InvalidPath',
-    'InvalidDestinationError',
-    'OperationError',
+    "File",
+    "Folder",
+    "get_file",
+    "get_files",
+    "FSError",
+    "AlreadyExistsError",
+    "InvalidPath",
+    "InvalidDestinationError",
+    "OperationError",
 ]

 NOT_SET = object()

+
 class FSError(Exception):
     cls_message = "An error has occured on '{name}' in '{parent}'"

@@ -40,8 +41,8 @@ class FSError(Exception):
         elif isinstance(fsobject, File):
             name = fsobject.name
         else:
-            name = ''
-        parentname = str(parent) if parent is not None else ''
+            name = ""
+        parentname = str(parent) if parent is not None else ""
         Exception.__init__(self, message.format(name=name, parent=parentname))

@@ -49,32 +50,39 @@ class AlreadyExistsError(FSError):
     "The directory or file name we're trying to add already exists"
     cls_message = "'{name}' already exists in '{parent}'"

+
 class InvalidPath(FSError):
     "The path of self is invalid, and cannot be worked with."
     cls_message = "'{name}' is invalid."

+
 class InvalidDestinationError(FSError):
     """A copy/move operation has been called, but the destination is invalid."""
+
     cls_message = "'{name}' is an invalid destination for this operation."

+
 class OperationError(FSError):
     """A copy/move/delete operation has been called, but the checkup after the
     operation shows that it didn't work."""
+
     cls_message = "Operation on '{name}' failed."

+
 class File:
     """Represents a file and holds metadata to be used for scanning.
     """
+
     INITIAL_INFO = {
-        'size': 0,
-        'mtime': 0,
-        'md5': '',
-        'md5partial': '',
+        "size": 0,
+        "mtime": 0,
+        "md5": "",
+        "md5partial": "",
     }
     # Slots for File make us save quite a bit of memory. In a memory test I've made with a lot of
     # files, I saved 35% memory usage with "unread" files (no _read_info() call) and gains become
     # even greater when we take into account read attributes (70%!). Yeah, it's worth it.
-    __slots__ = ('path', 'is_ref', 'words') + tuple(INITIAL_INFO.keys())
+    __slots__ = ("path", "is_ref", "words") + tuple(INITIAL_INFO.keys())

     def __init__(self, path):
         self.path = path
@@ -90,25 +98,27 @@ class File:
         try:
             self._read_info(attrname)
         except Exception as e:
-            logging.warning("An error '%s' was raised while decoding '%s'", e, repr(self.path))
+            logging.warning(
+                "An error '%s' was raised while decoding '%s'", e, repr(self.path)
+            )
         result = object.__getattribute__(self, attrname)
         if result is NOT_SET:
             result = self.INITIAL_INFO[attrname]
         return result

-    #This offset is where we should start reading the file to get a partial md5
-    #For audio file, it should be where audio data starts
+    # This offset is where we should start reading the file to get a partial md5
+    # For audio file, it should be where audio data starts
     def _get_md5partial_offset_and_size(self):
-        return (0x4000, 0x4000) #16Kb
+        return (0x4000, 0x4000)  # 16Kb

     def _read_info(self, field):
-        if field in ('size', 'mtime'):
+        if field in ("size", "mtime"):
             stats = self.path.stat()
             self.size = nonone(stats.st_size, 0)
             self.mtime = nonone(stats.st_mtime, 0)
-        elif field == 'md5partial':
+        elif field == "md5partial":
             try:
-                fp = self.path.open('rb')
+                fp = self.path.open("rb")
                 offset, size = self._get_md5partial_offset_and_size()
                 fp.seek(offset)
                 partialdata = fp.read(size)
@@ -117,14 +127,14 @@ class File:
                 fp.close()
             except Exception:
                 pass
-        elif field == 'md5':
+        elif field == "md5":
             try:
-                fp = self.path.open('rb')
+                fp = self.path.open("rb")
                 md5 = hashlib.md5()
                 # The goal here is to not run out of memory on really big files. However, the chunk
                 # size has to be large enough so that the python loop isn't too costly in terms of
                 # CPU.
-                CHUNK_SIZE = 1024 * 1024 # 1 mb
+                CHUNK_SIZE = 1024 * 1024  # 1 mb
                 filedata = fp.read(CHUNK_SIZE)
                 while filedata:
                     md5.update(filedata)
@@ -144,7 +154,7 @@ class File:
         for attrname in attrnames:
             getattr(self, attrname)

-    #--- Public
+    # --- Public
     @classmethod
     def can_handle(cls, path):
         """Returns whether this file wrapper class can handle ``path``.
@@ -170,7 +180,7 @@ class File:
         """
         raise NotImplementedError()

-    #--- Properties
+    # --- Properties
     @property
     def extension(self):
         return get_file_ext(self.name)
@@ -189,7 +199,8 @@ class Folder(File):

     It has the size/md5 info of a File, but it's value are the sum of its subitems.
     """
-    __slots__ = File.__slots__ + ('_subfolders', )
+
+    __slots__ = File.__slots__ + ("_subfolders",)

     def __init__(self, path):
         File.__init__(self, path)
@@ -201,12 +212,12 @@ class Folder(File):
         return folders + files

     def _read_info(self, field):
-        if field in {'size', 'mtime'}:
+        if field in {"size", "mtime"}:
             size = sum((f.size for f in self._all_items()), 0)
             self.size = size
             stats = self.path.stat()
             self.mtime = nonone(stats.st_mtime, 0)
-        elif field in {'md5', 'md5partial'}:
+        elif field in {"md5", "md5partial"}:
             # What's sensitive here is that we must make sure that subfiles'
             # md5 are always added up in the same order, but we also want a
             # different md5 if a file gets moved in a different subdirectory.
@@ -214,7 +225,7 @@ class Folder(File):
                 items = self._all_items()
                 items.sort(key=lambda f: f.path)
                 md5s = [getattr(f, field) for f in items]
-                return b''.join(md5s)
+                return b"".join(md5s)

             md5 = hashlib.md5(get_dir_md5_concat())
             digest = md5.digest()
@@ -223,7 +234,9 @@ class Folder(File):
     @property
     def subfolders(self):
         if self._subfolders is None:
-            subfolders = [p for p in self.path.listdir() if not p.islink() and p.isdir()]
+            subfolders = [
+                p for p in self.path.listdir() if not p.islink() and p.isdir()
+            ]
             self._subfolders = [self.__class__(p) for p in subfolders]
         return self._subfolders

@@ -244,6 +257,7 @@ def get_file(path, fileclasses=[File]):
         if fileclass.can_handle(path):
             return fileclass(path)

+
 def get_files(path, fileclasses=[File]):
     """Returns a list of :class:`File` for each file contained in ``path``.
@ -13,4 +13,3 @@ blue, which is supposed to be orange, does the sorting logic, holds selection, e

.. _cross-toolkit: http://www.hardcoded.net/articles/cross-toolkit-software
"""


@ -8,6 +8,7 @@

from hscommon.notify import Listener


class DupeGuruGUIObject(Listener):
def __init__(self, app):
Listener.__init__(self, app)
@ -27,4 +28,3 @@ class DupeGuruGUIObject(Listener):

def results_changed_but_keep_selection(self):
pass

@ -1,8 +1,8 @@
# Created On: 2012-05-30
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import os
@ -10,42 +10,46 @@ import os
from hscommon.gui.base import GUIObject
from hscommon.trans import tr


class DeletionOptionsView:
"""Expected interface for :class:`DeletionOptions`'s view.

*Not actually used in the code. For documentation purposes only.*

Our view presents the user with an appropriate way (probably a mix of checkboxes and radio
buttons) to set the different flags in :class:`DeletionOptions`. Note that
:attr:`DeletionOptions.use_hardlinks` is only relevant if :attr:`DeletionOptions.link_deleted`
is true. This is why we toggle the "enabled" state of that flag.

We expect the view to set :attr:`DeletionOptions.link_deleted` immediately as the user changes
its value because it will toggle :meth:`set_hardlink_option_enabled`

Other than the flags, there's also a prompt message which has a dynamic content, defined by
:meth:`update_msg`.
"""

def update_msg(self, msg: str):
"""Update the dialog's prompt with ``str``.
"""

def show(self):
"""Show the dialog in a modal fashion.

Returns whether the dialog was "accepted" (the user pressed OK).
"""

def set_hardlink_option_enabled(self, is_enabled: bool):
"""Enable or disable the widget controlling :attr:`DeletionOptions.use_hardlinks`.
"""


class DeletionOptions(GUIObject):
"""Present the user with deletion options before proceeding.

When the user activates "Send to trash", we present him with a couple of options that changes
the behavior of that deletion operation.
"""

def __init__(self):
GUIObject.__init__(self)
#: Whether symlinks or hardlinks are used when doing :attr:`link_deleted`.
@ -54,10 +58,10 @@ class DeletionOptions(GUIObject):
#: Delete dupes directly and don't send to trash.
#: *bool*. *get/set*
self.direct = False

def show(self, mark_count):
"""Prompt the user with a modal dialog offering our deletion options.

:param int mark_count: Number of dupes marked for deletion.
:rtype: bool
:returns: Whether the user accepted the dialog (we cancel deletion if false).
@ -69,7 +73,7 @@ class DeletionOptions(GUIObject):
msg = tr("You are sending {} file(s) to the Trash.").format(mark_count)
self.view.update_msg(msg)
return self.view.show()

def supports_links(self):
"""Returns whether our platform supports symlinks.
"""
@ -87,21 +91,19 @@ class DeletionOptions(GUIObject):
except TypeError:
# wrong number of arguments
return True

@property
def link_deleted(self):
"""Replace deleted dupes with symlinks (or hardlinks) to the dupe group reference.

*bool*. *get/set*

Whether the link is a symlink or hardlink is decided by :attr:`use_hardlinks`.
"""
return self._link_deleted

@link_deleted.setter
def link_deleted(self, value):
self._link_deleted = value
hardlinks_enabled = value and self.supports_links()
self.view.set_hardlink_option_enabled(hardlinks_enabled)

@ -9,6 +9,7 @@
from hscommon.gui.base import GUIObject
from .base import DupeGuruGUIObject


class DetailsPanel(GUIObject, DupeGuruGUIObject):
def __init__(self, app):
GUIObject.__init__(self, multibind=True)
@ -19,7 +20,7 @@ class DetailsPanel(GUIObject, DupeGuruGUIObject):
self._refresh()
self.view.refresh()

#--- Private
# --- Private
def _refresh(self):
if self.app.selected_dupes:
dupe = self.app.selected_dupes[0]
@ -31,18 +32,19 @@ class DetailsPanel(GUIObject, DupeGuruGUIObject):
# we don't want the two sides of the table to display the stats for the same file
ref = group.ref if group is not None and group.ref is not dupe else None
data2 = self.app.get_display_info(ref, group, False)
columns = self.app.result_table.COLUMNS[1:] # first column is the 'marked' column
columns = self.app.result_table.COLUMNS[
1:
]  # first column is the 'marked' column
self._table = [(c.display, data1[c.name], data2[c.name]) for c in columns]

#--- Public
# --- Public
def row_count(self):
return len(self._table)

def row(self, row_index):
return self._table[row_index]

#--- Event Handlers
# --- Event Handlers
def dupes_selected(self):
self._refresh()
self.view.refresh()

@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2010-02-06
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon.gui.tree import Tree, Node
@ -13,6 +13,7 @@ from .base import DupeGuruGUIObject

STATE_ORDER = [DirectoryState.Normal, DirectoryState.Reference, DirectoryState.Excluded]


# Lazily loads children
class DirectoryNode(Node):
def __init__(self, tree, path, name):
@ -21,29 +22,31 @@ class DirectoryNode(Node):
self._directory_path = path
self._loaded = False
self._state = STATE_ORDER.index(self._tree.app.directories.get_state(path))

def __len__(self):
if not self._loaded:
self._load()
return Node.__len__(self)

def _load(self):
self.clear()
subpaths = self._tree.app.directories.get_subfolders(self._directory_path)
for path in subpaths:
self.append(DirectoryNode(self._tree, path, path.name))
self._loaded = True

def update_all_states(self):
self._state = STATE_ORDER.index(self._tree.app.directories.get_state(self._directory_path))
self._state = STATE_ORDER.index(
self._tree.app.directories.get_state(self._directory_path)
)
for node in self:
node.update_all_states()

# The state propery is an index to the combobox
@property
def state(self):
return self._state

@state.setter
def state(self, value):
if value == self._state:
@ -52,29 +55,29 @@ class DirectoryNode(Node):
state = STATE_ORDER[value]
self._tree.app.directories.set_state(self._directory_path, state)
self._tree.update_all_states()


class DirectoryTree(Tree, DupeGuruGUIObject):
#--- model -> view calls:
# --- model -> view calls:
# refresh()
# refresh_states() # when only states label need to be refreshed
#
def __init__(self, app):
Tree.__init__(self)
DupeGuruGUIObject.__init__(self, app)

def _view_updated(self):
self._refresh()
self.view.refresh()

def _refresh(self):
self.clear()
for path in self.app.directories:
self.append(DirectoryNode(self, path, str(path)))

def add_directory(self, path):
self.app.add_directory(path)

def remove_selected(self):
selected_paths = self.selected_paths
if not selected_paths:
@ -90,18 +93,17 @@ class DirectoryTree(Tree, DupeGuruGUIObject):
newstate = DirectoryState.Normal
for node in nodes:
node.state = newstate

def select_all(self):
self.selected_nodes = list(self)
self.view.refresh()

def update_all_states(self):
for node in self:
node.update_all_states()
self.view.refresh_states()

#--- Event Handlers

# --- Event Handlers
def directories_changed(self):
self._refresh()
self.view.refresh()

@ -8,8 +8,9 @@
from hscommon.trans import tr
from .ignore_list_table import IgnoreListTable


class IgnoreListDialog:
#--- View interface
# --- View interface
# show()
#

@ -21,7 +22,9 @@ class IgnoreListDialog:
def clear(self):
if not self.ignore_list:
return
msg = tr("Do you really want to remove all %d items from the ignore list?") % len(self.ignore_list)
msg = tr(
"Do you really want to remove all %d items from the ignore list?"
) % len(self.ignore_list)
if self.app.view.ask_yes_no(msg):
self.ignore_list.Clear()
self.refresh()
@ -36,4 +39,3 @@ class IgnoreListDialog:

def show(self):
self.view.show()

@ -1,35 +1,36 @@
# Created By: Virgil Dupras
# Created On: 2012-03-13
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon.gui.table import GUITable, Row
from hscommon.gui.column import Column, Columns
from hscommon.trans import trget

coltr = trget('columns')
coltr = trget("columns")


class IgnoreListTable(GUITable):
COLUMNS = [
# the str concat below saves us needless localization.
Column('path1', coltr("File Path") + " 1"),
Column('path2', coltr("File Path") + " 2"),
Column("path1", coltr("File Path") + " 1"),
Column("path2", coltr("File Path") + " 2"),
]

def __init__(self, ignore_list_dialog):
GUITable.__init__(self)
self.columns = Columns(self)
self.view = None
self.dialog = ignore_list_dialog

#--- Override

# --- Override
def _fill(self):
for path1, path2 in self.dialog.ignore_list:
self.append(IgnoreListRow(self, path1, path2))


class IgnoreListRow(Row):
def __init__(self, table, path1, path2):
@ -38,4 +39,3 @@ class IgnoreListRow(Row):
self.path2_original = path2
self.path1 = str(path1)
self.path2 = str(path2)

@ -9,6 +9,7 @@
from hscommon.gui.base import GUIObject
from hscommon.gui.selectable_list import GUISelectableList


class CriterionCategoryList(GUISelectableList):
def __init__(self, dialog):
self.dialog = dialog
@ -18,6 +19,7 @@ class CriterionCategoryList(GUISelectableList):
self.dialog.select_category(self.dialog.categories[self.selected_index])
GUISelectableList._update_selection(self)


class PrioritizationList(GUISelectableList):
def __init__(self, dialog):
self.dialog = dialog
@ -41,6 +43,7 @@ class PrioritizationList(GUISelectableList):
del prilist[i]
self._refresh_contents()


class PrioritizeDialog(GUIObject):
def __init__(self, app):
GUIObject.__init__(self)
@ -52,15 +55,15 @@ class PrioritizeDialog(GUIObject):
self.prioritizations = []
self.prioritization_list = PrioritizationList(self)

#--- Override
# --- Override
def _view_updated(self):
self.category_list.select(0)

#--- Private
# --- Private
def _sort_key(self, dupe):
return tuple(crit.sort_key(dupe) for crit in self.prioritizations)

#--- Public
# --- Public
def select_category(self, category):
self.criteria = category.criteria_list()
self.criteria_list[:] = [c.display_value for c in self.criteria]
@ -1,29 +1,29 @@
# Created By: Virgil Dupras
# Created On: 2010-04-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon import desktop

from .problem_table import ProblemTable


class ProblemDialog:
def __init__(self, app):
self.app = app
self._selected_dupe = None
self.problem_table = ProblemTable(self)

def refresh(self):
self._selected_dupe = None
self.problem_table.refresh()

def reveal_selected_dupe(self):
if self._selected_dupe is not None:
desktop.reveal_path(self._selected_dupe.path)

def select_dupe(self, dupe):
self._selected_dupe = dupe

@ -1,39 +1,40 @@
# Created By: Virgil Dupras
# Created On: 2010-04-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon.gui.table import GUITable, Row
from hscommon.gui.column import Column, Columns
from hscommon.trans import trget

coltr = trget('columns')
coltr = trget("columns")


class ProblemTable(GUITable):
COLUMNS = [
Column('path', coltr("File Path")),
Column('msg', coltr("Error Message")),
Column("path", coltr("File Path")),
Column("msg", coltr("Error Message")),
]

def __init__(self, problem_dialog):
GUITable.__init__(self)
self.columns = Columns(self)
self.dialog = problem_dialog

#--- Override

# --- Override
def _update_selection(self):
row = self.selected_row
dupe = row.dupe if row is not None else None
self.dialog.select_dupe(dupe)

def _fill(self):
problems = self.dialog.app.results.problems
for dupe, msg in problems:
self.append(ProblemRow(self, dupe, msg))


class ProblemRow(Row):
def __init__(self, table, dupe, msg):
@ -41,4 +42,3 @@ class ProblemRow(Row):
self.dupe = dupe
self.msg = msg
self.path = str(dupe.path)

@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2010-02-11
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from operator import attrgetter
@ -13,6 +13,7 @@ from hscommon.gui.column import Columns

from .base import DupeGuruGUIObject


class DupeRow(Row):
def __init__(self, table, group, dupe):
Row.__init__(self, table)
@ -22,14 +23,14 @@ class DupeRow(Row):
self._data = None
self._data_delta = None
self._delta_columns = None

def is_cell_delta(self, column_name):
"""Returns whether a cell is in delta mode (orange color).

If the result table is in delta mode, returns True if the column is one of the "delta
columns", that is, one of the columns that display a a differential value rather than an
absolute value.

If not, returns True if the dupe's value is different from its ref value.
"""
if not self.table.delta_values:
@ -42,62 +43,64 @@ class DupeRow(Row):
dupe_info = self.data
ref_info = self._group.ref.get_display_info(group=self._group, delta=False)
for key, value in dupe_info.items():
if (key not in self._delta_columns) and (ref_info[key].lower() != value.lower()):
if (key not in self._delta_columns) and (
ref_info[key].lower() != value.lower()
):
self._delta_columns.add(key)
return column_name in self._delta_columns

@property
def data(self):
if self._data is None:
self._data = self._app.get_display_info(self._dupe, self._group, False)
return self._data

@property
def data_delta(self):
if self._data_delta is None:
self._data_delta = self._app.get_display_info(self._dupe, self._group, True)
return self._data_delta

@property
def isref(self):
return self._dupe is self._group.ref

@property
def markable(self):
return self._app.results.is_markable(self._dupe)

@property
def marked(self):
return self._app.results.is_marked(self._dupe)

@marked.setter
def marked(self, value):
self._app.mark_dupe(self._dupe, value)


class ResultTable(GUITable, DupeGuruGUIObject):
def __init__(self, app):
GUITable.__init__(self)
DupeGuruGUIObject.__init__(self, app)
self.columns = Columns(self, prefaccess=app, savename='ResultTable')
self.columns = Columns(self, prefaccess=app, savename="ResultTable")
self._power_marker = False
self._delta_values = False
self._sort_descriptors = ('name', True)

#--- Override
self._sort_descriptors = ("name", True)

# --- Override
def _view_updated(self):
self._refresh_with_view()

def _restore_selection(self, previous_selection):
if self.app.selected_dupes:
to_find = set(self.app.selected_dupes)
indexes = [i for i, r in enumerate(self) if r._dupe in to_find]
self.selected_indexes = indexes

def _update_selection(self):
rows = self.selected_rows
self.app._select_dupes(list(map(attrgetter('_dupe'), rows)))

self.app._select_dupes(list(map(attrgetter("_dupe"), rows)))

def _fill(self):
if not self.power_marker:
for group in self.app.results.groups:
@ -108,22 +111,22 @@ class ResultTable(GUITable, DupeGuruGUIObject):
for dupe in self.app.results.dupes:
group = self.app.results.get_group_of_duplicate(dupe)
self.append(DupeRow(self, group, dupe))

def _refresh_with_view(self):
self.refresh()
self.view.show_selected_row()

#--- Public

# --- Public
def get_row_value(self, index, column):
try:
row = self[index]
except IndexError:
return '---'
return "---"
if self.delta_values:
return row.data_delta[column]
else:
return row.data[column]

def rename_selected(self, newname):
row = self.selected_row
if row is None:
@ -133,7 +136,7 @@ class ResultTable(GUITable, DupeGuruGUIObject):
row._data = None
row._data_delta = None
return self.app.rename_selected(newname)

def sort(self, key, asc):
if self.power_marker:
self.app.results.sort_dupes(key, asc, self.delta_values)
@ -141,12 +144,12 @@ class ResultTable(GUITable, DupeGuruGUIObject):
self.app.results.sort_groups(key, asc)
self._sort_descriptors = (key, asc)
self._refresh_with_view()

#--- Properties

# --- Properties
@property
def power_marker(self):
return self._power_marker

@power_marker.setter
def power_marker(self, value):
if value == self._power_marker:
@ -155,29 +158,29 @@ class ResultTable(GUITable, DupeGuruGUIObject):
key, asc = self._sort_descriptors
self.sort(key, asc)
# no need to refresh, it has happened in sort()

@property
def delta_values(self):
return self._delta_values

@delta_values.setter
def delta_values(self, value):
if value == self._delta_values:
return
self._delta_values = value
self.refresh()

@property
def selected_dupe_count(self):
return sum(1 for row in self.selected_rows if not row.isref)

#--- Event Handlers

# --- Event Handlers
def marking_changed(self):
self.view.invalidate_markings()

def results_changed(self):
self._refresh_with_view()

def results_changed_but_keep_selection(self):
# What we want to to here is that instead of restoring selected *dupes* after refresh, we
# restore selected *paths*.
@ -185,7 +188,6 @@ class ResultTable(GUITable, DupeGuruGUIObject):
self.refresh(refresh_view=False)
self.select(indexes)
self.view.refresh()

def save_session(self):
self.columns.save_columns()

@ -1,21 +1,23 @@
# Created By: Virgil Dupras
# Created On: 2010-02-11
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import DupeGuruGUIObject


class StatsLabel(DupeGuruGUIObject):
def _view_updated(self):
self.view.refresh()

@property
def display(self):
return self.app.stat_line

def results_changed(self):
self.view.refresh()

marking_changed = results_changed
@ -10,13 +10,15 @@ from xml.etree import ElementTree as ET

from hscommon.util import FileOrPath


class IgnoreList:
"""An ignore list implementation that is iterable, filterable and exportable to XML.

Call Ignore to add an ignore list entry, and AreIgnore to check if 2 items are in the list.
When iterated, 2 sized tuples will be returned, the tuples containing 2 items ignored together.
"""
#---Override

# ---Override
def __init__(self):
self._ignored = {}
self._count = 0
@ -29,7 +31,7 @@ class IgnoreList:
def __len__(self):
return self._count

#---Public
# ---Public
def AreIgnored(self, first, second):
def do_check(first, second):
try:
@ -99,14 +101,14 @@ class IgnoreList:
root = ET.parse(infile).getroot()
except Exception:
return
file_elems = (e for e in root if e.tag == 'file')
file_elems = (e for e in root if e.tag == "file")
for fn in file_elems:
file_path = fn.get('path')
file_path = fn.get("path")
if not file_path:
continue
subfile_elems = (e for e in fn if e.tag == 'file')
subfile_elems = (e for e in fn if e.tag == "file")
for sfn in subfile_elems:
subfile_path = sfn.get('path')
subfile_path = sfn.get("path")
if subfile_path:
self.Ignore(file_path, subfile_path)

@ -115,15 +117,13 @@ class IgnoreList:

outfile can be a file object or a filename.
"""
root = ET.Element('ignore_list')
root = ET.Element("ignore_list")
for filename, subfiles in self._ignored.items():
file_node = ET.SubElement(root, 'file')
file_node.set('path', filename)
file_node = ET.SubElement(root, "file")
file_node.set("path", filename)
for subfilename in subfiles:
subfile_node = ET.SubElement(file_node, 'file')
subfile_node.set('path', subfilename)
subfile_node = ET.SubElement(file_node, "file")
subfile_node.set("path", subfilename)
tree = ET.ElementTree(root)
with FileOrPath(outfile, 'wb') as fp:
tree.write(fp, encoding='utf-8')

with FileOrPath(outfile, "wb") as fp:
tree.write(fp, encoding="utf-8")
@ -2,40 +2,41 @@
# Created On: 2006/02/23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html


class Markable:
def __init__(self):
self.__marked = set()
self.__inverted = False

#---Virtual
#About did_mark and did_unmark: They only happen what an object is actually added/removed

# ---Virtual
# About did_mark and did_unmark: They only happen what an object is actually added/removed
# in self.__marked, and is not affected by __inverted. Thus, self.mark while __inverted
#is True will launch _DidUnmark.
# is True will launch _DidUnmark.
def _did_mark(self, o):
pass

def _did_unmark(self, o):
pass

def _get_markable_count(self):
return 0

def _is_markable(self, o):
return True

#---Protected

# ---Protected
def _remove_mark_flag(self, o):
try:
self.__marked.remove(o)
self._did_unmark(o)
except KeyError:
pass

#---Public
pass

# ---Public
def is_marked(self, o):
if not self._is_markable(o):
return False
@ -43,31 +44,31 @@ class Markable:
if self.__inverted:
is_marked = not is_marked
return is_marked

def mark(self, o):
if self.is_marked(o):
return False
if not self._is_markable(o):
return False
return self.mark_toggle(o)

def mark_multiple(self, objects):
for o in objects:
self.mark(o)

def mark_all(self):
self.mark_none()
self.__inverted = True

def mark_invert(self):
self.__inverted = not self.__inverted

def mark_none(self):
for o in self.__marked:
self._did_unmark(o)
self.__marked = set()
self.__inverted = False

def mark_toggle(self, o):
try:
self.__marked.remove(o)
@ -78,32 +79,33 @@ class Markable:
self.__marked.add(o)
self._did_mark(o)
return True

def mark_toggle_multiple(self, objects):
for o in objects:
self.mark_toggle(o)

def unmark(self, o):
if not self.is_marked(o):
return False
return self.mark_toggle(o)

def unmark_multiple(self, objects):
for o in objects:
self.unmark(o)

#--- Properties

# --- Properties
@property
def mark_count(self):
if self.__inverted:
return self._get_markable_count() - len(self.__marked)
else:
return len(self.__marked)

@property
def mark_inverted(self):
return self.__inverted


class MarkableList(list, Markable):
def __init__(self):
list.__init__(self)
@ -1 +1 @@
from . import fs, prioritize, result_table, scanner # noqa
from . import fs, prioritize, result_table, scanner  # noqa
@ -13,25 +13,37 @@ from core.util import format_timestamp, format_perc, format_words, format_dupe_c
from core import fs

TAG_FIELDS = {
'audiosize', 'duration', 'bitrate', 'samplerate', 'title', 'artist',
'album', 'genre', 'year', 'track', 'comment'
"audiosize",
"duration",
"bitrate",
"samplerate",
"title",
"artist",
"album",
"genre",
"year",
"track",
"comment",
}


class MusicFile(fs.File):
INITIAL_INFO = fs.File.INITIAL_INFO.copy()
INITIAL_INFO.update({
'audiosize': 0,
'bitrate': 0,
'duration': 0,
'samplerate': 0,
'artist': '',
'album': '',
'title': '',
'genre': '',
'comment': '',
'year': '',
'track': 0,
})
INITIAL_INFO.update(
{
"audiosize": 0,
"bitrate": 0,
"duration": 0,
"samplerate": 0,
"artist": "",
"album": "",
"title": "",
"genre": "",
"comment": "",
"year": "",
"track": 0,
}
)
__slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys())

@classmethod
@ -60,26 +72,26 @@ class MusicFile(fs.File):
else:
percentage = group.percentage
dupe_count = len(group.dupes)
dupe_folder_path = getattr(self, 'display_folder_path', self.folder_path)
dupe_folder_path = getattr(self, "display_folder_path", self.folder_path)
return {
'name': self.name,
'folder_path': str(dupe_folder_path),
'size': format_size(size, 2, 2, False),
'duration': format_time(duration, with_hours=False),
'bitrate': str(bitrate),
'samplerate': str(samplerate),
'extension': self.extension,
'mtime': format_timestamp(mtime, delta and m),
'title': self.title,
'artist': self.artist,
'album': self.album,
'genre': self.genre,
'year': self.year,
'track': str(self.track),
'comment': self.comment,
'percentage': format_perc(percentage),
'words': format_words(self.words) if hasattr(self, 'words') else '',
'dupe_count': format_dupe_count(dupe_count),
"name": self.name,
"folder_path": str(dupe_folder_path),
"size": format_size(size, 2, 2, False),
"duration": format_time(duration, with_hours=False),
"bitrate": str(bitrate),
"samplerate": str(samplerate),
"extension": self.extension,
"mtime": format_timestamp(mtime, delta and m),
"title": self.title,
"artist": self.artist,
"album": self.album,
"genre": self.genre,
"year": self.year,
"track": str(self.track),
"comment": self.comment,
"percentage": format_perc(percentage),
"words": format_words(self.words) if hasattr(self, "words") else "",
"dupe_count": format_dupe_count(dupe_count),
}

def _get_md5partial_offset_and_size(self):
@ -101,4 +113,3 @@ class MusicFile(fs.File):
self.comment = f.comment
self.year = f.year
self.track = f.track

@ -8,11 +8,16 @@
from hscommon.trans import trget

from core.prioritize import (
KindCategory, FolderCategory, FilenameCategory, NumericalCategory,
SizeCategory, MtimeCategory
KindCategory,
FolderCategory,
FilenameCategory,
NumericalCategory,
SizeCategory,
MtimeCategory,
)

coltr = trget('columns')
coltr = trget("columns")


class DurationCategory(NumericalCategory):
NAME = coltr("Duration")
@ -20,21 +25,29 @@ class DurationCategory(NumericalCategory):
def extract_value(self, dupe):
return dupe.duration


class BitrateCategory(NumericalCategory):
NAME = coltr("Bitrate")

def extract_value(self, dupe):
return dupe.bitrate


class SamplerateCategory(NumericalCategory):
NAME = coltr("Samplerate")

def extract_value(self, dupe):
return dupe.samplerate


def all_categories():
return [
KindCategory, FolderCategory, FilenameCategory, SizeCategory, DurationCategory,
BitrateCategory, SamplerateCategory, MtimeCategory
KindCategory,
FolderCategory,
FilenameCategory,
SizeCategory,
DurationCategory,
BitrateCategory,
SamplerateCategory,
MtimeCategory,
]
@ -1,8 +1,8 @@
# Created On: 2011-11-27
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon.gui.column import Column
@ -10,28 +10,29 @@ from hscommon.trans import trget

from core.gui.result_table import ResultTable as ResultTableBase

coltr = trget('columns')
coltr = trget("columns")


class ResultTable(ResultTableBase):
COLUMNS = [
Column('marked', ''),
Column('name', coltr("Filename")),
Column('folder_path', coltr("Folder"), visible=False, optional=True),
Column('size', coltr("Size (MB)"), optional=True),
Column('duration', coltr("Time"), optional=True),
Column('bitrate', coltr("Bitrate"), optional=True),
Column('samplerate', coltr("Sample Rate"), visible=False, optional=True),
Column('extension', coltr("Kind"), optional=True),
Column('mtime', coltr("Modification"), visible=False, optional=True),
Column('title', coltr("Title"), visible=False, optional=True),
Column('artist', coltr("Artist"), visible=False, optional=True),
Column('album', coltr("Album"), visible=False, optional=True),
Column('genre', coltr("Genre"), visible=False, optional=True),
Column('year', coltr("Year"), visible=False, optional=True),
Column('track', coltr("Track Number"), visible=False, optional=True),
Column('comment', coltr("Comment"), visible=False, optional=True),
Column('percentage', coltr("Match %"), optional=True),
Column('words', coltr("Words Used"), visible=False, optional=True),
Column('dupe_count', coltr("Dupe Count"), visible=False, optional=True),
Column("marked", ""),
Column("name", coltr("Filename")),
Column("folder_path", coltr("Folder"), visible=False, optional=True),
Column("size", coltr("Size (MB)"), optional=True),
Column("duration", coltr("Time"), optional=True),
Column("bitrate", coltr("Bitrate"), optional=True),
Column("samplerate", coltr("Sample Rate"), visible=False, optional=True),
Column("extension", coltr("Kind"), optional=True),
Column("mtime", coltr("Modification"), visible=False, optional=True),
Column("title", coltr("Title"), visible=False, optional=True),
Column("artist", coltr("Artist"), visible=False, optional=True),
Column("album", coltr("Album"), visible=False, optional=True),
Column("genre", coltr("Genre"), visible=False, optional=True),
Column("year", coltr("Year"), visible=False, optional=True),
Column("track", coltr("Track Number"), visible=False, optional=True),
Column("comment", coltr("Comment"), visible=False, optional=True),
Column("percentage", coltr("Match %"), optional=True),
Column("words", coltr("Words Used"), visible=False, optional=True),
Column("dupe_count", coltr("Dupe Count"), visible=False, optional=True),
]
DELTA_COLUMNS = {'size', 'duration', 'bitrate', 'samplerate', 'mtime'}
DELTA_COLUMNS = {"size", "duration", "bitrate", "samplerate", "mtime"}
@ -8,6 +8,7 @@ from hscommon.trans import tr

from core.scanner import Scanner as ScannerBase, ScanOption, ScanType


class ScannerME(ScannerBase):
@staticmethod
def _key_func(dupe):
@ -22,5 +23,3 @@ class ScannerME(ScannerBase):
ScanOption(ScanType.Tag, tr("Tags")),
ScanOption(ScanType.Contents, tr("Contents")),
]

@ -1 +1,12 @@
from . import block, cache, exif, iphoto_plist, matchblock, matchexif, photo, prioritize, result_table, scanner # noqa
from . import (  # noqa
block,
cache,
exif,
iphoto_plist,
matchblock,
matchexif,
photo,
prioritize,
result_table,
scanner,
)
@ -6,7 +6,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ._block import NoBlocksError, DifferentBlockCountError, avgdiff, getblocks2 # NOQA
from ._block import NoBlocksError, DifferentBlockCountError, avgdiff, getblocks2  # NOQA

# Converted to C
# def getblock(image):
@ -4,7 +4,8 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ._cache import string_to_colors # noqa
from ._cache import string_to_colors  # noqa


def colors_to_string(colors):
"""Transform the 3 sized tuples 'colors' into a hex string.
@ -12,7 +13,8 @@ def colors_to_string(colors):
[(0,100,255)] --> 0064ff
[(1,2,3),(4,5,6)] --> 010203040506
"""
return ''.join('%02x%02x%02x' % (r, g, b) for r, g, b in colors)
return "".join("%02x%02x%02x" % (r, g, b) for r, g, b in colors)


# This function is an important bottleneck of dupeGuru PE. It has been converted to C.
# def string_to_colors(s):
@ -23,4 +25,3 @@ def colors_to_string(colors):
# number = int(s[i:i+6], 16)
# result.append((number >> 16, (number >> 8) & 0xff, number & 0xff))
# return result

@ -12,29 +12,36 @@ from collections import namedtuple

from .cache import string_to_colors, colors_to_string


def wrap_path(path):
return 'path:{}'.format(path)
return "path:{}".format(path)


def unwrap_path(key):
return key[5:]


def wrap_id(path):
return 'id:{}'.format(path)
return "id:{}".format(path)


def unwrap_id(key):
return int(key[3:])

CacheRow = namedtuple('CacheRow', 'id path blocks mtime')

CacheRow = namedtuple("CacheRow", "id path blocks mtime")


class ShelveCache:
"""A class to cache picture blocks in a shelve backend.
"""

def __init__(self, db=None, readonly=False):
self.istmp = db is None
if self.istmp:
self.dtmp = tempfile.mkdtemp()
self.ftmp = db = op.join(self.dtmp, 'tmpdb')
flag = 'r' if readonly else 'c'
self.ftmp = db = op.join(self.dtmp, "tmpdb")
flag = "r" if readonly else "c"
self.shelve = shelve.open(db, flag)
self.maxid = self._compute_maxid()

@ -54,10 +61,10 @@ class ShelveCache:
return string_to_colors(self.shelve[skey].blocks)

def __iter__(self):
return (unwrap_path(k) for k in self.shelve if k.startswith('path:'))
return (unwrap_path(k) for k in self.shelve if k.startswith("path:"))

def __len__(self):
return sum(1 for k in self.shelve if k.startswith('path:'))
return sum(1 for k in self.shelve if k.startswith("path:"))

def __setitem__(self, path_str, blocks):
blocks = colors_to_string(blocks)
@ -74,7 +81,9 @@ class ShelveCache:
self.shelve[wrap_id(rowid)] = wrap_path(path_str)

def _compute_maxid(self):
return max((unwrap_id(k) for k in self.shelve if k.startswith('id:')), default=1)
return max(
(unwrap_id(k) for k in self.shelve if k.startswith("id:")), default=1
)

def _get_new_id(self):
self.maxid += 1
@ -133,4 +142,3 @@ class ShelveCache:
# #402 and #439. I don't think it hurts to silently ignore the error, so that's
# what we do
pass

@ -11,10 +11,12 @@ import sqlite3 as sqlite

from .cache import string_to_colors, colors_to_string


class SqliteCache:
"""A class to cache picture blocks in a sqlite backend.
"""
def __init__(self, db=':memory:', readonly=False):

def __init__(self, db=":memory:", readonly=False):
# readonly is not used in the sqlite version of the cache
self.dbname = db
self.con = None
@ -67,34 +69,40 @@ class SqliteCache:
try:
self.con.execute(sql, [blocks, mtime, path_str])
except sqlite.OperationalError:
logging.warning('Picture cache could not set value for key %r', path_str)
logging.warning("Picture cache could not set value for key %r", path_str)
except sqlite.DatabaseError as e:
logging.warning('DatabaseError while setting value for key %r: %s', path_str, str(e))
logging.warning(
"DatabaseError while setting value for key %r: %s", path_str, str(e)
)

def _create_con(self, second_try=False):
def create_tables():
logging.debug("Creating picture cache tables.")
self.con.execute("drop table if exists pictures")
self.con.execute("drop index if exists idx_path")
self.con.execute("create table pictures(path TEXT, mtime INTEGER, blocks TEXT)")
self.con.execute(
"create table pictures(path TEXT, mtime INTEGER, blocks TEXT)"
)
self.con.execute("create index idx_path on pictures (path)")

self.con = sqlite.connect(self.dbname, isolation_level=None)
try:
self.con.execute("select path, mtime, blocks from pictures where 1=2")
except sqlite.OperationalError: # new db
except sqlite.OperationalError:  # new db
create_tables()
except sqlite.DatabaseError as e: # corrupted db
except sqlite.DatabaseError as e:  # corrupted db
if second_try:
raise # Something really strange is happening
logging.warning('Could not create picture cache because of an error: %s', str(e))
raise  # Something really strange is happening
logging.warning(
"Could not create picture cache because of an error: %s", str(e)
)
self.con.close()
os.remove(self.dbname)
self._create_con(second_try=True)

def clear(self):
self.close()
if self.dbname != ':memory:':
if self.dbname != ":memory:":
os.remove(self.dbname)
self._create_con()

@ -117,7 +125,9 @@ class SqliteCache:
raise ValueError(path)

def get_multiple(self, rowids):
sql = "select rowid, blocks from pictures where rowid in (%s)" % ','.join(map(str, rowids))
sql = "select rowid, blocks from pictures where rowid in (%s)" % ",".join(
map(str, rowids)
)
cur = self.con.execute(sql)
return ((rowid, string_to_colors(blocks)) for rowid, blocks in cur)

@ -138,6 +148,7 @@ class SqliteCache:
continue
todelete.append(rowid)
if todelete:
sql = "delete from pictures where rowid in (%s)" % ','.join(map(str, todelete))
sql = "delete from pictures where rowid in (%s)" % ",".join(
map(str, todelete)
)
self.con.execute(sql)

@ -83,17 +83,17 @@ EXIF_TAGS = {
0xA003: "PixelYDimension",
0xA004: "RelatedSoundFile",
0xA005: "InteroperabilityIFDPointer",
0xA20B: "FlashEnergy", # 0x920B in TIFF/EP
0xA20C: "SpatialFrequencyResponse", # 0x920C    -  -
0xA20E: "FocalPlaneXResolution", # 0x920E    -  -
0xA20F: "FocalPlaneYResolution", # 0x920F    -  -
0xA210: "FocalPlaneResolutionUnit", # 0x9210    -  -
0xA214: "SubjectLocation", # 0x9214    -  -
0xA215: "ExposureIndex", # 0x9215    -  -
0xA217: "SensingMethod", # 0x9217    -  -
0xA20B: "FlashEnergy",  # 0x920B in TIFF/EP
0xA20C: "SpatialFrequencyResponse",  # 0x920C    -  -
0xA20E: "FocalPlaneXResolution",  # 0x920E    -  -
0xA20F: "FocalPlaneYResolution",  # 0x920F    -  -
0xA210: "FocalPlaneResolutionUnit",  # 0x9210    -  -
0xA214: "SubjectLocation",  # 0x9214    -  -
0xA215: "ExposureIndex",  # 0x9215    -  -
0xA217: "SensingMethod",  # 0x9217    -  -
0xA300: "FileSource",
0xA301: "SceneType",
0xA302: "CFAPattern", # 0x828E in TIFF/EP
0xA302: "CFAPattern",  # 0x828E in TIFF/EP
0xA401: "CustomRendered",
0xA402: "ExposureMode",
0xA403: "WhiteBalance",
@ -148,17 +148,18 @@ GPS_TA0GS = {
0x1B: "GPSProcessingMethod",
0x1C: "GPSAreaInformation",
0x1D: "GPSDateStamp",
0x1E: "GPSDifferential"
0x1E: "GPSDifferential",
}

INTEL_ENDIAN = ord('I')
MOTOROLA_ENDIAN = ord('M')
INTEL_ENDIAN = ord("I")
MOTOROLA_ENDIAN = ord("M")

# About MAX_COUNT: It's possible to have corrupted exif tags where the entry count is way too high
# and thus makes us loop, not endlessly, but for heck of a long time for nothing. Therefore, we put
# an arbitrary limit on the entry count we'll allow ourselves to read and any IFD reporting more
# entries than that will be considered corrupt.
MAX_COUNT = 0xffff
MAX_COUNT = 0xFFFF


def s2n_motorola(bytes):
x = 0
@ -166,6 +167,7 @@ def s2n_motorola(bytes):
x = (x << 8) | c
return x


def s2n_intel(bytes):
x = 0
y = 0
@ -174,13 +176,14 @@ def s2n_intel(bytes):
y = y + 8
return x


class Fraction:
def __init__(self, num, den):
self.num = num
self.den = den

def __repr__(self):
return '%d/%d' % (self.num, self.den)
return "%d/%d" % (self.num, self.den)


class TIFF_file:
@ -190,16 +193,22 @@ class TIFF_file:
self.s2nfunc = s2n_intel if self.endian == INTEL_ENDIAN else s2n_motorola

def s2n(self, offset, length, signed=0, debug=False):
slice = self.data[offset:offset+length]
slice = self.data[offset : offset + length]
val = self.s2nfunc(slice)
# Sign extension ?
if signed:
msb = 1 << (8*length - 1)
msb = 1 << (8 * length - 1)
if val & msb:
val = val - (msb << 1)
if debug:
logging.debug(self.endian)
logging.debug("Slice for offset %d length %d: %r and value: %d", offset, length, slice, val)
logging.debug(
"Slice for offset %d length %d: %r and value: %d",
offset,
length,
slice,
val,
)
return val

def first_IFD(self):
@ -225,30 +234,31 @@ class TIFF_file:
return []
a = []
for i in range(entries):
entry = ifd + 2 + 12*i
entry = ifd + 2 + 12 * i
tag = self.s2n(entry, 2)
type = self.s2n(entry+2, 2)
type = self.s2n(entry + 2, 2)
if not 1 <= type <= 10:
continue # not handled
typelen = [1, 1, 2, 4, 8, 1, 1, 2, 4, 8][type-1]
count = self.s2n(entry+4, 4)
continue  # not handled
typelen = [1, 1, 2, 4, 8, 1, 1, 2, 4, 8][type - 1]
count = self.s2n(entry + 4, 4)
if count > MAX_COUNT:
logging.debug("Probably corrupt. Aborting.")
return []
offset = entry+8
if count*typelen > 4:
offset = entry + 8
if count * typelen > 4:
offset = self.s2n(offset, 4)
if type == 2:
# Special case: nul-terminated ASCII string
values = str(self.data[offset:offset+count-1], encoding='latin-1')
values = str(self.data[offset : offset + count - 1], encoding="latin-1")
else:
values = []
signed = (type == 6 or type >= 8)
signed = type == 6 or type >= 8
for j in range(count):
if type in {5, 10}:
# The type is either 5 or 10
value_j = Fraction(self.s2n(offset, 4, signed),
self.s2n(offset+4, 4, signed))
value_j = Fraction(
self.s2n(offset, 4, signed), self.s2n(offset + 4, 4, signed)
)
else:
# Not a fraction
value_j = self.s2n(offset, typelen, signed)
@ -258,32 +268,37 @@ class TIFF_file:
a.append((tag, type, values))
return a


def read_exif_header(fp):
# If `fp`'s first bytes are not exif, it tries to find it in the next 4kb
def isexif(data):
return data[0:4] == b'\377\330\377\341' and data[6:10] == b'Exif'
return data[0:4] == b"\377\330\377\341" and data[6:10] == b"Exif"

data = fp.read(12)
if isexif(data):
return data
# ok, not exif, try to find it
large_data = fp.read(4096)
try:
index = large_data.index(b'Exif')
data = large_data[index-6:index+6]
index = large_data.index(b"Exif")
data = large_data[index - 6 : index + 6]
# large_data omits the first 12 bytes, and the index is at the middle of the header, so we
# must seek index + 18
fp.seek(index+18)
fp.seek(index + 18)
return data
except ValueError:
raise ValueError("Not an Exif file")


def get_fields(fp):
data = read_exif_header(fp)
length = data[4] * 256 + data[5]
logging.debug("Exif header length: %d bytes", length)
data = fp.read(length-8)
data = fp.read(length - 8)
data_format = data[0]
logging.debug("%s format", {INTEL_ENDIAN: 'Intel', MOTOROLA_ENDIAN: 'Motorola'}[data_format])
logging.debug(
"%s format", {INTEL_ENDIAN: "Intel", MOTOROLA_ENDIAN: "Motorola"}[data_format]
)
T = TIFF_file(data)
# There may be more than one IFD per file, but we only read the first one because others are
# most likely thumbnails.
@ -294,9 +309,9 @@ def get_fields(fp):
try:
stag = EXIF_TAGS[tag]
except KeyError:
stag = '0x%04X' % tag
stag = "0x%04X" % tag
if stag in result:
return # don't overwrite data
return  # don't overwrite data
result[stag] = values

logging.debug("IFD at offset %d", main_IFD_offset)
@@ -1,24 +1,26 @@
 # Created By: Virgil Dupras
 # Created On: 2014-03-15
 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
-#
-# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
-# which should be included with this package. The terms are also available at
+#
+# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
+# which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

 import plistlib


 class IPhotoPlistParser(plistlib._PlistParser):
     """A parser for iPhoto plists.

     iPhoto plists tend to be malformed, so we have to subclass the built-in parser to be a bit more
     lenient.
     """

     def __init__(self):
         plistlib._PlistParser.__init__(self, use_builtin_types=True, dict_type=dict)
         # For debugging purposes, we remember the last bit of data to be analyzed so that we can
         # log it in case of an exception
-        self.lastdata = ''
+        self.lastdata = ""

     def get_data(self):
         self.lastdata = plistlib._PlistParser.get_data(self)
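Since the subclass above relies on a private API (plistlib._PlistParser), whose hooks change between Python versions, here is a rough public-API equivalent of the "be lenient with malformed iPhoto plists" idea. This is a sketch, not the project's code:

import plistlib

def load_plist_leniently(path):
    try:
        with open(path, "rb") as fp:
            return plistlib.load(fp)
    except plistlib.InvalidFileException:
        return {}  # malformed plist: degrade gracefully instead of crashing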
@@ -48,14 +48,18 @@ except Exception:
     logging.warning("Had problems to determine cpu count on launch.")
     RESULTS_QUEUE_LIMIT = 8


 def get_cache(cache_path, readonly=False):
-    if cache_path.endswith('shelve'):
+    if cache_path.endswith("shelve"):
         from .cache_shelve import ShelveCache
+
         return ShelveCache(cache_path, readonly=readonly)
     else:
         from .cache_sqlite import SqliteCache
+
         return SqliteCache(cache_path, readonly=readonly)
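Usage sketch for the dispatcher above, inside this module (the paths are hypothetical): the backend is chosen purely from the path's suffix, and both backends expose the same cache interface.

cache = get_cache("/tmp/pictures.shelve")  # ends with "shelve" -> ShelveCache
cache = get_cache("/tmp/pictures.db")      # anything else -> SqliteCache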
 def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
     # The MemoryError handlers in there use logging without first caring about whether or not
     # there is enough memory left to carry on the operation because it is assumed that the
@@ -63,7 +67,7 @@ def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
     # time that MemoryError is raised.
     cache = get_cache(cache_path)
     cache.purge_outdated()
-    prepared = [] # only pictures for which there was no error getting blocks
+    prepared = []  # only pictures for which there was no error getting blocks
     try:
         for picture in j.iter_with_progress(pictures, tr("Analyzed %d/%d pictures")):
             if not picture.path:
@@ -77,7 +81,7 @@ def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
             picture.unicode_path = str(picture.path)
             logging.debug("Analyzing picture at %s", picture.unicode_path)
             if with_dimensions:
-                picture.dimensions # pre-read dimensions
+                picture.dimensions  # pre-read dimensions
             try:
                 if picture.unicode_path not in cache:
                     blocks = picture.get_blocks(BLOCK_COUNT_PER_SIDE)
@@ -86,32 +90,45 @@ def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
             except (IOError, ValueError) as e:
                 logging.warning(str(e))
             except MemoryError:
-                logging.warning("Ran out of memory while reading %s of size %d", picture.unicode_path, picture.size)
-                if picture.size < 10 * 1024 * 1024: # We're really running out of memory
+                logging.warning(
+                    "Ran out of memory while reading %s of size %d",
+                    picture.unicode_path,
+                    picture.size,
+                )
+                if (
+                    picture.size < 10 * 1024 * 1024
+                ):  # We're really running out of memory
                     raise
     except MemoryError:
-        logging.warning('Ran out of memory while preparing pictures')
+        logging.warning("Ran out of memory while preparing pictures")
     cache.close()
     return prepared
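The 10 MiB test above encodes a heuristic: a MemoryError while reading a small file means memory is genuinely exhausted, while one raised on a huge file is probably the file's own fault. A generic standalone sketch of that policy, with hypothetical names:

def read_blocks_safely(picture, read_blocks, big_file_threshold=10 * 1024 * 1024):
    try:
        return read_blocks(picture)
    except MemoryError:
        if picture.size < big_file_threshold:
            raise  # small file: we're really out of memory, abort the scan
        return None  # huge file: skip it and keep scanning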
 def get_chunks(pictures):
-    min_chunk_count = multiprocessing.cpu_count() * 2 # have enough chunks to feed all subprocesses
+    min_chunk_count = (
+        multiprocessing.cpu_count() * 2
+    )  # have enough chunks to feed all subprocesses
     chunk_count = len(pictures) // DEFAULT_CHUNK_SIZE
     chunk_count = max(min_chunk_count, chunk_count)
     chunk_size = (len(pictures) // chunk_count) + 1
     chunk_size = max(MIN_CHUNK_SIZE, chunk_size)
     logging.info(
-        "Creating %d chunks with a chunk size of %d for %d pictures", chunk_count,
-        chunk_size, len(pictures)
+        "Creating %d chunks with a chunk size of %d for %d pictures",
+        chunk_count,
+        chunk_size,
+        len(pictures),
     )
-    chunks = [pictures[i:i+chunk_size] for i in range(0, len(pictures), chunk_size)]
+    chunks = [pictures[i : i + chunk_size] for i in range(0, len(pictures), chunk_size)]
     return chunks
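A worked version of the chunking arithmetic above; the two size constants are hypothetical stand-ins for DEFAULT_CHUNK_SIZE and MIN_CHUNK_SIZE, whose values are not shown in this diff:

def chunk_plan(picture_count, cpu_count, default_chunk_size=50, min_chunk_size=10):
    min_chunk_count = cpu_count * 2  # enough chunks to feed all subprocesses
    chunk_count = max(min_chunk_count, picture_count // default_chunk_size)
    chunk_size = max(min_chunk_size, picture_count // chunk_count + 1)
    return chunk_count, chunk_size

print(chunk_plan(1000, 4))  # (20, 51): 20 chunks of at most 51 pictures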
 def get_match(first, second, percentage):
     if percentage < 0:
         percentage = 0
     return Match(first, second, percentage)


 def async_compare(ref_ids, other_ids, dbname, threshold, picinfo):
     # The list of ids in ref_ids have to be compared to the list of ids in other_ids. other_ids
     # can be None. In this case, ref_ids has to be compared with itself
@@ -142,6 +159,7 @@ def async_compare(ref_ids, other_ids, dbname, threshold, picinfo):
     cache.close()
     return results


 def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob):
     def get_picinfo(p):
         if match_scaled:
@@ -160,11 +178,16 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob):
             async_results.remove(result)
             comparison_count += 1
             # About the NOQA below: I think there's a bug in pyflakes. To investigate...
-            progress_msg = tr("Performed %d/%d chunk matches") % (comparison_count, len(comparisons_to_do)) # NOQA
+            progress_msg = tr("Performed %d/%d chunk matches") % (
+                comparison_count,
+                len(comparisons_to_do),
+            )  # NOQA
             j.set_progress(comparison_count, progress_msg)

     j = j.start_subjob([3, 7])
-    pictures = prepare_pictures(pictures, cache_path, with_dimensions=not match_scaled, j=j)
+    pictures = prepare_pictures(
+        pictures, cache_path, with_dimensions=not match_scaled, j=j
+    )
     j = j.start_subjob([9, 1], tr("Preparing for matching"))
     cache = get_cache(cache_path)
     id2picture = {}
@@ -175,7 +198,7 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob):
         except ValueError:
             pass
     cache.close()
-    pictures = [p for p in pictures if hasattr(p, 'cache_id')]
+    pictures = [p for p in pictures if hasattr(p, "cache_id")]
     pool = multiprocessing.Pool()
     async_results = []
     matches = []
@@ -203,9 +226,17 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob):
         # some wiggle room, log about the incident, and stop matching right here. We then process
         # the matches we have. The rest of the process doesn't allocate much and we should be
         # alright.
-        del comparisons_to_do, chunks, pictures # some wiggle room for the next statements
-        logging.warning("Ran out of memory when scanning! We had %d matches.", len(matches))
-        del matches[-len(matches)//3:] # some wiggle room to ensure we don't run out of memory again.
+        del (
+            comparisons_to_do,
+            chunks,
+            pictures,
+        )  # some wiggle room for the next statements
+        logging.warning(
+            "Ran out of memory when scanning! We had %d matches.", len(matches)
+        )
+        del matches[
+            -len(matches) // 3 :
+        ]  # some wiggle room to ensure we don't run out of memory again.
     pool.close()
     result = []
     myiter = j.iter_with_progress(
@@ -220,10 +251,10 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob):
         if percentage == 100 and ref.md5 != other.md5:
             percentage = 99
         if percentage >= threshold:
-            ref.dimensions # pre-read dimensions for display in results
+            ref.dimensions  # pre-read dimensions for display in results
             other.dimensions
             result.append(get_match(ref, other, percentage))
     return result

-multiprocessing.freeze_support()
+multiprocessing.freeze_support()
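The recovery path above frees memory by dropping the newest third of the match list. The slice arithmetic, checked in isolation:

matches = list(range(9))
del matches[-len(matches) // 3 :]  # -9 // 3 == -3: drops the last 3 items
assert matches == [0, 1, 2, 3, 4, 5]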
@@ -13,14 +13,15 @@ from hscommon.trans import tr

 from core.engine import Match


 def getmatches(files, match_scaled, j):
     timestamp2pic = defaultdict(set)
     for picture in j.iter_with_progress(files, tr("Read EXIF of %d/%d pictures")):
         timestamp = picture.exif_timestamp
         if timestamp:
             timestamp2pic[timestamp].add(picture)
-    if '0000:00:00 00:00:00' in timestamp2pic: # very likely false matches
-        del timestamp2pic['0000:00:00 00:00:00']
+    if "0000:00:00 00:00:00" in timestamp2pic:  # very likely false matches
+        del timestamp2pic["0000:00:00 00:00:00"]
     matches = []
     for pictures in timestamp2pic.values():
         for p1, p2 in combinations(pictures, 2):
@@ -28,4 +29,3 @@ def getmatches(files, match_scaled, j):
                 continue
             matches.append(Match(p1, p2, 100))
     return matches
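The grouping above reduces EXIF matching to "same timestamp implies a 100% match". A standalone sketch with hypothetical inputs:

from collections import defaultdict
from itertools import combinations

def pairs_by_timestamp(photos):  # photos: iterable of (name, exif_timestamp)
    by_ts = defaultdict(set)
    for name, ts in photos:
        if ts and ts != "0000:00:00 00:00:00":  # the null stamp breeds false matches
            by_ts[ts].add(name)
    return [p for names in by_ts.values() for p in combinations(sorted(names), 2)]

print(pairs_by_timestamp([
    ("a.jpg", "2015:01:02 03:04:05"),
    ("b.jpg", "2015:01:02 03:04:05"),
    ("c.jpg", "0000:00:00 00:00:00"),
]))  # [('a.jpg', 'b.jpg')]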
@@ -14,23 +14,22 @@ from . import exif

 # This global value is set by the platform-specific subclasser of the Photo base class
 PLAT_SPECIFIC_PHOTO_CLASS = None


 def format_dimensions(dimensions):
-    return '%d x %d' % (dimensions[0], dimensions[1])
+    return "%d x %d" % (dimensions[0], dimensions[1])


 def get_delta_dimensions(value, ref_value):
-    return (value[0]-ref_value[0], value[1]-ref_value[1])
+    return (value[0] - ref_value[0], value[1] - ref_value[1])


 class Photo(fs.File):
     INITIAL_INFO = fs.File.INITIAL_INFO.copy()
-    INITIAL_INFO.update({
-        'dimensions': (0, 0),
-        'exif_timestamp': '',
-    })
+    INITIAL_INFO.update({"dimensions": (0, 0), "exif_timestamp": ""})
     __slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys())

     # These extensions are supported on all platforms
-    HANDLED_EXTS = {'png', 'jpg', 'jpeg', 'gif', 'bmp', 'tiff', 'tif'}
+    HANDLED_EXTS = {"png", "jpg", "jpeg", "gif", "bmp", "tiff", "tif"}

     def _plat_get_dimensions(self):
         raise NotImplementedError()
@@ -39,25 +38,25 @@ class Photo(fs.File):
         raise NotImplementedError()

     def _get_orientation(self):
-        if not hasattr(self, '_cached_orientation'):
+        if not hasattr(self, "_cached_orientation"):
             try:
-                with self.path.open('rb') as fp:
+                with self.path.open("rb") as fp:
                     exifdata = exif.get_fields(fp)
                     # the value is a list (probably one-sized) of ints
-                    orientations = exifdata['Orientation']
+                    orientations = exifdata["Orientation"]
                     self._cached_orientation = orientations[0]
-            except Exception: # Couldn't read EXIF data, no transforms
+            except Exception:  # Couldn't read EXIF data, no transforms
                 self._cached_orientation = 0
         return self._cached_orientation

     def _get_exif_timestamp(self):
         try:
-            with self.path.open('rb') as fp:
+            with self.path.open("rb") as fp:
                 exifdata = exif.get_fields(fp)
-                return exifdata['DateTimeOriginal']
+                return exifdata["DateTimeOriginal"]
         except Exception:
             logging.info("Couldn't read EXIF of picture: %s", self.path)
-            return ''
+            return ""

     @classmethod
     def can_handle(cls, path):
@@ -79,28 +78,27 @@ class Photo(fs.File):
         else:
             percentage = group.percentage
             dupe_count = len(group.dupes)
-        dupe_folder_path = getattr(self, 'display_folder_path', self.folder_path)
+        dupe_folder_path = getattr(self, "display_folder_path", self.folder_path)
         return {
-            'name': self.name,
-            'folder_path': str(dupe_folder_path),
-            'size': format_size(size, 0, 1, False),
-            'extension': self.extension,
-            'dimensions': format_dimensions(dimensions),
-            'exif_timestamp': self.exif_timestamp,
-            'mtime': format_timestamp(mtime, delta and m),
-            'percentage': format_perc(percentage),
-            'dupe_count': format_dupe_count(dupe_count),
+            "name": self.name,
+            "folder_path": str(dupe_folder_path),
+            "size": format_size(size, 0, 1, False),
+            "extension": self.extension,
+            "dimensions": format_dimensions(dimensions),
+            "exif_timestamp": self.exif_timestamp,
+            "mtime": format_timestamp(mtime, delta and m),
+            "percentage": format_perc(percentage),
+            "dupe_count": format_dupe_count(dupe_count),
         }

     def _read_info(self, field):
         fs.File._read_info(self, field)
-        if field == 'dimensions':
+        if field == "dimensions":
             self.dimensions = self._plat_get_dimensions()
             if self._get_orientation() in {5, 6, 7, 8}:
                 self.dimensions = (self.dimensions[1], self.dimensions[0])
-        elif field == 'exif_timestamp':
+        elif field == "exif_timestamp":
             self.exif_timestamp = self._get_exif_timestamp()

     def get_blocks(self, block_count_per_side):
         return self._plat_get_blocks(block_count_per_side, self._get_orientation())
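About the `{5, 6, 7, 8}` test in `_read_info` above: EXIF orientations 5 through 8 are the transposed or 90-degree-rotated variants, so the stored width and height must be swapped for display. In isolation:

def display_dimensions(width, height, orientation):
    if orientation in {5, 6, 7, 8}:  # transposed / rotated 90 degrees
        return (height, width)
    return (width, height)

assert display_dimensions(4000, 3000, 6) == (3000, 4000)  # portrait shot
assert display_dimensions(4000, 3000, 1) == (4000, 3000)  # normal orientation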
@@ -8,11 +8,16 @@
 from hscommon.trans import trget

 from core.prioritize import (
-    KindCategory, FolderCategory, FilenameCategory, NumericalCategory,
-    SizeCategory, MtimeCategory
+    KindCategory,
+    FolderCategory,
+    FilenameCategory,
+    NumericalCategory,
+    SizeCategory,
+    MtimeCategory,
 )

-coltr = trget('columns')
+coltr = trget("columns")


 class DimensionsCategory(NumericalCategory):
     NAME = coltr("Dimensions")
@@ -24,8 +29,13 @@ class DimensionsCategory(NumericalCategory):
         width, height = value
         return (-width, -height)


 def all_categories():
     return [
-        KindCategory, FolderCategory, FilenameCategory, SizeCategory, DimensionsCategory,
-        MtimeCategory
+        KindCategory,
+        FolderCategory,
+        FilenameCategory,
+        SizeCategory,
+        DimensionsCategory,
+        MtimeCategory,
     ]
@@ -1,8 +1,8 @@
 # Created On: 2011-11-27
 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
-#
-# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
-# which should be included with this package. The terms are also available at
+#
+# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
+# which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

 from hscommon.gui.column import Column
@@ -10,19 +10,20 @@ from hscommon.trans import trget

 from core.gui.result_table import ResultTable as ResultTableBase

-coltr = trget('columns')
+coltr = trget("columns")


 class ResultTable(ResultTableBase):
     COLUMNS = [
-        Column('marked', ''),
-        Column('name', coltr("Filename")),
-        Column('folder_path', coltr("Folder"), optional=True),
-        Column('size', coltr("Size (KB)"), optional=True),
-        Column('extension', coltr("Kind"), visible=False, optional=True),
-        Column('dimensions', coltr("Dimensions"), optional=True),
-        Column('exif_timestamp', coltr("EXIF Timestamp"), visible=False, optional=True),
-        Column('mtime', coltr("Modification"), visible=False, optional=True),
-        Column('percentage', coltr("Match %"), optional=True),
-        Column('dupe_count', coltr("Dupe Count"), visible=False, optional=True),
+        Column("marked", ""),
+        Column("name", coltr("Filename")),
+        Column("folder_path", coltr("Folder"), optional=True),
+        Column("size", coltr("Size (KB)"), optional=True),
+        Column("extension", coltr("Kind"), visible=False, optional=True),
+        Column("dimensions", coltr("Dimensions"), optional=True),
+        Column("exif_timestamp", coltr("EXIF Timestamp"), visible=False, optional=True),
+        Column("mtime", coltr("Modification"), visible=False, optional=True),
+        Column("percentage", coltr("Match %"), optional=True),
+        Column("dupe_count", coltr("Dupe Count"), visible=False, optional=True),
     ]
-    DELTA_COLUMNS = {'size', 'dimensions', 'mtime'}
+    DELTA_COLUMNS = {"size", "dimensions", "mtime"}
@@ -10,6 +10,7 @@ from core.scanner import Scanner, ScanType, ScanOption

 from . import matchblock, matchexif


 class ScannerPE(Scanner):
     cache_path = None
     match_scaled = False
@@ -28,10 +29,9 @@ class ScannerPE(Scanner):
                 cache_path=self.cache_path,
                 threshold=self.min_match_percentage,
                 match_scaled=self.match_scaled,
-                j=j
+                j=j,
             )
         elif self.scan_type == ScanType.ExifTimestamp:
             return matchexif.getmatches(files, self.match_scaled, j)
         else:
             raise Exception("Invalid scan type")
@@ -1,48 +1,50 @@
 # Created By: Virgil Dupras
 # Created On: 2011/09/07
 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
-#
-# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
-# which should be included with this package. The terms are also available at
+#
+# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
+# which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

 from hscommon.util import dedupe, flatten, rem_file_ext
 from hscommon.trans import trget, tr

-coltr = trget('columns')
+coltr = trget("columns")


 class CriterionCategory:
     NAME = "Undefined"

     def __init__(self, results):
         self.results = results

-    #--- Virtual
+    # --- Virtual
     def extract_value(self, dupe):
         raise NotImplementedError()

     def format_criterion_value(self, value):
         return value

     def sort_key(self, dupe, crit_value):
         raise NotImplementedError()

     def criteria_list(self):
         raise NotImplementedError()


 class Criterion:
     def __init__(self, category, value):
         self.category = category
         self.value = value
         self.display_value = category.format_criterion_value(value)

     def sort_key(self, dupe):
         return self.category.sort_key(dupe, self.value)

     @property
     def display(self):
         return "{} ({})".format(self.category.NAME, self.display_value)


 class ValueListCategory(CriterionCategory):
     def sort_key(self, dupe, crit_value):
@@ -52,45 +54,47 @@ class ValueListCategory(CriterionCategory):
             return 0
         else:
             return 1

     def criteria_list(self):
         dupes = flatten(g[:] for g in self.results.groups)
         values = sorted(dedupe(self.extract_value(d) for d in dupes))
         return [Criterion(self, value) for value in values]


 class KindCategory(ValueListCategory):
     NAME = coltr("Kind")

     def extract_value(self, dupe):
         value = dupe.extension
         if not value:
             value = tr("None")
         return value


 class FolderCategory(ValueListCategory):
     NAME = coltr("Folder")

     def extract_value(self, dupe):
         return dupe.folder_path

     def format_criterion_value(self, value):
         return str(value)

     def sort_key(self, dupe, crit_value):
         value = self.extract_value(dupe)
-        if value[:len(crit_value)] == crit_value:
+        if value[: len(crit_value)] == crit_value:
             return 0
         else:
             return 1
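The slice comparison above is a prefix test: a dupe sorts first (key 0) when its folder path starts with the chosen criterion path. With plain strings standing in for the project's Path objects:

def folder_sort_key(dupe_folder, crit_folder):
    return 0 if dupe_folder[: len(crit_folder)] == crit_folder else 1

assert folder_sort_key("/photos/2015/trip", "/photos/2015") == 0
assert folder_sort_key("/backup/photos", "/photos/2015") == 1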
 class FilenameCategory(CriterionCategory):
     NAME = coltr("Filename")
     ENDS_WITH_NUMBER = 0
     DOESNT_END_WITH_NUMBER = 1
     LONGEST = 2
     SHORTEST = 3

     def format_criterion_value(self, value):
         return {
             self.ENDS_WITH_NUMBER: tr("Ends with number"),
@@ -98,10 +102,10 @@ class FilenameCategory(CriterionCategory):
             self.LONGEST: tr("Longest"),
             self.SHORTEST: tr("Shortest"),
         }[value]

     def extract_value(self, dupe):
         return rem_file_ext(dupe.name)

     def sort_key(self, dupe, crit_value):
         value = self.extract_value(dupe)
         if crit_value in {self.ENDS_WITH_NUMBER, self.DOESNT_END_WITH_NUMBER}:
@@ -113,50 +117,57 @@ class FilenameCategory(CriterionCategory):
         else:
             value = len(value)
             if crit_value == self.LONGEST:
-                value *= -1 # We want the biggest values on top
+                value *= -1  # We want the biggest values on top
         return value

     def criteria_list(self):
-        return [Criterion(self, crit_value) for crit_value in [
-            self.ENDS_WITH_NUMBER,
-            self.DOESNT_END_WITH_NUMBER,
-            self.LONGEST,
-            self.SHORTEST,
-        ]]
+        return [
+            Criterion(self, crit_value)
+            for crit_value in [
+                self.ENDS_WITH_NUMBER,
+                self.DOESNT_END_WITH_NUMBER,
+                self.LONGEST,
+                self.SHORTEST,
+            ]
+        ]


 class NumericalCategory(CriterionCategory):
     HIGHEST = 0
     LOWEST = 1

     def format_criterion_value(self, value):
         return tr("Highest") if value == self.HIGHEST else tr("Lowest")

-    def invert_numerical_value(self, value): # Virtual
+    def invert_numerical_value(self, value):  # Virtual
         return value * -1

     def sort_key(self, dupe, crit_value):
         value = self.extract_value(dupe)
-        if crit_value == self.HIGHEST: # we want highest values on top
+        if crit_value == self.HIGHEST:  # we want highest values on top
             value = self.invert_numerical_value(value)
         return value

     def criteria_list(self):
         return [Criterion(self, self.HIGHEST), Criterion(self, self.LOWEST)]
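Why invert_numerical_value exists: Python sorts ascending, so negating the key when "Highest" is selected floats the largest values to the top:

sizes = [120, 5, 48]
print(sorted(sizes, key=lambda s: s * -1))  # [120, 48, 5], highest first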
 class SizeCategory(NumericalCategory):
     NAME = coltr("Size")

     def extract_value(self, dupe):
         return dupe.size


 class MtimeCategory(NumericalCategory):
     NAME = coltr("Modification")

     def extract_value(self, dupe):
         return dupe.mtime

     def format_criterion_value(self, value):
         return tr("Newest") if value == self.HIGHEST else tr("Oldest")


 def all_categories():
     return [KindCategory, FolderCategory, FilenameCategory, SizeCategory, MtimeCategory]

core/results.py
@@ -20,6 +20,7 @@ from hscommon.trans import tr
 from . import engine
 from .markable import Markable


 class Results(Markable):
     """Manages a collection of duplicate :class:`~core.engine.Group`.

@@ -34,21 +35,22 @@ class Results(Markable):
     A list of all duplicates (:class:`~core.fs.File` instances), without ref, contained in the
     currently managed :attr:`groups`.
     """
-    #---Override
+
+    # ---Override
     def __init__(self, app):
         Markable.__init__(self)
         self.__groups = []
         self.__group_of_duplicate = {}
-        self.__groups_sort_descriptor = None # This is a tuple (key, asc)
+        self.__groups_sort_descriptor = None  # This is a tuple (key, asc)
         self.__dupes = None
-        self.__dupes_sort_descriptor = None # This is a tuple (key, asc, delta)
+        self.__dupes_sort_descriptor = None  # This is a tuple (key, asc, delta)
         self.__filters = None
         self.__filtered_dupes = None
         self.__filtered_groups = None
         self.__recalculate_stats()
         self.__marked_size = 0
         self.app = app
-        self.problems = [] # (dupe, error_msg)
+        self.problems = []  # (dupe, error_msg)
         self.is_modified = False

     def _did_mark(self, dupe):
@@ -90,7 +92,7 @@ class Results(Markable):
         else:
             Markable.mark_none(self)

-    #---Private
+    # ---Private
     def __get_dupe_list(self):
         if self.__dupes is None:
             self.__dupes = flatten(group.dupes for group in self.groups)
@@ -98,10 +100,13 @@ class Results(Markable):
             # This is debug logging to try to figure out #44
             logging.warning(
                 "There is a None value in the Results' dupe list. dupes: %r groups: %r",
-                self.__dupes, self.groups
+                self.__dupes,
+                self.groups,
             )
         if self.__filtered_dupes:
-            self.__dupes = [dupe for dupe in self.__dupes if dupe in self.__filtered_dupes]
+            self.__dupes = [
+                dupe for dupe in self.__dupes if dupe in self.__filtered_dupes
+            ]
         sd = self.__dupes_sort_descriptor
         if sd:
             self.sort_dupes(sd[0], sd[1], sd[2])
@@ -120,10 +125,18 @@ class Results(Markable):
             total_count = self.__total_count
             total_size = self.__total_size
         else:
-            mark_count = len([dupe for dupe in self.__filtered_dupes if self.is_marked(dupe)])
-            marked_size = sum(dupe.size for dupe in self.__filtered_dupes if self.is_marked(dupe))
-            total_count = len([dupe for dupe in self.__filtered_dupes if self.is_markable(dupe)])
-            total_size = sum(dupe.size for dupe in self.__filtered_dupes if self.is_markable(dupe))
+            mark_count = len(
+                [dupe for dupe in self.__filtered_dupes if self.is_marked(dupe)]
+            )
+            marked_size = sum(
+                dupe.size for dupe in self.__filtered_dupes if self.is_marked(dupe)
+            )
+            total_count = len(
+                [dupe for dupe in self.__filtered_dupes if self.is_markable(dupe)]
+            )
+            total_size = sum(
+                dupe.size for dupe in self.__filtered_dupes if self.is_markable(dupe)
+            )
         if self.mark_inverted:
             marked_size = self.__total_size - marked_size
         result = tr("%d / %d (%s / %s) duplicates marked.") % (
@@ -133,7 +146,7 @@ class Results(Markable):
             format_size(total_size, 2),
         )
         if self.__filters:
-            result += tr(" filter: %s") % ' --> '.join(self.__filters)
+            result += tr(" filter: %s") % " --> ".join(self.__filters)
         return result
     def __recalculate_stats(self):
@@ -151,7 +164,7 @@ class Results(Markable):
         for g in self.__groups:
             for dupe in g:
                 self.__group_of_duplicate[dupe] = g
-                if not hasattr(dupe, 'is_ref'):
+                if not hasattr(dupe, "is_ref"):
                     dupe.is_ref = False
         self.is_modified = bool(self.__groups)
         old_filters = nonone(self.__filters, [])
@@ -159,7 +172,7 @@ class Results(Markable):
         for filter_str in old_filters:
             self.apply_filter(filter_str)

-    #---Public
+    # ---Public
     def apply_filter(self, filter_str):
         """Applies a filter ``filter_str`` to :attr:`groups`

@@ -182,11 +195,15 @@ class Results(Markable):
             try:
                 filter_re = re.compile(filter_str, re.IGNORECASE)
             except re.error:
-                return # don't apply this filter.
+                return  # don't apply this filter.
             self.__filters.append(filter_str)
             if self.__filtered_dupes is None:
                 self.__filtered_dupes = flatten(g[:] for g in self.groups)
-            self.__filtered_dupes = set(dupe for dupe in self.__filtered_dupes if filter_re.search(str(dupe.path)))
+            self.__filtered_dupes = set(
+                dupe
+                for dupe in self.__filtered_dupes
+                if filter_re.search(str(dupe.path))
+            )
             filtered_groups = set()
             for dupe in self.__filtered_dupes:
                 filtered_groups.add(self.get_group_of_duplicate(dupe))
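The filtering logic above, reduced to a standalone sketch with plain path strings instead of dupe objects: invalid patterns are silently ignored, and valid ones keep only matching paths, case-insensitively.

import re

def filter_paths(paths, filter_str):
    try:
        filter_re = re.compile(filter_str, re.IGNORECASE)
    except re.error:
        return paths  # don't apply an invalid filter
    return [p for p in paths if filter_re.search(p)]

print(filter_paths(["/pics/IMG_1.jpg", "/docs/a.txt"], r"img_\d"))  # ['/pics/IMG_1.jpg']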
@@ -214,6 +231,7 @@ class Results(Markable):
         :param get_file: a function f(path) returning a :class:`~core.fs.File` wrapping the path.
         :param j: A :ref:`job progress instance <jobs>`.
         """
+
         def do_match(ref_file, other_files, group):
             if not other_files:
                 return
@@ -223,31 +241,31 @@ class Results(Markable):

         self.apply_filter(None)
         root = ET.parse(infile).getroot()
-        group_elems = list(root.getiterator('group'))
+        group_elems = list(root.getiterator("group"))
         groups = []
         marked = set()
         for group_elem in j.iter_with_progress(group_elems, every=100):
             group = engine.Group()
             dupes = []
-            for file_elem in group_elem.getiterator('file'):
-                path = file_elem.get('path')
-                words = file_elem.get('words', '')
+            for file_elem in group_elem.getiterator("file"):
+                path = file_elem.get("path")
+                words = file_elem.get("words", "")
                 if not path:
                     continue
                 file = get_file(path)
                 if file is None:
                     continue
-                file.words = words.split(',')
-                file.is_ref = file_elem.get('is_ref') == 'y'
+                file.words = words.split(",")
+                file.is_ref = file_elem.get("is_ref") == "y"
                 dupes.append(file)
-                if file_elem.get('marked') == 'y':
+                if file_elem.get("marked") == "y":
                     marked.add(file)
-            for match_elem in group_elem.getiterator('match'):
+            for match_elem in group_elem.getiterator("match"):
                 try:
                     attrs = match_elem.attrib
-                    first_file = dupes[int(attrs['first'])]
-                    second_file = dupes[int(attrs['second'])]
-                    percentage = int(attrs['percentage'])
+                    first_file = dupes[int(attrs["first"])]
+                    second_file = dupes[int(attrs["second"])]
+                    percentage = int(attrs["percentage"])
                     group.add_match(engine.Match(first_file, second_file, percentage))
                 except (IndexError, KeyError, ValueError):
                     # Covers missing attr, non-int values and indexes out of bounds
@@ -339,9 +357,9 @@ class Results(Markable):
         :param outfile: file object or path.
         """
         self.apply_filter(None)
-        root = ET.Element('results')
+        root = ET.Element("results")
         for g in self.groups:
-            group_elem = ET.SubElement(root, 'group')
+            group_elem = ET.SubElement(root, "group")
             dupe2index = {}
             for index, d in enumerate(g):
                 dupe2index[d] = index
@@ -349,24 +367,24 @@ class Results(Markable):
                     words = engine.unpack_fields(d.words)
                 except AttributeError:
                     words = ()
-                file_elem = ET.SubElement(group_elem, 'file')
+                file_elem = ET.SubElement(group_elem, "file")
                 try:
-                    file_elem.set('path', str(d.path))
-                    file_elem.set('words', ','.join(words))
-                except ValueError: # If there's an invalid character, just skip the file
-                    file_elem.set('path', '')
-                file_elem.set('is_ref', ('y' if d.is_ref else 'n'))
-                file_elem.set('marked', ('y' if self.is_marked(d) else 'n'))
+                    file_elem.set("path", str(d.path))
+                    file_elem.set("words", ",".join(words))
+                except ValueError:  # If there's an invalid character, just skip the file
+                    file_elem.set("path", "")
+                file_elem.set("is_ref", ("y" if d.is_ref else "n"))
+                file_elem.set("marked", ("y" if self.is_marked(d) else "n"))
             for match in g.matches:
-                match_elem = ET.SubElement(group_elem, 'match')
-                match_elem.set('first', str(dupe2index[match.first]))
-                match_elem.set('second', str(dupe2index[match.second]))
-                match_elem.set('percentage', str(int(match.percentage)))
+                match_elem = ET.SubElement(group_elem, "match")
+                match_elem.set("first", str(dupe2index[match.first]))
+                match_elem.set("second", str(dupe2index[match.second]))
+                match_elem.set("percentage", str(int(match.percentage)))
         tree = ET.ElementTree(root)

         def do_write(outfile):
-            with FileOrPath(outfile, 'wb') as fp:
-                tree.write(fp, encoding='utf-8')
+            with FileOrPath(outfile, "wb") as fp:
+                tree.write(fp, encoding="utf-8")

         try:
             do_write(outfile)
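For reference, the results file produced by the code above has roughly this shape, reconstructed from the element and attribute names in this hunk rather than from any schema, with hypothetical paths:

<results>
  <group>
    <file path="/pics/a.jpg" words="a" is_ref="y" marked="n"/>
    <file path="/pics/b.jpg" words="b" is_ref="n" marked="y"/>
    <match first="0" second="1" percentage="100"/>
  </group>
</results>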
@@ -392,7 +410,9 @@ class Results(Markable):
         """
         if not self.__dupes:
             self.__get_dupe_list()
-        keyfunc = lambda d: self.app._get_dupe_sort_key(d, lambda: self.get_group_of_duplicate(d), key, delta)
+        keyfunc = lambda d: self.app._get_dupe_sort_key(
+            d, lambda: self.get_group_of_duplicate(d), key, delta
+        )
         self.__dupes.sort(key=keyfunc, reverse=not asc)
         self.__dupes_sort_descriptor = (key, asc, delta)

@@ -408,8 +428,7 @@ class Results(Markable):
         self.groups.sort(key=keyfunc, reverse=not asc)
         self.__groups_sort_descriptor = (key, asc)

-    #---Properties
+    # ---Properties
     dupes = property(__get_dupe_list)
     groups = property(__get_groups, __set_groups)
     stat_line = property(__get_stat_line)
@@ -19,6 +19,7 @@ from . import engine
 # there will be some nasty bugs popping up (ScanType is used in core when in should exclusively be
 # used in core_*). One day I'll clean this up.


 class ScanType:
     Filename = 0
     Fields = 1
@@ -27,23 +28,26 @@ class ScanType:
     Folders = 4
     Contents = 5

-    #PE
+    # PE
     FuzzyBlock = 10
     ExifTimestamp = 11

-ScanOption = namedtuple('ScanOption', 'scan_type label')
-
-SCANNABLE_TAGS = ['track', 'artist', 'album', 'title', 'genre', 'year']
+ScanOption = namedtuple("ScanOption", "scan_type label")
+
+SCANNABLE_TAGS = ["track", "artist", "album", "title", "genre", "year"]
+
+RE_DIGIT_ENDING = re.compile(r"\d+|\(\d+\)|\[\d+\]|{\d+}")

-RE_DIGIT_ENDING = re.compile(r'\d+|\(\d+\)|\[\d+\]|{\d+}')

 def is_same_with_digit(name, refname):
     # Returns True if name is the same as refname, but with digits (with brackets or not) at the end
     if not name.startswith(refname):
         return False
-    end = name[len(refname):].strip()
+    end = name[len(refname) :].strip()
     return RE_DIGIT_ENDING.match(end) is not None
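A quick check of the digit-suffix rule above, using the same regular expression and function body:

import re

RE_DIGIT_ENDING = re.compile(r"\d+|\(\d+\)|\[\d+\]|{\d+}")

def is_same_with_digit(name, refname):
    if not name.startswith(refname):
        return False
    end = name[len(refname) :].strip()
    return RE_DIGIT_ENDING.match(end) is not None

assert is_same_with_digit("holiday (2)", "holiday")
assert not is_same_with_digit("holiday-edit", "holiday")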
 def remove_dupe_paths(files):
     # Returns files with duplicates-by-path removed. Files with the exact same path are considered
     # duplicates and only the first file to have a path is kept. In certain cases, we have files
@@ -57,25 +61,29 @@ def remove_dupe_paths(files):
         if normalized in path2file:
             try:
                 if op.samefile(normalized, str(path2file[normalized].path)):
-                    continue # same file, it's a dupe
+                    continue  # same file, it's a dupe
                 else:
-                    pass # We don't treat them as dupes
+                    pass  # We don't treat them as dupes
             except OSError:
-                continue # File doesn't exist? Well, treat them as dupes
+                continue  # File doesn't exist? Well, treat them as dupes
         else:
             path2file[normalized] = f
         result.append(f)
     return result


 class Scanner:
     def __init__(self):
         self.discarded_file_count = 0

     def _getmatches(self, files, j):
-        if self.size_threshold or self.scan_type in {ScanType.Contents, ScanType.Folders}:
+        if self.size_threshold or self.scan_type in {
+            ScanType.Contents,
+            ScanType.Folders,
+        }:
             j = j.start_subjob([2, 8])
             for f in j.iter_with_progress(files, tr("Read size of %d/%d files")):
-                f.size # pre-read, makes a smoother progress if read here (especially for bundles)
+                f.size  # pre-read, makes a smoother progress if read here (especially for bundles)
         if self.size_threshold:
             files = [f for f in files if f.size >= self.size_threshold]
         if self.scan_type in {ScanType.Contents, ScanType.Folders}:
@@ -83,12 +91,12 @@ class Scanner:
         else:
             j = j.start_subjob([2, 8])
             kw = {}
-            kw['match_similar_words'] = self.match_similar_words
-            kw['weight_words'] = self.word_weighting
-            kw['min_match_percentage'] = self.min_match_percentage
+            kw["match_similar_words"] = self.match_similar_words
+            kw["weight_words"] = self.word_weighting
+            kw["min_match_percentage"] = self.min_match_percentage
             if self.scan_type == ScanType.FieldsNoOrder:
                 self.scan_type = ScanType.Fields
-                kw['no_field_order'] = True
+                kw["no_field_order"] = True
             func = {
                 ScanType.Filename: lambda f: engine.getwords(rem_file_ext(f.name)),
                 ScanType.Fields: lambda f: engine.getfields(rem_file_ext(f.name)),
@@ -111,9 +119,9 @@ class Scanner:
     def _tie_breaker(ref, dupe):
         refname = rem_file_ext(ref.name).lower()
         dupename = rem_file_ext(dupe.name).lower()
-        if 'copy' in dupename:
+        if "copy" in dupename:
             return False
-        if 'copy' in refname:
+        if "copy" in refname:
             return True
         if is_same_with_digit(dupename, refname):
             return False
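The visible part of the tie-breaker above, isolated. Returning True means the dupe should take the ref's place; the hunk cuts off before the remaining rules, so this sketch only covers what is shown:

def prefers_dupe(refname, dupename, is_same_with_digit):
    if "copy" in dupename:
        return False  # a name containing "copy" never wins
    if "copy" in refname:
        return True
    if is_same_with_digit(dupename, refname):
        return False  # "photo (2)" loses against "photo"
    ...  # further rules are outside this hunk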
@@ -130,12 +138,12 @@ class Scanner:
         raise NotImplementedError()

     def get_dupe_groups(self, files, ignore_list=None, j=job.nulljob):
-        for f in (f for f in files if not hasattr(f, 'is_ref')):
+        for f in (f for f in files if not hasattr(f, "is_ref")):
             f.is_ref = False
         files = remove_dupe_paths(files)
         logging.info("Getting matches. Scan type: %d", self.scan_type)
         matches = self._getmatches(files, j)
-        logging.info('Found %d matches' % len(matches))
+        logging.info("Found %d matches" % len(matches))
         j.set_progress(100, tr("Almost done! Fiddling with results..."))
         # In removing what we call here "false matches", we first want to remove, if we scan by
         # folders, we want to remove folder matches for which the parent is also in a match (they're
@@ -153,20 +161,38 @@ class Scanner:
                     toremove.add(p)
                 else:
                     last_parent_path = p
-        matches = [m for m in matches if m.first.path not in toremove or m.second.path not in toremove]
+        matches = [
+            m
+            for m in matches
+            if m.first.path not in toremove or m.second.path not in toremove
+        ]
         if not self.mix_file_kind:
-            matches = [m for m in matches if get_file_ext(m.first.name) == get_file_ext(m.second.name)]
-        matches = [m for m in matches if m.first.path.exists() and m.second.path.exists()]
+            matches = [
+                m
+                for m in matches
+                if get_file_ext(m.first.name) == get_file_ext(m.second.name)
+            ]
+        matches = [
+            m for m in matches if m.first.path.exists() and m.second.path.exists()
+        ]
         matches = [m for m in matches if not (m.first.is_ref and m.second.is_ref)]
         if ignore_list:
             matches = [
-                m for m in matches
+                m
+                for m in matches
                 if not ignore_list.AreIgnored(str(m.first.path), str(m.second.path))
             ]
-        logging.info('Grouping matches')
+        logging.info("Grouping matches")
         groups = engine.get_groups(matches)
-        if self.scan_type in {ScanType.Filename, ScanType.Fields, ScanType.FieldsNoOrder, ScanType.Tag}:
-            matched_files = dedupe([m.first for m in matches] + [m.second for m in matches])
+        if self.scan_type in {
+            ScanType.Filename,
+            ScanType.Fields,
+            ScanType.FieldsNoOrder,
+            ScanType.Tag,
+        }:
+            matched_files = dedupe(
+                [m.first for m in matches] + [m.second for m in matches]
+            )
             self.discarded_file_count = len(matched_files) - sum(len(g) for g in groups)
         else:
             # Ticket #195
@@ -181,7 +207,7 @@ class Scanner:
             # reporting discarded matches.
             self.discarded_file_count = 0
         groups = [g for g in groups if any(not f.is_ref for f in g)]
-        logging.info('Created %d groups' % len(groups))
+        logging.info("Created %d groups" % len(groups))
         for g in groups:
             g.prioritize(self._key_func, self._tie_breaker)
         return groups
@@ -190,7 +216,6 @@ class Scanner:
     min_match_percentage = 80
     mix_file_kind = True
     scan_type = ScanType.Filename
-    scanned_tags = {'artist', 'title'}
+    scanned_tags = {"artist", "title"}
     size_threshold = 0
     word_weighting = False
@@ -1 +1 @@
-from . import fs, result_table, scanner # noqa
+from . import fs, result_table, scanner  # noqa

@@ -11,6 +11,7 @@ from hscommon.util import format_size
 from core import fs
 from core.util import format_timestamp, format_perc, format_words, format_dupe_count


 def get_display_info(dupe, group, delta):
     size = dupe.size
     mtime = dupe.mtime
@@ -26,16 +27,17 @@ def get_display_info(dupe, group, delta):
         percentage = group.percentage
         dupe_count = len(group.dupes)
     return {
-        'name': dupe.name,
-        'folder_path': str(dupe.folder_path),
-        'size': format_size(size, 0, 1, False),
-        'extension': dupe.extension,
-        'mtime': format_timestamp(mtime, delta and m),
-        'percentage': format_perc(percentage),
-        'words': format_words(dupe.words) if hasattr(dupe, 'words') else '',
-        'dupe_count': format_dupe_count(dupe_count),
+        "name": dupe.name,
+        "folder_path": str(dupe.folder_path),
+        "size": format_size(size, 0, 1, False),
+        "extension": dupe.extension,
+        "mtime": format_timestamp(mtime, delta and m),
+        "percentage": format_perc(percentage),
+        "words": format_words(dupe.words) if hasattr(dupe, "words") else "",
+        "dupe_count": format_dupe_count(dupe_count),
     }


 class File(fs.File):
     def get_display_info(self, group, delta):
         return get_display_info(self, group, delta)
@@ -44,4 +46,3 @@ class File(fs.File):
 class Folder(fs.Folder):
     def get_display_info(self, group, delta):
         return get_display_info(self, group, delta)
@@ -1,8 +1,8 @@
 # Created On: 2011-11-27
 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
-#
-# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
-# which should be included with this package. The terms are also available at
+#
+# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
+# which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

 from hscommon.gui.column import Column
@@ -10,18 +10,19 @@ from hscommon.trans import trget

 from core.gui.result_table import ResultTable as ResultTableBase

-coltr = trget('columns')
+coltr = trget("columns")


 class ResultTable(ResultTableBase):
     COLUMNS = [
-        Column('marked', ''),
-        Column('name', coltr("Filename")),
-        Column('folder_path', coltr("Folder"), optional=True),
-        Column('size', coltr("Size (KB)"), optional=True),
-        Column('extension', coltr("Kind"), visible=False, optional=True),
-        Column('mtime', coltr("Modification"), visible=False, optional=True),
-        Column('percentage', coltr("Match %"), optional=True),
-        Column('words', coltr("Words Used"), visible=False, optional=True),
-        Column('dupe_count', coltr("Dupe Count"), visible=False, optional=True),
+        Column("marked", ""),
+        Column("name", coltr("Filename")),
+        Column("folder_path", coltr("Folder"), optional=True),
+        Column("size", coltr("Size (KB)"), optional=True),
+        Column("extension", coltr("Kind"), visible=False, optional=True),
+        Column("mtime", coltr("Modification"), visible=False, optional=True),
+        Column("percentage", coltr("Match %"), optional=True),
+        Column("words", coltr("Words Used"), visible=False, optional=True),
+        Column("dupe_count", coltr("Dupe Count"), visible=False, optional=True),
     ]
-    DELTA_COLUMNS = {'size', 'mtime'}
+    DELTA_COLUMNS = {"size", "mtime"}
@@ -8,6 +8,7 @@ from hscommon.trans import tr

 from core.scanner import Scanner as ScannerBase, ScanOption, ScanType


 class ScannerSE(ScannerBase):
     @staticmethod
     def get_scan_options():
@@ -16,4 +17,3 @@ class ScannerSE(ScannerBase):
             ScanOption(ScanType.Contents, tr("Contents")),
             ScanOption(ScanType.Folders, tr("Folders")),
         ]
@@ -20,93 +20,106 @@ from .results_test import GetTestGroups
 from .. import app, fs, engine
 from ..scanner import ScanType


 def add_fake_files_to_directories(directories, files):
     directories.get_files = lambda j=None: iter(files)
-    directories._dirs.append('this is just so Scan() doesnt return 3')
+    directories._dirs.append("this is just so Scan() doesnt return 3")


 class TestCaseDupeGuru:
     def test_apply_filter_calls_results_apply_filter(self, monkeypatch):
         dgapp = TestApp().app
-        monkeypatch.setattr(dgapp.results, 'apply_filter', log_calls(dgapp.results.apply_filter))
-        dgapp.apply_filter('foo')
+        monkeypatch.setattr(
+            dgapp.results, "apply_filter", log_calls(dgapp.results.apply_filter)
+        )
+        dgapp.apply_filter("foo")
         eq_(2, len(dgapp.results.apply_filter.calls))
         call = dgapp.results.apply_filter.calls[0]
-        assert call['filter_str'] is None
+        assert call["filter_str"] is None
         call = dgapp.results.apply_filter.calls[1]
-        eq_('foo', call['filter_str'])
+        eq_("foo", call["filter_str"])

     def test_apply_filter_escapes_regexp(self, monkeypatch):
         dgapp = TestApp().app
-        monkeypatch.setattr(dgapp.results, 'apply_filter', log_calls(dgapp.results.apply_filter))
-        dgapp.apply_filter('()[]\\.|+?^abc')
+        monkeypatch.setattr(
+            dgapp.results, "apply_filter", log_calls(dgapp.results.apply_filter)
+        )
+        dgapp.apply_filter("()[]\\.|+?^abc")
         call = dgapp.results.apply_filter.calls[1]
-        eq_('\\(\\)\\[\\]\\\\\\.\\|\\+\\?\\^abc', call['filter_str'])
-        dgapp.apply_filter('(*)') # In "simple mode", we want the * to behave as a wildcard
+        eq_("\\(\\)\\[\\]\\\\\\.\\|\\+\\?\\^abc", call["filter_str"])
+        dgapp.apply_filter(
+            "(*)"
+        )  # In "simple mode", we want the * to behave as a wildcard
         call = dgapp.results.apply_filter.calls[3]
-        eq_(r'\(.*\)', call['filter_str'])
-        dgapp.options['escape_filter_regexp'] = False
-        dgapp.apply_filter('(abc)')
+        eq_(r"\(.*\)", call["filter_str"])
+        dgapp.options["escape_filter_regexp"] = False
+        dgapp.apply_filter("(abc)")
         call = dgapp.results.apply_filter.calls[5]
-        eq_('(abc)', call['filter_str'])
+        eq_("(abc)", call["filter_str"])
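What the escaping assertions above encode, in isolation: in "simple mode" every regex metacharacter is escaped and `*` alone survives as a wildcard, so `(*)` becomes `\(.*\)`. A hypothetical re-implementation of the behavior the test asserts, not dupeGuru's actual code:

import re

def simple_mode_to_regexp(filter_str):
    return re.escape(filter_str).replace(re.escape("*"), ".*")

assert simple_mode_to_regexp("(*)") == r"\(.*\)"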
     def test_copy_or_move(self, tmpdir, monkeypatch):
         # The goal here is just to have a test for a previous blowup I had. I know my test coverage
         # for this unit is pathetic. What's done is done. My approach now is to add tests for
         # every change I want to make. The blowup was caused by a missing import.
         p = Path(str(tmpdir))
-        p['foo'].open('w').close()
-        monkeypatch.setattr(hscommon.conflict, 'smart_copy', log_calls(lambda source_path, dest_path: None))
+        p["foo"].open("w").close()
+        monkeypatch.setattr(
+            hscommon.conflict,
+            "smart_copy",
+            log_calls(lambda source_path, dest_path: None),
+        )
         # XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
-        monkeypatch.setattr(app, 'smart_copy', hscommon.conflict.smart_copy)
-        monkeypatch.setattr(os, 'makedirs', lambda path: None) # We don't want the test to create that fake directory
+        monkeypatch.setattr(app, "smart_copy", hscommon.conflict.smart_copy)
+        monkeypatch.setattr(
+            os, "makedirs", lambda path: None
+        )  # We don't want the test to create that fake directory
         dgapp = TestApp().app
         dgapp.directories.add_path(p)
         [f] = dgapp.directories.get_files()
-        dgapp.copy_or_move(f, True, 'some_destination', 0)
+        dgapp.copy_or_move(f, True, "some_destination", 0)
         eq_(1, len(hscommon.conflict.smart_copy.calls))
         call = hscommon.conflict.smart_copy.calls[0]
-        eq_(call['dest_path'], op.join('some_destination', 'foo'))
-        eq_(call['source_path'], f.path)
+        eq_(call["dest_path"], op.join("some_destination", "foo"))
+        eq_(call["source_path"], f.path)

     def test_copy_or_move_clean_empty_dirs(self, tmpdir, monkeypatch):
         tmppath = Path(str(tmpdir))
-        sourcepath = tmppath['source']
+        sourcepath = tmppath["source"]
         sourcepath.mkdir()
-        sourcepath['myfile'].open('w')
+        sourcepath["myfile"].open("w")
         app = TestApp().app
         app.directories.add_path(tmppath)
         [myfile] = app.directories.get_files()
-        monkeypatch.setattr(app, 'clean_empty_dirs', log_calls(lambda path: None))
-        app.copy_or_move(myfile, False, tmppath['dest'], 0)
+        monkeypatch.setattr(app, "clean_empty_dirs", log_calls(lambda path: None))
+        app.copy_or_move(myfile, False, tmppath["dest"], 0)
         calls = app.clean_empty_dirs.calls
         eq_(1, len(calls))
-        eq_(sourcepath, calls[0]['path'])
+        eq_(sourcepath, calls[0]["path"])

     def test_Scan_with_objects_evaluating_to_false(self):
         class FakeFile(fs.File):
             def __bool__(self):
                 return False

         # At some point, any() was used in a wrong way that made Scan() wrongly return 1
         app = TestApp().app
-        f1, f2 = [FakeFile('foo') for i in range(2)]
+        f1, f2 = [FakeFile("foo") for i in range(2)]
         f1.is_ref, f2.is_ref = (False, False)
         assert not (bool(f1) and bool(f2))
         add_fake_files_to_directories(app.directories, [f1, f2])
-        app.start_scanning() # no exception
+        app.start_scanning()  # no exception

     @mark.skipif("not hasattr(os, 'link')")
     def test_ignore_hardlink_matches(self, tmpdir):
         # If the ignore_hardlink_matches option is set, don't match files hardlinking to the same
         # inode.
         tmppath = Path(str(tmpdir))
-        tmppath['myfile'].open('w').write('foo')
-        os.link(str(tmppath['myfile']), str(tmppath['hardlink']))
+        tmppath["myfile"].open("w").write("foo")
+        os.link(str(tmppath["myfile"]), str(tmppath["hardlink"]))
         app = TestApp().app
         app.directories.add_path(tmppath)
-        app.options['scan_type'] = ScanType.Contents
-        app.options['ignore_hardlink_matches'] = True
+        app.options["scan_type"] = ScanType.Contents
+        app.options["ignore_hardlink_matches"] = True
         app.start_scanning()
         eq_(len(app.results.groups), 0)
@@ -116,27 +129,32 @@ class TestCaseDupeGuru:
         # making the selected row None. Don't crash when it happens.
         dgapp = TestApp().app
         # selected_row is None because there's no result.
-        assert not dgapp.result_table.rename_selected('foo') # no crash
+        assert not dgapp.result_table.rename_selected("foo")  # no crash


 class TestCaseDupeGuru_clean_empty_dirs:
     def pytest_funcarg__do_setup(self, request):
-        monkeypatch = request.getfuncargvalue('monkeypatch')
-        monkeypatch.setattr(hscommon.util, 'delete_if_empty', log_calls(lambda path, files_to_delete=[]: None))
+        monkeypatch = request.getfuncargvalue("monkeypatch")
+        monkeypatch.setattr(
+            hscommon.util,
+            "delete_if_empty",
+            log_calls(lambda path, files_to_delete=[]: None),
+        )
         # XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
-        monkeypatch.setattr(app, 'delete_if_empty', hscommon.util.delete_if_empty)
+        monkeypatch.setattr(app, "delete_if_empty", hscommon.util.delete_if_empty)
         self.app = TestApp().app

     def test_option_off(self, do_setup):
-        self.app.clean_empty_dirs(Path('/foo/bar'))
+        self.app.clean_empty_dirs(Path("/foo/bar"))
         eq_(0, len(hscommon.util.delete_if_empty.calls))

     def test_option_on(self, do_setup):
-        self.app.options['clean_empty_dirs'] = True
-        self.app.clean_empty_dirs(Path('/foo/bar'))
+        self.app.options["clean_empty_dirs"] = True
+        self.app.clean_empty_dirs(Path("/foo/bar"))
         calls = hscommon.util.delete_if_empty.calls
         eq_(1, len(calls))
-        eq_(Path('/foo/bar'), calls[0]['path'])
-        eq_(['.DS_Store'], calls[0]['files_to_delete'])
+        eq_(Path("/foo/bar"), calls[0]["path"])
+        eq_([".DS_Store"], calls[0]["files_to_delete"])

     def test_recurse_up(self, do_setup, monkeypatch):
         # delete_if_empty must be recursively called up in the path until it returns False
@@ -144,16 +162,16 @@ class TestCaseDupeGuru_clean_empty_dirs:
         def mock_delete_if_empty(path, files_to_delete=[]):
             return len(path) > 1

-        monkeypatch.setattr(hscommon.util, 'delete_if_empty', mock_delete_if_empty)
+        monkeypatch.setattr(hscommon.util, "delete_if_empty", mock_delete_if_empty)
         # XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
-        monkeypatch.setattr(app, 'delete_if_empty', mock_delete_if_empty)
-        self.app.options['clean_empty_dirs'] = True
-        self.app.clean_empty_dirs(Path('not-empty/empty/empty'))
+        monkeypatch.setattr(app, "delete_if_empty", mock_delete_if_empty)
+        self.app.options["clean_empty_dirs"] = True
+        self.app.clean_empty_dirs(Path("not-empty/empty/empty"))
         calls = hscommon.util.delete_if_empty.calls
         eq_(3, len(calls))
-        eq_(Path('not-empty/empty/empty'), calls[0]['path'])
-        eq_(Path('not-empty/empty'), calls[1]['path'])
-        eq_(Path('not-empty'), calls[2]['path'])
+        eq_(Path("not-empty/empty/empty"), calls[0]["path"])
+        eq_(Path("not-empty/empty"), calls[1]["path"])
+        eq_(Path("not-empty"), calls[2]["path"])
@ -166,10 +184,10 @@ class TestCaseDupeGuruWithResults:
self.dtree = app.dtree
self.rtable = app.rtable
self.rtable.refresh()
tmpdir = request.getfuncargvalue('tmpdir')
tmpdir = request.getfuncargvalue("tmpdir")
tmppath = Path(str(tmpdir))
tmppath['foo'].mkdir()
tmppath['bar'].mkdir()
tmppath["foo"].mkdir()
tmppath["bar"].mkdir()
self.app.directories.add_path(tmppath)

def test_GetObjects(self, do_setup):
@ -187,8 +205,8 @@ class TestCaseDupeGuruWithResults:

def test_GetObjects_after_sort(self, do_setup):
objects = self.objects
groups = self.groups[:] # we need an un-sorted reference
self.rtable.sort('name', False)
groups = self.groups[:] # we need an un-sorted reference
self.rtable.sort("name", False)
r = self.rtable[1]
assert r._group is groups[1]
assert r._dupe is objects[4]
@ -198,7 +216,7 @@ class TestCaseDupeGuruWithResults:
self.rtable.select([1, 2, 3])
self.app.remove_selected()
# The first 2 dupes have been removed. The 3rd one is a ref. it stays there, in first pos.
eq_(self.rtable.selected_indexes, [1]) # no exception
eq_(self.rtable.selected_indexes, [1]) # no exception

def test_selectResultNodePaths(self, do_setup):
app = self.app
@ -220,9 +238,9 @@ class TestCaseDupeGuruWithResults:
def test_selectResultNodePaths_after_sort(self, do_setup):
app = self.app
objects = self.objects
groups = self.groups[:] #To keep the old order in memory
self.rtable.sort('name', False) #0
#Now, the group order is supposed to be reversed
groups = self.groups[:] # To keep the old order in memory
self.rtable.sort("name", False) # 0
# Now, the group order is supposed to be reversed
self.rtable.select([1, 2, 3])
eq_(len(app.selected_dupes), 3)
assert app.selected_dupes[0] is objects[4]
@ -242,13 +260,13 @@ class TestCaseDupeGuruWithResults:
self.rtable.power_marker = True
self.rtable.select([0, 1, 2])
app.remove_selected()
eq_(self.rtable.selected_indexes, []) # no exception
eq_(self.rtable.selected_indexes, []) # no exception

def test_selectPowerMarkerRows_after_sort(self, do_setup):
app = self.app
objects = self.objects
self.rtable.power_marker = True
self.rtable.sort('name', False)
self.rtable.sort("name", False)
self.rtable.select([0, 1, 2])
eq_(len(app.selected_dupes), 3)
assert app.selected_dupes[0] is objects[4]
@ -285,11 +303,11 @@ class TestCaseDupeGuruWithResults:

def test_refreshDetailsWithSelected(self, do_setup):
self.rtable.select([1, 4])
eq_(self.dpanel.row(0), ('Filename', 'bar bleh', 'foo bar'))
self.dpanel.view.check_gui_calls(['refresh'])
eq_(self.dpanel.row(0), ("Filename", "bar bleh", "foo bar"))
self.dpanel.view.check_gui_calls(["refresh"])
self.rtable.select([])
eq_(self.dpanel.row(0), ('Filename', '---', '---'))
self.dpanel.view.check_gui_calls(['refresh'])
eq_(self.dpanel.row(0), ("Filename", "---", "---"))
self.dpanel.view.check_gui_calls(["refresh"])

def test_makeSelectedReference(self, do_setup):
app = self.app
@ -300,12 +318,14 @@ class TestCaseDupeGuruWithResults:
assert groups[0].ref is objects[1]
assert groups[1].ref is objects[4]

def test_makeSelectedReference_by_selecting_two_dupes_in_the_same_group(self, do_setup):
def test_makeSelectedReference_by_selecting_two_dupes_in_the_same_group(
self, do_setup
):
app = self.app
objects = self.objects
groups = self.groups
self.rtable.select([1, 2, 4])
#Only [0, 0] and [1, 0] must go ref, not [0, 1] because it is a part of the same group
# Only [0, 0] and [1, 0] must go ref, not [0, 1] because it is a part of the same group
app.make_selected_reference()
assert groups[0].ref is objects[1]
assert groups[1].ref is objects[4]
@ -314,7 +334,7 @@ class TestCaseDupeGuruWithResults:
app = self.app
self.rtable.select([1, 4])
app.remove_selected()
eq_(len(app.results.dupes), 1) # the first path is now selected
eq_(len(app.results.dupes), 1) # the first path is now selected
app.remove_selected()
eq_(len(app.results.dupes), 0)

@ -336,27 +356,27 @@ class TestCaseDupeGuruWithResults:

def test_addDirectory_does_not_exist(self, do_setup):
app = self.app
app.add_directory('/does_not_exist')
app.add_directory("/does_not_exist")
eq_(len(app.view.messages), 1)
assert "exist" in app.view.messages[0]

def test_ignore(self, do_setup):
app = self.app
self.rtable.select([4]) #The dupe of the second, 2 sized group
self.rtable.select([4]) # The dupe of the second, 2 sized group
app.add_selected_to_ignore_list()
eq_(len(app.ignore_list), 1)
self.rtable.select([1]) #first dupe of the 3 dupes group
self.rtable.select([1]) # first dupe of the 3 dupes group
app.add_selected_to_ignore_list()
#BOTH the ref and the other dupe should have been added
# BOTH the ref and the other dupe should have been added
eq_(len(app.ignore_list), 3)

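Why 3 entries: ignoring a dupe pairs it with every other member of its group. The first selection sits in a 2-file group and adds 1 pair; the second is one dupe of the 3-file group, pairing it with both the ref and the remaining dupe, i.e. 2 more pairs, for 3 in total.
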
def test_purgeIgnoreList(self, do_setup, tmpdir):
app = self.app
p1 = str(tmpdir.join('file1'))
p2 = str(tmpdir.join('file2'))
open(p1, 'w').close()
open(p2, 'w').close()
dne = '/does_not_exist'
p1 = str(tmpdir.join("file1"))
p2 = str(tmpdir.join("file2"))
open(p1, "w").close()
open(p2, "w").close()
dne = "/does_not_exist"
app.ignore_list.Ignore(dne, p1)
app.ignore_list.Ignore(p2, dne)
app.ignore_list.Ignore(p1, p2)
@ -381,9 +401,11 @@ class TestCaseDupeGuruWithResults:
# When doing a scan with results being present prior to the scan, correctly invalidate the
# results table.
app = self.app
app.JOB = Job(1, lambda *args, **kw: False) # Cancels the task
add_fake_files_to_directories(app.directories, self.objects) # We want the scan to at least start
app.start_scanning() # will be cancelled immediately
app.JOB = Job(1, lambda *args, **kw: False) # Cancels the task
add_fake_files_to_directories(
app.directories, self.objects
) # We want the scan to at least start
app.start_scanning() # will be cancelled immediately
eq_(len(app.result_table), 0)

def test_selected_dupes_after_removal(self, do_setup):
@ -401,21 +423,21 @@ class TestCaseDupeGuruWithResults:
# Ref #238
self.rtable.delta_values = True
self.rtable.power_marker = True
self.rtable.sort('dupe_count', False)
self.rtable.sort("dupe_count", False)
# don't crash
self.rtable.sort('percentage', False)
self.rtable.sort("percentage", False)
# don't crash


class TestCaseDupeGuru_renameSelected:
def pytest_funcarg__do_setup(self, request):
tmpdir = request.getfuncargvalue('tmpdir')
tmpdir = request.getfuncargvalue("tmpdir")
p = Path(str(tmpdir))
fp = open(str(p['foo bar 1']), mode='w')
fp = open(str(p["foo bar 1"]), mode="w")
fp.close()
fp = open(str(p['foo bar 2']), mode='w')
fp = open(str(p["foo bar 2"]), mode="w")
fp.close()
fp = open(str(p['foo bar 3']), mode='w')
fp = open(str(p["foo bar 3"]), mode="w")
fp.close()
files = fs.get_files(p)
for f in files:
@ -437,46 +459,46 @@ class TestCaseDupeGuru_renameSelected:
app = self.app
g = self.groups[0]
self.rtable.select([1])
assert app.rename_selected('renamed')
assert app.rename_selected("renamed")
names = [p.name for p in self.p.listdir()]
assert 'renamed' in names
assert 'foo bar 2' not in names
eq_(g.dupes[0].name, 'renamed')
assert "renamed" in names
assert "foo bar 2" not in names
eq_(g.dupes[0].name, "renamed")

def test_none_selected(self, do_setup, monkeypatch):
app = self.app
g = self.groups[0]
self.rtable.select([])
monkeypatch.setattr(logging, 'warning', log_calls(lambda msg: None))
assert not app.rename_selected('renamed')
msg = logging.warning.calls[0]['msg']
eq_('dupeGuru Warning: list index out of range', msg)
monkeypatch.setattr(logging, "warning", log_calls(lambda msg: None))
assert not app.rename_selected("renamed")
msg = logging.warning.calls[0]["msg"]
eq_("dupeGuru Warning: list index out of range", msg)
names = [p.name for p in self.p.listdir()]
assert 'renamed' not in names
assert 'foo bar 2' in names
eq_(g.dupes[0].name, 'foo bar 2')
assert "renamed" not in names
assert "foo bar 2" in names
eq_(g.dupes[0].name, "foo bar 2")

def test_name_already_exists(self, do_setup, monkeypatch):
app = self.app
g = self.groups[0]
self.rtable.select([1])
monkeypatch.setattr(logging, 'warning', log_calls(lambda msg: None))
assert not app.rename_selected('foo bar 1')
msg = logging.warning.calls[0]['msg']
assert msg.startswith('dupeGuru Warning: \'foo bar 1\' already exists in')
monkeypatch.setattr(logging, "warning", log_calls(lambda msg: None))
assert not app.rename_selected("foo bar 1")
msg = logging.warning.calls[0]["msg"]
assert msg.startswith("dupeGuru Warning: 'foo bar 1' already exists in")
names = [p.name for p in self.p.listdir()]
assert 'foo bar 1' in names
assert 'foo bar 2' in names
eq_(g.dupes[0].name, 'foo bar 2')
assert "foo bar 1" in names
assert "foo bar 2" in names
eq_(g.dupes[0].name, "foo bar 2")


class TestAppWithDirectoriesInTree:
def pytest_funcarg__do_setup(self, request):
tmpdir = request.getfuncargvalue('tmpdir')
tmpdir = request.getfuncargvalue("tmpdir")
p = Path(str(tmpdir))
p['sub1'].mkdir()
p['sub2'].mkdir()
p['sub3'].mkdir()
p["sub1"].mkdir()
p["sub2"].mkdir()
p["sub3"].mkdir()
app = TestApp()
self.app = app.app
self.dtree = app.dtree
@ -487,12 +509,11 @@ class TestAppWithDirectoriesInTree:
# Setting a node state to something also affect subnodes. These subnodes must be correctly
# refreshed.
node = self.dtree[0]
eq_(len(node), 3) # a len() call is required for subnodes to be loaded
eq_(len(node), 3) # a len() call is required for subnodes to be loaded
subnode = node[0]
node.state = 1 # the state property is a state index
node.state = 1 # the state property is a state index
node = self.dtree[0]
eq_(len(node), 3)
subnode = node[0]
eq_(subnode.state, 1)
self.dtree.view.check_gui_calls(['refresh_states'])

self.dtree.view.check_gui_calls(["refresh_states"])

@ -4,7 +4,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon.testutil import TestApp as TestAppBase, CallLogger, eq_, with_app # noqa
from hscommon.testutil import TestApp as TestAppBase, CallLogger, eq_, with_app # noqa
from hscommon.path import Path
from hscommon.util import get_file_ext, format_size
from hscommon.gui.column import Column
@ -17,6 +17,7 @@ from ..app import DupeGuru as DupeGuruBase
from ..gui.result_table import ResultTable as ResultTableBase
from ..gui.prioritize_dialog import PrioritizeDialog


class DupeGuruView:
JOB = nulljob

@ -39,28 +40,32 @@ class DupeGuruView:
self.messages.append(msg)

def ask_yes_no(self, prompt):
return True # always answer yes
return True # always answer yes

def create_results_window(self):
pass


class ResultTable(ResultTableBase):
COLUMNS = [
Column('marked', ''),
Column('name', 'Filename'),
Column('folder_path', 'Directory'),
Column('size', 'Size (KB)'),
Column('extension', 'Kind'),
Column("marked", ""),
Column("name", "Filename"),
Column("folder_path", "Directory"),
Column("size", "Size (KB)"),
Column("extension", "Kind"),
]
DELTA_COLUMNS = {'size', }
DELTA_COLUMNS = {
"size",
}


class DupeGuru(DupeGuruBase):
NAME = 'dupeGuru'
METADATA_TO_READ = ['size']
NAME = "dupeGuru"
METADATA_TO_READ = ["size"]

def __init__(self):
DupeGuruBase.__init__(self, DupeGuruView())
self.appdata = '/tmp'
self.appdata = "/tmp"
self._recreate_result_table()

def _prioritization_categories(self):
@ -78,7 +83,7 @@ class NamedObject:
def __init__(self, name="foobar", with_words=False, size=1, folder=None):
self.name = name
if folder is None:
folder = 'basepath'
folder = "basepath"
self._folder = Path(folder)
self.size = size
self.md5partial = name
@ -88,7 +93,7 @@ class NamedObject:
self.is_ref = False

def __bool__(self):
return False #Make sure that operations are made correctly when the bool value of files is false.
return False # Make sure that operations are made correctly when the bool value of files is false.

def get_display_info(self, group, delta):
size = self.size
@ -97,10 +102,10 @@ class NamedObject:
r = group.ref
size -= r.size
return {
'name': self.name,
'folder_path': str(self.folder_path),
'size': format_size(size, 0, 1, False),
'extension': self.extension if hasattr(self, 'extension') else '---',
"name": self.name,
"folder_path": str(self.folder_path),
"size": format_size(size, 0, 1, False),
"extension": self.extension if hasattr(self, "extension") else "---",
}

@property
@ -115,6 +120,7 @@ class NamedObject:
def extension(self):
return get_file_ext(self.name)


# Returns a group set that looks like that:
# "foo bar" (1)
# "bar bleh" (1024)
@ -127,21 +133,24 @@ def GetTestGroups():
NamedObject("bar bleh"),
NamedObject("foo bleh"),
NamedObject("ibabtu"),
NamedObject("ibabtu")
NamedObject("ibabtu"),
]
objects[1].size = 1024
matches = engine.getmatches(objects) #we should have 5 matches
groups = engine.get_groups(matches) #We should have 2 groups
matches = engine.getmatches(objects) # we should have 5 matches
groups = engine.get_groups(matches) # We should have 2 groups
for g in groups:
g.prioritize(lambda x: objects.index(x)) #We want the dupes to be in the same order as the list is
groups.sort(key=len, reverse=True) # We want the group with 3 members to be first.
g.prioritize(
lambda x: objects.index(x)
) # We want the dupes to be in the same order as the list is
groups.sort(key=len, reverse=True) # We want the group with 3 members to be first.
return (objects, matches, groups)

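A hedged usage sketch of the fixture above, spelling out the shape its comments describe (assumed consumption pattern; the sort by group length is what puts the 3-member group first):

    objects, matches, groups = GetTestGroups()
    assert len(groups) == 2
    assert len(groups[0]) == 3  # groups.sort(key=len, reverse=True) above
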
class TestApp(TestAppBase):
def __init__(self):
def link_gui(gui):
gui.view = self.make_logger()
if hasattr(gui, 'columns'): # tables
if hasattr(gui, "columns"): # tables
gui.columns.view = self.make_logger()
return gui

@ -166,7 +175,7 @@ class TestApp(TestAppBase):
# rtable is a property because its instance can be replaced during execution
return self.app.result_table

#--- Helpers
# --- Helpers
def select_pri_criterion(self, name):
# Select a main prioritize criterion by name instead of by index. Makes tests more
# maintainable.

@ -13,13 +13,18 @@ try:
except ImportError:
skip("Can't import the block module, probably hasn't been compiled.")

def my_avgdiff(first, second, limit=768, min_iter=3): # this is so I don't have to re-write every call

def my_avgdiff(
first, second, limit=768, min_iter=3
): # this is so I don't have to re-write every call
return avgdiff(first, second, limit, min_iter)


BLACK = (0, 0, 0)
RED = (0xff, 0, 0)
GREEN = (0, 0xff, 0)
BLUE = (0, 0, 0xff)
RED = (0xFF, 0, 0)
GREEN = (0, 0xFF, 0)
BLUE = (0, 0, 0xFF)


class FakeImage:
def __init__(self, size, data):
@ -37,16 +42,20 @@ class FakeImage:
pixels.append(pixel)
return FakeImage((box[2] - box[0], box[3] - box[1]), pixels)


def empty():
return FakeImage((0, 0), [])

def single_pixel(): #one red pixel
return FakeImage((1, 1), [(0xff, 0, 0)])

def single_pixel(): # one red pixel
return FakeImage((1, 1), [(0xFF, 0, 0)])


def four_pixels():
pixels = [RED, (0, 0x80, 0xff), (0x80, 0, 0), (0, 0x40, 0x80)]
pixels = [RED, (0, 0x80, 0xFF), (0x80, 0, 0), (0, 0x40, 0x80)]
return FakeImage((2, 2), pixels)


class TestCasegetblock:
def test_single_pixel(self):
im = single_pixel()
@ -60,9 +69,9 @@ class TestCasegetblock:
def test_four_pixels(self):
im = four_pixels()
[b] = getblocks2(im, 1)
meanred = (0xff + 0x80) // 4
meanred = (0xFF + 0x80) // 4
meangreen = (0x80 + 0x40) // 4
meanblue = (0xff + 0x80) // 4
meanblue = (0xFF + 0x80) // 4
eq_((meanred, meangreen, meanblue), b)

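Worked numbers for the four-pixel expectation above, assuming getblocks2 averages each channel over all pixels with integer division:

    # Channel sums over RED, (0, 0x80, 0xFF), (0x80, 0, 0), (0, 0x40, 0x80):
    assert (0xFF + 0x80) // 4 == 95  # red:   (255 + 128 + 0 + 0) // 4
    assert (0x80 + 0x40) // 4 == 48  # green: (128 + 64 + 0 + 0) // 4
    assert (0xFF + 0x80) // 4 == 95  # blue:  (255 + 128 + 0 + 0) // 4
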
@ -158,6 +167,7 @@ class TestCasegetblock:
# eq_(BLACK, blocks[3])
#


class TestCasegetblocks2:
def test_empty_image(self):
im = empty()
@ -169,9 +179,9 @@ class TestCasegetblocks2:
blocks = getblocks2(im, 1)
eq_(1, len(blocks))
block = blocks[0]
meanred = (0xff + 0x80) // 4
meanred = (0xFF + 0x80) // 4
meangreen = (0x80 + 0x40) // 4
meanblue = (0xff + 0x80) // 4
meanblue = (0xFF + 0x80) // 4
eq_((meanred, meangreen, meanblue), block)

def test_four_blocks_all_black(self):
@ -225,25 +235,25 @@ class TestCaseavgdiff:
my_avgdiff([b, b], [b])

def test_first_arg_is_empty_but_not_second(self):
#Don't return 0 (as when the 2 lists are empty), raise!
# Don't return 0 (as when the 2 lists are empty), raise!
b = (0, 0, 0)
with raises(DifferentBlockCountError):
my_avgdiff([], [b])

def test_limit(self):
ref = (0, 0, 0)
b1 = (10, 10, 10) #avg 30
b2 = (20, 20, 20) #avg 45
b3 = (30, 30, 30) #avg 60
b1 = (10, 10, 10) # avg 30
b2 = (20, 20, 20) # avg 45
b3 = (30, 30, 30) # avg 60
blocks1 = [ref, ref, ref]
blocks2 = [b1, b2, b3]
eq_(45, my_avgdiff(blocks1, blocks2, 44))

def test_min_iterations(self):
ref = (0, 0, 0)
b1 = (10, 10, 10) #avg 30
b2 = (20, 20, 20) #avg 45
b3 = (10, 10, 10) #avg 40
b1 = (10, 10, 10) # avg 30
b2 = (20, 20, 20) # avg 45
b3 = (10, 10, 10) # avg 40
blocks1 = [ref, ref, ref]
blocks2 = [b1, b2, b3]
eq_(40, my_avgdiff(blocks1, blocks2, 45 - 1, 3))

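The running averages the two tests above pin down, as a worked check (the "avg NN" comments are cumulative averages of the per-block differences against the all-zero reference blocks):

    diffs_limit = [30, 60, 90]  # |10-0|*3, |20-0|*3, |30-0|*3
    diffs_min = [30, 60, 30]
    assert [sum(diffs_limit[:n]) // n for n in (1, 2, 3)] == [30, 45, 60]
    assert [sum(diffs_min[:n]) // n for n in (1, 2, 3)] == [30, 45, 40]
    # test_limit expects 45: the scan may stop once the running average tops
    # limit=44. test_min_iterations expects 40: the third block still counts.
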
@ -16,34 +16,35 @@ try:
except ImportError:
skip("Can't import the cache module, probably hasn't been compiled.")


class TestCasecolors_to_string:
def test_no_color(self):
eq_('', colors_to_string([]))
eq_("", colors_to_string([]))

def test_single_color(self):
eq_('000000', colors_to_string([(0, 0, 0)]))
eq_('010101', colors_to_string([(1, 1, 1)]))
eq_('0a141e', colors_to_string([(10, 20, 30)]))
eq_("000000", colors_to_string([(0, 0, 0)]))
eq_("010101", colors_to_string([(1, 1, 1)]))
eq_("0a141e", colors_to_string([(10, 20, 30)]))

def test_two_colors(self):
eq_('000102030405', colors_to_string([(0, 1, 2), (3, 4, 5)]))
eq_("000102030405", colors_to_string([(0, 1, 2), (3, 4, 5)]))


class TestCasestring_to_colors:
def test_empty(self):
eq_([], string_to_colors(''))
eq_([], string_to_colors(""))

def test_single_color(self):
eq_([(0, 0, 0)], string_to_colors('000000'))
eq_([(2, 3, 4)], string_to_colors('020304'))
eq_([(10, 20, 30)], string_to_colors('0a141e'))
eq_([(0, 0, 0)], string_to_colors("000000"))
eq_([(2, 3, 4)], string_to_colors("020304"))
eq_([(10, 20, 30)], string_to_colors("0a141e"))

def test_two_colors(self):
eq_([(10, 20, 30), (40, 50, 60)], string_to_colors('0a141e28323c'))
eq_([(10, 20, 30), (40, 50, 60)], string_to_colors("0a141e28323c"))

def test_incomplete_color(self):
# don't return anything if it's not a complete color
eq_([], string_to_colors('102'))
eq_([], string_to_colors("102"))

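A minimal sketch of the codec behaviour these two test classes describe, assuming two lowercase hex digits per channel (hypothetical helpers, not the compiled cache module):

    def colors_to_string_sketch(colors):
        return "".join("%02x%02x%02x" % (r, g, b) for r, g, b in colors)

    def string_to_colors_sketch(s):
        # digits that don't form a full color are dropped, per test_incomplete_color
        return [tuple(int(s[i + j:i + j + 2], 16) for j in (0, 2, 4))
                for i in range(0, len(s) - 5, 6)]
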
class BaseTestCaseCache:
@ -54,58 +55,58 @@ class BaseTestCaseCache:
c = self.get_cache()
eq_(0, len(c))
with raises(KeyError):
c['foo']
c["foo"]

def test_set_then_retrieve_blocks(self):
c = self.get_cache()
b = [(0, 0, 0), (1, 2, 3)]
c['foo'] = b
eq_(b, c['foo'])
c["foo"] = b
eq_(b, c["foo"])

def test_delitem(self):
c = self.get_cache()
c['foo'] = ''
del c['foo']
assert 'foo' not in c
c["foo"] = ""
del c["foo"]
assert "foo" not in c
with raises(KeyError):
del c['foo']
del c["foo"]

def test_persistance(self, tmpdir):
DBNAME = tmpdir.join('hstest.db')
DBNAME = tmpdir.join("hstest.db")
c = self.get_cache(str(DBNAME))
c['foo'] = [(1, 2, 3)]
c["foo"] = [(1, 2, 3)]
del c
c = self.get_cache(str(DBNAME))
eq_([(1, 2, 3)], c['foo'])
eq_([(1, 2, 3)], c["foo"])

def test_filter(self):
c = self.get_cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c.filter(lambda p: p != 'bar') #only 'bar' is removed
c["foo"] = ""
c["bar"] = ""
c["baz"] = ""
c.filter(lambda p: p != "bar") # only 'bar' is removed
eq_(2, len(c))
assert 'foo' in c
assert 'baz' in c
assert 'bar' not in c
assert "foo" in c
assert "baz" in c
assert "bar" not in c

def test_clear(self):
c = self.get_cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c["foo"] = ""
c["bar"] = ""
c["baz"] = ""
c.clear()
eq_(0, len(c))
assert 'foo' not in c
assert 'baz' not in c
assert 'bar' not in c
assert "foo" not in c
assert "baz" not in c
assert "bar" not in c

def test_by_id(self):
# it's possible to use the cache by referring to the files by their row_id
c = self.get_cache()
b = [(0, 0, 0), (1, 2, 3)]
c['foo'] = b
foo_id = c.get_id('foo')
c["foo"] = b
foo_id = c.get_id("foo")
eq_(c[foo_id], b)

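The dict-like contract BaseTestCaseCache pins down, sketched as usage (get_cache() is the per-subclass factory these tests rely on; everything else is shown in the assertions above):

    c = self.get_cache()                   # fresh, empty cache
    c["foo"] = [(0, 0, 0), (1, 2, 3)]      # keyed by file path
    assert c[c.get_id("foo")] == c["foo"]  # rows reachable by integer id too
    c.filter(lambda p: p != "foo")         # drop paths failing the predicate
    assert len(c) == 0
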
@ -120,16 +121,16 @@ class TestCaseSqliteCache(BaseTestCaseCache):
# If we don't do this monkeypatching, we get a weird exception about trying to flush a
# closed file. I've tried setting logging level and stuff, but nothing worked. So, there we
# go, a dirty monkeypatch.
monkeypatch.setattr(logging, 'warning', lambda *args, **kw: None)
dbname = str(tmpdir.join('foo.db'))
fp = open(dbname, 'w')
fp.write('invalid sqlite content')
monkeypatch.setattr(logging, "warning", lambda *args, **kw: None)
dbname = str(tmpdir.join("foo.db"))
fp = open(dbname, "w")
fp.write("invalid sqlite content")
fp.close()
c = self.get_cache(dbname) # should not raise a DatabaseError
c['foo'] = [(1, 2, 3)]
c = self.get_cache(dbname) # should not raise a DatabaseError
c["foo"] = [(1, 2, 3)]
del c
c = self.get_cache(dbname)
eq_(c['foo'], [(1, 2, 3)])
eq_(c["foo"], [(1, 2, 3)])


class TestCaseShelveCache(BaseTestCaseCache):
@ -161,4 +162,3 @@ class TestCaseCacheSQLEscape:
del c["foo'bar"]
except KeyError:
assert False

@ -1 +1 @@
from hscommon.testutil import pytest_funcarg__app # noqa
from hscommon.testutil import pytest_funcarg__app # noqa

@ -14,91 +14,105 @@ from hscommon.path import Path
from hscommon.testutil import eq_

from ..fs import File
from ..directories import Directories, DirectoryState, AlreadyThereError, InvalidPathError
from ..directories import (
Directories,
DirectoryState,
AlreadyThereError,
InvalidPathError,
)


def create_fake_fs(rootpath):
# We have it as a separate function because other units are using it.
rootpath = rootpath['fs']
rootpath = rootpath["fs"]
rootpath.mkdir()
rootpath['dir1'].mkdir()
rootpath['dir2'].mkdir()
rootpath['dir3'].mkdir()
fp = rootpath['file1.test'].open('w')
fp.write('1')
rootpath["dir1"].mkdir()
rootpath["dir2"].mkdir()
rootpath["dir3"].mkdir()
fp = rootpath["file1.test"].open("w")
fp.write("1")
fp.close()
fp = rootpath['file2.test'].open('w')
fp.write('12')
fp = rootpath["file2.test"].open("w")
fp.write("12")
fp.close()
fp = rootpath['file3.test'].open('w')
fp.write('123')
fp = rootpath["file3.test"].open("w")
fp.write("123")
fp.close()
fp = rootpath['dir1']['file1.test'].open('w')
fp.write('1')
fp = rootpath["dir1"]["file1.test"].open("w")
fp.write("1")
fp.close()
fp = rootpath['dir2']['file2.test'].open('w')
fp.write('12')
fp = rootpath["dir2"]["file2.test"].open("w")
fp.write("12")
fp.close()
fp = rootpath['dir3']['file3.test'].open('w')
fp.write('123')
fp = rootpath["dir3"]["file3.test"].open("w")
fp.write("123")
fp.close()
return rootpath


testpath = None


def setup_module(module):
# In this unit, we have tests depending on two directory structure. One with only one file in it
# and another with a more complex structure.
testpath = Path(tempfile.mkdtemp())
module.testpath = testpath
rootpath = testpath['onefile']
rootpath = testpath["onefile"]
rootpath.mkdir()
fp = rootpath['test.txt'].open('w')
fp.write('test_data')
fp = rootpath["test.txt"].open("w")
fp.write("test_data")
fp.close()
create_fake_fs(testpath)


def teardown_module(module):
shutil.rmtree(str(module.testpath))


def test_empty():
d = Directories()
eq_(len(d), 0)
assert 'foobar' not in d
assert "foobar" not in d


def test_add_path():
d = Directories()
p = testpath['onefile']
p = testpath["onefile"]
d.add_path(p)
eq_(1, len(d))
assert p in d
assert (p['foobar']) in d
assert (p["foobar"]) in d
assert p.parent() not in d
p = testpath['fs']
p = testpath["fs"]
d.add_path(p)
eq_(2, len(d))
assert p in d


def test_AddPath_when_path_is_already_there():
d = Directories()
p = testpath['onefile']
p = testpath["onefile"]
d.add_path(p)
with raises(AlreadyThereError):
d.add_path(p)
with raises(AlreadyThereError):
d.add_path(p['foobar'])
d.add_path(p["foobar"])
eq_(1, len(d))


def test_add_path_containing_paths_already_there():
d = Directories()
d.add_path(testpath['onefile'])
d.add_path(testpath["onefile"])
eq_(1, len(d))
d.add_path(testpath)
eq_(len(d), 1)
eq_(d[0], testpath)


def test_AddPath_non_latin(tmpdir):
p = Path(str(tmpdir))
to_add = p['unicode\u201a']
to_add = p["unicode\u201a"]
os.mkdir(str(to_add))
d = Directories()
try:
@ -106,63 +120,69 @@ def test_AddPath_non_latin(tmpdir):
except UnicodeDecodeError:
assert False


def test_del():
d = Directories()
d.add_path(testpath['onefile'])
d.add_path(testpath["onefile"])
try:
del d[1]
assert False
except IndexError:
pass
d.add_path(testpath['fs'])
d.add_path(testpath["fs"])
del d[1]
eq_(1, len(d))


def test_states():
d = Directories()
p = testpath['onefile']
p = testpath["onefile"]
d.add_path(p)
eq_(DirectoryState.Normal, d.get_state(p))
d.set_state(p, DirectoryState.Reference)
eq_(DirectoryState.Reference, d.get_state(p))
eq_(DirectoryState.Reference, d.get_state(p['dir1']))
eq_(DirectoryState.Reference, d.get_state(p["dir1"]))
eq_(1, len(d.states))
eq_(p, list(d.states.keys())[0])
eq_(DirectoryState.Reference, d.states[p])


def test_get_state_with_path_not_there():
# When the path's not there, just return DirectoryState.Normal
d = Directories()
d.add_path(testpath['onefile'])
d.add_path(testpath["onefile"])
eq_(d.get_state(testpath), DirectoryState.Normal)


def test_states_overwritten_when_larger_directory_eat_smaller_ones():
# ref #248
# When setting the state of a folder, we overwrite previously set states for subfolders.
d = Directories()
p = testpath['onefile']
p = testpath["onefile"]
d.add_path(p)
d.set_state(p, DirectoryState.Excluded)
d.add_path(testpath)
d.set_state(testpath, DirectoryState.Reference)
eq_(d.get_state(p), DirectoryState.Reference)
eq_(d.get_state(p['dir1']), DirectoryState.Reference)
eq_(d.get_state(p["dir1"]), DirectoryState.Reference)
eq_(d.get_state(testpath), DirectoryState.Reference)


def test_get_files():
d = Directories()
p = testpath['fs']
p = testpath["fs"]
d.add_path(p)
d.set_state(p['dir1'], DirectoryState.Reference)
d.set_state(p['dir2'], DirectoryState.Excluded)
d.set_state(p["dir1"], DirectoryState.Reference)
d.set_state(p["dir2"], DirectoryState.Excluded)
files = list(d.get_files())
eq_(5, len(files))
for f in files:
if f.path.parent() == p['dir1']:
if f.path.parent() == p["dir1"]:
assert f.is_ref
else:
assert not f.is_ref


def test_get_files_with_folders():
# When fileclasses handle folders, return them and stop recursing!
class FakeFile(File):
@ -171,106 +191,115 @@ def test_get_files_with_folders():
return True

d = Directories()
p = testpath['fs']
p = testpath["fs"]
d.add_path(p)
files = list(d.get_files(fileclasses=[FakeFile]))
# We have the 3 root files and the 3 root dirs
eq_(6, len(files))


def test_get_folders():
d = Directories()
p = testpath['fs']
p = testpath["fs"]
d.add_path(p)
d.set_state(p['dir1'], DirectoryState.Reference)
d.set_state(p['dir2'], DirectoryState.Excluded)
d.set_state(p["dir1"], DirectoryState.Reference)
d.set_state(p["dir2"], DirectoryState.Excluded)
folders = list(d.get_folders())
eq_(len(folders), 3)
ref = [f for f in folders if f.is_ref]
not_ref = [f for f in folders if not f.is_ref]
eq_(len(ref), 1)
eq_(ref[0].path, p['dir1'])
eq_(ref[0].path, p["dir1"])
eq_(len(not_ref), 2)
eq_(ref[0].size, 1)


def test_get_files_with_inherited_exclusion():
d = Directories()
p = testpath['onefile']
p = testpath["onefile"]
d.add_path(p)
d.set_state(p, DirectoryState.Excluded)
eq_([], list(d.get_files()))


def test_save_and_load(tmpdir):
d1 = Directories()
d2 = Directories()
p1 = Path(str(tmpdir.join('p1')))
p1 = Path(str(tmpdir.join("p1")))
p1.mkdir()
p2 = Path(str(tmpdir.join('p2')))
p2 = Path(str(tmpdir.join("p2")))
p2.mkdir()
d1.add_path(p1)
d1.add_path(p2)
d1.set_state(p1, DirectoryState.Reference)
d1.set_state(p1['dir1'], DirectoryState.Excluded)
tmpxml = str(tmpdir.join('directories_testunit.xml'))
d1.set_state(p1["dir1"], DirectoryState.Excluded)
tmpxml = str(tmpdir.join("directories_testunit.xml"))
d1.save_to_file(tmpxml)
d2.load_from_file(tmpxml)
eq_(2, len(d2))
eq_(DirectoryState.Reference, d2.get_state(p1))
eq_(DirectoryState.Excluded, d2.get_state(p1['dir1']))
eq_(DirectoryState.Excluded, d2.get_state(p1["dir1"]))


def test_invalid_path():
d = Directories()
p = Path('does_not_exist')
p = Path("does_not_exist")
with raises(InvalidPathError):
d.add_path(p)
eq_(0, len(d))


def test_set_state_on_invalid_path():
d = Directories()
try:
d.set_state(Path('foobar',), DirectoryState.Normal)
d.set_state(Path("foobar",), DirectoryState.Normal)
except LookupError:
assert False


def test_load_from_file_with_invalid_path(tmpdir):
#This test simulates a load from file resulting in a
#InvalidPath raise. Other directories must be loaded.
# This test simulates a load from file resulting in a
# InvalidPath raise. Other directories must be loaded.
d1 = Directories()
d1.add_path(testpath['onefile'])
#Will raise InvalidPath upon loading
p = Path(str(tmpdir.join('toremove')))
d1.add_path(testpath["onefile"])
# Will raise InvalidPath upon loading
p = Path(str(tmpdir.join("toremove")))
p.mkdir()
d1.add_path(p)
p.rmdir()
tmpxml = str(tmpdir.join('directories_testunit.xml'))
tmpxml = str(tmpdir.join("directories_testunit.xml"))
d1.save_to_file(tmpxml)
d2 = Directories()
d2.load_from_file(tmpxml)
eq_(1, len(d2))


def test_unicode_save(tmpdir):
d = Directories()
p1 = Path(str(tmpdir))['hello\xe9']
p1 = Path(str(tmpdir))["hello\xe9"]
p1.mkdir()
p1['foo\xe9'].mkdir()
p1["foo\xe9"].mkdir()
d.add_path(p1)
d.set_state(p1['foo\xe9'], DirectoryState.Excluded)
tmpxml = str(tmpdir.join('directories_testunit.xml'))
d.set_state(p1["foo\xe9"], DirectoryState.Excluded)
tmpxml = str(tmpdir.join("directories_testunit.xml"))
try:
d.save_to_file(tmpxml)
except UnicodeDecodeError:
assert False


def test_get_files_refreshes_its_directories():
d = Directories()
p = testpath['fs']
p = testpath["fs"]
d.add_path(p)
files = d.get_files()
eq_(6, len(list(files)))
time.sleep(1)
os.remove(str(p['dir1']['file1.test']))
os.remove(str(p["dir1"]["file1.test"]))
files = d.get_files()
eq_(5, len(list(files)))


def test_get_files_does_not_choke_on_non_existing_directories(tmpdir):
d = Directories()
p = Path(str(tmpdir))
@ -278,36 +307,37 @@ def test_get_files_does_not_choke_on_non_existing_directories(tmpdir):
p.rmtree()
eq_([], list(d.get_files()))


def test_get_state_returns_excluded_by_default_for_hidden_directories(tmpdir):
d = Directories()
p = Path(str(tmpdir))
hidden_dir_path = p['.foo']
p['.foo'].mkdir()
hidden_dir_path = p[".foo"]
p[".foo"].mkdir()
d.add_path(p)
eq_(d.get_state(hidden_dir_path), DirectoryState.Excluded)
# But it can be overriden
d.set_state(hidden_dir_path, DirectoryState.Normal)
eq_(d.get_state(hidden_dir_path), DirectoryState.Normal)

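A hedged sketch of the lookup order the state tests above suggest (hypothetical helper; the real logic lives in Directories.get_state): an explicitly set state wins, then a per-path default such as Excluded for hidden ".foo" directories, and otherwise the parent's state is inherited.

    def get_state_sketch(d, path):
        if path in d.states:
            return d.states[path]
        default = d._default_state_for_path(path)  # may return None
        if default is not None:
            return default
        if path.parent() in d:  # still under one of the added roots?
            return get_state_sketch(d, path.parent())
        return DirectoryState.Normal
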
def test_default_path_state_override(tmpdir):
# It's possible for a subclass to override the default state of a path
class MyDirectories(Directories):
def _default_state_for_path(self, path):
if 'foobar' in path:
if "foobar" in path:
return DirectoryState.Excluded

d = MyDirectories()
p1 = Path(str(tmpdir))
p1['foobar'].mkdir()
p1['foobar/somefile'].open('w').close()
p1['foobaz'].mkdir()
p1['foobaz/somefile'].open('w').close()
p1["foobar"].mkdir()
p1["foobar/somefile"].open("w").close()
p1["foobaz"].mkdir()
p1["foobaz/somefile"].open("w").close()
d.add_path(p1)
eq_(d.get_state(p1['foobaz']), DirectoryState.Normal)
eq_(d.get_state(p1['foobar']), DirectoryState.Excluded)
eq_(len(list(d.get_files())), 1) # only the 'foobaz' file is there
eq_(d.get_state(p1["foobaz"]), DirectoryState.Normal)
eq_(d.get_state(p1["foobar"]), DirectoryState.Excluded)
eq_(len(list(d.get_files())), 1) # only the 'foobaz' file is there
# However, the default state can be changed
d.set_state(p1['foobar'], DirectoryState.Normal)
eq_(d.get_state(p1['foobar']), DirectoryState.Normal)
d.set_state(p1["foobar"], DirectoryState.Normal)
eq_(d.get_state(p1["foobar"]), DirectoryState.Normal)
eq_(len(list(d.get_files())), 2)

@ -13,13 +13,28 @@ from hscommon.testutil import eq_, log_calls
from .base import NamedObject
from .. import engine
from ..engine import (
get_match, getwords, Group, getfields, unpack_fields, compare_fields, compare, WEIGHT_WORDS,
MATCH_SIMILAR_WORDS, NO_FIELD_ORDER, build_word_dict, get_groups, getmatches, Match,
getmatches_by_contents, merge_similar_words, reduce_common_words
get_match,
getwords,
Group,
getfields,
unpack_fields,
compare_fields,
compare,
WEIGHT_WORDS,
MATCH_SIMILAR_WORDS,
NO_FIELD_ORDER,
build_word_dict,
get_groups,
getmatches,
Match,
getmatches_by_contents,
merge_similar_words,
reduce_common_words,
)

no = NamedObject


def get_match_triangle():
o1 = NamedObject(with_words=True)
o2 = NamedObject(with_words=True)
@ -29,6 +44,7 @@ def get_match_triangle():
m3 = get_match(o2, o3)
return [m1, m2, m3]


def get_test_group():
m1, m2, m3 = get_match_triangle()
result = Group()
@ -37,6 +53,7 @@ def get_test_group():
result.add_match(m3)
return result


def assert_match(m, name1, name2):
# When testing matches, whether objects are in first or second position very often doesn't
# matter. This function makes this test more convenient.
@ -46,53 +63,54 @@ def assert_match(m, name1, name2):
eq_(m.first.name, name2)
eq_(m.second.name, name1)


class TestCasegetwords:
def test_spaces(self):
eq_(['a', 'b', 'c', 'd'], getwords("a b c d"))
eq_(['a', 'b', 'c', 'd'], getwords(" a b c d "))
eq_(["a", "b", "c", "d"], getwords("a b c d"))
eq_(["a", "b", "c", "d"], getwords(" a b c d "))

def test_splitter_chars(self):
eq_(
[chr(i) for i in range(ord('a'), ord('z')+1)],
getwords("a-b_c&d+e(f)g;h\\i[j]k{l}m:n.o,p<q>r/s?t~u!v@w#x$y*z")
[chr(i) for i in range(ord("a"), ord("z") + 1)],
getwords("a-b_c&d+e(f)g;h\\i[j]k{l}m:n.o,p<q>r/s?t~u!v@w#x$y*z"),
)

def test_joiner_chars(self):
eq_(["aec"], getwords("a'e\u0301c"))

def test_empty(self):
eq_([], getwords(''))
eq_([], getwords(""))

def test_returns_lowercase(self):
eq_(['foo', 'bar'], getwords('FOO BAR'))
eq_(["foo", "bar"], getwords("FOO BAR"))

def test_decompose_unicode(self):
eq_(getwords('foo\xe9bar'), ['fooebar'])
eq_(getwords("foo\xe9bar"), ["fooebar"])

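A rough sketch of the normalisation the getwords tests describe (an assumption about the engine module's implementation): lowercase, strip diacritics through unicode decomposition, drop joiner characters such as the apostrophe, and split on everything else that isn't alphanumeric.

    import re
    import unicodedata

    def getwords_sketch(s):
        s = unicodedata.normalize("NFKD", s.lower())
        s = "".join(c for c in s if not unicodedata.combining(c))  # é -> e
        s = s.replace("'", "")  # joiner chars disappear: a'éc -> aec
        return [w for w in re.split(r"[^a-z0-9]+", s) if w]
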
class TestCasegetfields:
def test_simple(self):
eq_([['a', 'b'], ['c', 'd', 'e']], getfields('a b - c d e'))
eq_([["a", "b"], ["c", "d", "e"]], getfields("a b - c d e"))

def test_empty(self):
eq_([], getfields(''))
eq_([], getfields(""))

def test_cleans_empty_fields(self):
expected = [['a', 'bc', 'def']]
actual = getfields(' - a bc def')
expected = [["a", "bc", "def"]]
actual = getfields(" - a bc def")
eq_(expected, actual)
expected = [['bc', 'def']]
expected = [["bc", "def"]]


class TestCaseunpack_fields:
def test_with_fields(self):
expected = ['a', 'b', 'c', 'd', 'e', 'f']
actual = unpack_fields([['a'], ['b', 'c'], ['d', 'e', 'f']])
expected = ["a", "b", "c", "d", "e", "f"]
actual = unpack_fields([["a"], ["b", "c"], ["d", "e", "f"]])
eq_(expected, actual)

def test_without_fields(self):
expected = ['a', 'b', 'c', 'd', 'e', 'f']
actual = unpack_fields(['a', 'b', 'c', 'd', 'e', 'f'])
expected = ["a", "b", "c", "d", "e", "f"]
actual = unpack_fields(["a", "b", "c", "d", "e", "f"])
eq_(expected, actual)

def test_empty(self):
@ -101,134 +119,151 @@ class TestCaseunpack_fields:

class TestCaseWordCompare:
def test_list(self):
eq_(100, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']))
eq_(86, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c']))
eq_(100, compare(["a", "b", "c", "d"], ["a", "b", "c", "d"]))
eq_(86, compare(["a", "b", "c", "d"], ["a", "b", "c"]))

def test_unordered(self):
#Sometimes, users don't want fuzzy matching too much When they set the slider
#to 100, they don't expect a filename with the same words, but not the same order, to match.
#Thus, we want to return 99 in that case.
eq_(99, compare(['a', 'b', 'c', 'd'], ['d', 'b', 'c', 'a']))
# Sometimes, users don't want fuzzy matching too much When they set the slider
# to 100, they don't expect a filename with the same words, but not the same order, to match.
# Thus, we want to return 99 in that case.
eq_(99, compare(["a", "b", "c", "d"], ["d", "b", "c", "a"]))

def test_word_occurs_twice(self):
#if a word occurs twice in first, but once in second, we want the word to be only counted once
eq_(89, compare(['a', 'b', 'c', 'd', 'a'], ['d', 'b', 'c', 'a']))
# if a word occurs twice in first, but once in second, we want the word to be only counted once
eq_(89, compare(["a", "b", "c", "d", "a"], ["d", "b", "c", "a"]))

def test_uses_copy_of_lists(self):
first = ['foo', 'bar']
second = ['bar', 'bleh']
first = ["foo", "bar"]
second = ["bar", "bleh"]
compare(first, second)
eq_(['foo', 'bar'], first)
eq_(['bar', 'bleh'], second)
eq_(["foo", "bar"], first)
eq_(["bar", "bleh"], second)

def test_word_weight(self):
eq_(int((6.0 / 13.0) * 100), compare(['foo', 'bar'], ['bar', 'bleh'], (WEIGHT_WORDS, )))
eq_(
int((6.0 / 13.0) * 100),
compare(["foo", "bar"], ["bar", "bleh"], (WEIGHT_WORDS,)),
)

def test_similar_words(self):
eq_(100, compare(['the', 'white', 'stripes'], ['the', 'whites', 'stripe'], (MATCH_SIMILAR_WORDS, )))
eq_(
100,
compare(
["the", "white", "stripes"],
["the", "whites", "stripe"],
(MATCH_SIMILAR_WORDS,),
),
)

def test_empty(self):
eq_(0, compare([], []))

def test_with_fields(self):
eq_(67, compare([['a', 'b'], ['c', 'd', 'e']], [['a', 'b'], ['c', 'd', 'f']]))
eq_(67, compare([["a", "b"], ["c", "d", "e"]], [["a", "b"], ["c", "d", "f"]]))

def test_propagate_flags_with_fields(self, monkeypatch):
def mock_compare(first, second, flags):
eq_((0, 1, 2, 3, 5), flags)

monkeypatch.setattr(engine, 'compare_fields', mock_compare)
compare([['a']], [['a']], (0, 1, 2, 3, 5))
monkeypatch.setattr(engine, "compare_fields", mock_compare)
compare([["a"]], [["a"]], (0, 1, 2, 3, 5))

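Why 6/13 in the word-weight tests of this module (an assumption about WEIGHT_WORDS that is consistent with the asserted numbers): every word weighs its character count, a matched word contributes its weight from both sides, and the score is matched weight over total weight.

    matched = len("bar") + len("bar")                     # 6: "bar" on each side
    total = sum(map(len, ["foo", "bar", "bar", "bleh"]))  # 13
    assert int(matched / total * 100) == 46               # the asserted percentage
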
class TestCaseWordCompareWithFields:
def test_simple(self):
eq_(67, compare_fields([['a', 'b'], ['c', 'd', 'e']], [['a', 'b'], ['c', 'd', 'f']]))
eq_(
67,
compare_fields(
[["a", "b"], ["c", "d", "e"]], [["a", "b"], ["c", "d", "f"]]
),
)

def test_empty(self):
eq_(0, compare_fields([], []))

def test_different_length(self):
eq_(0, compare_fields([['a'], ['b']], [['a'], ['b'], ['c']]))
eq_(0, compare_fields([["a"], ["b"]], [["a"], ["b"], ["c"]]))

def test_propagates_flags(self, monkeypatch):
def mock_compare(first, second, flags):
eq_((0, 1, 2, 3, 5), flags)

monkeypatch.setattr(engine, 'compare_fields', mock_compare)
compare_fields([['a']], [['a']], (0, 1, 2, 3, 5))
monkeypatch.setattr(engine, "compare_fields", mock_compare)
compare_fields([["a"]], [["a"]], (0, 1, 2, 3, 5))

def test_order(self):
first = [['a', 'b'], ['c', 'd', 'e']]
second = [['c', 'd', 'f'], ['a', 'b']]
first = [["a", "b"], ["c", "d", "e"]]
second = [["c", "d", "f"], ["a", "b"]]
eq_(0, compare_fields(first, second))

def test_no_order(self):
first = [['a', 'b'], ['c', 'd', 'e']]
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
first = [['a', 'b'], ['a', 'b']] #a field can only be matched once.
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(0, compare_fields(first, second, (NO_FIELD_ORDER, )))
first = [['a', 'b'], ['a', 'b', 'c']]
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(33, compare_fields(first, second, (NO_FIELD_ORDER, )))
first = [["a", "b"], ["c", "d", "e"]]
second = [["c", "d", "f"], ["a", "b"]]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER,)))
first = [["a", "b"], ["a", "b"]] # a field can only be matched once.
second = [["c", "d", "f"], ["a", "b"]]
eq_(0, compare_fields(first, second, (NO_FIELD_ORDER,)))
first = [["a", "b"], ["a", "b", "c"]]
second = [["c", "d", "f"], ["a", "b"]]
eq_(33, compare_fields(first, second, (NO_FIELD_ORDER,)))

def test_compare_fields_without_order_doesnt_alter_fields(self):
#The NO_ORDER comp type altered the fields!
first = [['a', 'b'], ['c', 'd', 'e']]
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
eq_([['a', 'b'], ['c', 'd', 'e']], first)
eq_([['c', 'd', 'f'], ['a', 'b']], second)
# The NO_ORDER comp type altered the fields!
first = [["a", "b"], ["c", "d", "e"]]
second = [["c", "d", "f"], ["a", "b"]]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER,)))
eq_([["a", "b"], ["c", "d", "e"]], first)
eq_([["c", "d", "f"], ["a", "b"]], second)


class TestCasebuild_word_dict:
def test_with_standard_words(self):
l = [NamedObject('foo bar', True)]
l.append(NamedObject('bar baz', True))
l.append(NamedObject('baz bleh foo', True))
d = build_word_dict(l)
itemList = [NamedObject("foo bar", True)]
itemList.append(NamedObject("bar baz", True))
itemList.append(NamedObject("baz bleh foo", True))
d = build_word_dict(itemList)
eq_(4, len(d))
eq_(2, len(d['foo']))
assert l[0] in d['foo']
assert l[2] in d['foo']
eq_(2, len(d['bar']))
assert l[0] in d['bar']
assert l[1] in d['bar']
eq_(2, len(d['baz']))
assert l[1] in d['baz']
assert l[2] in d['baz']
eq_(1, len(d['bleh']))
assert l[2] in d['bleh']
eq_(2, len(d["foo"]))
assert itemList[0] in d["foo"]
assert itemList[2] in d["foo"]
eq_(2, len(d["bar"]))
assert itemList[0] in d["bar"]
assert itemList[1] in d["bar"]
eq_(2, len(d["baz"]))
assert itemList[1] in d["baz"]
assert itemList[2] in d["baz"]
eq_(1, len(d["bleh"]))
assert itemList[2] in d["bleh"]

def test_unpack_fields(self):
o = NamedObject('')
o.words = [['foo', 'bar'], ['baz']]
o = NamedObject("")
o.words = [["foo", "bar"], ["baz"]]
d = build_word_dict([o])
eq_(3, len(d))
eq_(1, len(d['foo']))
eq_(1, len(d["foo"]))

def test_words_are_unaltered(self):
o = NamedObject('')
o.words = [['foo', 'bar'], ['baz']]
o = NamedObject("")
o.words = [["foo", "bar"], ["baz"]]
build_word_dict([o])
eq_([['foo', 'bar'], ['baz']], o.words)
eq_([["foo", "bar"], ["baz"]], o.words)

def test_object_instances_can_only_be_once_in_words_object_list(self):
o = NamedObject('foo foo', True)
o = NamedObject("foo foo", True)
d = build_word_dict([o])
eq_(1, len(d['foo']))
eq_(1, len(d["foo"]))

def test_job(self):
def do_progress(p, d=''):
def do_progress(p, d=""):
self.log.append(p)
return True

j = job.Job(1, do_progress)
self.log = []
s = "foo bar"
build_word_dict([NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j)
build_word_dict(
[NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j
)
# We don't have intermediate log because iter_with_progress is called with every > 1
eq_(0, self.log[0])
eq_(100, self.log[1])
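A minimal sketch of the mapping TestCasebuild_word_dict exercises (hypothetical body; the real one also reports job progress): each word of each object maps to the set of objects carrying it, fields are flattened first, and a repeated word still counts its object once.

    def build_word_dict_sketch(objects):
        result = {}
        for o in objects:
            for word in set(unpack_fields(o.words)):
                result.setdefault(word, set()).add(o)
        return result
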
@ -237,51 +272,56 @@ class TestCasebuild_word_dict:
class TestCasemerge_similar_words:
def test_some_similar_words(self):
d = {
'foobar': set([1]),
'foobar1': set([2]),
'foobar2': set([3]),
"foobar": set([1]),
"foobar1": set([2]),
"foobar2": set([3]),
}
merge_similar_words(d)
eq_(1, len(d))
eq_(3, len(d['foobar']))

eq_(3, len(d["foobar"]))


class TestCasereduce_common_words:
def test_typical(self):
d = {
'foo': set([NamedObject('foo bar', True) for i in range(50)]),
'bar': set([NamedObject('foo bar', True) for i in range(49)])
"foo": set([NamedObject("foo bar", True) for i in range(50)]),
"bar": set([NamedObject("foo bar", True) for i in range(49)]),
}
reduce_common_words(d, 50)
assert 'foo' not in d
eq_(49, len(d['bar']))
assert "foo" not in d
eq_(49, len(d["bar"]))

def test_dont_remove_objects_with_only_common_words(self):
d = {
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
'uncommon': set([NamedObject("common uncommon", True)])
"common": set(
[NamedObject("common uncommon", True) for i in range(50)]
+ [NamedObject("common", True)]
),
"uncommon": set([NamedObject("common uncommon", True)]),
}
reduce_common_words(d, 50)
eq_(1, len(d['common']))
eq_(1, len(d['uncommon']))
eq_(1, len(d["common"]))
eq_(1, len(d["uncommon"]))

def test_values_still_are_set_instances(self):
d = {
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
'uncommon': set([NamedObject("common uncommon", True)])
"common": set(
[NamedObject("common uncommon", True) for i in range(50)]
+ [NamedObject("common", True)]
),
"uncommon": set([NamedObject("common uncommon", True)]),
}
reduce_common_words(d, 50)
assert isinstance(d['common'], set)
assert isinstance(d['uncommon'], set)
assert isinstance(d["common"], set)
assert isinstance(d["uncommon"], set)

def test_dont_raise_KeyError_when_a_word_has_been_removed(self):
#If a word has been removed by the reduce, an object in a subsequent common word that
#contains the word that has been removed would cause a KeyError.
# If a word has been removed by the reduce, an object in a subsequent common word that
# contains the word that has been removed would cause a KeyError.
d = {
'foo': set([NamedObject('foo bar baz', True) for i in range(50)]),
'bar': set([NamedObject('foo bar baz', True) for i in range(50)]),
'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
"foo": set([NamedObject("foo bar baz", True) for i in range(50)]),
"bar": set([NamedObject("foo bar baz", True) for i in range(50)]),
"baz": set([NamedObject("foo bar baz", True) for i in range(49)]),
}
try:
reduce_common_words(d, 50)
@ -289,35 +329,37 @@ class TestCasereduce_common_words:
self.fail()

def test_unpack_fields(self):
#object.words may be fields.
# object.words may be fields.
def create_it():
o = NamedObject('')
o.words = [['foo', 'bar'], ['baz']]
o = NamedObject("")
o.words = [["foo", "bar"], ["baz"]]
return o

d = {
'foo': set([create_it() for i in range(50)])
}
d = {"foo": set([create_it() for i in range(50)])}
try:
reduce_common_words(d, 50)
except TypeError:
self.fail("must support fields.")

def test_consider_a_reduced_common_word_common_even_after_reduction(self):
#There was a bug in the code that causeda word that has already been reduced not to
#be counted as a common word for subsequent words. For example, if 'foo' is processed
#as a common word, keeping a "foo bar" file in it, and the 'bar' is processed, "foo bar"
#would not stay in 'bar' because 'foo' is not a common word anymore.
only_common = NamedObject('foo bar', True)
# There was a bug in the code that causeda word that has already been reduced not to
# be counted as a common word for subsequent words. For example, if 'foo' is processed
# as a common word, keeping a "foo bar" file in it, and the 'bar' is processed, "foo bar"
# would not stay in 'bar' because 'foo' is not a common word anymore.
only_common = NamedObject("foo bar", True)
d = {
'foo': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
'bar': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
"foo": set(
[NamedObject("foo bar baz", True) for i in range(49)] + [only_common]
),
"bar": set(
[NamedObject("foo bar baz", True) for i in range(49)] + [only_common]
),
"baz": set([NamedObject("foo bar baz", True) for i in range(49)]),
}
reduce_common_words(d, 50)
eq_(1, len(d['foo']))
eq_(1, len(d['bar']))
eq_(49, len(d['baz']))
eq_(1, len(d["foo"]))
eq_(1, len(d["bar"]))
eq_(49, len(d["baz"]))

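One way to satisfy the reduce_common_words tests above, as a hedged sketch (the shipped implementation works incrementally, which is what the "reduced common word" regression test guards): words carried by threshold or more objects are dropped, except that an object whose words are all common keeps one.

    def reduce_common_words_sketch(word_dict, threshold):
        common = [w for w, objs in word_dict.items() if len(objs) >= threshold]
        uncommon = {w for w, objs in word_dict.items() if len(objs) < threshold}
        for word in common:
            kept = {o for o in word_dict[word]
                    if not any(w in uncommon for w in unpack_fields(o.words))}
            if kept:
                word_dict[word] = kept  # objects with only common words stay
            else:
                del word_dict[word]
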
class TestCaseget_match:
|
||||
@ -326,8 +368,8 @@ class TestCaseget_match:
|
||||
o2 = NamedObject("bar bleh", True)
|
||||
m = get_match(o1, o2)
|
||||
eq_(50, m.percentage)
|
||||
eq_(['foo', 'bar'], m.first.words)
|
||||
eq_(['bar', 'bleh'], m.second.words)
|
||||
eq_(["foo", "bar"], m.first.words)
|
||||
eq_(["bar", "bleh"], m.second.words)
|
||||
assert m.first is o1
|
||||
assert m.second is o2
|
||||
|
||||
@ -340,7 +382,9 @@ class TestCaseget_match:
|
||||
assert object() not in m
|
||||
|
||||
def test_word_weight(self):
|
||||
m = get_match(NamedObject("foo bar", True), NamedObject("bar bleh", True), (WEIGHT_WORDS, ))
|
||||
m = get_match(
|
||||
NamedObject("foo bar", True), NamedObject("bar bleh", True), (WEIGHT_WORDS,)
|
||||
)
|
||||
eq_(m.percentage, int((6.0 / 13.0) * 100))
|
||||
|
||||
|
||||
@ -349,54 +393,59 @@ class TestCaseGetMatches:
eq_(getmatches([]), [])

def test_simple(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(l)
itemList = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(itemList)
eq_(2, len(r))
m = first(m for m in r if m.percentage == 50) #"foo bar" and "bar bleh"
assert_match(m, 'foo bar', 'bar bleh')
m = first(m for m in r if m.percentage == 33) #"foo bar" and "a b c foo"
assert_match(m, 'foo bar', 'a b c foo')
m = first(m for m in r if m.percentage == 50)  # "foo bar" and "bar bleh"
assert_match(m, "foo bar", "bar bleh")
m = first(m for m in r if m.percentage == 33)  # "foo bar" and "a b c foo"
assert_match(m, "foo bar", "a b c foo")

def test_null_and_unrelated_objects(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject(""), NamedObject("unrelated object")]
r = getmatches(l)
itemList = [
NamedObject("foo bar"),
NamedObject("bar bleh"),
NamedObject(""),
NamedObject("unrelated object"),
]
r = getmatches(itemList)
eq_(len(r), 1)
m = r[0]
eq_(m.percentage, 50)
assert_match(m, 'foo bar', 'bar bleh')
assert_match(m, "foo bar", "bar bleh")

def test_twice_the_same_word(self):
l = [NamedObject("foo foo bar"), NamedObject("bar bleh")]
r = getmatches(l)
itemList = [NamedObject("foo foo bar"), NamedObject("bar bleh")]
r = getmatches(itemList)
eq_(1, len(r))

def test_twice_the_same_word_when_preworded(self):
l = [NamedObject("foo foo bar", True), NamedObject("bar bleh", True)]
r = getmatches(l)
itemList = [NamedObject("foo foo bar", True), NamedObject("bar bleh", True)]
r = getmatches(itemList)
eq_(1, len(r))

def test_two_words_match(self):
l = [NamedObject("foo bar"), NamedObject("foo bar bleh")]
r = getmatches(l)
itemList = [NamedObject("foo bar"), NamedObject("foo bar bleh")]
r = getmatches(itemList)
eq_(1, len(r))

def test_match_files_with_only_common_words(self):
#If a word occurs more than 50 times, it is excluded from the matching process
#The problem with the common_word_threshold is that the files containing only common
#words will never be matched together. We *should* match them.
# If a word occurs more than 50 times, it is excluded from the matching process
# The problem with the common_word_threshold is that the files containing only common
# words will never be matched together. We *should* match them.
# This test assumes that the common word threshold const is 50
l = [NamedObject("foo") for i in range(50)]
r = getmatches(l)
itemList = [NamedObject("foo") for i in range(50)]
r = getmatches(itemList)
eq_(1225, len(r))

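A sanity check on the 1225 expected above (my arithmetic, not from the commit): 50 files that all match one another yield one match per unordered pair.

    from math import comb  # Python 3.8+
    assert comb(50, 2) == 50 * 49 // 2 == 1225
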
def test_use_words_already_there_if_there(self):
o1 = NamedObject('foo')
o2 = NamedObject('bar')
o2.words = ['foo']
o1 = NamedObject("foo")
o2 = NamedObject("bar")
o2.words = ["foo"]
eq_(1, len(getmatches([o1, o2])))

def test_job(self):
def do_progress(p, d=''):
def do_progress(p, d=""):
self.log.append(p)
return True

@ -409,28 +458,28 @@ class TestCaseGetMatches:
eq_(100, self.log[-1])

def test_weight_words(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh")]
m = getmatches(l, weight_words=True)[0]
itemList = [NamedObject("foo bar"), NamedObject("bar bleh")]
m = getmatches(itemList, weight_words=True)[0]
eq_(int((6.0 / 13.0) * 100), m.percentage)

def test_similar_word(self):
l = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(l, match_similar_words=True)), 1)
eq_(getmatches(l, match_similar_words=True)[0].percentage, 100)
l = [NamedObject("foobar"), NamedObject("foo")]
eq_(len(getmatches(l, match_similar_words=True)), 0) #too far
l = [NamedObject("bizkit"), NamedObject("bizket")]
eq_(len(getmatches(l, match_similar_words=True)), 1)
l = [NamedObject("foobar"), NamedObject("foosbar")]
eq_(len(getmatches(l, match_similar_words=True)), 1)
itemList = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(itemList, match_similar_words=True)), 1)
eq_(getmatches(itemList, match_similar_words=True)[0].percentage, 100)
itemList = [NamedObject("foobar"), NamedObject("foo")]
eq_(len(getmatches(itemList, match_similar_words=True)), 0)  # too far
itemList = [NamedObject("bizkit"), NamedObject("bizket")]
eq_(len(getmatches(itemList, match_similar_words=True)), 1)
itemList = [NamedObject("foobar"), NamedObject("foosbar")]
eq_(len(getmatches(itemList, match_similar_words=True)), 1)

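The similar-word cases above ('foobars', 'bizket' and 'foosbar' match; bare 'foo' is too far) behave like an edit-distance comparison. A hypothetical check with difflib -- the engine's actual algorithm and cutoff may differ:

    from difflib import SequenceMatcher

    def words_are_similar(a, b, cutoff=0.8):  # cutoff picked to fit these cases
        return SequenceMatcher(None, a, b).ratio() >= cutoff

    assert words_are_similar("foobar", "foobars")   # ratio ~0.92
    assert words_are_similar("bizkit", "bizket")    # ratio ~0.83
    assert words_are_similar("foobar", "foosbar")   # ratio ~0.92
    assert not words_are_similar("foobar", "foo")   # ratio ~0.67, too far
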
def test_single_object_with_similar_words(self):
l = [NamedObject("foo foos")]
eq_(len(getmatches(l, match_similar_words=True)), 0)
itemList = [NamedObject("foo foos")]
eq_(len(getmatches(itemList, match_similar_words=True)), 0)

def test_double_words_get_counted_only_once(self):
l = [NamedObject("foo bar foo bleh"), NamedObject("foo bar bleh bar")]
m = getmatches(l)[0]
itemList = [NamedObject("foo bar foo bleh"), NamedObject("foo bar bleh bar")]
m = getmatches(itemList)[0]
eq_(75, m.percentage)

def test_with_fields(self):
@ -450,13 +499,13 @@ class TestCaseGetMatches:
eq_(m.percentage, 50)

def test_only_match_similar_when_the_option_is_set(self):
l = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(l, match_similar_words=False)), 0)
itemList = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(itemList, match_similar_words=False)), 0)

def test_dont_recurse_do_match(self):
# with nosetests, the stack is increased. The number has to be high enough not to fail falsely
sys.setrecursionlimit(200)
files = [NamedObject('foo bar') for i in range(201)]
files = [NamedObject("foo bar") for i in range(201)]
try:
getmatches(files)
except RuntimeError:
@ -465,9 +514,9 @@ class TestCaseGetMatches:
sys.setrecursionlimit(1000)

def test_min_match_percentage(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(l, min_match_percentage=50)
eq_(1, len(r)) #Only "foo bar" / "bar bleh" should match
itemList = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(itemList, min_match_percentage=50)
eq_(1, len(r))  # Only "foo bar" / "bar bleh" should match

def test_MemoryError(self, monkeypatch):
@log_calls
@ -476,12 +525,12 @@ class TestCaseGetMatches:
raise MemoryError()
return Match(first, second, 0)

objects = [NamedObject() for i in range(10)] # results in 45 matches
monkeypatch.setattr(engine, 'get_match', mocked_match)
objects = [NamedObject() for i in range(10)]  # results in 45 matches
monkeypatch.setattr(engine, "get_match", mocked_match)
try:
r = getmatches(objects)
except MemoryError:
self.fail('MemoryError must be handled')
self.fail("MemoryError must be handled")
eq_(42, len(r))

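The expected 42 results (out of 45 possible matches, with the mock raising partway through) suggest getmatches() swallows MemoryError and returns what it has. A rough sketch of that pattern, with a hypothetical match_func parameter standing in for engine.get_match:

    def collect_matches(pairs, match_func):
        # Hypothetical illustration: if memory runs out partway through,
        # keep the matches gathered so far instead of crashing.
        results = []
        try:
            for first, second in pairs:
                results.append(match_func(first, second))
        except MemoryError:
            pass  # partial results are better than none
        return results
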
@ -599,7 +648,7 @@ class TestCaseGroup:
eq_([o1], g.dupes)
g.switch_ref(o2)
assert o2 is g.ref
g.switch_ref(NamedObject('', True))
g.switch_ref(NamedObject("", True))
assert o2 is g.ref

def test_switch_ref_from_ref_dir(self):
@ -620,11 +669,11 @@ class TestCaseGroup:
m = g.get_match_of(o)
assert g.ref in m
assert o in m
assert g.get_match_of(NamedObject('', True)) is None
assert g.get_match_of(NamedObject("", True)) is None
assert g.get_match_of(g.ref) is None

def test_percentage(self):
#percentage should return the avg percentage in relation to the ref
# percentage should return the avg percentage in relation to the ref
m1, m2, m3 = get_match_triangle()
m1 = Match(m1[0], m1[1], 100)
m2 = Match(m2[0], m2[1], 50)
@ -651,9 +700,9 @@ class TestCaseGroup:
o1 = m1.first
o2 = m1.second
o3 = m2.second
o1.name = 'c'
o2.name = 'b'
o3.name = 'a'
o1.name = "c"
o2.name = "b"
o3.name = "a"
g = Group()
g.add_match(m1)
g.add_match(m2)
@ -709,9 +758,9 @@ class TestCaseGroup:
def test_prioritize_nothing_changes(self):
# prioritize() returns False when nothing changes in the group.
g = get_test_group()
g[0].name = 'a'
g[1].name = 'b'
g[2].name = 'c'
g[0].name = "a"
g[1].name = "b"
g[2].name = "c"
assert not g.prioritize(lambda x: x.name)

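One plausible reading of prioritize(key_func), consistent with this test and the name-sorting test above (a sketch, not the real implementation): reorder the group by key, promote the lowest-keyed object to ref, and report whether that changed anything.

    def prioritize_sketch(group, key_func):
        ordered = sorted(group, key=key_func)
        changed = ordered[0] is not group.ref
        if changed:
            group.switch_ref(ordered[0])  # lowest key becomes the reference
        return changed

With names already in order ('a', 'b', 'c'), the ref stays put and the call returns False, as asserted.
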
def test_list_like(self):
@ -723,7 +772,11 @@ class TestCaseGroup:

def test_discard_matches(self):
g = Group()
o1, o2, o3 = (NamedObject("foo", True), NamedObject("bar", True), NamedObject("baz", True))
o1, o2, o3 = (
NamedObject("foo", True),
NamedObject("bar", True),
NamedObject("baz", True),
)
g.add_match(get_match(o1, o2))
g.add_match(get_match(o1, o3))
g.discard_matches()
@ -737,8 +790,8 @@ class TestCaseget_groups:
eq_([], r)

def test_simple(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh")]
matches = getmatches(l)
itemList = [NamedObject("foo bar"), NamedObject("bar bleh")]
matches = getmatches(itemList)
m = matches[0]
r = get_groups(matches)
eq_(1, len(r))
@ -747,28 +800,39 @@ class TestCaseget_groups:
eq_([m.second], g.dupes)

def test_group_with_multiple_matches(self):
#This results in 3 matches
l = [NamedObject("foo"), NamedObject("foo"), NamedObject("foo")]
matches = getmatches(l)
# This results in 3 matches
itemList = [NamedObject("foo"), NamedObject("foo"), NamedObject("foo")]
matches = getmatches(itemList)
r = get_groups(matches)
eq_(1, len(r))
g = r[0]
eq_(3, len(g))

def test_must_choose_a_group(self):
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("b c"), NamedObject("c d"), NamedObject("c d")]
#There will be 2 groups here: group "a b" and group "c d"
#"b c" can go in either of them, but not both.
matches = getmatches(l)
itemList = [
NamedObject("a b"),
NamedObject("a b"),
NamedObject("b c"),
NamedObject("c d"),
NamedObject("c d"),
]
# There will be 2 groups here: group "a b" and group "c d"
# "b c" can go in either of them, but not both.
matches = getmatches(itemList)
r = get_groups(matches)
eq_(2, len(r))
eq_(5, len(r[0])+len(r[1]))
eq_(5, len(r[0]) + len(r[1]))

def test_should_all_go_in_the_same_group(self):
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("a b"), NamedObject("a b")]
#All four objects match each other; they must all end up in a single group.
matches = getmatches(l)
itemList = [
NamedObject("a b"),
NamedObject("a b"),
NamedObject("a b"),
NamedObject("a b"),
]
# All four objects match each other; they must all end up in a single group.
matches = getmatches(itemList)
r = get_groups(matches)
eq_(1, len(r))

@ -787,8 +851,8 @@ class TestCaseget_groups:
assert o3 in g

def test_four_sized_group(self):
l = [NamedObject("foobar") for i in range(4)]
m = getmatches(l)
itemList = [NamedObject("foobar") for i in range(4)]
m = getmatches(itemList)
r = get_groups(m)
eq_(1, len(r))
eq_(4, len(r[0]))
@ -808,10 +872,12 @@ class TestCaseget_groups:
# (A, B) match is the highest (thus resulting in an (A, B) group), still match C and D
# in a separate group instead of discarding them.
A, B, C, D = [NamedObject() for _ in range(4)]
m1 = Match(A, B, 90) # This is the strongest "A" match
m2 = Match(A, C, 80) # Because C doesn't match with B, it won't be in the group
m3 = Match(A, D, 80) # Same thing for D
m4 = Match(C, D, 70) # However, because C and D match, they should have their own group.
m1 = Match(A, B, 90)  # This is the strongest "A" match
m2 = Match(A, C, 80)  # Because C doesn't match with B, it won't be in the group
m3 = Match(A, D, 80)  # Same thing for D
m4 = Match(
C, D, 70
)  # However, because C and D match, they should have their own group.
groups = get_groups([m1, m2, m3, m4])
eq_(len(groups), 2)
g1, g2 = groups
@ -819,4 +885,3 @@ class TestCaseget_groups:
assert B in g1
assert C in g2
assert D in g2

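The A/B/C/D test above reads like a greedy pass over matches sorted by percentage: each object may belong to only one group, and a match between two still-ungrouped objects seeds a new group. A simplified sketch under that assumption -- the engine's real get_groups has more rules (for example, folding an object like "b c" into an existing group, which this sketch deliberately omits):

    def get_groups_sketch(matches):
        matches = sorted(matches, key=lambda m: m.percentage, reverse=True)
        grouped = {}  # object -> the group it ended up in
        groups = []
        for m in matches:
            g1, g2 = grouped.get(m.first), grouped.get(m.second)
            if g1 is None and g2 is None:
                group = [m.first, m.second]  # strongest free match seeds a group
                groups.append(group)
                grouped[m.first] = grouped[m.second] = group
            # if one side is already taken, the other stays free and may
            # seed its own group with a later match (the C/D case above)
        return groups

Run on the four matches above, this yields exactly two groups: (A, B) from the 90% match, then (C, D) from the 70% one.
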
@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2009-10-23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import hashlib
@ -14,32 +14,35 @@ from core.tests.directories_test import create_fake_fs

from .. import fs


def test_size_aggregates_subfiles(tmpdir):
p = create_fake_fs(Path(str(tmpdir)))
b = fs.Folder(p)
eq_(b.size, 12)


def test_md5_aggregate_subfiles_sorted(tmpdir):
#dir.allfiles can return children in any order. Thus, bundle.md5 must aggregate
#all files' md5 it contains, but it must make sure that it does so in the
#same order every time.
# dir.allfiles can return children in any order. Thus, bundle.md5 must aggregate
# all files' md5 it contains, but it must make sure that it does so in the
# same order every time.
p = create_fake_fs(Path(str(tmpdir)))
b = fs.Folder(p)
md51 = fs.File(p['dir1']['file1.test']).md5
md52 = fs.File(p['dir2']['file2.test']).md5
md53 = fs.File(p['dir3']['file3.test']).md5
md54 = fs.File(p['file1.test']).md5
md55 = fs.File(p['file2.test']).md5
md56 = fs.File(p['file3.test']).md5
md51 = fs.File(p["dir1"]["file1.test"]).md5
md52 = fs.File(p["dir2"]["file2.test"]).md5
md53 = fs.File(p["dir3"]["file3.test"]).md5
md54 = fs.File(p["file1.test"]).md5
md55 = fs.File(p["file2.test"]).md5
md56 = fs.File(p["file3.test"]).md5
# The expected md5 is the md5 of md5s for folders and the direct md5 for files
folder_md51 = hashlib.md5(md51).digest()
folder_md52 = hashlib.md5(md52).digest()
folder_md53 = hashlib.md5(md53).digest()
md5 = hashlib.md5(folder_md51+folder_md52+folder_md53+md54+md55+md56)
md5 = hashlib.md5(folder_md51 + folder_md52 + folder_md53 + md54 + md55 + md56)
eq_(b.md5, md5.digest())


def test_has_file_attrs(tmpdir):
#a Folder must behave like a file, so it must have mtime attributes
# a Folder must behave like a file, so it must have mtime attributes
b = fs.Folder(Path(str(tmpdir)))
assert b.mtime > 0
eq_(b.extension, '')
eq_(b.extension, "")
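Restating the aggregation rule the md5 test encodes, as a standalone helper (names are mine; the stable ordering and the hash-of-hash rule for subfolders come straight from the assertions):

    import hashlib

    def folder_md5_sketch(children):
        # children: iterable of (name, is_folder, digest) with digest as bytes
        parts = []
        for name, is_folder, digest in sorted(children):  # stable order every time
            if is_folder:
                parts.append(hashlib.md5(digest).digest())  # hash the subfolder's aggregate
            else:
                parts.append(digest)  # files contribute their digest directly
        return hashlib.md5(b"".join(parts)).digest()
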
@ -12,152 +12,172 @@ from hscommon.testutil import eq_

from ..ignore import IgnoreList


def test_empty():
il = IgnoreList()
eq_(0, len(il))
assert not il.AreIgnored('foo', 'bar')
assert not il.AreIgnored("foo", "bar")


def test_simple():
il = IgnoreList()
il.Ignore('foo', 'bar')
assert il.AreIgnored('foo', 'bar')
assert il.AreIgnored('bar', 'foo')
assert not il.AreIgnored('foo', 'bleh')
assert not il.AreIgnored('bleh', 'bar')
il.Ignore("foo", "bar")
assert il.AreIgnored("foo", "bar")
assert il.AreIgnored("bar", "foo")
assert not il.AreIgnored("foo", "bleh")
assert not il.AreIgnored("bleh", "bar")
eq_(1, len(il))


def test_multiple():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'bleh')
il.Ignore('bleh', 'bar')
il.Ignore('aybabtu', 'bleh')
assert il.AreIgnored('foo', 'bar')
assert il.AreIgnored('bar', 'foo')
assert il.AreIgnored('foo', 'bleh')
assert il.AreIgnored('bleh', 'bar')
assert not il.AreIgnored('aybabtu', 'bar')
il.Ignore("foo", "bar")
il.Ignore("foo", "bleh")
il.Ignore("bleh", "bar")
il.Ignore("aybabtu", "bleh")
assert il.AreIgnored("foo", "bar")
assert il.AreIgnored("bar", "foo")
assert il.AreIgnored("foo", "bleh")
assert il.AreIgnored("bleh", "bar")
assert not il.AreIgnored("aybabtu", "bar")
eq_(4, len(il))


def test_clear():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore("foo", "bar")
il.Clear()
assert not il.AreIgnored('foo', 'bar')
assert not il.AreIgnored('bar', 'foo')
assert not il.AreIgnored("foo", "bar")
assert not il.AreIgnored("bar", "foo")
eq_(0, len(il))


def test_add_same_twice():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('bar', 'foo')
il.Ignore("foo", "bar")
il.Ignore("bar", "foo")
eq_(1, len(il))


def test_save_to_xml():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'bleh')
il.Ignore('bleh', 'bar')
il.Ignore("foo", "bar")
il.Ignore("foo", "bleh")
il.Ignore("bleh", "bar")
f = io.BytesIO()
il.save_to_xml(f)
f.seek(0)
doc = ET.parse(f)
root = doc.getroot()
eq_(root.tag, 'ignore_list')
eq_(root.tag, "ignore_list")
eq_(len(root), 2)
eq_(len([c for c in root if c.tag == 'file']), 2)
eq_(len([c for c in root if c.tag == "file"]), 2)
f1, f2 = root[:]
subchildren = [c for c in f1 if c.tag == 'file'] + [c for c in f2 if c.tag == 'file']
subchildren = [c for c in f1 if c.tag == "file"] + [
c for c in f2 if c.tag == "file"
]
eq_(len(subchildren), 3)


def test_SaveThenLoad():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'bleh')
il.Ignore('bleh', 'bar')
il.Ignore('\u00e9', 'bar')
il.Ignore("foo", "bar")
il.Ignore("foo", "bleh")
il.Ignore("bleh", "bar")
il.Ignore("\u00e9", "bar")
f = io.BytesIO()
il.save_to_xml(f)
f.seek(0)
il = IgnoreList()
il.load_from_xml(f)
eq_(4, len(il))
assert il.AreIgnored('\u00e9', 'bar')
assert il.AreIgnored("\u00e9", "bar")


def test_LoadXML_with_empty_file_tags():
f = io.BytesIO()
f.write(b'<?xml version="1.0" encoding="utf-8"?><ignore_list><file><file/></file></ignore_list>')
f.write(
b'<?xml version="1.0" encoding="utf-8"?><ignore_list><file><file/></file></ignore_list>'
)
f.seek(0)
il = IgnoreList()
il.load_from_xml(f)
eq_(0, len(il))


def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('bar', 'baz')
assert il.AreIgnored('bar', 'foo')
il.Ignore("foo", "bar")
il.Ignore("bar", "baz")
assert il.AreIgnored("bar", "foo")


def test_no_dupes_when_a_child_is_a_key_somewhere_else():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('bar', 'baz')
il.Ignore('bar', 'foo')
il.Ignore("foo", "bar")
il.Ignore("bar", "baz")
il.Ignore("bar", "foo")
eq_(2, len(il))


def test_iterate():
#It must be possible to iterate through the ignore list
# It must be possible to iterate through the ignore list
il = IgnoreList()
expected = [('foo', 'bar'), ('bar', 'baz'), ('foo', 'baz')]
expected = [("foo", "bar"), ("bar", "baz"), ("foo", "baz")]
for i in expected:
il.Ignore(i[0], i[1])
for i in il:
expected.remove(i) #No exception should be raised
assert not expected #expected should be empty
expected.remove(i)  # No exception should be raised
assert not expected  # expected should be empty


def test_filter():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('bar', 'baz')
il.Ignore('foo', 'baz')
il.Filter(lambda f, s: f == 'bar')
il.Ignore("foo", "bar")
il.Ignore("bar", "baz")
il.Ignore("foo", "baz")
il.Filter(lambda f, s: f == "bar")
eq_(1, len(il))
assert not il.AreIgnored('foo', 'bar')
assert il.AreIgnored('bar', 'baz')
assert not il.AreIgnored("foo", "bar")
assert il.AreIgnored("bar", "baz")


def test_save_with_non_ascii_items():
il = IgnoreList()
il.Ignore('\xac', '\xbf')
il.Ignore("\xac", "\xbf")
f = io.BytesIO()
try:
il.save_to_xml(f)
except Exception as e:
raise AssertionError(str(e))


def test_len():
il = IgnoreList()
eq_(0, len(il))
il.Ignore('foo', 'bar')
il.Ignore("foo", "bar")
eq_(1, len(il))


def test_nonzero():
il = IgnoreList()
assert not il
il.Ignore('foo', 'bar')
il.Ignore("foo", "bar")
assert il


def test_remove():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'baz')
il.remove('bar', 'foo')
il.Ignore("foo", "bar")
il.Ignore("foo", "baz")
il.remove("bar", "foo")
eq_(len(il), 1)
assert not il.AreIgnored('foo', 'bar')
assert not il.AreIgnored("foo", "bar")


def test_remove_non_existant():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'baz')
il.Ignore("foo", "bar")
il.Ignore("foo", "baz")
with raises(ValueError):
il.remove('foo', 'bleh')
il.remove("foo", "bleh")
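Taken together, these tests pin down a symmetric pair store: Ignore(a, b) and Ignore(b, a) are one entry, AreIgnored() checks both orders, Filter() keeps only pairs matching a predicate, and remove() raises ValueError for unknown pairs. A bare-bones sketch of that core (the real IgnoreList also does XML I/O):

    class IgnorePairs:
        def __init__(self):
            self._pairs = set()

        def _key(self, first, second):
            return frozenset((first, second))  # order-insensitive

        def Ignore(self, first, second):
            self._pairs.add(self._key(first, second))  # adding twice is a no-op

        def AreIgnored(self, first, second):
            return self._key(first, second) in self._pairs

        def remove(self, first, second):
            key = self._key(first, second)
            if key not in self._pairs:
                raise ValueError("pair not in list")
            self._pairs.remove(key)

        def __len__(self):
            return len(self._pairs)
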
@ -8,33 +8,39 @@ from hscommon.testutil import eq_

from ..markable import MarkableList, Markable


def gen():
ml = MarkableList()
ml.extend(list(range(10)))
return ml


def test_unmarked():
ml = gen()
for i in ml:
assert not ml.is_marked(i)


def test_mark():
ml = gen()
assert ml.mark(3)
assert ml.is_marked(3)
assert not ml.is_marked(2)


def test_unmark():
ml = gen()
ml.mark(4)
assert ml.unmark(4)
assert not ml.is_marked(4)


def test_unmark_unmarked():
ml = gen()
assert not ml.unmark(4)
assert not ml.is_marked(4)


def test_mark_twice_and_unmark():
ml = gen()
assert ml.mark(5)
@ -42,6 +48,7 @@ def test_mark_twice_and_unmark():
ml.unmark(5)
assert not ml.is_marked(5)


def test_mark_toggle():
ml = gen()
ml.mark_toggle(6)
@ -51,22 +58,25 @@ def test_mark_toggle():
ml.mark_toggle(6)
assert ml.is_marked(6)


def test_is_markable():
class Foobar(Markable):
def _is_markable(self, o):
return o == 'foobar'
return o == "foobar"

f = Foobar()
assert not f.is_marked('foobar')
assert not f.mark('foo')
assert not f.is_marked('foo')
f.mark_toggle('foo')
assert not f.is_marked('foo')
f.mark('foobar')
assert f.is_marked('foobar')
assert not f.is_marked("foobar")
assert not f.mark("foo")
assert not f.is_marked("foo")
f.mark_toggle("foo")
assert not f.is_marked("foo")
f.mark("foobar")
assert f.is_marked("foobar")
ml = gen()
ml.mark(11)
assert not ml.is_marked(11)


def test_change_notifications():
class Foobar(Markable):
def _did_mark(self, o):
@ -77,13 +87,14 @@ def test_change_notifications():

f = Foobar()
f.log = []
f.mark('foo')
f.mark('foo')
f.mark_toggle('bar')
f.unmark('foo')
f.unmark('foo')
f.mark_toggle('bar')
eq_([(True, 'foo'), (True, 'bar'), (False, 'foo'), (False, 'bar')], f.log)
f.mark("foo")
f.mark("foo")
f.mark_toggle("bar")
f.unmark("foo")
f.unmark("foo")
f.mark_toggle("bar")
eq_([(True, "foo"), (True, "bar"), (False, "foo"), (False, "bar")], f.log)


def test_mark_count():
ml = gen()
@ -93,6 +104,7 @@ def test_mark_count():
ml.mark(11)
eq_(1, ml.mark_count)


def test_mark_none():
log = []
ml = gen()
@ -104,6 +116,7 @@ def test_mark_none():
eq_(0, ml.mark_count)
eq_([1, 2], log)


def test_mark_all():
ml = gen()
eq_(0, ml.mark_count)
@ -111,6 +124,7 @@ def test_mark_all():
eq_(10, ml.mark_count)
assert ml.is_marked(1)


def test_mark_invert():
ml = gen()
ml.mark(1)
@ -118,6 +132,7 @@ def test_mark_invert():
assert not ml.is_marked(1)
assert ml.is_marked(2)


def test_mark_while_inverted():
log = []
ml = gen()
@ -134,6 +149,7 @@ def test_mark_while_inverted():
eq_(7, ml.mark_count)
eq_([(True, 1), (False, 1), (True, 2), (True, 1), (True, 3)], log)


def test_remove_mark_flag():
ml = gen()
ml.mark(1)
@ -145,10 +161,12 @@ def test_remove_mark_flag():
ml._remove_mark_flag(1)
assert ml.is_marked(1)


def test_is_marked_returns_false_if_object_not_markable():
class MyMarkableList(MarkableList):
def _is_markable(self, o):
return o != 4

ml = MyMarkableList()
ml.extend(list(range(10)))
ml.mark_invert()
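The Markable contract these tests describe: mark()/unmark() return whether state actually changed, _is_markable() restricts what may be marked, and the _did_mark/_did_unmark hooks fire only on real transitions (which is why the duplicated calls above log each event exactly once). A condensed sketch of that contract, inferred from the tests rather than copied from the module:

    class MarkableSketch:
        def __init__(self):
            self._marked = set()

        def _is_markable(self, o):  # subclasses override to restrict marking
            return True

        def _did_mark(self, o):  # notification hooks: real transitions only
            pass

        def _did_unmark(self, o):
            pass

        def is_marked(self, o):
            return self._is_markable(o) and o in self._marked

        def mark(self, o):
            if not self._is_markable(o) or self.is_marked(o):
                return False  # nothing changed
            self._marked.add(o)
            self._did_mark(o)
            return True

        def unmark(self, o):
            if not self.is_marked(o):
                return False
            self._marked.remove(o)
            self._did_unmark(o)
            return True

        def mark_toggle(self, o):
            return self.unmark(o) if self.is_marked(o) else self.mark(o)
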
@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2011/09/07
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import os.path as op
@ -14,6 +14,7 @@ from ..engine import Group, Match

no = NamedObject


def app_with_dupes(dupes):
# Creates an app with specified dupes. dupes is a list of lists, each list in the list being
# a dupe group. We cheat a little bit by creating dupe groups manually instead of running a
@ -29,57 +30,63 @@ def app_with_dupes(dupes):
app.app._results_changed()
return app

#---

# ---
def app_normal_results():
# Just some results, with different extensions and size, for good measure.
dupes = [
[
no('foo1.ext1', size=1, folder='folder1'),
no('foo2.ext2', size=2, folder='folder2')
no("foo1.ext1", size=1, folder="folder1"),
no("foo2.ext2", size=2, folder="folder2"),
],
]
return app_with_dupes(dupes)


@with_app(app_normal_results)
def test_kind_subcrit(app):
# The subcriteria of the "Kind" criterion are the extensions contained in the dupes.
app.select_pri_criterion("Kind")
eq_(app.pdialog.criteria_list[:], ['ext1', 'ext2'])
eq_(app.pdialog.criteria_list[:], ["ext1", "ext2"])


@with_app(app_normal_results)
def test_kind_reprioritization(app):
# Just a simple test of the system as a whole.
# Select a criterion, perform re-prioritization, and see if it worked.
app.select_pri_criterion("Kind")
app.pdialog.criteria_list.select([1]) # ext2
app.pdialog.criteria_list.select([1])  # ext2
app.pdialog.add_selected()
app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo2.ext2')
eq_(app.rtable[0].data["name"], "foo2.ext2")


@with_app(app_normal_results)
def test_folder_subcrit(app):
app.select_pri_criterion("Folder")
eq_(app.pdialog.criteria_list[:], ['folder1', 'folder2'])
eq_(app.pdialog.criteria_list[:], ["folder1", "folder2"])


@with_app(app_normal_results)
def test_folder_reprioritization(app):
app.select_pri_criterion("Folder")
app.pdialog.criteria_list.select([1]) # folder2
app.pdialog.criteria_list.select([1])  # folder2
app.pdialog.add_selected()
app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo2.ext2')
eq_(app.rtable[0].data["name"], "foo2.ext2")


@with_app(app_normal_results)
def test_prilist_display(app):
# The prioritization list displays selected criteria correctly.
app.select_pri_criterion("Kind")
app.pdialog.criteria_list.select([1]) # ext2
app.pdialog.criteria_list.select([1])  # ext2
app.pdialog.add_selected()
app.select_pri_criterion("Folder")
app.pdialog.criteria_list.select([1]) # folder2
app.pdialog.criteria_list.select([1])  # folder2
app.pdialog.add_selected()
app.select_pri_criterion("Size")
app.pdialog.criteria_list.select([1]) # Lowest
app.pdialog.criteria_list.select([1])  # Lowest
app.pdialog.add_selected()
expected = [
"Kind (ext2)",
@ -88,23 +95,26 @@ def test_prilist_display(app):
]
eq_(app.pdialog.prioritization_list[:], expected)


@with_app(app_normal_results)
def test_size_subcrit(app):
app.select_pri_criterion("Size")
eq_(app.pdialog.criteria_list[:], ['Highest', 'Lowest'])
eq_(app.pdialog.criteria_list[:], ["Highest", "Lowest"])


@with_app(app_normal_results)
def test_size_reprioritization(app):
app.select_pri_criterion("Size")
app.pdialog.criteria_list.select([0]) # highest
app.pdialog.criteria_list.select([0])  # highest
app.pdialog.add_selected()
app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo2.ext2')
eq_(app.rtable[0].data["name"], "foo2.ext2")


@with_app(app_normal_results)
def test_reorder_prioritizations(app):
app.add_pri_criterion("Kind", 0) # ext1
app.add_pri_criterion("Kind", 1) # ext2
app.add_pri_criterion("Kind", 0)  # ext1
app.add_pri_criterion("Kind", 1)  # ext2
app.pdialog.prioritization_list.move_indexes([1], 0)
expected = [
"Kind (ext2)",
@ -112,6 +122,7 @@ def test_reorder_prioritizations(app):
]
eq_(app.pdialog.prioritization_list[:], expected)


@with_app(app_normal_results)
def test_remove_crit_from_list(app):
app.add_pri_criterion("Kind", 0)
@ -123,75 +134,72 @@ def test_remove_crit_from_list(app):
]
eq_(app.pdialog.prioritization_list[:], expected)


@with_app(app_normal_results)
def test_add_crit_without_selection(app):
# Adding a criterion without having made a selection doesn't cause a crash.
app.pdialog.add_selected() # no crash
app.pdialog.add_selected()  # no crash

#---

# ---
def app_one_name_ends_with_number():
dupes = [
[
no('foo.ext'),
no('foo1.ext'),
],
[no("foo.ext"), no("foo1.ext")],
]
return app_with_dupes(dupes)


@with_app(app_one_name_ends_with_number)
def test_filename_reprioritization(app):
app.add_pri_criterion("Filename", 0) # Ends with a number
app.add_pri_criterion("Filename", 0)  # Ends with a number
app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo1.ext')
eq_(app.rtable[0].data["name"], "foo1.ext")

#---

# ---
def app_with_subfolders():
dupes = [
[
no('foo1', folder='baz'),
no('foo2', folder='foo/bar'),
],
[
no('foo3', folder='baz'),
no('foo4', folder='foo'),
],
[no("foo1", folder="baz"), no("foo2", folder="foo/bar")],
[no("foo3", folder="baz"), no("foo4", folder="foo")],
]
return app_with_dupes(dupes)


@with_app(app_with_subfolders)
def test_folder_crit_is_sorted(app):
# Folder subcriteria are sorted.
app.select_pri_criterion("Folder")
eq_(app.pdialog.criteria_list[:], ['baz', 'foo', op.join('foo', 'bar')])
eq_(app.pdialog.criteria_list[:], ["baz", "foo", op.join("foo", "bar")])


@with_app(app_with_subfolders)
def test_folder_crit_includes_subfolders(app):
# When selecting a folder crit, dupes in a subfolder are also considered as affected by that
# crit.
app.add_pri_criterion("Folder", 1) # foo
app.add_pri_criterion("Folder", 1)  # foo
app.pdialog.perform_reprioritization()
# Both foo and foo/bar dupes will be prioritized
eq_(app.rtable[0].data['name'], 'foo2')
eq_(app.rtable[2].data['name'], 'foo4')
eq_(app.rtable[0].data["name"], "foo2")
eq_(app.rtable[2].data["name"], "foo4")


@with_app(app_with_subfolders)
def test_display_something_on_empty_extensions(app):
# When there's no extension, display "None" instead of nothing at all.
app.select_pri_criterion("Kind")
eq_(app.pdialog.criteria_list[:], ['None'])
eq_(app.pdialog.criteria_list[:], ["None"])

#---

# ---
def app_one_name_longer_than_the_other():
dupes = [
[
no('shortest.ext'),
no('loooongest.ext'),
],
[no("shortest.ext"), no("loooongest.ext")],
]
return app_with_dupes(dupes)


@with_app(app_one_name_longer_than_the_other)
def test_longest_filename_prioritization(app):
app.add_pri_criterion("Filename", 2) # Longest
app.add_pri_criterion("Filename", 2)  # Longest
app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'loooongest.ext')
eq_(app.rtable[0].data["name"], "loooongest.ext")
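A guess at the predicate behind the "Ends with a number" Filename subcriterion exercised above (hypothetical helper; the dialog's actual implementation isn't shown in this diff):

    import re

    def ends_with_number(filename):
        stem = filename.rsplit(".", 1)[0]  # ignore the extension
        return bool(re.search(r"\d$", stem))

    assert ends_with_number("foo1.ext")
    assert not ends_with_number("foo.ext")
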
@ -1,13 +1,14 @@
# Created By: Virgil Dupras
# Created On: 2013-07-28
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import TestApp, GetTestGroups


def app_with_results():
app = TestApp()
objects, matches, groups = GetTestGroups()
@ -15,23 +16,26 @@ def app_with_results():
app.rtable.refresh()
return app


def test_delta_flags_delta_mode_off():
app = app_with_results()
# When the delta mode is off, we never have delta value flags
app.rtable.delta_values = False
# Ref file, always false anyway
assert not app.rtable[0].is_cell_delta('size')
assert not app.rtable[0].is_cell_delta("size")
# False because delta mode is off
assert not app.rtable[1].is_cell_delta('size')
assert not app.rtable[1].is_cell_delta("size")


def test_delta_flags_delta_mode_on_delta_columns():
# When the delta mode is on, delta columns always have a delta flag, except for ref rows
app = app_with_results()
app.rtable.delta_values = True
# Ref file, always false anyway
assert not app.rtable[0].is_cell_delta('size')
assert not app.rtable[0].is_cell_delta("size")
# But for a dupe, the flag is on
assert app.rtable[1].is_cell_delta('size')
assert app.rtable[1].is_cell_delta("size")


def test_delta_flags_delta_mode_on_non_delta_columns():
# When the delta mode is on, non-delta columns have a delta flag if their value differs from
@ -39,11 +43,12 @@ def test_delta_flags_delta_mode_on_non_delta_columns():
app = app_with_results()
app.rtable.delta_values = True
# "bar bleh" != "foo bar", flag on
assert app.rtable[1].is_cell_delta('name')
assert app.rtable[1].is_cell_delta("name")
# "ibabtu" row, but it's a ref, flag off
assert not app.rtable[3].is_cell_delta('name')
assert not app.rtable[3].is_cell_delta("name")
# "ibabtu" == "ibabtu", flag off
assert not app.rtable[4].is_cell_delta('name')
assert not app.rtable[4].is_cell_delta("name")


def test_delta_flags_delta_mode_on_non_delta_columns_case_insensitive():
# Comparison that occurs for non-numeric columns to check whether they're delta is case
@ -53,4 +58,4 @@ def test_delta_flags_delta_mode_on_non_delta_columns_case_insensitive():
app.app.results.groups[1].dupes[0].name = "IBaBTU"
app.rtable.delta_values = True
# "ibAbtu" == "IBaBTU", flag off
assert not app.rtable[4].is_cell_delta('name')
assert not app.rtable[4].is_cell_delta("name")
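The delta-flag rules pinned down above condense to a small decision, sketched here with assumed row attributes (is_ref, data, ref_data) that stand in for whatever the result table really uses:

    def is_cell_delta_sketch(row, column, delta_mode, delta_columns):
        if not delta_mode or row.is_ref:
            return False  # mode off, or ref rows: never flagged
        if column in delta_columns:
            return True  # numeric delta columns: always flagged on dupes
        value, ref_value = row.data[column], row.ref_data[column]
        return value.lower() != ref_value.lower()  # case-insensitive compare
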
@ -17,6 +17,7 @@ from .. import engine
|
||||
from .base import NamedObject, GetTestGroups, DupeGuru
|
||||
from ..results import Results
|
||||
|
||||
|
||||
class TestCaseResultsEmpty:
|
||||
def setup_method(self, method):
|
||||
self.app = DupeGuru()
|
||||
@ -24,8 +25,8 @@ class TestCaseResultsEmpty:
|
||||
|
||||
def test_apply_invalid_filter(self):
|
||||
# If the applied filter is an invalid regexp, just ignore the filter.
|
||||
self.results.apply_filter('[') # invalid
|
||||
self.test_stat_line() # make sure that the stats line isn't saying we applied a '[' filter
|
||||
self.results.apply_filter("[") # invalid
|
||||
self.test_stat_line() # make sure that the stats line isn't saying we applied a '[' filter
|
||||
|
||||
def test_stat_line(self):
|
||||
eq_("0 / 0 (0.00 B / 0.00 B) duplicates marked.", self.results.stat_line)
|
||||
@ -34,7 +35,7 @@ class TestCaseResultsEmpty:
|
||||
eq_(0, len(self.results.groups))
|
||||
|
||||
def test_get_group_of_duplicate(self):
|
||||
assert self.results.get_group_of_duplicate('foo') is None
|
||||
assert self.results.get_group_of_duplicate("foo") is None
|
||||
|
||||
def test_save_to_xml(self):
|
||||
f = io.BytesIO()
|
||||
@ -42,7 +43,7 @@ class TestCaseResultsEmpty:
|
||||
f.seek(0)
|
||||
doc = ET.parse(f)
|
||||
root = doc.getroot()
|
||||
eq_('results', root.tag)
|
||||
eq_("results", root.tag)
|
||||
|
||||
def test_is_modified(self):
|
||||
assert not self.results.is_modified
|
||||
@ -59,10 +60,10 @@ class TestCaseResultsEmpty:
|
||||
# would have been some kind of feedback to the user, but the work involved for something
|
||||
# that simply never happens (I never received a report of this crash, I experienced it
|
||||
# while fooling around) is too much. Instead, use standard name conflict resolution.
|
||||
folderpath = tmpdir.join('foo')
|
||||
folderpath = tmpdir.join("foo")
|
||||
folderpath.mkdir()
|
||||
self.results.save_to_xml(str(folderpath)) # no crash
|
||||
assert tmpdir.join('[000] foo').check()
|
||||
self.results.save_to_xml(str(folderpath)) # no crash
|
||||
assert tmpdir.join("[000] foo").check()
|
||||
|
||||
|
||||
class TestCaseResultsWithSomeGroups:
|
||||
@ -116,18 +117,22 @@ class TestCaseResultsWithSomeGroups:
|
||||
assert d is g.ref
|
||||
|
||||
def test_sort_groups(self):
|
||||
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object to go ref.
|
||||
self.results.make_ref(
|
||||
self.objects[1]
|
||||
) # We want to make the 1024 sized object to go ref.
|
||||
g1, g2 = self.groups
|
||||
self.results.sort_groups('size')
|
||||
self.results.sort_groups("size")
|
||||
assert self.results.groups[0] is g2
|
||||
assert self.results.groups[1] is g1
|
||||
self.results.sort_groups('size', False)
|
||||
self.results.sort_groups("size", False)
|
||||
assert self.results.groups[0] is g1
|
||||
assert self.results.groups[1] is g2
|
||||
|
||||
def test_set_groups_when_sorted(self):
|
||||
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object to go ref.
|
||||
self.results.sort_groups('size')
|
||||
self.results.make_ref(
|
||||
self.objects[1]
|
||||
) # We want to make the 1024 sized object to go ref.
|
||||
self.results.sort_groups("size")
|
||||
objects, matches, groups = GetTestGroups()
|
||||
g1, g2 = groups
|
||||
g1.switch_ref(objects[1])
|
||||
@ -158,9 +163,9 @@ class TestCaseResultsWithSomeGroups:
|
||||
o3.size = 3
|
||||
o4.size = 2
|
||||
o5.size = 1
|
||||
self.results.sort_dupes('size')
|
||||
self.results.sort_dupes("size")
|
||||
eq_([o5, o3, o2], self.results.dupes)
|
||||
self.results.sort_dupes('size', False)
|
||||
self.results.sort_dupes("size", False)
|
||||
eq_([o2, o3, o5], self.results.dupes)
|
||||
|
||||
def test_dupe_list_remember_sort(self):
|
||||
@ -170,25 +175,25 @@ class TestCaseResultsWithSomeGroups:
|
||||
o3.size = 3
|
||||
o4.size = 2
|
||||
o5.size = 1
|
||||
self.results.sort_dupes('size')
|
||||
self.results.sort_dupes("size")
|
||||
self.results.make_ref(o2)
|
||||
eq_([o5, o3, o1], self.results.dupes)
|
||||
|
||||
def test_dupe_list_sort_delta_values(self):
|
||||
o1, o2, o3, o4, o5 = self.objects
|
||||
o1.size = 10
|
||||
o2.size = 2 #-8
|
||||
o3.size = 3 #-7
|
||||
o2.size = 2 # -8
|
||||
o3.size = 3 # -7
|
||||
o4.size = 20
|
||||
o5.size = 1 #-19
|
||||
self.results.sort_dupes('size', delta=True)
|
||||
o5.size = 1 # -19
|
||||
self.results.sort_dupes("size", delta=True)
|
||||
eq_([o5, o2, o3], self.results.dupes)
|
||||
|
||||
def test_sort_empty_list(self):
|
||||
#There was an infinite loop when sorting an empty list.
|
||||
# There was an infinite loop when sorting an empty list.
|
||||
app = DupeGuru()
|
||||
r = app.results
|
||||
r.sort_dupes('name')
|
||||
r.sort_dupes("name")
|
||||
eq_([], r.dupes)
|
||||
|
||||
def test_dupe_list_update_on_remove_duplicates(self):
|
||||
@ -209,7 +214,7 @@ class TestCaseResultsWithSomeGroups:
|
||||
f = io.BytesIO()
|
||||
self.results.save_to_xml(f)
|
||||
assert not self.results.is_modified
|
||||
self.results.groups = self.groups # sets the flag back
|
||||
self.results.groups = self.groups # sets the flag back
|
||||
f.seek(0)
|
||||
self.results.load_from_xml(f, get_file)
|
||||
assert not self.results.is_modified
|
||||
@ -236,7 +241,7 @@ class TestCaseResultsWithSomeGroups:
|
||||
# "aaa" makes our dupe go first in alphabetical order, but since we have the same value as
|
||||
# ref, we're going last.
|
||||
g2r.name = g2d1.name = "aaa"
|
||||
self.results.sort_dupes('name', delta=True)
|
||||
self.results.sort_dupes("name", delta=True)
|
||||
eq_("aaa", self.results.dupes[2].name)
|
||||
|
||||
def test_dupe_list_sort_delta_values_nonnumeric_case_insensitive(self):
|
||||
@ -244,9 +249,10 @@ class TestCaseResultsWithSomeGroups:
|
||||
g1r, g1d1, g1d2, g2r, g2d1 = self.objects
|
||||
g2r.name = "AaA"
|
||||
g2d1.name = "aAa"
|
||||
self.results.sort_dupes('name', delta=True)
|
||||
self.results.sort_dupes("name", delta=True)
|
||||
eq_("aAa", self.results.dupes[2].name)
|
||||
|
||||
|
||||
class TestCaseResultsWithSavedResults:
|
||||
def setup_method(self, method):
|
||||
self.app = DupeGuru()
|
||||
@ -266,7 +272,7 @@ class TestCaseResultsWithSavedResults:
|
||||
def get_file(path):
|
||||
return [f for f in self.objects if str(f.path) == path][0]
|
||||
|
||||
self.results.groups = self.groups # sets the flag back
|
||||
self.results.groups = self.groups # sets the flag back
|
||||
self.results.load_from_xml(self.f, get_file)
|
||||
assert not self.results.is_modified
|
||||
|
||||
@ -299,7 +305,7 @@ class TestCaseResultsMarkings:
|
||||
self.results.mark(self.objects[2])
|
||||
self.results.mark(self.objects[4])
|
||||
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||
self.results.mark(self.objects[0]) #this is a ref, it can't be counted
|
||||
self.results.mark(self.objects[0]) # this is a ref, it can't be counted
|
||||
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||
self.results.groups = self.groups
|
||||
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||
@ -335,7 +341,7 @@ class TestCaseResultsMarkings:
|
||||
def log_object(o):
|
||||
log.append(o)
|
||||
if o is self.objects[1]:
|
||||
raise EnvironmentError('foobar')
|
||||
raise EnvironmentError("foobar")
|
||||
|
||||
log = []
|
||||
self.results.mark_all()
|
||||
@ -350,7 +356,7 @@ class TestCaseResultsMarkings:
|
||||
eq_(len(self.results.problems), 1)
|
||||
dupe, msg = self.results.problems[0]
|
||||
assert dupe is self.objects[1]
|
||||
eq_(msg, 'foobar')
|
||||
eq_(msg, "foobar")
|
||||
|
||||
def test_perform_on_marked_with_ref(self):
|
||||
def log_object(o):
|
||||
@ -408,20 +414,20 @@ class TestCaseResultsMarkings:
|
||||
f.seek(0)
|
||||
doc = ET.parse(f)
|
||||
root = doc.getroot()
|
||||
g1, g2 = root.getiterator('group')
|
||||
d1, d2, d3 = g1.getiterator('file')
|
||||
eq_('n', d1.get('marked'))
|
||||
eq_('n', d2.get('marked'))
|
||||
eq_('y', d3.get('marked'))
|
||||
d1, d2 = g2.getiterator('file')
|
||||
eq_('n', d1.get('marked'))
|
||||
eq_('y', d2.get('marked'))
|
||||
g1, g2 = root.getiterator("group")
|
||||
d1, d2, d3 = g1.getiterator("file")
|
||||
eq_("n", d1.get("marked"))
|
||||
eq_("n", d2.get("marked"))
|
||||
eq_("y", d3.get("marked"))
|
||||
d1, d2 = g2.getiterator("file")
|
||||
eq_("n", d1.get("marked"))
|
||||
eq_("y", d2.get("marked"))
|
||||
|
||||
def test_LoadXML(self):
|
||||
def get_file(path):
|
||||
return [f for f in self.objects if str(f.path) == path][0]
|
||||
|
||||
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
|
||||
self.objects[4].name = "ibabtu 2" # we can't have 2 files with the same path
|
||||
self.results.mark(self.objects[1])
|
||||
self.results.mark_invert()
|
||||
f = io.BytesIO()
|
||||
@ -444,51 +450,51 @@ class TestCaseResultsXML:
|
||||
self.objects, self.matches, self.groups = GetTestGroups()
|
||||
self.results.groups = self.groups
|
||||
|
||||
def get_file(self, path): # use this as a callback for load_from_xml
|
||||
def get_file(self, path): # use this as a callback for load_from_xml
|
||||
return [o for o in self.objects if o.path == path][0]
|
||||
|
||||
def test_save_to_xml(self):
|
||||
self.objects[0].is_ref = True
|
||||
self.objects[0].words = [['foo', 'bar']]
|
||||
self.objects[0].words = [["foo", "bar"]]
|
||||
f = io.BytesIO()
|
||||
self.results.save_to_xml(f)
|
||||
f.seek(0)
|
||||
doc = ET.parse(f)
|
||||
root = doc.getroot()
|
||||
eq_('results', root.tag)
|
||||
eq_("results", root.tag)
|
||||
eq_(2, len(root))
|
||||
eq_(2, len([c for c in root if c.tag == 'group']))
|
||||
eq_(2, len([c for c in root if c.tag == "group"]))
|
||||
g1, g2 = root
|
||||
eq_(6, len(g1))
|
||||
eq_(3, len([c for c in g1 if c.tag == 'file']))
|
||||
eq_(3, len([c for c in g1 if c.tag == 'match']))
|
||||
d1, d2, d3 = [c for c in g1 if c.tag == 'file']
|
||||
eq_(op.join('basepath', 'foo bar'), d1.get('path'))
|
||||
eq_(op.join('basepath', 'bar bleh'), d2.get('path'))
|
||||
eq_(op.join('basepath', 'foo bleh'), d3.get('path'))
|
||||
eq_('y', d1.get('is_ref'))
|
||||
eq_('n', d2.get('is_ref'))
|
||||
eq_('n', d3.get('is_ref'))
|
||||
eq_('foo,bar', d1.get('words'))
|
||||
eq_('bar,bleh', d2.get('words'))
|
||||
eq_('foo,bleh', d3.get('words'))
|
||||
eq_(3, len([c for c in g1 if c.tag == "file"]))
|
||||
eq_(3, len([c for c in g1 if c.tag == "match"]))
|
||||
d1, d2, d3 = [c for c in g1 if c.tag == "file"]
|
||||
eq_(op.join("basepath", "foo bar"), d1.get("path"))
|
||||
eq_(op.join("basepath", "bar bleh"), d2.get("path"))
|
||||
eq_(op.join("basepath", "foo bleh"), d3.get("path"))
|
||||
eq_("y", d1.get("is_ref"))
|
||||
eq_("n", d2.get("is_ref"))
|
||||
eq_("n", d3.get("is_ref"))
|
||||
eq_("foo,bar", d1.get("words"))
|
||||
eq_("bar,bleh", d2.get("words"))
|
||||
eq_("foo,bleh", d3.get("words"))
|
||||
eq_(3, len(g2))
|
||||
eq_(2, len([c for c in g2 if c.tag == 'file']))
|
||||
eq_(1, len([c for c in g2 if c.tag == 'match']))
|
||||
d1, d2 = [c for c in g2 if c.tag == 'file']
|
||||
eq_(op.join('basepath', 'ibabtu'), d1.get('path'))
|
||||
eq_(op.join('basepath', 'ibabtu'), d2.get('path'))
|
||||
eq_('n', d1.get('is_ref'))
|
||||
eq_('n', d2.get('is_ref'))
|
||||
eq_('ibabtu', d1.get('words'))
|
||||
eq_('ibabtu', d2.get('words'))
|
||||
eq_(2, len([c for c in g2 if c.tag == "file"]))
|
||||
eq_(1, len([c for c in g2 if c.tag == "match"]))
|
||||
d1, d2 = [c for c in g2 if c.tag == "file"]
|
||||
eq_(op.join("basepath", "ibabtu"), d1.get("path"))
|
||||
eq_(op.join("basepath", "ibabtu"), d2.get("path"))
|
||||
eq_("n", d1.get("is_ref"))
|
||||
eq_("n", d2.get("is_ref"))
|
||||
eq_("ibabtu", d1.get("words"))
|
||||
eq_("ibabtu", d2.get("words"))
|
||||
|
||||
def test_LoadXML(self):
|
||||
def get_file(path):
|
||||
return [f for f in self.objects if str(f.path) == path][0]
|
||||
|
||||
self.objects[0].is_ref = True
|
||||
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
|
||||
self.objects[4].name = "ibabtu 2" # we can't have 2 files with the same path
|
||||
f = io.BytesIO()
|
||||
self.results.save_to_xml(f)
|
||||
f.seek(0)
|
||||
@ -504,23 +510,23 @@ class TestCaseResultsXML:
|
||||
assert g1[0] is self.objects[0]
|
||||
assert g1[1] is self.objects[1]
|
||||
assert g1[2] is self.objects[2]
|
||||
eq_(['foo', 'bar'], g1[0].words)
|
||||
eq_(['bar', 'bleh'], g1[1].words)
|
||||
eq_(['foo', 'bleh'], g1[2].words)
|
||||
eq_(["foo", "bar"], g1[0].words)
|
||||
eq_(["bar", "bleh"], g1[1].words)
|
||||
eq_(["foo", "bleh"], g1[2].words)
|
||||
eq_(2, len(g2))
|
||||
assert not g2[0].is_ref
|
||||
assert not g2[1].is_ref
|
||||
assert g2[0] is self.objects[3]
|
||||
assert g2[1] is self.objects[4]
|
||||
eq_(['ibabtu'], g2[0].words)
|
||||
eq_(['ibabtu'], g2[1].words)
|
||||
eq_(["ibabtu"], g2[0].words)
|
||||
eq_(["ibabtu"], g2[1].words)
|
||||
|
||||
def test_LoadXML_with_filename(self, tmpdir):
|
||||
def get_file(path):
|
||||
return [f for f in self.objects if str(f.path) == path][0]
|
||||
|
||||
filename = str(tmpdir.join('dupeguru_results.xml'))
|
||||
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
|
||||
filename = str(tmpdir.join("dupeguru_results.xml"))
|
||||
self.objects[4].name = "ibabtu 2" # we can't have 2 files with the same path
|
||||
self.results.save_to_xml(filename)
|
||||
app = DupeGuru()
|
||||
r = Results(app)
|
||||
@ -529,11 +535,11 @@ class TestCaseResultsXML:
|
||||
|
||||
def test_LoadXML_with_some_files_that_dont_exist_anymore(self):
|
||||
def get_file(path):
|
||||
if path.endswith('ibabtu 2'):
|
||||
if path.endswith("ibabtu 2"):
|
||||
return None
|
||||
return [f for f in self.objects if str(f.path) == path][0]
|
||||
|
||||
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
|
||||
self.objects[4].name = "ibabtu 2" # we can't have 2 files with the same path
|
||||
f = io.BytesIO()
|
||||
self.results.save_to_xml(f)
|
||||
f.seek(0)
|
||||
@ -547,36 +553,36 @@ class TestCaseResultsXML:
|
||||
def get_file(path):
|
||||
return [f for f in self.objects if str(f.path) == path][0]
|
||||
|
||||
root = ET.Element('foobar') #The root element shouldn't matter, really.
|
||||
group_node = ET.SubElement(root, 'group')
|
||||
dupe_node = ET.SubElement(group_node, 'file') #Perfectly correct file
|
||||
dupe_node.set('path', op.join('basepath', 'foo bar'))
|
||||
dupe_node.set('is_ref', 'y')
|
||||
dupe_node.set('words', 'foo, bar')
|
||||
dupe_node = ET.SubElement(group_node, 'file') #is_ref missing, default to 'n'
|
||||
dupe_node.set('path', op.join('basepath', 'foo bleh'))
|
||||
dupe_node.set('words', 'foo, bleh')
|
||||
dupe_node = ET.SubElement(group_node, 'file') #words are missing, valid.
|
||||
dupe_node.set('path', op.join('basepath', 'bar bleh'))
|
||||
dupe_node = ET.SubElement(group_node, 'file') #path is missing, invalid.
|
||||
dupe_node.set('words', 'foo, bleh')
|
||||
dupe_node = ET.SubElement(group_node, 'foobar') #Invalid element name
|
||||
dupe_node.set('path', op.join('basepath', 'bar bleh'))
|
||||
dupe_node.set('is_ref', 'y')
|
||||
dupe_node.set('words', 'bar, bleh')
|
||||
match_node = ET.SubElement(group_node, 'match') # match pointing to a bad index
|
||||
match_node.set('first', '42')
|
||||
match_node.set('second', '45')
|
||||
match_node = ET.SubElement(group_node, 'match') # match with missing attrs
|
||||
match_node = ET.SubElement(group_node, 'match') # match with non-int values
|
||||
match_node.set('first', 'foo')
|
||||
match_node.set('second', 'bar')
|
||||
match_node.set('percentage', 'baz')
|
||||
group_node = ET.SubElement(root, 'foobar') #invalid group
|
||||
group_node = ET.SubElement(root, 'group') #empty group
|
||||
root = ET.Element("foobar") # The root element shouldn't matter, really.
|
||||
group_node = ET.SubElement(root, "group")
|
||||
dupe_node = ET.SubElement(group_node, "file") # Perfectly correct file
|
||||
dupe_node.set("path", op.join("basepath", "foo bar"))
|
||||
dupe_node.set("is_ref", "y")
|
||||
dupe_node.set("words", "foo, bar")
|
||||
dupe_node = ET.SubElement(group_node, "file") # is_ref missing, default to 'n'
|
||||
dupe_node.set("path", op.join("basepath", "foo bleh"))
|
||||
dupe_node.set("words", "foo, bleh")
|
||||
dupe_node = ET.SubElement(group_node, "file") # words are missing, valid.
|
||||
dupe_node.set("path", op.join("basepath", "bar bleh"))
|
||||
dupe_node = ET.SubElement(group_node, "file") # path is missing, invalid.
|
||||
dupe_node.set("words", "foo, bleh")
|
||||
dupe_node = ET.SubElement(group_node, "foobar") # Invalid element name
|
||||
dupe_node.set("path", op.join("basepath", "bar bleh"))
|
||||
dupe_node.set("is_ref", "y")
|
||||
dupe_node.set("words", "bar, bleh")
|
||||
match_node = ET.SubElement(group_node, "match") # match pointing to a bad index
|
||||
match_node.set("first", "42")
|
||||
match_node.set("second", "45")
|
||||
match_node = ET.SubElement(group_node, "match") # match with missing attrs
|
||||
match_node = ET.SubElement(group_node, "match") # match with non-int values
|
||||
match_node.set("first", "foo")
|
||||
match_node.set("second", "bar")
|
||||
match_node.set("percentage", "baz")
|
||||
group_node = ET.SubElement(root, "foobar") # invalid group
|
||||
group_node = ET.SubElement(root, "group") # empty group
|
||||
f = io.BytesIO()
|
||||
tree = ET.ElementTree(root)
|
||||
tree.write(f, encoding='utf-8')
|
||||
tree.write(f, encoding="utf-8")
|
||||
f.seek(0)
|
||||
app = DupeGuru()
|
||||
r = Results(app)
|
||||
@ -586,16 +592,18 @@ class TestCaseResultsXML:
|
||||
|
||||
def test_xml_non_ascii(self):
|
||||
def get_file(path):
|
||||
if path == op.join('basepath', '\xe9foo bar'):
|
||||
if path == op.join("basepath", "\xe9foo bar"):
|
||||
return objects[0]
|
||||
if path == op.join('basepath', 'bar bleh'):
|
||||
if path == op.join("basepath", "bar bleh"):
|
||||
return objects[1]
|
||||
|
||||
objects = [NamedObject("\xe9foo bar", True), NamedObject("bar bleh", True)]
|
||||
matches = engine.getmatches(objects) #we should have 5 matches
|
||||
groups = engine.get_groups(matches) #We should have 2 groups
|
||||
matches = engine.getmatches(objects) # we should have 5 matches
|
||||
groups = engine.get_groups(matches) # We should have 2 groups
|
||||
for g in groups:
|
||||
g.prioritize(lambda x: objects.index(x)) #We want the dupes to be in the same order as the list is
|
||||
g.prioritize(
|
||||
lambda x: objects.index(x)
|
||||
) # We want the dupes to be in the same order as the list is
|
||||
app = DupeGuru()
|
||||
results = Results(app)
|
||||
results.groups = groups
|
||||
@ -607,11 +615,11 @@ class TestCaseResultsXML:
|
||||
r.load_from_xml(f, get_file)
|
||||
g = r.groups[0]
|
||||
eq_("\xe9foo bar", g[0].name)
|
||||
eq_(['efoo', 'bar'], g[0].words)
|
||||
eq_(["efoo", "bar"], g[0].words)
|
||||
|
||||
def test_load_invalid_xml(self):
|
||||
f = io.BytesIO()
|
||||
f.write(b'<this is invalid')
|
||||
f.write(b"<this is invalid")
|
||||
f.seek(0)
|
||||
app = DupeGuru()
|
||||
r = Results(app)
|
||||
@ -623,7 +631,7 @@ class TestCaseResultsXML:
app = DupeGuru()
r = Results(app)
with raises(IOError):
r.load_from_xml('does_not_exist.xml', None)
r.load_from_xml("does_not_exist.xml", None)
eq_(0, len(r.groups))

def test_remember_match_percentage(self):
@ -643,12 +651,12 @@ class TestCaseResultsXML:
results.load_from_xml(f, self.get_file)
group = results.groups[0]
d1, d2, d3 = group
match = group.get_match_of(d2) #d1 - d2
match = group.get_match_of(d2) # d1 - d2
eq_(42, match[2])
match = group.get_match_of(d3) #d1 - d3
match = group.get_match_of(d3) # d1 - d3
eq_(43, match[2])
group.switch_ref(d2)
match = group.get_match_of(d3) #d2 - d3
match = group.get_match_of(d3) # d2 - d3
eq_(46, match[2])

def test_save_and_load(self):
@ -661,13 +669,13 @@ class TestCaseResultsXML:

def test_apply_filter_works_on_paths(self):
# apply_filter() searches on the whole path, not just on the filename.
self.results.apply_filter('basepath')
self.results.apply_filter("basepath")
eq_(len(self.results.groups), 2)

def test_save_xml_with_invalid_characters(self):
# Don't crash when saving files that have invalid xml characters in their path
self.objects[0].name = 'foo\x19'
self.results.save_to_xml(io.BytesIO()) # don't crash
self.objects[0].name = "foo\x19"
self.results.save_to_xml(io.BytesIO())  # don't crash


class TestCaseResultsFilter:
@ -676,7 +684,7 @@ class TestCaseResultsFilter:
self.results = self.app.results
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
self.results.apply_filter(r'foo')
self.results.apply_filter(r"foo")

def test_groups(self):
eq_(1, len(self.results.groups))
@ -694,7 +702,7 @@ class TestCaseResultsFilter:

def test_dupes_reconstructed_filtered(self):
# make_ref resets self.__dupes to None. When it's reconstructed, we want it filtered
dupe = self.results.dupes[0] #3rd object
dupe = self.results.dupes[0] # 3rd object
self.results.make_ref(dupe)
eq_(1, len(self.results.dupes))
assert self.results.dupes[0] is self.objects[0]
@ -702,23 +710,23 @@ class TestCaseResultsFilter:
def test_include_ref_dupes_in_filter(self):
# When only the ref of a group match the filter, include it in the group
self.results.apply_filter(None)
self.results.apply_filter(r'foo bar')
self.results.apply_filter(r"foo bar")
eq_(1, len(self.results.groups))
eq_(0, len(self.results.dupes))

def test_filters_build_on_one_another(self):
self.results.apply_filter(r'bar')
self.results.apply_filter(r"bar")
eq_(1, len(self.results.groups))
eq_(0, len(self.results.dupes))

def test_stat_line(self):
expected = '0 / 1 (0.00 B / 1.00 B) duplicates marked. filter: foo'
expected = "0 / 1 (0.00 B / 1.00 B) duplicates marked. filter: foo"
eq_(expected, self.results.stat_line)
self.results.apply_filter(r'bar')
expected = '0 / 0 (0.00 B / 0.00 B) duplicates marked. filter: foo --> bar'
self.results.apply_filter(r"bar")
expected = "0 / 0 (0.00 B / 0.00 B) duplicates marked. filter: foo --> bar"
eq_(expected, self.results.stat_line)
self.results.apply_filter(None)
expected = '0 / 3 (0.00 B / 1.01 KB) duplicates marked.'
expected = "0 / 3 (0.00 B / 1.01 KB) duplicates marked."
eq_(expected, self.results.stat_line)

def test_mark_count_is_filtered_as_well(self):
@ -726,8 +734,8 @@ class TestCaseResultsFilter:
# We don't want to perform mark_all() because we want the mark list to contain objects
for dupe in self.results.dupes:
self.results.mark(dupe)
self.results.apply_filter(r'foo')
expected = '1 / 1 (1.00 B / 1.00 B) duplicates marked. filter: foo'
self.results.apply_filter(r"foo")
expected = "1 / 1 (1.00 B / 1.00 B) duplicates marked. filter: foo"
eq_(expected, self.results.stat_line)

def test_mark_all_only_affects_filtered_items(self):
@ -739,22 +747,22 @@ class TestCaseResultsFilter:

def test_sort_groups(self):
self.results.apply_filter(None)
self.results.make_ref(self.objects[1]) # to have the 1024 b obkect as ref
self.results.make_ref(self.objects[1])  # to have the 1024 b obkect as ref
g1, g2 = self.groups
self.results.apply_filter('a') # Matches both group
self.results.sort_groups('size')
self.results.apply_filter("a")  # Matches both group
self.results.sort_groups("size")
assert self.results.groups[0] is g2
assert self.results.groups[1] is g1
self.results.apply_filter(None)
assert self.results.groups[0] is g2
assert self.results.groups[1] is g1
self.results.sort_groups('size', False)
self.results.apply_filter('a')
self.results.sort_groups("size", False)
self.results.apply_filter("a")
assert self.results.groups[1] is g2
assert self.results.groups[0] is g1

def test_set_group(self):
#We want the new group to be filtered
# We want the new group to be filtered
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
eq_(1, len(self.results.groups))
@ -764,12 +772,12 @@ class TestCaseResultsFilter:
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]

filename = str(tmpdir.join('dupeguru_results.xml'))
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
filename = str(tmpdir.join("dupeguru_results.xml"))
self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
self.results.save_to_xml(filename)
app = DupeGuru()
r = Results(app)
r.apply_filter('foo')
r.apply_filter("foo")
r.load_from_xml(filename, get_file)
eq_(2, len(r.groups))

@ -778,7 +786,7 @@ class TestCaseResultsFilter:
self.results.apply_filter(None)
eq_(2, len(self.results.groups))
eq_(2, len(self.results.dupes))
self.results.apply_filter('ibabtu')
self.results.apply_filter("ibabtu")
self.results.remove_duplicates([self.results.dupes[0]])
self.results.apply_filter(None)
eq_(1, len(self.results.groups))
@ -786,7 +794,7 @@ class TestCaseResultsFilter:

def test_filter_is_case_insensitive(self):
self.results.apply_filter(None)
self.results.apply_filter('FOO')
self.results.apply_filter("FOO")
eq_(1, len(self.results.dupes))

def test_make_ref_on_filtered_out_doesnt_mess_stats(self):
@ -794,13 +802,15 @@ class TestCaseResultsFilter:
# When calling make_ref on such a dupe, the total size and dupecount stats gets messed up
# because they are *not* counted in the stats in the first place.
g1, g2 = self.groups
bar_bleh = g1[1] # The "bar bleh" dupe is filtered out
bar_bleh = g1[1]  # The "bar bleh" dupe is filtered out
self.results.make_ref(bar_bleh)
# Now the stats should display *2* markable dupes (instead of 1)
expected = '0 / 2 (0.00 B / 2.00 B) duplicates marked. filter: foo'
expected = "0 / 2 (0.00 B / 2.00 B) duplicates marked. filter: foo"
eq_(expected, self.results.stat_line)
self.results.apply_filter(None) # Now let's make sure our unfiltered results aren't fucked up
expected = '0 / 3 (0.00 B / 3.00 B) duplicates marked.'
self.results.apply_filter(
None
)  # Now let's make sure our unfiltered results aren't fucked up
expected = "0 / 3 (0.00 B / 3.00 B) duplicates marked."
eq_(expected, self.results.stat_line)


@ -814,6 +824,5 @@ class TestCaseResultsRefFile:
self.results.groups = self.groups

def test_stat_line(self):
expected = '0 / 2 (0.00 B / 2.00 B) duplicates marked.'
expected = "0 / 2 (0.00 B / 2.00 B) duplicates marked."
eq_(expected, self.results.stat_line)

@ -14,6 +14,7 @@ from ..ignore import IgnoreList
from ..scanner import Scanner, ScanType
from ..me.scanner import ScannerME


class NamedObject:
def __init__(self, name="foobar", size=1, path=None):
if path is None:
@ -26,22 +27,25 @@ class NamedObject:
self.words = getwords(name)

def __repr__(self):
return '<NamedObject %r %r>' % (self.name, self.path)
return "<NamedObject %r %r>" % (self.name, self.path)


no = NamedObject


def pytest_funcarg__fake_fileexists(request):
# This is a hack to avoid invalidating all previous tests since the scanner started to test
# for file existence before doing the match grouping.
monkeypatch = request.getfuncargvalue('monkeypatch')
monkeypatch.setattr(Path, 'exists', lambda _: True)
monkeypatch = request.getfuncargvalue("monkeypatch")
monkeypatch.setattr(Path, "exists", lambda _: True)


def test_empty(fake_fileexists):
s = Scanner()
r = s.get_dupe_groups([])
eq_(r, [])


def test_default_settings(fake_fileexists):
s = Scanner()
eq_(s.min_match_percentage, 80)
@ -50,40 +54,54 @@ def test_default_settings(fake_fileexists):
eq_(s.word_weighting, False)
eq_(s.match_similar_words, False)


def test_simple_with_default_settings(fake_fileexists):
s = Scanner()
f = [no('foo bar', path='p1'), no('foo bar', path='p2'), no('foo bleh')]
f = [no("foo bar", path="p1"), no("foo bar", path="p2"), no("foo bleh")]
r = s.get_dupe_groups(f)
eq_(len(r), 1)
g = r[0]
#'foo bleh' cannot be in the group because the default min match % is 80
# 'foo bleh' cannot be in the group because the default min match % is 80
eq_(len(g), 2)
assert g.ref in f[:2]
assert g.dupes[0] in f[:2]


def test_simple_with_lower_min_match(fake_fileexists):
s = Scanner()
s.min_match_percentage = 50
f = [no('foo bar', path='p1'), no('foo bar', path='p2'), no('foo bleh')]
f = [no("foo bar", path="p1"), no("foo bar", path="p2"), no("foo bleh")]
r = s.get_dupe_groups(f)
eq_(len(r), 1)
g = r[0]
eq_(len(g), 3)


def test_trim_all_ref_groups(fake_fileexists):
# When all files of a group are ref, don't include that group in the results, but also don't
# count the files from that group as discarded.
s = Scanner()
f = [no('foo', path='p1'), no('foo', path='p2'), no('bar', path='p1'), no('bar', path='p2')]
f = [
no("foo", path="p1"),
no("foo", path="p2"),
no("bar", path="p1"),
no("bar", path="p2"),
]
f[2].is_ref = True
f[3].is_ref = True
r = s.get_dupe_groups(f)
eq_(len(r), 1)
eq_(s.discarded_file_count, 0)


def test_priorize(fake_fileexists):
s = Scanner()
f = [no('foo', path='p1'), no('foo', path='p2'), no('bar', path='p1'), no('bar', path='p2')]
f = [
no("foo", path="p1"),
no("foo", path="p2"),
no("bar", path="p1"),
no("bar", path="p2"),
]
f[1].size = 2
f[2].size = 3
f[3].is_ref = True
@ -94,17 +112,19 @@ def test_priorize(fake_fileexists):
assert f[3] in (g1.ref, g2.ref)
assert f[2] in (g1.dupes[0], g2.dupes[0])


def test_content_scan(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Contents
f = [no('foo'), no('bar'), no('bleh')]
f[0].md5 = f[0].md5partial = 'foobar'
f[1].md5 = f[1].md5partial = 'foobar'
f[2].md5 = f[2].md5partial = 'bleh'
f = [no("foo"), no("bar"), no("bleh")]
f[0].md5 = f[0].md5partial = "foobar"
f[1].md5 = f[1].md5partial = "foobar"
f[2].md5 = f[2].md5partial = "bleh"
r = s.get_dupe_groups(f)
eq_(len(r), 1)
eq_(len(r[0]), 2)
eq_(s.discarded_file_count, 0) # don't count the different md5 as discarded!
eq_(s.discarded_file_count, 0)  # don't count the different md5 as discarded!


def test_content_scan_compare_sizes_first(fake_fileexists):
class MyFile(no):
@ -114,16 +134,17 @@ def test_content_scan_compare_sizes_first(fake_fileexists):

s = Scanner()
s.scan_type = ScanType.Contents
f = [MyFile('foo', 1), MyFile('bar', 2)]
f = [MyFile("foo", 1), MyFile("bar", 2)]
eq_(len(s.get_dupe_groups(f)), 0)


def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Contents
f = [no('foo'), no('bar'), no('bleh')]
f[0].md5 = f[0].md5partial = 'foobar'
f[1].md5 = f[1].md5partial = 'foobar'
f[2].md5 = f[2].md5partial = 'bleh'
f = [no("foo"), no("bar"), no("bleh")]
f[0].md5 = f[0].md5partial = "foobar"
f[1].md5 = f[1].md5partial = "foobar"
f[2].md5 = f[2].md5partial = "bleh"
s.min_match_percentage = 101
r = s.get_dupe_groups(f)
eq_(len(r), 1)
@ -133,157 +154,180 @@ def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
eq_(len(r), 1)
eq_(len(r[0]), 2)


def test_content_scan_doesnt_put_md5_in_words_at_the_end(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Contents
f = [no('foo'), no('bar')]
f[0].md5 = f[0].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
f[1].md5 = f[1].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
f = [no("foo"), no("bar")]
f[0].md5 = f[
0
].md5partial = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
f[1].md5 = f[
1
].md5partial = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
r = s.get_dupe_groups(f)
r[0]


def test_extension_is_not_counted_in_filename_scan(fake_fileexists):
s = Scanner()
s.min_match_percentage = 100
f = [no('foo.bar'), no('foo.bleh')]
f = [no("foo.bar"), no("foo.bleh")]
r = s.get_dupe_groups(f)
eq_(len(r), 1)
eq_(len(r[0]), 2)


def test_job(fake_fileexists):
def do_progress(progress, desc=''):
def do_progress(progress, desc=""):
log.append(progress)
return True

s = Scanner()
log = []
f = [no('foo bar'), no('foo bar'), no('foo bleh')]
f = [no("foo bar"), no("foo bar"), no("foo bleh")]
s.get_dupe_groups(f, j=job.Job(1, do_progress))
eq_(log[0], 0)
eq_(log[-1], 100)


def test_mix_file_kind(fake_fileexists):
s = Scanner()
s.mix_file_kind = False
f = [no('foo.1'), no('foo.2')]
f = [no("foo.1"), no("foo.2")]
r = s.get_dupe_groups(f)
eq_(len(r), 0)


def test_word_weighting(fake_fileexists):
s = Scanner()
s.min_match_percentage = 75
s.word_weighting = True
f = [no('foo bar'), no('foo bar bleh')]
f = [no("foo bar"), no("foo bar bleh")]
r = s.get_dupe_groups(f)
eq_(len(r), 1)
g = r[0]
m = g.get_match_of(g.dupes[0])
eq_(m.percentage, 75) # 16 letters, 12 matching
eq_(m.percentage, 75)  # 16 letters, 12 matching


def test_similar_words(fake_fileexists):
s = Scanner()
s.match_similar_words = True
f = [no('The White Stripes'), no('The Whites Stripe'), no('Limp Bizkit'), no('Limp Bizkitt')]
f = [
no("The White Stripes"),
no("The Whites Stripe"),
no("Limp Bizkit"),
no("Limp Bizkitt"),
]
r = s.get_dupe_groups(f)
eq_(len(r), 2)


def test_fields(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Fields
f = [no('The White Stripes - Little Ghost'), no('The White Stripes - Little Acorn')]
f = [no("The White Stripes - Little Ghost"), no("The White Stripes - Little Acorn")]
r = s.get_dupe_groups(f)
eq_(len(r), 0)


def test_fields_no_order(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.FieldsNoOrder
f = [no('The White Stripes - Little Ghost'), no('Little Ghost - The White Stripes')]
f = [no("The White Stripes - Little Ghost"), no("Little Ghost - The White Stripes")]
r = s.get_dupe_groups(f)
eq_(len(r), 1)


def test_tag_scan(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Tag
o1 = no('foo')
o2 = no('bar')
o1.artist = 'The White Stripes'
o1.title = 'The Air Near My Fingers'
o2.artist = 'The White Stripes'
o2.title = 'The Air Near My Fingers'
o1 = no("foo")
o2 = no("bar")
o1.artist = "The White Stripes"
o1.title = "The Air Near My Fingers"
o2.artist = "The White Stripes"
o2.title = "The Air Near My Fingers"
r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1)


def test_tag_with_album_scan(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Tag
s.scanned_tags = set(['artist', 'album', 'title'])
o1 = no('foo')
o2 = no('bar')
o3 = no('bleh')
o1.artist = 'The White Stripes'
o1.title = 'The Air Near My Fingers'
o1.album = 'Elephant'
o2.artist = 'The White Stripes'
o2.title = 'The Air Near My Fingers'
o2.album = 'Elephant'
o3.artist = 'The White Stripes'
o3.title = 'The Air Near My Fingers'
o3.album = 'foobar'
s.scanned_tags = set(["artist", "album", "title"])
o1 = no("foo")
o2 = no("bar")
o3 = no("bleh")
o1.artist = "The White Stripes"
o1.title = "The Air Near My Fingers"
o1.album = "Elephant"
o2.artist = "The White Stripes"
o2.title = "The Air Near My Fingers"
o2.album = "Elephant"
o3.artist = "The White Stripes"
o3.title = "The Air Near My Fingers"
o3.album = "foobar"
r = s.get_dupe_groups([o1, o2, o3])
eq_(len(r), 1)


def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Tag
s.scanned_tags = set(['artist', 'album', 'title'])
s.scanned_tags = set(["artist", "album", "title"])
s.min_match_percentage = 50
o1 = no('foo')
o2 = no('bar')
o1.artist = 'The White Stripes - a'
o1.title = 'The Air Near My Fingers - a'
o1.album = 'Elephant - a'
o2.artist = 'The White Stripes - b'
o2.title = 'The Air Near My Fingers - b'
o2.album = 'Elephant - b'
o1 = no("foo")
o2 = no("bar")
o1.artist = "The White Stripes - a"
o1.title = "The Air Near My Fingers - a"
o1.album = "Elephant - a"
o2.artist = "The White Stripes - b"
o2.title = "The Air Near My Fingers - b"
o2.album = "Elephant - b"
r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1)


def test_tag_scan_with_different_scanned(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Tag
s.scanned_tags = set(['track', 'year'])
o1 = no('foo')
o2 = no('bar')
o1.artist = 'The White Stripes'
o1.title = 'some title'
o1.track = 'foo'
o1.year = 'bar'
o2.artist = 'The White Stripes'
o2.title = 'another title'
o2.track = 'foo'
o2.year = 'bar'
s.scanned_tags = set(["track", "year"])
o1 = no("foo")
o2 = no("bar")
o1.artist = "The White Stripes"
o1.title = "some title"
o1.track = "foo"
o1.year = "bar"
o2.artist = "The White Stripes"
o2.title = "another title"
o2.track = "foo"
o2.year = "bar"
r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1)


def test_tag_scan_only_scans_existing_tags(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Tag
s.scanned_tags = set(['artist', 'foo'])
o1 = no('foo')
o2 = no('bar')
o1.artist = 'The White Stripes'
o1.foo = 'foo'
o2.artist = 'The White Stripes'
o2.foo = 'bar'
s.scanned_tags = set(["artist", "foo"])
o1 = no("foo")
o2 = no("bar")
o1.artist = "The White Stripes"
o1.foo = "foo"
o2.artist = "The White Stripes"
o2.foo = "bar"
r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1) # Because 'foo' is not scanned, they match
eq_(len(r), 1)  # Because 'foo' is not scanned, they match


def test_tag_scan_converts_to_str(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Tag
s.scanned_tags = set(['track'])
o1 = no('foo')
o2 = no('bar')
s.scanned_tags = set(["track"])
o1 = no("foo")
o2 = no("bar")
o1.track = 42
o2.track = 42
try:
@ -292,28 +336,30 @@ def test_tag_scan_converts_to_str(fake_fileexists):
raise AssertionError()
eq_(len(r), 1)


def test_tag_scan_non_ascii(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Tag
s.scanned_tags = set(['title'])
o1 = no('foo')
o2 = no('bar')
o1.title = 'foobar\u00e9'
o2.title = 'foobar\u00e9'
s.scanned_tags = set(["title"])
o1 = no("foo")
o2 = no("bar")
o1.title = "foobar\u00e9"
o2.title = "foobar\u00e9"
try:
r = s.get_dupe_groups([o1, o2])
except UnicodeEncodeError:
raise AssertionError()
eq_(len(r), 1)


def test_ignore_list(fake_fileexists):
s = Scanner()
f1 = no('foobar')
f2 = no('foobar')
f3 = no('foobar')
f1.path = Path('dir1/foobar')
f2.path = Path('dir2/foobar')
f3.path = Path('dir3/foobar')
f1 = no("foobar")
f2 = no("foobar")
f3 = no("foobar")
f1.path = Path("dir1/foobar")
f2.path = Path("dir2/foobar")
f3.path = Path("dir3/foobar")
ignore_list = IgnoreList()
ignore_list.Ignore(str(f1.path), str(f2.path))
ignore_list.Ignore(str(f1.path), str(f3.path))
@ -327,16 +373,17 @@ def test_ignore_list(fake_fileexists):
# Ignored matches are not counted as discarded
eq_(s.discarded_file_count, 0)


def test_ignore_list_checks_for_unicode(fake_fileexists):
#scanner was calling path_str for ignore list checks. Since the Path changes, it must
#be unicode(path)
# scanner was calling path_str for ignore list checks. Since the Path changes, it must
# be unicode(path)
s = Scanner()
f1 = no('foobar')
f2 = no('foobar')
f3 = no('foobar')
f1.path = Path('foo1\u00e9')
f2.path = Path('foo2\u00e9')
f3.path = Path('foo3\u00e9')
f1 = no("foobar")
f2 = no("foobar")
f3 = no("foobar")
f1.path = Path("foo1\u00e9")
f2.path = Path("foo2\u00e9")
f3.path = Path("foo3\u00e9")
ignore_list = IgnoreList()
ignore_list.Ignore(str(f1.path), str(f2.path))
ignore_list.Ignore(str(f1.path), str(f3.path))
@ -348,6 +395,7 @@ def test_ignore_list_checks_for_unicode(fake_fileexists):
assert f2 in g
assert f3 in g


def test_file_evaluates_to_false(fake_fileexists):
# A very wrong way to use any() was added at some point, causing resulting group list
# to be empty.
@ -355,19 +403,19 @@ def test_file_evaluates_to_false(fake_fileexists):
def __bool__(self):
return False


s = Scanner()
f1 = FalseNamedObject('foobar', path='p1')
f2 = FalseNamedObject('foobar', path='p2')
f1 = FalseNamedObject("foobar", path="p1")
f2 = FalseNamedObject("foobar", path="p2")
r = s.get_dupe_groups([f1, f2])
eq_(len(r), 1)


def test_size_threshold(fake_fileexists):
# Only file equal or higher than the size_threshold in size are scanned
s = Scanner()
f1 = no('foo', 1, path='p1')
f2 = no('foo', 2, path='p2')
f3 = no('foo', 3, path='p3')
f1 = no("foo", 1, path="p1")
f2 = no("foo", 2, path="p2")
f3 = no("foo", 3, path="p3")
s.size_threshold = 2
groups = s.get_dupe_groups([f1, f2, f3])
eq_(len(groups), 1)
@ -377,48 +425,52 @@ def test_size_threshold(fake_fileexists):
assert f2 in group
assert f3 in group


def test_tie_breaker_path_deepness(fake_fileexists):
# If there is a tie in prioritization, path deepness is used as a tie breaker
s = Scanner()
o1, o2 = no('foo'), no('foo')
o1.path = Path('foo')
o2.path = Path('foo/bar')
o1, o2 = no("foo"), no("foo")
o1.path = Path("foo")
o2.path = Path("foo/bar")
[group] = s.get_dupe_groups([o1, o2])
assert group.ref is o2


def test_tie_breaker_copy(fake_fileexists):
# if copy is in the words used (even if it has a deeper path), it becomes a dupe
s = Scanner()
o1, o2 = no('foo bar Copy'), no('foo bar')
o1.path = Path('deeper/path')
o2.path = Path('foo')
o1, o2 = no("foo bar Copy"), no("foo bar")
o1.path = Path("deeper/path")
o2.path = Path("foo")
[group] = s.get_dupe_groups([o1, o2])
assert group.ref is o2


def test_tie_breaker_same_name_plus_digit(fake_fileexists):
# if ref has the same words as dupe, but has some just one extra word which is a digit, it
# becomes a dupe
s = Scanner()
o1 = no('foo bar 42')
o2 = no('foo bar [42]')
o3 = no('foo bar (42)')
o4 = no('foo bar {42}')
o5 = no('foo bar')
o1 = no("foo bar 42")
o2 = no("foo bar [42]")
o3 = no("foo bar (42)")
o4 = no("foo bar {42}")
o5 = no("foo bar")
# all numbered names have deeper paths, so they'll end up ref if the digits aren't correctly
# used as tie breakers
o1.path = Path('deeper/path')
o2.path = Path('deeper/path')
o3.path = Path('deeper/path')
o4.path = Path('deeper/path')
o5.path = Path('foo')
o1.path = Path("deeper/path")
o2.path = Path("deeper/path")
o3.path = Path("deeper/path")
o4.path = Path("deeper/path")
o5.path = Path("foo")
[group] = s.get_dupe_groups([o1, o2, o3, o4, o5])
assert group.ref is o5


def test_partial_group_match(fake_fileexists):
# Count the number of discarded matches (when a file doesn't match all other dupes of the
# group) in Scanner.discarded_file_count
s = Scanner()
o1, o2, o3 = no('a b'), no('a'), no('b')
o1, o2, o3 = no("a b"), no("a"), no("b")
s.min_match_percentage = 50
[group] = s.get_dupe_groups([o1, o2, o3])
eq_(len(group), 2)
@ -431,6 +483,7 @@ def test_partial_group_match(fake_fileexists):
assert o3 in group
eq_(s.discarded_file_count, 1)


def test_dont_group_files_that_dont_exist(tmpdir):
# when creating groups, check that files exist first. It's possible that these files have
# been moved during the scan by the user.
@ -439,8 +492,8 @@ def test_dont_group_files_that_dont_exist(tmpdir):
s = Scanner()
s.scan_type = ScanType.Contents
p = Path(str(tmpdir))
p['file1'].open('w').write('foo')
p['file2'].open('w').write('foo')
p["file1"].open("w").write("foo")
p["file2"].open("w").write("foo")
file1, file2 = fs.get_files(p)

def getmatches(*args, **kw):
@ -451,6 +504,7 @@ def test_dont_group_files_that_dont_exist(tmpdir):

assert not s.get_dupe_groups([file1, file2])


def test_folder_scan_exclude_subfolder_matches(fake_fileexists):
# when doing a Folders scan type, don't include matches for folders whose parent folder already
# match.
@ -458,31 +512,33 @@ def test_folder_scan_exclude_subfolder_matches(fake_fileexists):
s.scan_type = ScanType.Folders
topf1 = no("top folder 1", size=42)
topf1.md5 = topf1.md5partial = b"some_md5_1"
topf1.path = Path('/topf1')
topf1.path = Path("/topf1")
topf2 = no("top folder 2", size=42)
topf2.md5 = topf2.md5partial = b"some_md5_1"
topf2.path = Path('/topf2')
topf2.path = Path("/topf2")
subf1 = no("sub folder 1", size=41)
subf1.md5 = subf1.md5partial = b"some_md5_2"
subf1.path = Path('/topf1/sub')
subf1.path = Path("/topf1/sub")
subf2 = no("sub folder 2", size=41)
subf2.md5 = subf2.md5partial = b"some_md5_2"
subf2.path = Path('/topf2/sub')
eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2])), 1) # only top folders
subf2.path = Path("/topf2/sub")
eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2])), 1)  # only top folders
# however, if another folder matches a subfolder, keep in in the matches
otherf = no("other folder", size=41)
otherf.md5 = otherf.md5partial = b"some_md5_2"
otherf.path = Path('/otherfolder')
otherf.path = Path("/otherfolder")
eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2, otherf])), 2)


def test_ignore_files_with_same_path(fake_fileexists):
# It's possible that the scanner is fed with two file instances pointing to the same path. One
# of these files has to be ignored
s = Scanner()
f1 = no('foobar', path='path1/foobar')
f2 = no('foobar', path='path1/foobar')
f1 = no("foobar", path="path1/foobar")
f2 = no("foobar", path="path1/foobar")
eq_(s.get_dupe_groups([f1, f2]), [])


def test_dont_count_ref_files_as_discarded(fake_fileexists):
# To speed up the scan, we don't bother comparing contents of files that are both ref files.
# However, this causes problems in "discarded" counting and we make sure here that we don't
@ -492,20 +548,20 @@ def test_dont_count_ref_files_as_discarded(fake_fileexists):
o1 = no("foo", path="p1")
o2 = no("foo", path="p2")
o3 = no("foo", path="p3")
o1.md5 = o1.md5partial = 'foobar'
o2.md5 = o2.md5partial = 'foobar'
o3.md5 = o3.md5partial = 'foobar'
o1.md5 = o1.md5partial = "foobar"
o2.md5 = o2.md5partial = "foobar"
o3.md5 = o3.md5partial = "foobar"
o1.is_ref = True
o2.is_ref = True
eq_(len(s.get_dupe_groups([o1, o2, o3])), 1)
eq_(s.discarded_file_count, 0)


def test_priorize_me(fake_fileexists):
# in ScannerME, bitrate goes first (right after is_ref) in priorization
s = ScannerME()
o1, o2 = no('foo', path='p1'), no('foo', path='p2')
o1, o2 = no("foo", path="p1"), no("foo", path="p2")
o1.bitrate = 1
o2.bitrate = 2
[group] = s.get_dupe_groups([o1, o2])
assert group.ref is o2
26 core/util.py
@ -8,35 +8,41 @@ import time

from hscommon.util import format_time_decimal


def format_timestamp(t, delta):
if delta:
return format_time_decimal(t)
else:
if t > 0:
return time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(t))
return time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(t))
else:
return '---'
return "---"


def format_words(w):
def do_format(w):
if isinstance(w, list):
return '(%s)' % ', '.join(do_format(item) for item in w)
return "(%s)" % ", ".join(do_format(item) for item in w)
else:
return w.replace('\n', ' ')
return w.replace("\n", " ")

return ", ".join(do_format(item) for item in w)

return ', '.join(do_format(item) for item in w)

def format_perc(p):
return "%0.0f" % p


def format_dupe_count(c):
return str(c) if c else '---'
return str(c) if c else "---"


def cmp_value(dupe, attrname):
value = getattr(dupe, attrname, '')
value = getattr(dupe, attrname, "")
return value.lower() if isinstance(value, str) else value

def fix_surrogate_encoding(s, encoding='utf-8'):

def fix_surrogate_encoding(s, encoding="utf-8"):
# ref #210. It's possible to end up with file paths that, while correct unicode strings, are
# decoded with the 'surrogateescape' option, which make the string unencodable to utf-8. We fix
# these strings here by trying to encode them and, if it fails, we do an encode/decode dance
@ -49,8 +55,6 @@ def fix_surrogate_encoding(s, encoding='utf-8'):
try:
s.encode(encoding)
except UnicodeEncodeError:
return s.encode(encoding, 'replace').decode(encoding)
return s.encode(encoding, "replace").decode(encoding)
else:
return s


@ -26,7 +26,8 @@ import modulefinder
from setuptools import setup, Extension

from .plat import ISWINDOWS
from .util import modified_after, find_in_path, ensure_folder, delete_files_with_pattern
from .util import ensure_folder, delete_files_with_pattern


def print_and_do(cmd):
"""Prints ``cmd`` and executes it in the shell.
@ -35,6 +36,7 @@ def print_and_do(cmd):
p = Popen(cmd, shell=True)
return p.wait()


def _perform(src, dst, action, actionname):
if not op.lexists(src):
print("Copying %s failed: it doesn't exist." % src)
@ -44,26 +46,32 @@ def _perform(src, dst, action, actionname):
shutil.rmtree(dst)
else:
os.remove(dst)
print('%s %s --> %s' % (actionname, src, dst))
print("%s %s --> %s" % (actionname, src, dst))
action(src, dst)


def copy_file_or_folder(src, dst):
if op.isdir(src):
shutil.copytree(src, dst, symlinks=True)
else:
shutil.copy(src, dst)


def move(src, dst):
_perform(src, dst, os.rename, 'Moving')
_perform(src, dst, os.rename, "Moving")


def copy(src, dst):
_perform(src, dst, copy_file_or_folder, 'Copying')
_perform(src, dst, copy_file_or_folder, "Copying")


def symlink(src, dst):
_perform(src, dst, os.symlink, 'Symlinking')
_perform(src, dst, os.symlink, "Symlinking")


def hardlink(src, dst):
_perform(src, dst, os.link, 'Hardlinking')
_perform(src, dst, os.link, "Hardlinking")


def _perform_on_all(pattern, dst, action):
# pattern is a glob pattern, example "folder/foo*". The file is moved directly in dst, no folder
@ -73,12 +81,15 @@ def _perform_on_all(pattern, dst, action):
destpath = op.join(dst, op.basename(fn))
action(fn, destpath)


def move_all(pattern, dst):
_perform_on_all(pattern, dst, move)


def copy_all(pattern, dst):
_perform_on_all(pattern, dst, copy)


def ensure_empty_folder(path):
"""Make sure that the path exists and that it's an empty folder.
"""
@ -86,43 +97,54 @@ def ensure_empty_folder(path):
shutil.rmtree(path)
os.mkdir(path)


def filereplace(filename, outfilename=None, **kwargs):
"""Reads `filename`, replaces all {variables} in kwargs, and writes the result to `outfilename`.
"""
if outfilename is None:
outfilename = filename
fp = open(filename, 'rt', encoding='utf-8')
fp = open(filename, "rt", encoding="utf-8")
contents = fp.read()
fp.close()
# We can't use str.format() because in some files, there might be {} characters that mess with it.
for key, item in kwargs.items():
contents = contents.replace('{{{}}}'.format(key), item)
fp = open(outfilename, 'wt', encoding='utf-8')
contents = contents.replace("{{{}}}".format(key), item)
fp = open(outfilename, "wt", encoding="utf-8")
fp.write(contents)
fp.close()


def get_module_version(modulename):
mod = importlib.import_module(modulename)
return mod.__version__


def setup_package_argparser(parser):
parser.add_argument(
'--sign', dest='sign_identity',
help="Sign app under specified identity before packaging (OS X only)"
"--sign",
dest="sign_identity",
help="Sign app under specified identity before packaging (OS X only)",
)
parser.add_argument(
'--nosign', action='store_true', dest='nosign',
help="Don't sign the packaged app (OS X only)"
"--nosign",
action="store_true",
dest="nosign",
help="Don't sign the packaged app (OS X only)",
)
parser.add_argument(
'--src-pkg', action='store_true', dest='src_pkg',
help="Build a tar.gz of the current source."
"--src-pkg",
action="store_true",
dest="src_pkg",
help="Build a tar.gz of the current source.",
)
parser.add_argument(
'--arch-pkg', action='store_true', dest='arch_pkg',
help="Force Arch Linux packaging type, regardless of distro name."
"--arch-pkg",
action="store_true",
dest="arch_pkg",
help="Force Arch Linux packaging type, regardless of distro name.",
)


# `args` come from an ArgumentParser updated with setup_package_argparser()
def package_cocoa_app_in_dmg(app_path, destfolder, args):
# Rather than signing our app in XCode during the build phase, we sign it during the package
@ -130,7 +152,9 @@ def package_cocoa_app_in_dmg(app_path, destfolder, args):
# a valid signature.
if args.sign_identity:
sign_identity = "Developer ID Application: {}".format(args.sign_identity)
result = print_and_do('codesign --force --deep --sign "{}" "{}"'.format(sign_identity, app_path))
result = print_and_do(
'codesign --force --deep --sign "{}" "{}"'.format(sign_identity, app_path)
)
if result != 0:
print("ERROR: Signing failed. Aborting packaging.")
return
@ -139,23 +163,31 @@ def package_cocoa_app_in_dmg(app_path, destfolder, args):
return
build_dmg(app_path, destfolder)


def build_dmg(app_path, destfolder):
"""Builds a DMG volume with application at ``app_path`` and puts it in ``dest_path``.

The name of the resulting DMG volume is determined by the app's name and version.
"""
print(repr(op.join(app_path, 'Contents', 'Info.plist')))
plist = plistlib.readPlist(op.join(app_path, 'Contents', 'Info.plist'))
print(repr(op.join(app_path, "Contents", "Info.plist")))
plist = plistlib.readPlist(op.join(app_path, "Contents", "Info.plist"))
workpath = tempfile.mkdtemp()
dmgpath = op.join(workpath, plist['CFBundleName'])
dmgpath = op.join(workpath, plist["CFBundleName"])
os.mkdir(dmgpath)
print_and_do('cp -R "%s" "%s"' % (app_path, dmgpath))
print_and_do('ln -s /Applications "%s"' % op.join(dmgpath, 'Applications'))
dmgname = '%s_osx_%s.dmg' % (plist['CFBundleName'].lower().replace(' ', '_'), plist['CFBundleVersion'].replace('.', '_'))
print('Building %s' % dmgname)
print_and_do('ln -s /Applications "%s"' % op.join(dmgpath, "Applications"))
dmgname = "%s_osx_%s.dmg" % (
plist["CFBundleName"].lower().replace(" ", "_"),
plist["CFBundleVersion"].replace(".", "_"),
)
print("Building %s" % dmgname)
# UDBZ = bzip compression. UDZO (zip compression) was used before, but it compresses much less.
print_and_do('hdiutil create "%s" -format UDBZ -nocrossdev -srcdir "%s"' % (op.join(destfolder, dmgname), dmgpath))
print('Build Complete')
print_and_do(
'hdiutil create "%s" -format UDBZ -nocrossdev -srcdir "%s"'
% (op.join(destfolder, dmgname), dmgpath)
)
print("Build Complete")


def copy_sysconfig_files_for_embed(destpath):
# This normally shouldn't be needed for Python 3.3+.
@ -163,24 +195,28 @@ def copy_sysconfig_files_for_embed(destpath):
configh = sysconfig.get_config_h_filename()
shutil.copy(makefile, destpath)
shutil.copy(configh, destpath)
with open(op.join(destpath, 'site.py'), 'w') as fp:
fp.write("""
with open(op.join(destpath, "site.py"), "w") as fp:
fp.write(
"""
import os.path as op
from distutils import sysconfig
sysconfig.get_makefile_filename = lambda: op.join(op.dirname(__file__), 'Makefile')
sysconfig.get_config_h_filename = lambda: op.join(op.dirname(__file__), 'pyconfig.h')
""")
"""
)


def add_to_pythonpath(path):
"""Adds ``path`` to both ``PYTHONPATH`` env and ``sys.path``.
"""
abspath = op.abspath(path)
pythonpath = os.environ.get('PYTHONPATH', '')
pathsep = ';' if ISWINDOWS else ':'
pythonpath = os.environ.get("PYTHONPATH", "")
pathsep = ";" if ISWINDOWS else ":"
pythonpath = pathsep.join([abspath, pythonpath]) if pythonpath else abspath
os.environ['PYTHONPATH'] = pythonpath
os.environ["PYTHONPATH"] = pythonpath
sys.path.insert(1, abspath)


# This is a method to hack around those freakingly tricky data inclusion/exlusion rules
# in setuptools. We copy the packages *without data* in a build folder and then build the plugin
# from there.
@ -195,14 +231,16 @@ def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
create_links = False
if not extra_ignores:
extra_ignores = []
ignore = shutil.ignore_patterns('.hg*', 'tests', 'testdata', 'modules', 'docs', 'locale', *extra_ignores)
ignore = shutil.ignore_patterns(
".hg*", "tests", "testdata", "modules", "docs", "locale", *extra_ignores
)
for package_name in packages_names:
if op.exists(package_name):
source_path = package_name
else:
mod = __import__(package_name)
source_path = mod.__file__
if mod.__file__.endswith('__init__.py'):
if mod.__file__.endswith("__init__.py"):
source_path = op.dirname(source_path)
dest_name = op.basename(source_path)
dest_path = op.join(dest, dest_name)
@ -220,58 +258,81 @@ def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
else:
shutil.copy(source_path, dest_path)

def copy_qt_plugins(folder_names, dest): # This is only for Windows

def copy_qt_plugins(folder_names, dest):  # This is only for Windows
from PyQt5.QtCore import QLibraryInfo

qt_plugin_dir = QLibraryInfo.location(QLibraryInfo.PluginsPath)

def ignore(path, names):
if path == qt_plugin_dir:
return [n for n in names if n not in folder_names]
else:
return [n for n in names if not n.endswith('.dll')]
return [n for n in names if not n.endswith(".dll")]

shutil.copytree(qt_plugin_dir, dest, ignore=ignore)

def build_debian_changelog(changelogpath, destfile, pkgname, from_version=None,
distribution='precise', fix_version=None):

def build_debian_changelog(
changelogpath,
destfile,
pkgname,
from_version=None,
distribution="precise",
fix_version=None,
):
"""Builds a debian changelog out of a YAML changelog.

Use fix_version to patch the top changelog to that version (if, for example, there was a
packaging error and you need to quickly fix it)
"""

def desc2list(desc):
# We take each item, enumerated with the '*' character, and transform it into a list.
desc = desc.replace('\n', ' ')
desc = desc.replace(' ', ' ')
result = desc.split('*')
desc = desc.replace("\n", " ")
desc = desc.replace(" ", " ")
result = desc.split("*")
return [s.strip() for s in result if s.strip()]

ENTRY_MODEL = "{pkg} ({version}-1) {distribution}; urgency=low\n\n{changes}\n -- Virgil Dupras <hsoft@hardcoded.net> {date}\n\n"
ENTRY_MODEL = (
"{pkg} ({version}-1) {distribution}; urgency=low\n\n{changes}\n "
"-- Virgil Dupras <hsoft@hardcoded.net> {date}\n\n"
)
CHANGE_MODEL = " * {description}\n"
changelogs = read_changelog_file(changelogpath)
if from_version:
# We only want logs from a particular version
for index, log in enumerate(changelogs):
if log['version'] == from_version:
changelogs = changelogs[:index+1]
if log["version"] == from_version:
changelogs = changelogs[: index + 1]
break
if fix_version:
changelogs[0]['version'] = fix_version
changelogs[0]["version"] = fix_version
rendered_logs = []
for log in changelogs:
version = log['version']
logdate = log['date']
desc = log['description']
rendered_date = logdate.strftime('%a, %d %b %Y 00:00:00 +0000')
version = log["version"]
logdate = log["date"]
desc = log["description"]
rendered_date = logdate.strftime("%a, %d %b %Y 00:00:00 +0000")
rendered_descs = [CHANGE_MODEL.format(description=d) for d in desc2list(desc)]
changes = ''.join(rendered_descs)
rendered_log = ENTRY_MODEL.format(pkg=pkgname, version=version, changes=changes,
date=rendered_date, distribution=distribution)
changes = "".join(rendered_descs)
rendered_log = ENTRY_MODEL.format(
pkg=pkgname,
version=version,
changes=changes,
date=rendered_date,
distribution=distribution,
)
rendered_logs.append(rendered_log)
result = ''.join(rendered_logs)
fp = open(destfile, 'w')
result = "".join(rendered_logs)
fp = open(destfile, "w")
fp.write(result)
fp.close()

re_changelog_header = re.compile(r'=== ([\d.b]*) \(([\d\-]*)\)')

re_changelog_header = re.compile(r"=== ([\d.b]*) \(([\d\-]*)\)")


def read_changelog_file(filename):
def iter_by_three(it):
while True:
@ -283,25 +344,31 @@ def read_changelog_file(filename):
return
yield version, date, description

with open(filename, 'rt', encoding='utf-8') as fp:
with open(filename, "rt", encoding="utf-8") as fp:
contents = fp.read()
splitted = re_changelog_header.split(contents)[1:] # the first item is empty
splitted = re_changelog_header.split(contents)[1:]  # the first item is empty
# splitted = [version1, date1, desc1, version2, date2, ...]
result = []
for version, date_str, description in iter_by_three(iter(splitted)):
date = datetime.strptime(date_str, '%Y-%m-%d').date()
d = {'date': date, 'date_str': date_str, 'version': version, 'description': description.strip()}
date = datetime.strptime(date_str, "%Y-%m-%d").date()
d = {
"date": date,
"date_str": date_str,
"version": version,
"description": description.strip(),
}
result.append(d)
return result


class OSXAppStructure:
def __init__(self, dest):
self.dest = dest
self.contents = op.join(dest, 'Contents')
self.macos = op.join(self.contents, 'MacOS')
self.resources = op.join(self.contents, 'Resources')
self.frameworks = op.join(self.contents, 'Frameworks')
self.infoplist = op.join(self.contents, 'Info.plist')
self.contents = op.join(dest, "Contents")
self.macos = op.join(self.contents, "MacOS")
self.resources = op.join(self.contents, "Resources")
self.frameworks = op.join(self.contents, "Frameworks")
self.infoplist = op.join(self.contents, "Info.plist")

def create(self, infoplist):
ensure_empty_folder(self.dest)
@ -309,11 +376,11 @@ class OSXAppStructure:
os.mkdir(self.resources)
os.mkdir(self.frameworks)
copy(infoplist, self.infoplist)
open(op.join(self.contents, 'PkgInfo'), 'wt').write("APPLxxxx")
open(op.join(self.contents, "PkgInfo"), "wt").write("APPLxxxx")

def copy_executable(self, executable):
info = plistlib.readPlist(self.infoplist)
self.executablename = info['CFBundleExecutable']
self.executablename = info["CFBundleExecutable"]
self.executablepath = op.join(self.macos, self.executablename)
copy(executable, self.executablepath)

@ -329,8 +396,14 @@ class OSXAppStructure:
copy(path, framework_dest)


def create_osx_app_structure(dest, executable, infoplist, resources=None, frameworks=None,
symlink_resources=False):
def create_osx_app_structure(
dest,
executable,
infoplist,
resources=None,
frameworks=None,
symlink_resources=False,
):
# `dest`: A path to the destination .app folder
# `executable`: the path of the executable file that goes in "MacOS"
# `infoplist`: The path to your Info.plist file.
@ -343,13 +416,14 @@ def create_osx_app_structure(dest, executable, infoplist, resources=None, framew
app.copy_resources(*resources, use_symlinks=symlink_resources)
app.copy_frameworks(*frameworks)


class OSXFrameworkStructure:
def __init__(self, dest):
self.dest = dest
self.contents = op.join(dest, 'Versions', 'A')
self.resources = op.join(self.contents, 'Resources')
self.headers = op.join(self.contents, 'Headers')
self.infoplist = op.join(self.resources, 'Info.plist')
self.contents = op.join(dest, "Versions", "A")
self.resources = op.join(self.contents, "Resources")
self.headers = op.join(self.contents, "Headers")
self.infoplist = op.join(self.resources, "Info.plist")
self._update_executable_path()

def _update_executable_path(self):
@ -357,7 +431,7 @@ class OSXFrameworkStructure:
self.executablename = self.executablepath = None
return
info = plistlib.readPlist(self.infoplist)
self.executablename = info['CFBundleExecutable']
self.executablename = info["CFBundleExecutable"]
self.executablepath = op.join(self.contents, self.executablename)

def create(self, infoplist):
@ -371,10 +445,10 @@ class OSXFrameworkStructure:
def create_symlinks(self):
# Only call this after create() and copy_executable()
rel = lambda path: op.relpath(path, self.dest)
os.symlink('A', op.join(self.dest, 'Versions', 'Current'))
os.symlink("A", op.join(self.dest, "Versions", "Current"))
os.symlink(rel(self.executablepath), op.join(self.dest, self.executablename))
os.symlink(rel(self.headers), op.join(self.dest, 'Headers'))
os.symlink(rel(self.resources), op.join(self.dest, 'Resources'))
os.symlink(rel(self.headers), op.join(self.dest, "Headers"))
os.symlink(rel(self.resources), op.join(self.dest, "Resources"))

def copy_executable(self, executable):
copy(executable, self.executablepath)
@ -393,23 +467,28 @@ class OSXFrameworkStructure:


def copy_embeddable_python_dylib(dst):
runtime = op.join(sysconfig.get_config_var('PYTHONFRAMEWORKPREFIX'), sysconfig.get_config_var('LDLIBRARY'))
filedest = op.join(dst, 'Python')
runtime = op.join(
sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"),
sysconfig.get_config_var("LDLIBRARY"),
)
filedest = op.join(dst, "Python")
shutil.copy(runtime, filedest)
os.chmod(filedest, 0o774) # We need write permission to use install_name_tool
cmd = 'install_name_tool -id @rpath/Python %s' % filedest
os.chmod(filedest, 0o774)  # We need write permission to use install_name_tool
cmd = "install_name_tool -id @rpath/Python %s" % filedest
print_and_do(cmd)


def collect_stdlib_dependencies(script, dest_folder, extra_deps=None):
sysprefix = sys.prefix # could be a virtualenv
real_lib_prefix = sysconfig.get_config_var('LIBDEST')
sysprefix = sys.prefix  # could be a virtualenv
real_lib_prefix = sysconfig.get_config_var("LIBDEST")

def is_stdlib_path(path):
# A module path is only a stdlib path if it's in either sys.prefix or
# sysconfig.get_config_var('prefix') (the 2 are different if we are in a virtualenv) and if
# there's no "site-package in the path.
if not path:
return False
if 'site-package' in path:
if "site-package" in path:
return False
if not (path.startswith(sysprefix) or path.startswith(real_lib_prefix)):
return False
@ -425,13 +504,17 @@ def collect_stdlib_dependencies(script, dest_folder, extra_deps=None):
relpath = op.relpath(p, real_lib_prefix)
elif p.startswith(sysprefix):
relpath = op.relpath(p, sysprefix)
assert relpath.startswith('lib/python3.') # we want to get rid of that lib/python3.x part
relpath = relpath[len('lib/python3.X/'):]
assert relpath.startswith(
"lib/python3."
)  # we want to get rid of that lib/python3.x part
relpath = relpath[len("lib/python3.X/") :]
else:
raise AssertionError()
if relpath.startswith('lib-dynload'): # We copy .so files in lib-dynload directly in our dest
relpath = relpath[len('lib-dynload/'):]
if relpath.startswith('encodings') or relpath.startswith('distutils'):
if relpath.startswith(
"lib-dynload"
):  # We copy .so files in lib-dynload directly in our dest
relpath = relpath[len("lib-dynload/") :]
if relpath.startswith("encodings") or relpath.startswith("distutils"):
# We force their inclusion later.
continue
dest_path = op.join(dest_folder, relpath)
@ -440,34 +523,47 @@ def collect_stdlib_dependencies(script, dest_folder, extra_deps=None):
# stringprep is used by encodings.
# We use real_lib_prefix with distutils because virtualenv messes with it and we need to refer
# to the original distutils folder.
FORCED_INCLUSION = ['encodings', 'stringprep', op.join(real_lib_prefix, 'distutils')]
FORCED_INCLUSION = [
"encodings",
"stringprep",
op.join(real_lib_prefix, "distutils"),
]
if extra_deps:
FORCED_INCLUSION += extra_deps
copy_packages(FORCED_INCLUSION, dest_folder)
# There's a couple of rather big exe files in the distutils folder that we absolutely don't
# need. Remove them.
delete_files_with_pattern(op.join(dest_folder, 'distutils'), '*.exe')
delete_files_with_pattern(op.join(dest_folder, "distutils"), "*.exe")
# And, finally, create an empty "site.py" that Python needs around on startup.
open(op.join(dest_folder, 'site.py'), 'w').close()
open(op.join(dest_folder, "site.py"), "w").close()


def fix_qt_resource_file(path):
|
||||
# pyrcc5 under Windows, if the locale is non-english, can produce a source file with a date
|
||||
# containing accented characters. If it does, the encoding is wrong and it prevents the file
|
||||
# from being correctly frozen by cx_freeze. To work around that, we open the file, strip all
|
||||
# comments, and save.
|
||||
with open(path, 'rb') as fp:
|
||||
with open(path, "rb") as fp:
|
||||
contents = fp.read()
|
||||
lines = contents.split(b'\n')
|
||||
lines = [l for l in lines if not l.startswith(b'#')]
|
||||
with open(path, 'wb') as fp:
|
||||
fp.write(b'\n'.join(lines))
|
||||
lines = contents.split(b"\n")
|
||||
lines = [l for l in lines if not l.startswith(b"#")]
|
||||
with open(path, "wb") as fp:
|
||||
fp.write(b"\n".join(lines))
|
||||
|
||||
def build_cocoa_ext(extname, dest, source_files, extra_frameworks=(), extra_includes=()):
|
||||
|
||||
def build_cocoa_ext(
|
||||
extname, dest, source_files, extra_frameworks=(), extra_includes=()
|
||||
):
|
||||
extra_link_args = ["-framework", "CoreFoundation", "-framework", "Foundation"]
|
||||
for extra in extra_frameworks:
|
||||
extra_link_args += ['-framework', extra]
|
||||
ext = Extension(extname, source_files, extra_link_args=extra_link_args, include_dirs=extra_includes)
|
||||
setup(script_args=['build_ext', '--inplace'], ext_modules=[ext])
|
||||
extra_link_args += ["-framework", extra]
|
||||
ext = Extension(
|
||||
extname,
|
||||
source_files,
|
||||
extra_link_args=extra_link_args,
|
||||
include_dirs=extra_includes,
|
||||
)
|
||||
setup(script_args=["build_ext", "--inplace"], ext_modules=[ext])
|
||||
# Our problem here is to get the fully qualified filename of the resulting .so but I couldn't
|
||||
# find a documented way to do so. The only thing I could find is this below :(
|
||||
fn = ext._file_name
@@ -8,26 +8,24 @@ import argparse

from setuptools import setup, Extension


def get_parser():
    parser = argparse.ArgumentParser(description="Build an arbitrary Python extension.")
    parser.add_argument(
        'source_files', nargs='+',
        help="List of source files to compile"
    )
    parser.add_argument(
        'name', nargs=1,
        help="Name of the resulting extension"
        "source_files", nargs="+", help="List of source files to compile"
    )
    parser.add_argument("name", nargs=1, help="Name of the resulting extension")
    return parser


def main():
    args = get_parser().parse_args()
    print("Building {}...".format(args.name[0]))
    ext = Extension(args.name[0], args.source_files)
    setup(
        script_args=['build_ext', '--inplace'],
        ext_modules=[ext],
        script_args=["build_ext", "--inplace"], ext_modules=[ext],
    )

if __name__ == '__main__':

if __name__ == "__main__":
    main()
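A parsing sketch for this script (the file and extension names are hypothetical): with the two positionals declared above, argparse splits the trailing argument off for "name".

args = get_parser().parse_args(["my_ext.c", "my_ext"])
print(args.source_files, args.name)  # ['my_ext.c'] ['my_ext']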
@@ -2,8 +2,8 @@
# Created On: 2008-01-08
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

"""When you have to deal with names that have to be unique and can conflict together, you can use
@@ -16,14 +16,15 @@ import shutil

from .path import Path, pathify

#This matches [123], but not [12] (3 digits being the minimum).
#It also matches [1234] [12345] etc..
#And only at the start of the string
re_conflict = re.compile(r'^\[\d{3}\d*\] ')
# This matches [123], but not [12] (3 digits being the minimum).
# It also matches [1234] [12345] etc..
# And only at the start of the string
re_conflict = re.compile(r"^\[\d{3}\d*\] ")


def get_conflicted_name(other_names, name):
    """Returns name with a ``[000]`` number in front of it.

    The number between brackets depends on how many conflicted filenames
    there already are in other_names.
    """
@@ -32,23 +33,26 @@ def get_conflicted_name(other_names, name):
        return name
    i = 0
    while True:
        newname = '[%03d] %s' % (i, name)
        newname = "[%03d] %s" % (i, name)
        if newname not in other_names:
            return newname
        i += 1


def get_unconflicted_name(name):
    """Returns ``name`` without ``[]`` brackets.

    Brackets which, of course, might have been added by :func:`get_conflicted_name`.
    """
    return re_conflict.sub('',name,1)
    return re_conflict.sub("", name, 1)


def is_conflicted(name):
    """Returns whether ``name`` is prepended with a bracketed number.
    """
    return re_conflict.match(name) is not None


@pathify
def _smart_move_or_copy(operation, source_path: Path, dest_path: Path):
    """Use move() or copy() to move and copy file with the conflict management.
@@ -61,19 +65,24 @@ def _smart_move_or_copy(operation, source_path: Path, dest_path: Path):
        newname = get_conflicted_name(os.listdir(str(dest_dir_path)), filename)
        dest_path = dest_dir_path[newname]
    operation(str(source_path), str(dest_path))


def smart_move(source_path, dest_path):
    """Same as :func:`smart_copy`, but it moves files instead.
    """
    _smart_move_or_copy(shutil.move, source_path, dest_path)


def smart_copy(source_path, dest_path):
    """Copies ``source_path`` to ``dest_path``, recursively and with conflict resolution.
    """
    try:
        _smart_move_or_copy(shutil.copy, source_path, dest_path)
    except IOError as e:
        if e.errno in {21, 13}: # it's a directory, code is 21 on OS X / Linux and 13 on Windows
        if e.errno in {
            21,
            13,
        }:  # it's a directory, code is 21 on OS X / Linux and 13 on Windows
            _smart_move_or_copy(shutil.copytree, source_path, dest_path)
        else:
            raise
            raise
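A quick interactive illustration of the conflict helpers above (session output, not part of the commit):

>>> get_conflicted_name(["foo.txt"], "foo.txt")
'[000] foo.txt'
>>> get_conflicted_name(["foo.txt", "[000] foo.txt"], "foo.txt")
'[001] foo.txt'
>>> get_unconflicted_name("[000] foo.txt")
'foo.txt'
>>> is_conflicted("plain.txt")
False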
@@ -1,14 +1,15 @@
# Created By: Virgil Dupras
# Created On: 2011-04-19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import sys
import traceback


# Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/
def stacktraces():
    code = []
@@ -18,5 +19,5 @@ def stacktraces():
        code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
        if line:
            code.append(" %s" % (line.strip()))

    return "\n".join(code)

    return "\n".join(code)
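Usage sketch (the hscommon.debug import path is an assumption about where this module lives): dump a trace for every running thread.

from hscommon import debug

print(debug.stacktraces())  # one 'File: ..., line ..., in ...' block per thread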
@@ -9,25 +9,30 @@
import os.path as op
import logging


class SpecialFolder:
    AppData = 1
    Cache = 2


def open_url(url):
    """Open ``url`` with the default browser.
    """
    _open_url(url)


def open_path(path):
    """Open ``path`` with its associated application.
    """
    _open_path(str(path))


def reveal_path(path):
    """Open the folder containing ``path`` with the default file browser.
    """
    _reveal_path(str(path))


def special_folder_path(special_folder, appname=None):
    """Returns the path of ``special_folder``.

@@ -38,12 +43,14 @@ def special_folder_path(special_folder, appname=None):
    """
    return _special_folder_path(special_folder, appname)


try:
    # Normally, we would simply do "from cocoa import proxy", but due to a bug in pytest (currently
    # at v2.4.2), our test suite is broken when we do that. This below is a workaround until that
    # bug is fixed.
    import cocoa
    if not hasattr(cocoa, 'proxy'):

    if not hasattr(cocoa, "proxy"):
        raise ImportError()
    proxy = cocoa.proxy
    _open_url = proxy.openURL_
@@ -56,13 +63,15 @@ try:
        else:
            base = proxy.getAppdataPath()
        if not appname:
            appname = proxy.bundleInfo_('CFBundleName')
            appname = proxy.bundleInfo_("CFBundleName")
        return op.join(base, appname)


except ImportError:
    try:
        from PyQt5.QtCore import QUrl, QStandardPaths
        from PyQt5.QtGui import QDesktopServices

        def _open_url(url):
            QDesktopServices.openUrl(QUrl(url))

@@ -79,10 +88,12 @@ except ImportError:
            else:
                qtfolder = QStandardPaths.DataLocation
            return QStandardPaths.standardLocations(qtfolder)[0]

    except ImportError:
        # We're either running tests, and these functions don't matter much or we're in a really
        # weird situation. Let's just have dummy fallbacks.
        logging.warning("Can't setup desktop functions!")

        def _open_path(path):
            pass

@@ -90,4 +101,4 @@ except ImportError:
            pass

        def _special_folder_path(special_folder, appname=None):
            return '/tmp'
            return "/tmp"
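Usage sketch (the hscommon.desktop import path and the "dupeGuru" appname are assumptions): whichever backend was importable above answers the call.

from hscommon import desktop

appdata = desktop.special_folder_path(desktop.SpecialFolder.AppData, appname="dupeGuru")
print(appdata)  # Cocoa or Qt data location, or "/tmp" with the dummy fallback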
@@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2011-08-05
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from sys import maxsize as INF
@@ -11,73 +11,74 @@ from math import sqrt

VERY_SMALL = 0.0000001


class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return '<Point {:2.2f}, {:2.2f}>'.format(*self)

        return "<Point {:2.2f}, {:2.2f}>".format(*self)

    def __iter__(self):
        yield self.x
        yield self.y

    def distance_to(self, other):
        return Line(self, other).length()


class Line:
    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2

    def __repr__(self):
        return '<Line {}, {}>'.format(*self)

        return "<Line {}, {}>".format(*self)

    def __iter__(self):
        yield self.p1
        yield self.p2

    def dx(self):
        return self.p2.x - self.p1.x

    def dy(self):
        return self.p2.y - self.p1.y

    def length(self):
        return sqrt(self.dx() ** 2 + self.dy() ** 2)

    def slope(self):
        if self.dx() == 0:
            return INF if self.dy() > 0 else -INF
        else:
            return self.dy() / self.dx()

    def intersection_point(self, other):
        # with help from http://paulbourke.net/geometry/lineline2d/
        if abs(self.slope() - other.slope()) < VERY_SMALL:
            # parallel. Even if coincident, we return nothing
            return None

        A, B = self
        C, D = other

        denom = (D.y-C.y) * (B.x-A.x) - (D.x-C.x) * (B.y-A.y)

        denom = (D.y - C.y) * (B.x - A.x) - (D.x - C.x) * (B.y - A.y)
        if denom == 0:
            return None
        numera = (D.x-C.x) * (A.y-C.y) - (D.y-C.y) * (A.x-C.x)
        numerb = (B.x-A.x) * (A.y-C.y) - (B.y-A.y) * (A.x-C.x)

        mua = numera / denom;
        mub = numerb / denom;
        numera = (D.x - C.x) * (A.y - C.y) - (D.y - C.y) * (A.x - C.x)
        numerb = (B.x - A.x) * (A.y - C.y) - (B.y - A.y) * (A.x - C.x)

        mua = numera / denom
        mub = numerb / denom
        if (0 <= mua <= 1) and (0 <= mub <= 1):
            x = A.x + mua * (B.x - A.x)
            y = A.y + mua * (B.y - A.y)
            return Point(x, y)
        else:
            return None
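A worked example for intersection_point(): the segment (0, 0)-(2, 2) crosses (0, 2)-(2, 0) at (1, 1), and near-parallel segments return None.

a = Line(Point(0, 0), Point(2, 2))
b = Line(Point(0, 2), Point(2, 0))
print(a.intersection_point(b))  # <Point 1.00, 1.00>: mua = mub = 0.5 in the formulas above

c = Line(Point(0, 1), Point(2, 3))  # same slope as a
print(a.intersection_point(c))  # None: the slopes differ by less than VERY_SMALL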


class Rect:
    def __init__(self, x, y, w, h):
@@ -85,43 +86,43 @@ class Rect:
        self.y = y
        self.w = w
        self.h = h

    def __iter__(self):
        yield self.x
        yield self.y
        yield self.w
        yield self.h

    def __repr__(self):
        return '<Rect {:2.2f}, {:2.2f}, {:2.2f}, {:2.2f}>'.format(*self)

        return "<Rect {:2.2f}, {:2.2f}, {:2.2f}, {:2.2f}>".format(*self)

    @classmethod
    def from_center(cls, center, width, height):
        x = center.x - width / 2
        y = center.y - height / 2
        return cls(x, y, width, height)

    @classmethod
    def from_corners(cls, pt1, pt2):
        x1, y1 = pt1
        x2, y2 = pt2
        return cls(min(x1, x2), min(y1, y2), abs(x1-x2), abs(y1-y2))

        return cls(min(x1, x2), min(y1, y2), abs(x1 - x2), abs(y1 - y2))

    def center(self):
        return Point(self.x + self.w/2, self.y + self.h/2)

        return Point(self.x + self.w / 2, self.y + self.h / 2)

    def contains_point(self, point):
        x, y = point
        (x1, y1), (x2, y2) = self.corners()
        return (x1 <= x <= x2) and (y1 <= y <= y2)

    def contains_rect(self, rect):
        pt1, pt2 = rect.corners()
        return self.contains_point(pt1) and self.contains_point(pt2)

    def corners(self):
        return Point(self.x, self.y), Point(self.x+self.w, self.y+self.h)

        return Point(self.x, self.y), Point(self.x + self.w, self.y + self.h)

    def intersects(self, other):
        r1pt1, r1pt2 = self.corners()
        r2pt1, r2pt2 = other.corners()
@@ -136,7 +137,7 @@ class Rect:
        else:
            yinter = r2pt2.y >= r1pt1.y
        return yinter

    def lines(self):
        pt1, pt4 = self.corners()
        pt2 = Point(pt4.x, pt1.y)
@@ -146,7 +147,7 @@ class Rect:
        l3 = Line(pt3, pt4)
        l4 = Line(pt1, pt3)
        return l1, l2, l3, l4

    def scaled_rect(self, dx, dy):
        """Returns a rect that has the same borders as self, but grown/shrunk by dx/dy on each side.
        """
@@ -156,7 +157,7 @@ class Rect:
        w += dx * 2
        h += dy * 2
        return Rect(x, y, w, h)

    def united(self, other):
        """Returns the bounding rectangle of this rectangle and `other`.
        """
@@ -166,53 +167,52 @@ class Rect:
        corner1 = Point(min(ulcorner1.x, ulcorner2.x), min(ulcorner1.y, ulcorner2.y))
        corner2 = Point(max(lrcorner1.x, lrcorner2.x), max(lrcorner1.y, lrcorner2.y))
        return Rect.from_corners(corner1, corner2)

    #--- Properties
    # --- Properties
    @property
    def top(self):
        return self.y

    @top.setter
    def top(self, value):
        self.y = value

    @property
    def bottom(self):
        return self.y + self.h

    @bottom.setter
    def bottom(self, value):
        self.y = value - self.h

    @property
    def left(self):
        return self.x

    @left.setter
    def left(self, value):
        self.x = value

    @property
    def right(self):
        return self.x + self.w

    @right.setter
    def right(self, value):
        self.x = value - self.w

    @property
    def width(self):
        return self.w

    @width.setter
    def width(self, value):
        self.w = value

    @property
    def height(self):
        return self.h

    @height.setter
    def height(self, value):
        self.h = value
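A quick sketch of the Rect API above:

r = Rect.from_center(Point(0, 0), 4, 2)
print(r)  # <Rect -2.00, -1.00, 4.00, 2.00>
print(r.contains_point(Point(1, 0)))  # True
print(r.right, r.bottom)  # 2.0 1.0
u = r.united(Rect(0, 0, 5, 5))
print(u.corners())  # (<Point -2.00, -1.00>, <Point 5.00, 5.00>)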
@@ -4,13 +4,16 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html


def noop(*args, **kwargs):
    pass


class NoopGUI:
    def __getattr__(self, func_name):
        return noop


class GUIObject:
    """Cross-toolkit "model" representation of a GUI layer object.

@@ -32,6 +35,7 @@ class GUIObject:
    However, sometimes you want to be able to re-bind another view. In this case, set the
    ``multibind`` flag to ``True`` and the safeguard will be disabled.
    """

    def __init__(self, multibind=False):
        self._view = None
        self._multibind = multibind
@@ -77,4 +81,3 @@ class GUIObject:
        # Instead of None, we put a NoopGUI() there to avoid rogue view callback raising an
        # exception.
        self._view = NoopGUI()
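A minimal model sketch; the exact bind entry point isn't shown in this hunk, but the _view_updated() hook and the view attribute are:

class CounterModel(GUIObject):
    def __init__(self):
        GUIObject.__init__(self)
        self.count = 0

    def _view_updated(self):
        # called once a real view is bound; after unbinding, calls go to NoopGUI
        self.view.refresh()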
@@ -1,21 +1,23 @@
# Created By: Virgil Dupras
# Created On: 2010-07-25
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import copy

from .base import GUIObject


class Column:
    """Holds column attributes such as its name, width, visibility, etc.

    These attributes are then used to correctly configure the column on the "view" side.
    """
    def __init__(self, name, display='', visible=True, optional=False):

    def __init__(self, name, display="", visible=True, optional=False):
        #: "programmatical" (not for display) name. Used as a reference in a couple of places, such
        #: as :meth:`Columns.column_by_name`.
        self.name = name
@@ -39,52 +41,57 @@ class Column:
        self.default_visible = visible
        #: Whether the column can have :attr:`visible` set to false.
        self.optional = optional


class ColumnsView:
    """Expected interface for :class:`Columns`'s view.

    *Not actually used in the code. For documentation purposes only.*

    Our view, the columns controller of a table or outline, is expected to properly respond to
    callbacks.
    """

    def restore_columns(self):
        """Update all columns according to the model.

        When this is called, our view has to update the columns title, order and visibility of all
        columns.
        """

    def set_column_visible(self, colname, visible):
        """Update visibility of column ``colname``.

        Called when the user toggles the visibility of a column, we must update the column
        ``colname``'s visibility status to ``visible``.
        """


class PrefAccessInterface:
    """Expected interface for :class:`Columns`'s prefaccess.

    *Not actually used in the code. For documentation purposes only.*
    """

    def get_default(self, key, fallback_value):
        """Retrieve the value for ``key`` in the currently running app's preference store.

        If the key doesn't exist, return ``fallback_value``.
        """

    def set_default(self, key, value):
        """Set the value ``value`` for ``key`` in the currently running app's preference store.
        """


class Columns(GUIObject):
    """Cross-toolkit GUI-enabled column set for tables or outlines.

    Manages a column set's order, visibility and width. We also manage the persistence of these
    attributes so that we can restore them on the next run.

    Subclasses :class:`.GUIObject`. Expected view: :class:`ColumnsView`.

    :param table: The table the columns belong to. It's from there that we retrieve our column
                  configuration and it must have a ``COLUMNS`` attribute which is a list of
                  :class:`Column`. We also call :meth:`~.GUITable.save_edits` on it from time to
@@ -97,6 +104,7 @@ class Columns(GUIObject):
                  a prefix. Preferences are saved under more than one name, but they will all
                  have that same prefix.
    """

    def __init__(self, table, prefaccess=None, savename=None):
        GUIObject.__init__(self)
        self.table = table
@@ -108,84 +116,88 @@ class Columns(GUIObject):
            column.logical_index = i
            column.ordered_index = i
        self.coldata = {col.name: col for col in self.column_list}

    #--- Private
    # --- Private
    def _get_colname_attr(self, colname, attrname, default):
        try:
            return getattr(self.coldata[colname], attrname)
        except KeyError:
            return default

    def _set_colname_attr(self, colname, attrname, value):
        try:
            col = self.coldata[colname]
            setattr(col, attrname, value)
        except KeyError:
            pass

    def _optional_columns(self):
        return [c for c in self.column_list if c.optional]

    #--- Override
    # --- Override
    def _view_updated(self):
        self.restore_columns()

    #--- Public
    # --- Public
    def column_by_index(self, index):
        """Return the :class:`Column` having the :attr:`~Column.logical_index` ``index``.
        """
        return self.column_list[index]

    def column_by_name(self, name):
        """Return the :class:`Column` having the :attr:`~Column.name` ``name``.
        """
        return self.coldata[name]

    def columns_count(self):
        """Returns the number of columns in our set.
        """
        return len(self.column_list)

    def column_display(self, colname):
        """Returns display name for column named ``colname``, or ``''`` if there's none.
        """
        return self._get_colname_attr(colname, 'display', '')

        return self._get_colname_attr(colname, "display", "")

    def column_is_visible(self, colname):
        """Returns visibility for column named ``colname``, or ``True`` if there's none.
        """
        return self._get_colname_attr(colname, 'visible', True)

        return self._get_colname_attr(colname, "visible", True)

    def column_width(self, colname):
        """Returns width for column named ``colname``, or ``0`` if there's none.
        """
        return self._get_colname_attr(colname, 'width', 0)

        return self._get_colname_attr(colname, "width", 0)

    def columns_to_right(self, colname):
        """Returns the list of all columns to the right of ``colname``.

        "right" meaning "having a higher :attr:`Column.ordered_index`" in our left-to-right
        civilization.
        """
        column = self.coldata[colname]
        index = column.ordered_index
        return [col.name for col in self.column_list if (col.visible and col.ordered_index > index)]

        return [
            col.name
            for col in self.column_list
            if (col.visible and col.ordered_index > index)
        ]

    def menu_items(self):
        """Returns a list of items convenient for quick visibility menu generation.

        Returns a list of ``(display_name, is_marked)`` items for each optional column in the
        current view (``is_marked`` means that it's visible).

        You can use this to generate a menu to let the user toggle the visibility of an optional
        column. That is why we only show optional columns, because the visibility of mandatory
        columns can't be toggled.
        """
        return [(c.display, c.visible) for c in self._optional_columns()]

    def move_column(self, colname, index):
        """Moves column ``colname`` to ``index``.

        The column will be placed just in front of the column currently having that index, or to the
        end of the list if there's none.
        """
@@ -193,7 +205,7 @@ class Columns(GUIObject):
        colnames.remove(colname)
        colnames.insert(index, colname)
        self.set_column_order(colnames)

    def reset_to_defaults(self):
        """Reset all columns' width and visibility to their default values.
        """
@@ -202,12 +214,12 @@ class Columns(GUIObject):
            col.visible = col.default_visible
            col.width = col.default_width
        self.view.restore_columns()

    def resize_column(self, colname, newwidth):
        """Set column ``colname``'s width to ``newwidth``.
        """
        self._set_colname_attr(colname, 'width', newwidth)

        self._set_colname_attr(colname, "width", newwidth)

    def restore_columns(self):
        """Restores column persistent attributes from the last :meth:`save_columns`.
        """
@@ -218,72 +230,73 @@ class Columns(GUIObject):
            self.view.restore_columns()
            return
        for col in self.column_list:
            pref_name = '{}.Columns.{}'.format(self.savename, col.name)
            pref_name = "{}.Columns.{}".format(self.savename, col.name)
            coldata = self.prefaccess.get_default(pref_name, fallback_value={})
            if 'index' in coldata:
                col.ordered_index = coldata['index']
            if 'width' in coldata:
                col.width = coldata['width']
            if col.optional and 'visible' in coldata:
                col.visible = coldata['visible']
            if "index" in coldata:
                col.ordered_index = coldata["index"]
            if "width" in coldata:
                col.width = coldata["width"]
            if col.optional and "visible" in coldata:
                col.visible = coldata["visible"]
        self.view.restore_columns()

    def save_columns(self):
        """Save column attributes in persistent storage for restoration in :meth:`restore_columns`.
        """
        if not (self.prefaccess and self.savename and self.coldata):
            return
        for col in self.column_list:
            pref_name = '{}.Columns.{}'.format(self.savename, col.name)
            coldata = {'index': col.ordered_index, 'width': col.width}
            pref_name = "{}.Columns.{}".format(self.savename, col.name)
            coldata = {"index": col.ordered_index, "width": col.width}
            if col.optional:
                coldata['visible'] = col.visible
                coldata["visible"] = col.visible
            self.prefaccess.set_default(pref_name, coldata)

    def set_column_order(self, colnames):
        """Change the columns order so it matches the order in ``colnames``.

        :param colnames: A list of column names in the desired order.
        """
        colnames = (name for name in colnames if name in self.coldata)
        for i, colname in enumerate(colnames):
            col = self.coldata[colname]
            col.ordered_index = i

    def set_column_visible(self, colname, visible):
        """Set the visibility of column ``colname``.
        """
        self.table.save_edits() # the table on the GUI side will stop editing when the columns change
        self._set_colname_attr(colname, 'visible', visible)
        self.table.save_edits()  # the table on the GUI side will stop editing when the columns change
        self._set_colname_attr(colname, "visible", visible)
        self.view.set_column_visible(colname, visible)

    def set_default_width(self, colname, width):
        """Set the default width of column ``colname``.
        """
        self._set_colname_attr(colname, 'default_width', width)

        self._set_colname_attr(colname, "default_width", width)

    def toggle_menu_item(self, index):
        """Toggles the visibility of an optional column.

        You know, that optional column menu you've generated in :meth:`menu_items`? Well, ``index``
        is the index of the menu item in *that* menu that the user has clicked on to toggle it.

        Returns whether the column in question ends up being visible or not.
        """
        col = self._optional_columns()[index]
        self.set_column_visible(col.name, not col.visible)
        return col.visible

    #--- Properties
    # --- Properties
    @property
    def ordered_columns(self):
        """List of :class:`Column` in visible order.
        """
        return [col for col in sorted(self.column_list, key=lambda col: col.ordered_index)]

        return [
            col for col in sorted(self.column_list, key=lambda col: col.ordered_index)
        ]

    @property
    def colnames(self):
        """List of column names in visible order.
        """
        return [col.name for col in self.ordered_columns]
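A sketch of the persistence round-trip with an in-memory stand-in for prefaccess (DictPrefs is hypothetical; a real store implements PrefAccessInterface):

class DictPrefs:
    def __init__(self):
        self.store = {}

    def get_default(self, key, fallback_value):
        return self.store.get(key, fallback_value)

    def set_default(self, key, value):
        self.store[key] = value

# After save_columns() on a Columns(table, prefaccess=DictPrefs(), savename="MyTable"),
# the store holds one dict per column, e.g.:
# {"MyTable.Columns.name": {"index": 0, "width": 120, "visible": True}}
# ("visible" is stored only for optional columns.)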
@@ -8,6 +8,7 @@ from ..jobprogress.performer import ThreadedJobPerformer
from .base import GUIObject
from .text_field import TextField


class ProgressWindowView:
    """Expected interface for :class:`ProgressWindow`'s view.

@@ -18,6 +19,7 @@ class ProgressWindowView:

    It's also expected to call :meth:`ProgressWindow.cancel` when the cancel button is clicked.
    """

    def show(self):
        """Show the dialog.
        """
@@ -36,6 +38,7 @@ class ProgressWindowView:
        :param int progress: a value between ``0`` and ``100``.
        """


class ProgressWindow(GUIObject, ThreadedJobPerformer):
    """Cross-toolkit GUI-enabled progress window.

@@ -58,6 +61,7 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
        if you want to. If the function returns ``True``, ``finish_func()`` will be
        called as if the job terminated normally.
    """

    def __init__(self, finish_func, error_func=None):
        # finish_func(jobid) is the function that is called when a job is completed.
        GUIObject.__init__(self)
@@ -124,10 +128,9 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
        # target is a function with its first argument being a Job. It can then be followed by other
        # arguments which are passed as `args`.
        self.jobid = jobid
        self.progressdesc_textfield.text = ''
        self.progressdesc_textfield.text = ""
        j = self.create_job()
        args = tuple([j] + list(args))
        self.run_threaded(target, args)
        self.jobdesc_textfield.text = title
        self.view.show()
@@ -1,92 +1,96 @@
# Created By: Virgil Dupras
# Created On: 2011-09-06
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from collections import Sequence, MutableSequence

from .base import GUIObject


class Selectable(Sequence):
    """Mix-in for a ``Sequence`` that manages its selection status.

    When mixed in with a ``Sequence``, we enable it to manage its selection status. The selection
    is held as a list of ``int`` indexes. Multiple selection is supported.
    """

    def __init__(self):
        self._selected_indexes = []

    #--- Private
    # --- Private
    def _check_selection_range(self):
        if not self:
            self._selected_indexes = []
        if not self._selected_indexes:
            return
        self._selected_indexes = [index for index in self._selected_indexes if index < len(self)]
        self._selected_indexes = [
            index for index in self._selected_indexes if index < len(self)
        ]
        if not self._selected_indexes:
            self._selected_indexes = [len(self) - 1]

    #--- Virtual
    # --- Virtual
    def _update_selection(self):
        """(Virtual) Updates the model's selection appropriately.

        Called after selection has been updated. Takes the table's selection and does appropriate
        updates on the view and/or model. Common sense would dictate that when the selection doesn't
        change, we don't update anything (and thus don't call ``_update_selection()`` at all), but
        there are cases where it's false. For example, if our list updates its items but doesn't
        change its selection, we probably want to update the model's selection.

        By default, does nothing.

        Important note: This is only called on :meth:`select`, not on changes to
        :attr:`selected_indexes`.
        """
        # A redesign of how this whole thing works is probably in order, but not now, there's too
        # much breakage at once involved.

    #--- Public
    # --- Public
    def select(self, indexes):
        """Update selection to ``indexes``.

        :meth:`_update_selection` is called afterwards.

        :param list indexes: List of ``int`` that is to become the new selection.
        """
        if isinstance(indexes, int):
            indexes = [indexes]
        self.selected_indexes = indexes
        self._update_selection()

    #--- Properties
    # --- Properties
    @property
    def selected_index(self):
        """Points to the first selected index.

        *int*. *get/set*.

        *int*. *get/set*.

        Thin wrapper around :attr:`selected_indexes`. ``None`` if selection is empty. Using this
        property only makes sense if your selectable sequence supports single selection only.
        """
        return self._selected_indexes[0] if self._selected_indexes else None

    @selected_index.setter
    def selected_index(self, value):
        self.selected_indexes = [value]

    @property
    def selected_indexes(self):
        """List of selected indexes.

        *list of int*. *get/set*.

        When setting the value, automatically removes out-of-bounds indexes. The list is kept
        sorted.
        """
        return self._selected_indexes

    @selected_indexes.setter
    def selected_indexes(self, value):
        self._selected_indexes = value
@@ -96,53 +100,54 @@ class Selectable(Sequence):

class SelectableList(MutableSequence, Selectable):
    """A list that can manage selection of its items.

    Subclasses :class:`Selectable`. Behaves like a ``list``.
    """

    def __init__(self, items=None):
        Selectable.__init__(self)
        if items:
            self._items = list(items)
        else:
            self._items = []

    def __delitem__(self, key):
        self._items.__delitem__(key)
        self._check_selection_range()
        self._on_change()

    def __getitem__(self, key):
        return self._items.__getitem__(key)

    def __len__(self):
        return len(self._items)

    def __setitem__(self, key, value):
        self._items.__setitem__(key, value)
        self._on_change()

    #--- Override
    # --- Override
    def append(self, item):
        self._items.append(item)
        self._on_change()

    def insert(self, index, item):
        self._items.insert(index, item)
        self._on_change()

    def remove(self, row):
        self._items.remove(row)
        self._check_selection_range()
        self._on_change()

    #--- Virtual
    # --- Virtual
    def _on_change(self):
        """(Virtual) Called whenever the contents of the list changes.

        By default, does nothing.
        """

    #--- Public
    # --- Public
    def search_by_prefix(self, prefix):
        # XXX Why the heck is this method here?
        prefix = prefix.lower()
@@ -150,59 +155,62 @@ class SelectableList(MutableSequence, Selectable):
            if s.lower().startswith(prefix):
                return index
        return -1


class GUISelectableListView:
    """Expected interface for :class:`GUISelectableList`'s view.

    *Not actually used in the code. For documentation purposes only.*

    Our view, some kind of list view or combobox, is expected to sync with the list's contents by
    responding appropriately to all callbacks in this interface.
    """

    def refresh(self):
        """Refreshes the contents of the list widget.

        Ensures that the contents of the list widget is synced with the model.
        """

    def update_selection(self):
        """Update selection status.

        Ensures that the list widget's selection is in sync with the model.
        """


class GUISelectableList(SelectableList, GUIObject):
    """Cross-toolkit GUI-enabled list view.

    Represents a UI element presenting the user with a selectable list of items.

    Subclasses :class:`SelectableList` and :class:`.GUIObject`. Expected view:
    :class:`GUISelectableListView`.

    :param iterable items: If specified, items to fill the list with initially.
    """

    def __init__(self, items=None):
        SelectableList.__init__(self, items)
        GUIObject.__init__(self)

    def _view_updated(self):
        """Refreshes the view contents with :meth:`GUISelectableListView.refresh`.

        Overrides :meth:`~hscommon.gui.base.GUIObject._view_updated`.
        """
        self.view.refresh()

    def _update_selection(self):
        """Refreshes the view selection with :meth:`GUISelectableListView.update_selection`.

        Overrides :meth:`Selectable._update_selection`.
        """
        self.view.update_selection()

    def _on_change(self):
        """Refreshes the view contents with :meth:`GUISelectableListView.refresh`.

        Overrides :meth:`SelectableList._on_change`.
        """
        self.view.refresh()
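Selection clamping in action: as _check_selection_range() above guarantees, deleting the selected item falls back to the last valid index.

sl = SelectableList(["a", "b", "c"])
sl.select(2)
del sl[2]
print(sl.selected_index)  # 1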
@@ -11,6 +11,7 @@ from collections import MutableSequence, namedtuple
from .base import GUIObject
from .selectable_list import Selectable


# We used to directly subclass list, but it caused problems at some point with deepcopy
class Table(MutableSequence, Selectable):
    """Sortable and selectable sequence of :class:`Row`.
@@ -24,6 +25,7 @@ class Table(MutableSequence, Selectable):

    Subclasses :class:`.Selectable`.
    """

    def __init__(self):
        Selectable.__init__(self)
        self._rows = []
@@ -101,7 +103,7 @@ class Table(MutableSequence, Selectable):
        if self._footer is not None:
            self._rows.append(self._footer)

    #--- Properties
    # --- Properties
    @property
    def footer(self):
        """If set, a row that always stays at the bottom of the table.
@@ -216,6 +218,7 @@ class GUITableView:

    Whenever the user changes the selection, we expect the view to call :meth:`Table.select`.
    """

    def refresh(self):
        """Refreshes the contents of the table widget.

@@ -238,7 +241,9 @@ class GUITableView:
        """


SortDescriptor = namedtuple('SortDescriptor', 'column desc')
SortDescriptor = namedtuple("SortDescriptor", "column desc")


class GUITable(Table, GUIObject):
    """Cross-toolkit GUI-enabled table view.

@@ -254,6 +259,7 @@ class GUITable(Table, GUIObject):
    Subclasses :class:`Table` and :class:`.GUIObject`. Expected view:
    :class:`GUITableView`.
    """

    def __init__(self):
        GUIObject.__init__(self)
        Table.__init__(self)
@@ -261,7 +267,7 @@ class GUITable(Table, GUIObject):
        self.edited = None
        self._sort_descriptor = None

    #--- Virtual
    # --- Virtual
    def _do_add(self):
        """(Virtual) Creates a new row, adds it in the table.

@@ -309,7 +315,7 @@ class GUITable(Table, GUIObject):
        else:
            self.select([len(self) - 1])

    #--- Public
    # --- Public
    def add(self):
        """Add a new row in edit mode.

@@ -444,6 +450,7 @@ class Row:

    Of course, this is only default behavior. This can be overridden.
    """

    def __init__(self, table):
        super(Row, self).__init__()
        self.table = table
@@ -454,7 +461,7 @@ class Row:
        assert self.table.edited is None
        self.table.edited = self

    #--- Virtual
    # --- Virtual
    def can_edit(self):
        """(Virtual) Whether the whole row can be edited.

@@ -489,11 +496,11 @@ class Row:
        there's none, raises ``AttributeError``.
        """
        try:
            return getattr(self, '_' + column_name)
            return getattr(self, "_" + column_name)
        except AttributeError:
            return getattr(self, column_name)

    #--- Public
    # --- Public
    def can_edit_cell(self, column_name):
        """Returns whether cell for column ``column_name`` can be edited.

@@ -511,18 +518,18 @@ class Row:
            return False
        # '_' is in case column is a python keyword
        if not hasattr(self, column_name):
            if hasattr(self, column_name + '_'):
                column_name = column_name + '_'
            if hasattr(self, column_name + "_"):
                column_name = column_name + "_"
            else:
                return False
        if hasattr(self, 'can_edit_' + column_name):
            return getattr(self, 'can_edit_' + column_name)
        if hasattr(self, "can_edit_" + column_name):
            return getattr(self, "can_edit_" + column_name)
        # If the row has a settable property, we can edit the cell
        rowclass = self.__class__
        prop = getattr(rowclass, column_name, None)
        if prop is None:
            return False
        return bool(getattr(prop, 'fset', None))
        return bool(getattr(prop, "fset", None))

    def get_cell_value(self, attrname):
        """Get cell value for ``attrname``.
@@ -530,8 +537,8 @@ class Row:
        By default, does a simple ``getattr()``, but it is used to allow subclasses to have
        alternative value storage mechanisms.
        """
        if attrname == 'from':
            attrname = 'from_'
        if attrname == "from":
            attrname = "from_"
        return getattr(self, attrname)

    def set_cell_value(self, attrname, value):
@@ -540,7 +547,6 @@ class Row:
        By default, does a simple ``setattr()``, but it is used to allow subclasses to have
        alternative value storage mechanisms.
        """
        if attrname == 'from':
            attrname = 'from_'
        if attrname == "from":
            attrname = "from_"
        setattr(self, attrname, value)
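A sketch of a Row subclass whose "name" cell is editable because the property has a setter, which is exactly what can_edit_cell() above looks for:

class DemoRow(Row):
    def __init__(self, table, name):
        Row.__init__(self, table)
        self._name = name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

# can_edit_cell("name") finds the settable property and returns True (assuming
# can_edit() allows it); get_cell_value("name") is a plain getattr().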
@@ -1,102 +1,106 @@
# Created On: 2012/01/23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import GUIObject
from ..util import nonone


class TextFieldView:
    """Expected interface for :class:`TextField`'s view.

    *Not actually used in the code. For documentation purposes only.*

    Our view is expected to sync with :attr:`TextField.text` "both ways", that is, update the
    model's text when the user types something, but also update the text field when :meth:`refresh`
    is called.
    """

    def refresh(self):
        """Refreshes the contents of the input widget.

        Ensures that the contents of the input widget is actually :attr:`TextField.text`.
        """


class TextField(GUIObject):
    """Cross-toolkit text field.

    Represents a UI element allowing the user to input a text value. Its main attribute is
    :attr:`text` which acts as the store of the said value.

    When our model value isn't a string, we have a built-in parsing/formatting mechanism allowing
    us to directly retrieve/set our non-string value through :attr:`value`.

    Subclasses :class:`.GUIObject`. Expected view: :class:`TextFieldView`.
    """

    def __init__(self):
        GUIObject.__init__(self)
        self._text = ''
        self._text = ""
        self._value = None

    #--- Virtual
    # --- Virtual
    def _parse(self, text):
        """(Virtual) Parses ``text`` to put into :attr:`value`.

        Returns the parsed version of ``text``. Called whenever :attr:`text` changes.
        """
        return text

    def _format(self, value):
        """(Virtual) Formats ``value`` to put into :attr:`text`.

        Returns the formatted version of ``value``. Called whenever :attr:`value` changes.
        """
        return value

    def _update(self, newvalue):
        """(Virtual) Called whenever we have a new value.

        Whenever our text/value store changes to a new value (different from the old one), this
        method is called. By default, it does nothing but you can override it if you want.
        """

    #--- Override
    # --- Override
    def _view_updated(self):
        self.view.refresh()

    #--- Public
    # --- Public
    def refresh(self):
        """Triggers a view :meth:`~TextFieldView.refresh`.
        """
        self.view.refresh()

    @property
    def text(self):
        """The text that is currently displayed in the widget.

        *str*. *get/set*.

        This property can be set. When it is, :meth:`refresh` is called and the view is synced with
        our value. Always in sync with :attr:`value`.
        """
        return self._text

    @text.setter
    def text(self, newtext):
        self.value = self._parse(nonone(newtext, ''))

        self.value = self._parse(nonone(newtext, ""))

    @property
    def value(self):
        """The "parsed" representation of :attr:`text`.

        *arbitrary type*. *get/set*.

        By default, it's a mirror of :attr:`text`, but a subclass can override :meth:`_parse` and
        :meth:`_format` to have anything else. Always in sync with :attr:`text`.
        """
        return self._value

    @value.setter
    def value(self, newvalue):
        if newvalue == self._value:
@@ -105,4 +109,3 @@ class TextField(GUIObject):
        self._text = self._format(newvalue)
        self._update(self._value)
        self.refresh()
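A sketch of the _parse/_format hooks: a numeric field that mirrors an int through .value (a view must be bound before setting .text, since refresh() calls view.refresh()):

class IntField(TextField):
    def _parse(self, text):
        try:
            return int(text)
        except ValueError:
            return 0

    def _format(self, value):
        return str(value)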
@@ -1,16 +1,17 @@
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from collections import MutableSequence

from .base import GUIObject


class Node(MutableSequence):
    """Pretty bland node implementation to be used in a :class:`Tree`.

    It has a :attr:`parent`, behaves like a list, its content being its children. Link integrity
    is somewhat enforced (adding a child to a node will set the child's :attr:`parent`, but that's
    pretty much as far as we go, integrity-wise. Nodes don't tend to move around much in a GUI
@@ -19,57 +20,58 @@ class Node(MutableSequence):
    Nodes are designed to be subclassed and given meaningful attributes (those you'll want to
    display in your tree view), but they all have a :attr:`name`, which is given on initialization.
    """

    def __init__(self, name):
        self._name = name
        self._parent = None
        self._path = None
        self._children = []

    def __repr__(self):
        return '<Node %r>' % self.name

    #--- MutableSequence overrides
        return "<Node %r>" % self.name

    # --- MutableSequence overrides
    def __delitem__(self, key):
        self._children.__delitem__(key)

    def __getitem__(self, key):
        return self._children.__getitem__(key)

    def __len__(self):
        return len(self._children)

    def __setitem__(self, key, value):
        self._children.__setitem__(key, value)

    def append(self, node):
        self._children.append(node)
        node._parent = self
        node._path = None

    def insert(self, index, node):
        self._children.insert(index, node)
        node._parent = self
        node._path = None

    #--- Public
    # --- Public
    def clear(self):
        """Clears the node of all its children.
        """
        del self[:]

    def find(self, predicate, include_self=True):
        """Return the first child to match ``predicate``.

        See :meth:`findall`.
        """
        try:
            return next(self.findall(predicate, include_self=include_self))
        except StopIteration:
            return None

    def findall(self, predicate, include_self=True):
        """Yield all children matching ``predicate``.

        :param predicate: ``f(node) --> bool``
        :param include_self: Whether we can return ``self`` or we return only children.
        """
@@ -78,10 +80,10 @@ class Node(MutableSequence):
        for child in self:
            for found in child.findall(predicate, include_self=True):
                yield found

    def get_node(self, index_path):
        """Returns the node at ``index_path``.

        :param index_path: a list of int indexes leading to our node. See :attr:`path`.
        """
        result = self
@@ -89,40 +91,40 @@ class Node(MutableSequence):
            for index in index_path:
                result = result[index]
        return result

    def get_path(self, target_node):
        """Returns the :attr:`path` of ``target_node``.

        If ``target_node`` is ``None``, returns ``None``.
        """
        if target_node is None:
            return None
        return target_node.path

    @property
    def children_count(self):
        """Same as ``len(self)``.
        """
        return len(self)

    @property
    def name(self):
        """Name for the node, supplied on init.
        """
        return self._name

    @property
    def parent(self):
        """Parent of the node.

        If ``None``, we have a root node.
        """
        return self._parent

    @property
    def path(self):
        """A list of node indexes leading from the root node to ``self``.

        The path of a node is always related to its :attr:`root`. It's the sequence of indexes that
        we have to take to get to our node, starting from the root. For example, if
        ``node.path == [1, 2, 3, 4]``, it means that ``node.root[1][2][3][4] is node``.
@@ -133,112 +135,113 @@ class Node(MutableSequence):
        else:
            self._path = self._parent.path + [self._parent.index(self)]
        return self._path

    @property
    def root(self):
        """Root node of current node.

        To get it, we recursively follow our :attr:`parent` chain until we have ``None``.
        """
        if self._parent is None:
            return self
        else:
            return self._parent.root


class Tree(Node, GUIObject):
    """Cross-toolkit GUI-enabled tree view.

    This class is a bit too thin to be used as a tree view controller out of the box and HS apps
    that subclass it each add quite a bit of logic to it to make it workable. Making this more
    usable out of the box is a work in progress.

    This class is here (in addition to being a :class:`Node`) mostly to handle selection.

    Subclasses :class:`Node` (it is the root node of all its children) and :class:`.GUIObject`.
    """

    def __init__(self):
        Node.__init__(self, '')
        Node.__init__(self, "")
        GUIObject.__init__(self)
        #: Where we store selected nodes (as a list of :class:`Node`)
        self._selected_nodes = []

    #--- Virtual
    # --- Virtual
    def _select_nodes(self, nodes):
        """(Virtual) Customize node selection behavior.

        By default, simply set :attr:`_selected_nodes`.
        """
        self._selected_nodes = nodes

    #--- Override
    # --- Override
    def _view_updated(self):
        self.view.refresh()

    def clear(self):
        self._selected_nodes = []
        Node.clear(self)

    #--- Public
    # --- Public
    @property
    def selected_node(self):
        """Currently selected node.

        *:class:`Node`*. *get/set*.

        First of :attr:`selected_nodes`. ``None`` if empty.
        """
        return self._selected_nodes[0] if self._selected_nodes else None

    @selected_node.setter
    def selected_node(self, node):
        if node is not None:
            self._select_nodes([node])
        else:
            self._select_nodes([])

    @property
    def selected_nodes(self):
        """List of selected nodes in the tree.

        *List of :class:`Node`*. *get/set*.

        We use nodes instead of indexes to store selection because it's simpler when it's time to
        manage selection of multiple node levels.
        """
        return self._selected_nodes

    @selected_nodes.setter
    def selected_nodes(self, nodes):
        self._select_nodes(nodes)

    @property
    def selected_path(self):
        """Currently selected path.

        *:attr:`Node.path`*. *get/set*.

        First of :attr:`selected_paths`. ``None`` if empty.
        """
        return self.get_path(self.selected_node)

    @selected_path.setter
    def selected_path(self, index_path):
        if index_path is not None:
            self.selected_paths = [index_path]
        else:
            self._select_nodes([])

    @property
    def selected_paths(self):
        """List of selected paths in the tree.

        *List of :attr:`Node.path`*. *get/set*

        Computed from :attr:`selected_nodes`.
        """
        return list(map(self.get_path, self._selected_nodes))

    @selected_paths.setter
    def selected_paths(self, index_paths):
        nodes = []
@@ -248,4 +251,3 @@ class Tree(Node, GUIObject):
        except IndexError:
            pass
        self._select_nodes(nodes)
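An illustration of Node.path and get_node():

root = Node("root")
child = Node("child")
leaf = Node("leaf")
root.append(child)
child.append(leaf)
print(leaf.path)  # [0, 0]
print(root.get_node([0, 0]) is leaf)  # True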
|
||||
|
||||
|
@ -6,15 +6,19 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html


class JobCancelled(Exception):
    "The user has cancelled the job"


class JobInProgressError(Exception):
    "A job is already being performed, you can't perform more than one at the same time."


class JobCountError(Exception):
    "The number of jobs started has exceeded the number of jobs allowed"


class Job:
    """Manages a job's progression and returns its progression through a callback.

@ -30,14 +34,15 @@ class Job:
    Another one is that nothing stops you from calling add_progress right after
    SkipJob.
    """

    #---Magic functions
    # ---Magic functions
    def __init__(self, job_proportions, callback):
        """Initialize the Job with 'jobcount' jobs. Start every job with
        start_job(). Every time the job progress is updated, 'callback' is called.
        'callback' takes a 'progress' int param, and an optional 'desc'
        parameter. Callback must return false if the job must be cancelled.
        """
        if not hasattr(callback, '__call__'):
        if not hasattr(callback, "__call__"):
            raise TypeError("'callback' MUST be set when creating a Job")
        if isinstance(job_proportions, int):
            job_proportions = [1] * job_proportions
@ -49,12 +54,12 @@ class Job:
        self._progress = 0
        self._currmax = 1

    #---Private
    def _subjob_callback(self, progress, desc=''):
    # ---Private
    def _subjob_callback(self, progress, desc=""):
        """This is the callback passed to children jobs.
        """
        self.set_progress(progress, desc)
        return True #if JobCancelled has to be raised, it will be at the highest level
        return True  # if JobCancelled has to be raised, it will be at the highest level

    def _do_update(self, desc):
        """Calls the callback function with a % progress as a parameter.
@ -67,18 +72,18 @@ class Job:
            total_progress = self._jobcount * self._currmax
            progress = ((passed_progress + current_progress) * 100) // total_progress
        else:
            progress = -1 # indeterminate
            progress = -1  # indeterminate
        # It's possible that callback doesn't support a desc arg
        result = self._callback(progress, desc) if desc else self._callback(progress)
        if not result:
            raise JobCancelled()

    #---Public
    def add_progress(self, progress=1, desc=''):
    # ---Public
    def add_progress(self, progress=1, desc=""):
        self.set_progress(self._progress + progress, desc)

    def check_if_cancelled(self):
        self._do_update('')
        self._do_update("")

    def iter_with_progress(self, iterable, desc_format=None, every=1, count=None):
        """Iterate through ``iterable`` while automatically adding progress.
@ -89,7 +94,7 @@ class Job:
        """
        if count is None:
            count = len(iterable)
        desc = ''
        desc = ""
        if desc_format:
            desc = desc_format % (0, count)
        self.start_job(count, desc)
@ -103,7 +108,7 @@ class Job:
            desc = desc_format % (count, count)
        self.set_progress(100, desc)

    def start_job(self, max_progress=100, desc=''):
    def start_job(self, max_progress=100, desc=""):
        """Begin work on the next job. You must not call start_job more than
        'jobcount' (in __init__) times.
        'max' is the job units you are to perform.
@ -118,7 +123,7 @@ class Job:
        self._currmax = max(1, max_progress)
        self._do_update(desc)

    def start_subjob(self, job_proportions, desc=''):
    def start_subjob(self, job_proportions, desc=""):
        """Starts a sub job. Use this when you want to split a job into
        multiple smaller jobs. Pretty handy when starting a process where you
        know how many subjobs you will have, but don't know the work unit count
@ -128,7 +133,7 @@ class Job:
        self.start_job(100, desc)
        return Job(job_proportions, self._subjob_callback)

    def set_progress(self, progress, desc=''):
    def set_progress(self, progress, desc=""):
        """Sets the progress of the current job to 'progress', and call the
        callback
        """
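For context, a small sketch (not from the diff) of driving Job directly: two jobs of equal proportion, with a callback that just prints progress and never cancels. The import path is the module's location in hscommon:

    from hscommon.jobprogress.job import Job

    def callback(progress, desc=""):
        print(progress, desc)
        return True  # returning a falsy value would raise JobCancelled

    j = Job(2, callback)
    j.start_job(10, "first phase")
    for _ in range(10):
        j.add_progress()   # overall progress climbs from 0 to 50
    j.start_job(10, "second phase")
    j.set_progress(10)     # overall progress reaches 100

Because both jobs have proportion 1, each accounts for half of the 0-100 range reported to the callback.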
@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2010-11-19
# Copyright 2011 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from threading import Thread
@ -11,29 +11,31 @@ import sys

from .job import Job, JobInProgressError, JobCancelled


class ThreadedJobPerformer:
    """Run threaded jobs and track progress.

    To run a threaded job, first create a job with _create_job(), then call _run_threaded(), with
    your work function as a parameter.

    Example:

    j = self._create_job()
    self._run_threaded(self.some_work_func, (arg1, arg2, j))
    """

    _job_running = False
    last_error = None

    #--- Protected
    # --- Protected
    def create_job(self):
        if self._job_running:
            raise JobInProgressError()
        self.last_progress = -1
        self.last_desc = ''
        self.last_desc = ""
        self.job_cancelled = False
        return Job(1, self._update_progress)

    def _async_run(self, *args):
        target = args[0]
        args = tuple(args[1:])
@ -49,24 +51,23 @@ class ThreadedJobPerformer:
        finally:
            self._job_running = False
            self.last_progress = None

    def reraise_if_error(self):
        """Reraises the error that happened in the thread, if any.

        Call this after the caller of run_threaded has detected that self._job_running returned to False.
        """
        if self.last_error is not None:
            raise self.last_error.with_traceback(self.last_traceback)

    def _update_progress(self, newprogress, newdesc=''):
    def _update_progress(self, newprogress, newdesc=""):
        self.last_progress = newprogress
        if newdesc:
            self.last_desc = newdesc
        return not self.job_cancelled

    def run_threaded(self, target, args=()):
        if self._job_running:
            raise JobInProgressError()
        args = (target, ) + args
        args = (target,) + args
        Thread(target=self._async_run, args=args).start()
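A hedged sketch of the pattern the docstring describes, using the names visible in this diff (create_job, run_threaded, reraise_if_error); the worker function and the sleep-based polling loop are illustrative assumptions (a GUI would poll last_progress/last_desc from a timer instead):

    import time
    from hscommon.jobprogress.performer import ThreadedJobPerformer

    def work(j):
        for _ in j.iter_with_progress(range(5)):
            time.sleep(0.1)

    performer = ThreadedJobPerformer()
    j = performer.create_job()
    performer.run_threaded(work, (j,))
    while performer._job_running:
        time.sleep(0.05)   # poll; last_progress/last_desc hold the state
    performer.reraise_if_error()   # re-raise any exception from the thread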
@ -11,17 +11,18 @@ from PyQt5.QtWidgets import QProgressDialog

from . import performer


class Progress(QProgressDialog, performer.ThreadedJobPerformer):
    finished = pyqtSignal(['QString'])
    finished = pyqtSignal(["QString"])

    def __init__(self, parent):
        flags = Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowSystemMenuHint
        QProgressDialog.__init__(self, '', "Cancel", 0, 100, parent, flags)
        QProgressDialog.__init__(self, "", "Cancel", 0, 100, parent, flags)
        self.setModal(True)
        self.setAutoReset(False)
        self.setAutoClose(False)
        self._timer = QTimer()
        self._jobid = ''
        self._jobid = ""
        self._timer.timeout.connect(self.updateProgress)

    def updateProgress(self):
@ -44,9 +45,8 @@ class Progress(QProgressDialog, performer.ThreadedJobPerformer):
    def run(self, jobid, title, target, args=()):
        self._jobid = jobid
        self.reset()
        self.setLabelText('')
        self.setLabelText("")
        self.run_threaded(target, args)
        self.setWindowTitle(title)
        self.show()
        self._timer.start(500)
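Illustrative only: how this dialog is typically driven. It assumes a running QApplication; the import path and the explicit create_job/args wiring are assumptions consistent with the performer code above (run() hands target and args to run_threaded, which calls target(*args) on a worker thread):

    from qtlib.progress import Progress   # import path is an assumption

    def some_job(j):
        for _ in j.iter_with_progress(range(100)):
            pass

    progress = Progress(None)             # parent widget, None for the sketch
    j = progress.create_job()
    progress.run("myjob", "Working...", some_job, (j,))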
110 hscommon/loc.py
@ -7,26 +7,29 @@ import tempfile
import polib

from . import pygettext
from .util import modified_after, dedupe, ensure_folder, ensure_file
from .build import print_and_do, ensure_empty_folder, copy
from .util import modified_after, dedupe, ensure_folder
from .build import print_and_do, ensure_empty_folder

LC_MESSAGES = 'LC_MESSAGES'
LC_MESSAGES = "LC_MESSAGES"

# There isn't a 1-on-1 exact fit between .po language codes and cocoa ones
PO2COCOA = {
    'pl_PL': 'pl',
    'pt_BR': 'pt-BR',
    'zh_CN': 'zh-Hans',
    "pl_PL": "pl",
    "pt_BR": "pt-BR",
    "zh_CN": "zh-Hans",
}

COCOA2PO = {v: k for k, v in PO2COCOA.items()}


def get_langs(folder):
    return [name for name in os.listdir(folder) if op.isdir(op.join(folder, name))]


def files_with_ext(folder, ext):
    return [op.join(folder, fn) for fn in os.listdir(folder) if fn.endswith(ext)]


def generate_pot(folders, outpath, keywords, merge=False):
    if merge and not op.exists(outpath):
        merge = False
@ -37,21 +40,23 @@ def generate_pot(folders, outpath, keywords, merge=False):
    pyfiles = []
    for folder in folders:
        for root, dirs, filenames in os.walk(folder):
            keep = [fn for fn in filenames if fn.endswith('.py')]
            keep = [fn for fn in filenames if fn.endswith(".py")]
            pyfiles += [op.join(root, fn) for fn in keep]
    pygettext.main(pyfiles, outpath=genpath, keywords=keywords)
    if merge:
        merge_po_and_preserve(genpath, outpath)
        os.remove(genpath)


def compile_all_po(base_folder):
    langs = get_langs(base_folder)
    for lang in langs:
        pofolder = op.join(base_folder, lang, LC_MESSAGES)
        pofiles = files_with_ext(pofolder, '.po')
        pofiles = files_with_ext(pofolder, ".po")
        for pofile in pofiles:
            p = polib.pofile(pofile)
            p.save_as_mofile(pofile[:-3] + '.mo')
            p.save_as_mofile(pofile[:-3] + ".mo")


def merge_locale_dir(target, mergeinto):
    langs = get_langs(target)
@ -59,22 +64,24 @@ def merge_locale_dir(target, mergeinto):
        if not op.exists(op.join(mergeinto, lang)):
            continue
        mofolder = op.join(target, lang, LC_MESSAGES)
        mofiles = files_with_ext(mofolder, '.mo')
        mofiles = files_with_ext(mofolder, ".mo")
        for mofile in mofiles:
            shutil.copy(mofile, op.join(mergeinto, lang, LC_MESSAGES))


def merge_pots_into_pos(folder):
    # We're going to take all pot files in `folder` and for each lang, merge it with the po file
    # with the same name.
    potfiles = files_with_ext(folder, '.pot')
    potfiles = files_with_ext(folder, ".pot")
    for potfile in potfiles:
        refpot = polib.pofile(potfile)
        refname = op.splitext(op.basename(potfile))[0]
        for lang in get_langs(folder):
            po = polib.pofile(op.join(folder, lang, LC_MESSAGES, refname + '.po'))
            po = polib.pofile(op.join(folder, lang, LC_MESSAGES, refname + ".po"))
            po.merge(refpot)
            po.save()


def merge_po_and_preserve(source, dest):
    # Merges source entries into dest, but keeps old entries intact
    sourcepo = polib.pofile(source)
@ -86,36 +93,41 @@ def merge_po_and_preserve(source, dest):
        destpo.append(entry)
    destpo.save()


def normalize_all_pos(base_folder):
    """Normalize the format of .po files in base_folder.

    When getting POs from external sources, such as Transifex, we end up with spurious diffs because
    of a difference in the way line wrapping is handled. It wouldn't be a big deal if it happened
    once, but these spurious diffs keep overwriting each other, and it's annoying.

    Our PO files will keep polib's format. Call this function to ensure that freshly pulled POs
    are of the right format before committing them.
    """
    langs = get_langs(base_folder)
    for lang in langs:
        pofolder = op.join(base_folder, lang, LC_MESSAGES)
        pofiles = files_with_ext(pofolder, '.po')
        pofiles = files_with_ext(pofolder, ".po")
        for pofile in pofiles:
            p = polib.pofile(pofile)
            p.save()

#--- Cocoa
# --- Cocoa
def all_lproj_paths(folder):
    return files_with_ext(folder, '.lproj')
    return files_with_ext(folder, ".lproj")


def escape_cocoa_strings(s):
    return s.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
    return s.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")


def unescape_cocoa_strings(s):
    return s.replace('\\\\', '\\').replace('\\"', '"').replace('\\n', '\n')
    return s.replace("\\\\", "\\").replace('\\"', '"').replace("\\n", "\n")


def strings2pot(target, dest):
    with open(target, 'rt', encoding='utf-8') as fp:
    with open(target, "rt", encoding="utf-8") as fp:
        contents = fp.read()
    # We're reading an en.lproj file. We only care about the righthand part of the translation.
    re_trans = re.compile(r'".*" = "(.*)";')
@ -131,17 +143,21 @@ def strings2pot(target, dest):
        entry = polib.POEntry(msgid=s)
        po.append(entry)
        # we don't know or care about a line number so we put 0
        entry.occurrences.append((target, '0'))
        entry.occurrences.append((target, "0"))
        entry.occurrences = dedupe(entry.occurrences)
    po.save(dest)


def allstrings2pot(lprojpath, dest, excludes=None):
    allstrings = files_with_ext(lprojpath, '.strings')
    allstrings = files_with_ext(lprojpath, ".strings")
    if excludes:
        allstrings = [p for p in allstrings if op.splitext(op.basename(p))[0] not in excludes]
        allstrings = [
            p for p in allstrings if op.splitext(op.basename(p))[0] not in excludes
        ]
    for strings_path in allstrings:
        strings2pot(strings_path, dest)


def po2strings(pofile, en_strings, dest):
    # Takes en_strings and replaces all righthand parts of "foo" = "bar"; entries with translations
    # in pofile, then puts the result in dest.
@ -150,9 +166,10 @@ def po2strings(pofile, en_strings, dest):
        return
    ensure_folder(op.dirname(dest))
    print("Creating {} from {}".format(dest, pofile))
    with open(en_strings, 'rt', encoding='utf-8') as fp:
    with open(en_strings, "rt", encoding="utf-8") as fp:
        contents = fp.read()
    re_trans = re.compile(r'(?<= = ").*(?=";\n)')

    def repl(match):
        s = match.group(0)
        unescaped = unescape_cocoa_strings(s)
@ -162,10 +179,12 @@ def po2strings(pofile, en_strings, dest):
            return s
        trans = entry.msgstr
        return escape_cocoa_strings(trans) if trans else s

    contents = re_trans.sub(repl, contents)
    with open(dest, 'wt', encoding='utf-8') as fp:
    with open(dest, "wt", encoding="utf-8") as fp:
        fp.write(contents)


def generate_cocoa_strings_from_code(code_folder, dest_folder):
    # Uses the "genstrings" command to generate strings files from all .m files in "code_folder".
    # The strings files (their names depend on the localization table used in the source) will be
@ -173,36 +192,49 @@ def generate_cocoa_strings_from_code(code_folder, dest_folder):
    # genstrings produces utf-16 files with comments. After having generated the files, we convert
    # them to utf-8 and remove the comments.
    ensure_empty_folder(dest_folder)
    print_and_do('genstrings -o "{}" `find "{}" -name *.m | xargs`'.format(dest_folder, code_folder))
    print_and_do(
        'genstrings -o "{}" `find "{}" -name *.m | xargs`'.format(
            dest_folder, code_folder
        )
    )
    for stringsfile in os.listdir(dest_folder):
        stringspath = op.join(dest_folder, stringsfile)
        with open(stringspath, 'rt', encoding='utf-16') as fp:
        with open(stringspath, "rt", encoding="utf-16") as fp:
            content = fp.read()
        content = re.sub('/\*.*?\*/', '', content)
        content = re.sub('\n{2,}', '\n', content)
        content = re.sub(r"/\*.*?\*/", "", content)
        content = re.sub(r"\n{2,}", "\n", content)
        # I have no idea why, but genstrings seems to have problems with the "%" character in strings
        # and inserts (number)$ after it. Find these bogus inserts and remove them.
        content = re.sub('%\d\$', '%', content)
        with open(stringspath, 'wt', encoding='utf-8') as fp:
        content = re.sub(r"%\d\$", "%", content)
        with open(stringspath, "wt", encoding="utf-8") as fp:
            fp.write(content)


def generate_cocoa_strings_from_xib(xib_folder):
    xibs = [op.join(xib_folder, fn) for fn in os.listdir(xib_folder) if fn.endswith('.xib')]
    xibs = [
        op.join(xib_folder, fn) for fn in os.listdir(xib_folder) if fn.endswith(".xib")
    ]
    for xib in xibs:
        dest = xib.replace('.xib', '.strings')
        print_and_do('ibtool {} --generate-strings-file {}'.format(xib, dest))
        print_and_do('iconv -f utf-16 -t utf-8 {0} | tee {0}'.format(dest))
        dest = xib.replace(".xib", ".strings")
        print_and_do("ibtool {} --generate-strings-file {}".format(xib, dest))
        print_and_do("iconv -f utf-16 -t utf-8 {0} | tee {0}".format(dest))


def localize_stringsfile(stringsfile, dest_root_folder):
    stringsfile_name = op.basename(stringsfile)
    for lang in get_langs('locale'):
        pofile = op.join('locale', lang, 'LC_MESSAGES', 'ui.po')
    for lang in get_langs("locale"):
        pofile = op.join("locale", lang, "LC_MESSAGES", "ui.po")
        cocoa_lang = PO2COCOA.get(lang, lang)
        dest_lproj = op.join(dest_root_folder, cocoa_lang + '.lproj')
        dest_lproj = op.join(dest_root_folder, cocoa_lang + ".lproj")
        ensure_folder(dest_lproj)
        po2strings(pofile, stringsfile, op.join(dest_lproj, stringsfile_name))


def localize_all_stringsfiles(src_folder, dest_root_folder):
    stringsfiles = [op.join(src_folder, fn) for fn in os.listdir(src_folder) if fn.endswith('.strings')]
    stringsfiles = [
        op.join(src_folder, fn)
        for fn in os.listdir(src_folder)
        if fn.endswith(".strings")
    ]
    for path in stringsfiles:
        localize_stringsfile(path, dest_root_folder)
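For context, a sketch of the gettext update cycle these helpers implement, using only functions visible above; the folder names and the "tr" keyword are assumptions for illustration:

    from hscommon import loc

    # Extract translatable strings from source into a .pot, merging with any
    # existing one so manually added entries survive.
    loc.generate_pot(["core", "qt"], "locale/ui.pot", ["tr"], merge=True)

    # Push new msgids from the .pot files into each language's .po file...
    loc.merge_pots_into_pos("locale")

    # ...and compile every .po into the .mo format gettext loads at runtime.
    loc.compile_all_po("locale")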
@ -1,7 +1,7 @@
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

"""Very simple inter-object notification system.
@ -14,55 +14,58 @@ the method with the same name as the broadcasted message is called on the listener.

from collections import defaultdict


class Broadcaster:
    """Broadcasts messages that are received by all listeners.
    """

    def __init__(self):
        self.listeners = set()

    def add_listener(self, listener):
        self.listeners.add(listener)

    def notify(self, msg):
        """Notify all connected listeners of ``msg``.

        That means that each listener will have its method with the same name as ``msg`` called.
        """
        for listener in self.listeners.copy(): # listeners can change during iteration
            if listener in self.listeners: # disconnected during notification
        for listener in self.listeners.copy():  # listeners can change during iteration
            if listener in self.listeners:  # disconnected during notification
                listener.dispatch(msg)

    def remove_listener(self, listener):
        self.listeners.discard(listener)


class Listener:
    """A listener is initialized with the broadcaster it's going to listen to. Initially, it is not connected.
    """

    def __init__(self, broadcaster):
        self.broadcaster = broadcaster
        self._bound_notifications = defaultdict(list)

    def bind_messages(self, messages, func):
        """Binds multiple messages to the same function.

        Often, we perform the same thing on multiple messages. Instead of having the same function
        repeated again and again in our class, we can use this method to bind multiple messages to
        the same function.
        """
        for message in messages:
            self._bound_notifications[message].append(func)

    def connect(self):
        """Connects the listener to its broadcaster.
        """
        self.broadcaster.add_listener(self)

    def disconnect(self):
        """Disconnects the listener from its broadcaster.
        """
        self.broadcaster.remove_listener(self)

    def dispatch(self, msg):
        if msg in self._bound_notifications:
            for func in self._bound_notifications[msg]:
@ -70,20 +73,19 @@ class Listener:
        if hasattr(self, msg):
            method = getattr(self, msg)
            method()


class Repeater(Broadcaster, Listener):
    REPEATED_NOTIFICATIONS = None

    def __init__(self, broadcaster):
        Broadcaster.__init__(self)
        Listener.__init__(self, broadcaster)

    def _repeat_message(self, msg):
        if not self.REPEATED_NOTIFICATIONS or msg in self.REPEATED_NOTIFICATIONS:
            self.notify(msg)

    def dispatch(self, msg):
        Listener.dispatch(self, msg)
        self._repeat_message(msg)
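A minimal sketch of the broadcast/dispatch cycle defined above; the SaveListener class and the "document_saved" message are invented for illustration:

    from hscommon.notify import Broadcaster, Listener

    class SaveListener(Listener):
        def document_saved(self):   # same name as the broadcasted message
            print("saved!")

    broadcaster = Broadcaster()
    listener = SaveListener(broadcaster)
    listener.connect()                    # nothing is received until connected
    broadcaster.notify("document_saved")  # calls listener.document_saved()
    listener.disconnect()

A Repeater slots in between a broadcaster and further listeners: its dispatch() both handles the message and re-broadcasts it, optionally filtered through REPEATED_NOTIFICATIONS.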
109 hscommon/path.py
@ -2,8 +2,8 @@
# Created On: 2006/02/21
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import logging
@ -15,19 +15,21 @@ from itertools import takewhile
from functools import wraps
from inspect import signature


class Path(tuple):
    """A handy class to work with paths.

    We subclass ``tuple``, each element of the tuple represents an element of the path.

    * ``Path('/foo/bar/baz')[1]`` --> ``'bar'``
    * ``Path('/foo/bar/baz')[1:2]`` --> ``Path('bar/baz')``
    * ``Path('/foo/bar')['baz']`` --> ``Path('/foo/bar/baz')``
    * ``str(Path('/foo/bar/baz'))`` --> ``'/foo/bar/baz'``
    """

    # Saves a little bit of memory usage
    __slots__ = ()

    def __new__(cls, value, separator=None):
        def unicode_if_needed(s):
            if isinstance(s, str):
@ -38,7 +40,7 @@ class Path(tuple):
            except UnicodeDecodeError:
                logging.warning("Could not decode %r", s)
                raise

        if isinstance(value, Path):
            return value
        if not separator:
@ -47,44 +49,53 @@ class Path(tuple):
            value = unicode_if_needed(value)
        if isinstance(value, str):
            if value:
                if (separator not in value) and ('/' in value):
                    separator = '/'
                if (separator not in value) and ("/" in value):
                    separator = "/"
                value = value.split(separator)
            else:
                value = ()
        else:
            if any(isinstance(x, bytes) for x in value):
                value = [unicode_if_needed(x) for x in value]
            #value is a tuple/list
            # value is a tuple/list
            if any(separator in x for x in value):
                #We have a component with a separator in it. Let's rejoin it, and generate another path.
                # We have a component with a separator in it. Let's rejoin it, and generate another path.
                return Path(separator.join(value), separator)
        if (len(value) > 1) and (not value[-1]):
            value = value[:-1] #We never want a path to end with a '' (because Path() can be called with a trailing slash ending path)
            value = value[
                :-1
            ]  # We never want a path to end with a '' (because Path() can be called with a trailing slash ending path)
        return tuple.__new__(cls, value)

    def __add__(self, other):
        other = Path(other)
        if other and (not other[0]):
            other = other[1:]
        return Path(tuple.__add__(self, other))

    def __contains__(self, item):
        if isinstance(item, Path):
            return item[:len(self)] == self
            return item[: len(self)] == self
        else:
            return tuple.__contains__(self, item)

    def __eq__(self, other):
        return tuple.__eq__(self, Path(other))

    def __getitem__(self, key):
        if isinstance(key, slice):
            if isinstance(key.start, Path):
                equal_elems = list(takewhile(lambda pair: pair[0] == pair[1], zip(self, key.start)))
                equal_elems = list(
                    takewhile(lambda pair: pair[0] == pair[1], zip(self, key.start))
                )
                key = slice(len(equal_elems), key.stop, key.step)
            if isinstance(key.stop, Path):
                equal_elems = list(takewhile(lambda pair: pair[0] == pair[1], zip(reversed(self), reversed(key.stop))))
                equal_elems = list(
                    takewhile(
                        lambda pair: pair[0] == pair[1],
                        zip(reversed(self), reversed(key.stop)),
                    )
                )
                stop = -len(equal_elems) if equal_elems else None
                key = slice(key.start, stop, key.step)
            return Path(tuple.__getitem__(self, key))
@ -92,31 +103,31 @@ class Path(tuple):
            return self + key
        else:
            return tuple.__getitem__(self, key)

    def __hash__(self):
        return tuple.__hash__(self)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __radd__(self, other):
        return Path(other) + self

    def __str__(self):
        if len(self) == 1:
            first = self[0]
            if (len(first) == 2) and (first[1] == ':'): #Windows drive letter
                return first + '\\'
            elif not len(first): #root directory
                return '/'
            if (len(first) == 2) and (first[1] == ":"):  # Windows drive letter
                return first + "\\"
            elif not len(first):  # root directory
                return "/"
        return os.sep.join(self)

    def has_drive_letter(self):
        if not self:
            return False
        first = self[0]
        return (len(first) == 2) and (first[1] == ':')
        return (len(first) == 2) and (first[1] == ":")

    def is_parent_of(self, other):
        """Whether ``other`` is a subpath of ``self``.

@ -133,29 +144,29 @@ class Path(tuple):
            return self[1:]
        else:
            return self

    def tobytes(self):
        return str(self).encode(sys.getfilesystemencoding())

    def parent(self):
        """Returns the parent path.

        ``Path('/foo/bar/baz').parent()`` --> ``Path('/foo/bar')``
        """
        return self[:-1]

    @property
    def name(self):
        """Last element of the path (filename), with extension.

        ``Path('/foo/bar/baz').name`` --> ``'baz'``
        """
        return self[-1]

    # OS method wrappers
    def exists(self):
        return op.exists(str(self))

    def copy(self, dest_path):
        return shutil.copy(str(self), str(dest_path))

@ -200,36 +211,44 @@ class Path(tuple):

    def stat(self):
        return os.stat(str(self))


def pathify(f):
    """Ensure that every annotated :class:`Path` argument is actually a path.

    When a function is decorated with ``@pathify``, every argument annotated as Path will be
    converted to a Path if it wasn't already. Example::

        @pathify
        def foo(path: Path, otherarg):
            return path.listdir()

    Calling ``foo('/bar', 0)`` will convert ``'/bar'`` to ``Path('/bar')``.
    """
    sig = signature(f)
    pindexes = {i for i, p in enumerate(sig.parameters.values()) if p.annotation is Path}
    pindexes = {
        i for i, p in enumerate(sig.parameters.values()) if p.annotation is Path
    }
    pkeys = {k: v for k, v in sig.parameters.items() if v.annotation is Path}

    def path_or_none(p):
        return None if p is None else Path(p)

    @wraps(f)
    def wrapped(*args, **kwargs):
        args = tuple((path_or_none(a) if i in pindexes else a) for i, a in enumerate(args))
        args = tuple(
            (path_or_none(a) if i in pindexes else a) for i, a in enumerate(args)
        )
        kwargs = {k: (path_or_none(v) if k in pkeys else v) for k, v in kwargs.items()}
        return f(*args, **kwargs)

    return wrapped


def log_io_error(func):
    """Catches OSError, IOError and WindowsError and logs them.
    """

    @wraps(func)
    def wrapper(path, *args, **kwargs):
        try:
@ -239,5 +258,5 @@ def log_io_error(func):
            classname = e.__class__.__name__
            funcname = func.__name__
            logging.warn(msg.format(classname, funcname, str(path), str(e)))

    return wrapper
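A short sketch of the tuple-based Path semantics documented above, plus the @pathify decorator; printed values assume a POSIX os.sep:

    from hscommon.path import Path, pathify

    p = Path("/foo/bar/baz")
    print(p[1])                              # 'bar'
    print(str(p.parent()))                   # '/foo/bar'
    print(p.name)                            # 'baz'
    # __contains__: item is "in" a path when that path is a prefix of it
    print(Path("/foo/bar/baz") in Path("/foo"))  # True

    @pathify
    def last_component(path: Path):
        return path.name

    print(last_component("/foo/qux"))        # 'qux' -- str converted to Path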
@ -1,8 +1,8 @@
# Created On: 2011/09/22
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

# Yes, I know, there's the 'platform' unit for this kind of stuff, but the thing is that I got a
@ -11,6 +11,6 @@

import sys

ISWINDOWS = sys.platform == 'win32'
ISOSX = sys.platform == 'darwin'
ISLINUX = sys.platform.startswith('linux')
ISWINDOWS = sys.platform == "win32"
ISOSX = sys.platform == "darwin"
ISLINUX = sys.platform.startswith("linux")
@ -18,20 +18,17 @@ import os
import imp
import sys
import glob
import time
import token
import tokenize
import operator

__version__ = '1.5'
__version__ = "1.5"

default_keywords = ['_']
DEFAULTKEYWORDS = ', '.join(default_keywords)
default_keywords = ["_"]
DEFAULTKEYWORDS = ", ".join(default_keywords)

EMPTYSTRING = ''
EMPTYSTRING = ""


# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# there.
pot_header = """
@ -41,17 +38,17 @@ msgstr ""
"Content-Transfer-Encoding: utf-8\\n"
"""


def usage(code, msg=''):
def usage(code, msg=""):
    print(__doc__ % globals(), file=sys.stderr)
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(code)


escapes = []


def make_escapes(pass_iso8859):
    global escapes
    if pass_iso8859:
@ -66,11 +63,11 @@ def make_escapes(pass_iso8859):
            escapes.append(chr(i))
        else:
            escapes.append("\\%03o" % i)
    escapes[ord('\\')] = '\\\\'
    escapes[ord('\t')] = '\\t'
    escapes[ord('\r')] = '\\r'
    escapes[ord('\n')] = '\\n'
    escapes[ord('\"')] = '\\"'
    escapes[ord("\\")] = "\\\\"
    escapes[ord("\t")] = "\\t"
    escapes[ord("\r")] = "\\r"
    escapes[ord("\n")] = "\\n"
    escapes[ord('"')] = '\\"'


def escape(s):
@ -83,26 +80,26 @@ def escape(s):

def safe_eval(s):
    # unwrap quotes, safely
    return eval(s, {'__builtins__':{}}, {})
    return eval(s, {"__builtins__": {}}, {})


def normalize(s):
    # This converts the various Python string types into a format that is
    # appropriate for .po files, namely much closer to C style.
    lines = s.split('\n')
    lines = s.split("\n")
    if len(lines) == 1:
        s = '"' + escape(s) + '"'
    else:
        if not lines[-1]:
            del lines[-1]
            lines[-1] = lines[-1] + '\n'
            lines[-1] = lines[-1] + "\n"
        for i in range(len(lines)):
            lines[i] = escape(lines[i])
        lineterm = '\\n"\n"'
        s = '""\n"' + lineterm.join(lines) + '"'
    return s


def containsAny(str, set):
    """Check whether 'str' contains ANY of the chars in 'set'"""
    return 1 in [c in str for c in set]
@ -111,20 +108,24 @@ def containsAny(str, set):
def _visit_pyfiles(list, dirname, names):
    """Helper for getFilesForName()."""
    # get extension for python source files
    if '_py_ext' not in globals():
    if "_py_ext" not in globals():
        global _py_ext
        _py_ext = [triple[0] for triple in imp.get_suffixes()
                   if triple[2] == imp.PY_SOURCE][0]
        _py_ext = [
            triple[0] for triple in imp.get_suffixes() if triple[2] == imp.PY_SOURCE
        ][0]

    # don't recurse into CVS directories
    if 'CVS' in names:
        names.remove('CVS')
    if "CVS" in names:
        names.remove("CVS")

    # add all *.py files to list
    list.extend(
        [os.path.join(dirname, file) for file in names
         if os.path.splitext(file)[1] == _py_ext]
    )
        [
            os.path.join(dirname, file)
            for file in names
            if os.path.splitext(file)[1] == _py_ext
        ]
    )


def _get_modpkg_path(dotted_name, pathlist=None):
@ -135,13 +136,14 @@ def _get_modpkg_path(dotted_name, pathlist=None):
    extension module.
    """
    # split off top-most name
    parts = dotted_name.split('.', 1)
    parts = dotted_name.split(".", 1)

    if len(parts) > 1:
        # we have a dotted path, import top-level package
        try:
            file, pathname, description = imp.find_module(parts[0], pathlist)
            if file: file.close()
            if file:
                file.close()
        except ImportError:
            return None

@ -154,8 +156,7 @@ def _get_modpkg_path(dotted_name, pathlist=None):
    else:
        # plain name
        try:
            file, pathname, description = imp.find_module(
                dotted_name, pathlist)
            file, pathname, description = imp.find_module(dotted_name, pathlist)
            if file:
                file.close()
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
@ -195,7 +196,7 @@ def getFilesForName(name):

    return []


class TokenEater:
    def __init__(self, options):
        self.__options = options
@ -208,9 +209,9 @@ class TokenEater:

    def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        #        import token
        #        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
        #              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])

    def __waiting(self, ttype, tstring, lineno):
@ -226,7 +227,7 @@ class TokenEater:
                self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
            if ttype == tokenize.NAME and tstring in ("class", "def"):
                self.__state = self.__suiteseen
                return
            if ttype == tokenize.NAME and tstring in opts.keywords:
@ -234,7 +235,7 @@ class TokenEater:

    def __suiteseen(self, ttype, tstring, lineno):
        # ignore anything until we see the colon
        if ttype == tokenize.OP and tstring == ':':
        if ttype == tokenize.OP and tstring == ":":
            self.__state = self.__suitedocstring

    def __suitedocstring(self, ttype, tstring, lineno):
@ -242,13 +243,12 @@ class TokenEater:
        if ttype == tokenize.STRING:
            self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
            self.__state = self.__waiting
        elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                           tokenize.COMMENT):
        elif ttype not in (tokenize.NEWLINE, tokenize.INDENT, tokenize.COMMENT):
            # there was no class docstring
            self.__state = self.__waiting

    def __keywordseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == '(':
        if ttype == tokenize.OP and tstring == "(":
            self.__data = []
            self.__lineno = lineno
            self.__state = self.__openseen
@ -256,7 +256,7 @@ class TokenEater:
            self.__state = self.__waiting

    def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
        if ttype == tokenize.OP and tstring == ")":
            # We've seen the last of the translatable strings. Record the
            # line number of the first line of the strings and update the list
            # of messages seen. Reset state for the next batch. If there
@ -266,20 +266,25 @@ class TokenEater:
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
        elif ttype not in [
            tokenize.COMMENT,
            token.INDENT,
            token.DEDENT,
            token.NEWLINE,
            tokenize.NL,
        ]:
            # warn if we see anything else than STRING or whitespace
            print('*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"' % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
            }, file=sys.stderr)
            print(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                % {"token": tstring, "file": self.__curfile, "lineno": self.__lineno},
                file=sys.stderr,
            )
            self.__state = self.__waiting

    def __addentry(self, msg, lineno=None, isdocstring=0):
        if lineno is None:
            lineno = self.__lineno
        if not msg in self.__options.toexclude:
        if msg not in self.__options.toexclude:
            entry = (self.__curfile, lineno)
            self.__messages.setdefault(msg, {})[entry] = isdocstring

@ -289,7 +294,6 @@ class TokenEater:

    def write(self, fp):
        options = self.__options
        timestamp = time.strftime('%Y-%m-%d %H:%M+%Z')
        # The time stamp in the header doesn't have the same format as that
        # generated by xgettext...
        print(pot_header, file=fp)
@ -317,15 +321,15 @@ class TokenEater:
            # location comments are different b/w Solaris and GNU:
            elif options.locationstyle == options.SOLARIS:
                for filename, lineno in v:
                    d = {'filename': filename, 'lineno': lineno}
                    print('# File: %(filename)s, line: %(lineno)d' % d, file=fp)
                    d = {"filename": filename, "lineno": lineno}
                    print("# File: %(filename)s, line: %(lineno)d" % d, file=fp)
            elif options.locationstyle == options.GNU:
                # fit as many locations on one line, as long as the
                # resulting line length doesn't exceed 'options.width'
                locline = '#:'
                locline = "#:"
                for filename, lineno in v:
                    d = {'filename': filename, 'lineno': lineno}
                    s = ' %(filename)s:%(lineno)d' % d
                    d = {"filename": filename, "lineno": lineno}
                    s = " %(filename)s:%(lineno)d" % d
                    if len(locline) + len(s) <= options.width:
                        locline = locline + s
                    else:
@ -334,37 +338,34 @@ class TokenEater:
                if len(locline) > 2:
                    print(locline, file=fp)
                if isdocstring:
                    print('#, docstring', file=fp)
                print('msgid', normalize(k), file=fp)
                    print("#, docstring", file=fp)
                print("msgid", normalize(k), file=fp)
                print('msgstr ""\n', file=fp)


def main(source_files, outpath, keywords=None):
    global default_keywords

    # for holding option values
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0 # FIXME: currently this option has no effect at all.
        extractall = 0  # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = []
        outfile = 'messages.pot'
        outfile = "messages.pot"
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        excludefilename = ""
        docstrings = 0
        nodocstrings = {}

    options = Options()
    locations = {'gnu' : options.GNU,
                 'solaris' : options.SOLARIS,
                 }

    options.outfile = outpath
    if keywords:
        options.keywords = keywords
@ -378,11 +379,14 @@ def main(source_files, outpath, keywords=None):
    # initialize list of strings to exclude
    if options.excludefilename:
        try:
            fp = open(options.excludefilename, encoding='utf-8')
            fp = open(options.excludefilename, encoding="utf-8")
            options.toexclude = fp.readlines()
            fp.close()
        except IOError:
            print("Can't read --exclude-file: %s" % options.excludefilename, file=sys.stderr)
            print(
                "Can't read --exclude-file: %s" % options.excludefilename,
                file=sys.stderr,
            )
            sys.exit(1)
    else:
        options.toexclude = []
@ -391,8 +395,8 @@ def main(source_files, outpath, keywords=None):
    eater = TokenEater(options)
    for filename in source_files:
        if options.verbose:
            print('Working on %s' % filename)
        fp = open(filename, encoding='utf-8')
            print("Working on %s" % filename)
        fp = open(filename, encoding="utf-8")
        closep = 1
        try:
            eater.set_filename(filename)
@ -401,14 +405,16 @@ def main(source_files, outpath, keywords=None):
            for _token in tokens:
                eater(*_token)
        except tokenize.TokenError as e:
            print('%s: %s, line %d, column %d' % (
                e.args[0], filename, e.args[1][0], e.args[1][1]),
                file=sys.stderr)
            print(
                "%s: %s, line %d, column %d"
                % (e.args[0], filename, e.args[1][0], e.args[1][1]),
                file=sys.stderr,
            )
        finally:
            if closep:
                fp.close()

    fp = open(options.outfile, 'w', encoding='utf-8')
    fp = open(options.outfile, "w", encoding="utf-8")
    closep = 1
    try:
        eater.write(fp)
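For context, this is how loc.generate_pot drives the module: collect the .py files and let main() tokenize them and write the .pot. The file paths and the "tr" keyword below are illustrative assumptions:

    from hscommon import pygettext

    source_files = ["core/app.py", "core/gui/tree.py"]
    pygettext.main(source_files, outpath="locale/core.pot", keywords=["tr"])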
@ -19,16 +19,28 @@ CHANGELOG_FORMAT = """
{description}
"""


def tixgen(tixurl):
    """This is a filter *generator*. tixurl is a url pattern for the tix with a {0} placeholder
    for the tix #
    """
    urlpattern = tixurl.format('\\1') # will be replaced by the content of the first group in re
    R = re.compile(r'#(\d+)')
    repl = '`#\\1 <{}>`__'.format(urlpattern)
    urlpattern = tixurl.format(
        "\\1"
    )  # will be replaced by the content of the first group in re
    R = re.compile(r"#(\d+)")
    repl = "`#\\1 <{}>`__".format(urlpattern)
    return lambda text: R.sub(repl, text)

def gen(basepath, destpath, changelogpath, tixurl, confrepl=None, confpath=None, changelogtmpl=None):

def gen(
    basepath,
    destpath,
    changelogpath,
    tixurl,
    confrepl=None,
    confpath=None,
    changelogtmpl=None,
):
    """Generate sphinx docs with all bells and whistles.

    basepath: The base sphinx source path.
@ -40,41 +52,47 @@ def gen(basepath, destpath, changelogpath, tixurl, confrepl=None, confpath=None,
    if confrepl is None:
        confrepl = {}
    if confpath is None:
        confpath = op.join(basepath, 'conf.tmpl')
        confpath = op.join(basepath, "conf.tmpl")
    if changelogtmpl is None:
        changelogtmpl = op.join(basepath, 'changelog.tmpl')
        changelogtmpl = op.join(basepath, "changelog.tmpl")
    changelog = read_changelog_file(changelogpath)
    tix = tixgen(tixurl)
    rendered_logs = []
    for log in changelog:
        description = tix(log['description'])
        description = tix(log["description"])
        # The format of the changelog descriptions is markdown, but since we only use bulleted lists
        # and links, it's not worth depending on the markdown package. A simple regexp suffices.
        description = re.sub(r'\[(.*?)\]\((.*?)\)', '`\\1 <\\2>`__', description)
        rendered = CHANGELOG_FORMAT.format(version=log['version'], date=log['date_str'],
                                           description=description)
        description = re.sub(r"\[(.*?)\]\((.*?)\)", "`\\1 <\\2>`__", description)
        rendered = CHANGELOG_FORMAT.format(
            version=log["version"], date=log["date_str"], description=description
        )
        rendered_logs.append(rendered)
    confrepl['version'] = changelog[0]['version']
    changelog_out = op.join(basepath, 'changelog.rst')
    filereplace(changelogtmpl, changelog_out, changelog='\n'.join(rendered_logs))
    confrepl["version"] = changelog[0]["version"]
    changelog_out = op.join(basepath, "changelog.rst")
    filereplace(changelogtmpl, changelog_out, changelog="\n".join(rendered_logs))
    if op.exists(confpath):
        conf_out = op.join(basepath, 'conf.py')
        conf_out = op.join(basepath, "conf.py")
        filereplace(confpath, conf_out, **confrepl)
    if LooseVersion(get_distribution("sphinx").version) >= LooseVersion("1.7.0"):
        from sphinx.cmd.build import build_main as sphinx_build

        # Call the sphinx_build function, which is the same as doing sphinx-build from the cli
        try:
            sphinx_build([basepath, destpath])
        except SystemExit:
            print("Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit")
            print(
                "Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit"
            )
    else:
        # We used to call sphinx-build with print_and_do(), but the problem was that the virtualenv
        # of the calling python wasn't correctly considered and caused problems with documentation
        # relying on autodoc (which tries to import the module to auto-document, but fails because of
        # missing dependencies which are in the virtualenv). Here, we do exactly what is done when
        # calling the command from bash.
        cmd = load_entry_point('Sphinx', 'console_scripts', 'sphinx-build')
        cmd = load_entry_point("Sphinx", "console_scripts", "sphinx-build")
        try:
            cmd(['sphinx-build', basepath, destpath])
            cmd(["sphinx-build", basepath, destpath])
        except SystemExit:
            print("Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit")
            print(
                "Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit"
            )
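Concretely, the tixgen filter turns "#NNN" ticket references in changelog text into RST links. A small sketch, with a hypothetical tracker URL:

    from hscommon.sphinxgen import tixgen

    fix_tix = tixgen("https://example.com/issues/{}")  # URL is an assumption
    print(fix_tix("Fixed crash (#42)"))
    # -> Fixed crash (`#42 <https://example.com/issues/42>`__)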
@ -2,39 +2,39 @@
# Created On: 2007/05/19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import sys
import os
import os.path as op
import threading
from queue import Queue
import time
import sqlite3 as sqlite

STOP = object()
COMMIT = object()
ROLLBACK = object()


class FakeCursor(list):
    # It's not possible to use sqlite cursors on another thread than the connection. Thus,
    # we can't directly return the cursor. We have to fetch all results and support its interface.
    def fetchall(self):
        return self

    def fetchone(self):
        try:
            return self.pop(0)
        except IndexError:
            return None


class _ActualThread(threading.Thread):
    ''' We can't use this class directly because thread objects are not automatically freed when
    """ We can't use this class directly because thread objects are not automatically freed when
    nothing refers to them, making it hang the application if not explicitly closed.
    '''
    """

    def __init__(self, dbname, autocommit):
        threading.Thread.__init__(self)
        self._queries = Queue()
@ -47,7 +47,7 @@ class _ActualThread(threading.Thread):
        self.lastrowid = -1
        self.setDaemon(True)
        self.start()

    def _query(self, query):
        with self._lock:
            wait_token = object()
@ -56,30 +56,30 @@ class _ActualThread(threading.Thread):
        self._waiting_list.remove(wait_token)
        result = self._results.get()
        return result

    def close(self):
        if not self._run:
            return
        self._query(STOP)

    def commit(self):
        if not self._run:
            return None # Connection closed
            return None  # Connection closed
        self._query(COMMIT)

    def execute(self, sql, values=()):
        if not self._run:
            return None # Connection closed
            return None  # Connection closed
        result = self._query((sql, values))
        if isinstance(result, Exception):
            raise result
        return result

    def rollback(self):
        if not self._run:
            return None # Connection closed
            return None  # Connection closed
        self._query(ROLLBACK)

    def run(self):
        # The whole chdir thing is because sqlite doesn't handle directory names with non-ascii characters at all.
        oldpath = os.getcwd()
@ -111,31 +111,31 @@ class _ActualThread(threading.Thread):
                result = e
            self._results.put(result)
        con.close()


class ThreadedConn:
    """``sqlite`` connections can't be used across threads. ``ThreadedConn`` opens a sqlite
    connection in its own thread and sends it queries through a queue, making it suitable in a
    multi-threaded environment.
    """

    def __init__(self, dbname, autocommit):
        self._t = _ActualThread(dbname, autocommit)
        self.lastrowid = -1

    def __del__(self):
        self.close()

    def close(self):
        self._t.close()

    def commit(self):
        self._t.commit()

    def execute(self, sql, values=()):
        result = self._t.execute(sql, values)
        self.lastrowid = self._t.lastrowid
        return result

    def rollback(self):
        self._t.rollback()
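A hedged sketch of using the threaded connection from several threads; the import path, table, and queries are illustrative assumptions, and the FakeCursor return value for selects follows the comment in the code above:

    import threading
    from hscommon.sqlite import ThreadedConn   # import path is an assumption

    con = ThreadedConn(":memory:", autocommit=False)
    con.execute("CREATE TABLE t (x INTEGER)")

    def insert(n):
        # safe from any thread: the query is queued to the connection's thread
        con.execute("INSERT INTO t VALUES (?)", (n,))

    threads = [threading.Thread(target=insert, args=(i,)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    con.commit()
    rows = con.execute("SELECT x FROM t").fetchall()  # FakeCursor, a plain list
    con.close()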
@ -2,103 +2,105 @@
|
||||
# Created On: 2008-01-08
|
||||
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
|
||||
|
||||
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
||||
# which should be included with this package. The terms are also available at
|
||||
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
||||
# which should be included with this package. The terms are also available at
|
||||
# http://www.gnu.org/licenses/gpl-3.0.html
|
||||
|
||||
from ..conflict import *
|
||||
from ..path import Path
|
||||
from ..testutil import eq_
|
||||
|
||||
|
||||
class TestCase_GetConflictedName:
|
||||
def test_simple(self):
|
||||
name = get_conflicted_name(['bar'], 'bar')
|
||||
eq_('[000] bar', name)
|
||||
name = get_conflicted_name(['bar', '[000] bar'], 'bar')
|
||||
eq_('[001] bar', name)
|
||||
|
||||
name = get_conflicted_name(["bar"], "bar")
|
||||
eq_("[000] bar", name)
|
||||
name = get_conflicted_name(["bar", "[000] bar"], "bar")
|
||||
eq_("[001] bar", name)
|
||||
|
||||
def test_no_conflict(self):
|
||||
name = get_conflicted_name(['bar'], 'foobar')
|
||||
eq_('foobar', name)
|
||||
|
||||
name = get_conflicted_name(["bar"], "foobar")
|
||||
eq_("foobar", name)
|
||||
|
||||
def test_fourth_digit(self):
|
||||
# This test is long because every time we have to add a conflicted name,
|
||||
# a test must be made for every other conflicted name existing...
|
||||
# Anyway, this has very few chances to happen.
|
||||
names = ['bar'] + ['[%03d] bar' % i for i in range(1000)]
|
||||
name = get_conflicted_name(names, 'bar')
|
||||
eq_('[1000] bar', name)
|
||||
|
||||
names = ["bar"] + ["[%03d] bar" % i for i in range(1000)]
|
||||
name = get_conflicted_name(names, "bar")
|
||||
eq_("[1000] bar", name)
|
||||
|
||||
def test_auto_unconflict(self):
|
||||
# Automatically unconflict the name if it's already conflicted.
|
||||
name = get_conflicted_name([], '[000] foobar')
|
||||
eq_('foobar', name)
|
||||
name = get_conflicted_name(['bar'], '[001] bar')
|
||||
eq_('[000] bar', name)
|
||||
|
||||
name = get_conflicted_name([], "[000] foobar")
|
||||
eq_("foobar", name)
|
||||
name = get_conflicted_name(["bar"], "[001] bar")
|
||||
eq_("[000] bar", name)
|
||||
|
||||
|
||||
class TestCase_GetUnconflictedName:
|
||||
def test_main(self):
|
||||
eq_('foobar',get_unconflicted_name('[000] foobar'))
|
||||
eq_('foobar',get_unconflicted_name('[9999] foobar'))
|
||||
eq_('[000]foobar',get_unconflicted_name('[000]foobar'))
|
||||
eq_('[000a] foobar',get_unconflicted_name('[000a] foobar'))
|
||||
eq_('foobar',get_unconflicted_name('foobar'))
|
||||
eq_('foo [000] bar',get_unconflicted_name('foo [000] bar'))
|
||||
|
||||
eq_("foobar", get_unconflicted_name("[000] foobar"))
|
||||
eq_("foobar", get_unconflicted_name("[9999] foobar"))
|
||||
eq_("[000]foobar", get_unconflicted_name("[000]foobar"))
|
||||
eq_("[000a] foobar", get_unconflicted_name("[000a] foobar"))
|
||||
eq_("foobar", get_unconflicted_name("foobar"))
|
||||
eq_("foo [000] bar", get_unconflicted_name("foo [000] bar"))
|
||||
|
||||
|
||||
class TestCase_IsConflicted:
|
||||
def test_main(self):
|
||||
assert is_conflicted('[000] foobar')
|
||||
assert is_conflicted('[9999] foobar')
|
||||
assert not is_conflicted('[000]foobar')
|
||||
assert not is_conflicted('[000a] foobar')
|
||||
assert not is_conflicted('foobar')
|
||||
assert not is_conflicted('foo [000] bar')
|
||||
|
||||
assert is_conflicted("[000] foobar")
|
||||
assert is_conflicted("[9999] foobar")
|
||||
assert not is_conflicted("[000]foobar")
|
||||
assert not is_conflicted("[000a] foobar")
|
||||
assert not is_conflicted("foobar")
|
||||
assert not is_conflicted("foo [000] bar")
|
||||
|
||||
|
||||
class TestCase_move_copy:
    def pytest_funcarg__do_setup(self, request):
        tmpdir = request.getfuncargvalue('tmpdir')

        tmpdir = request.getfuncargvalue("tmpdir")
        self.path = Path(str(tmpdir))
        self.path['foo'].open('w').close()
        self.path['bar'].open('w').close()
        self.path['dir'].mkdir()

        self.path["foo"].open("w").close()
        self.path["bar"].open("w").close()
        self.path["dir"].mkdir()

    def test_move_no_conflict(self, do_setup):
        smart_move(self.path + 'foo', self.path + 'baz')
        assert self.path['baz'].exists()
        assert not self.path['foo'].exists()

    def test_copy_no_conflict(self, do_setup): # No need to duplicate the rest of the tests... Let's just test on move
        smart_copy(self.path + 'foo', self.path + 'baz')
        assert self.path['baz'].exists()
        assert self.path['foo'].exists()

        smart_move(self.path + "foo", self.path + "baz")
        assert self.path["baz"].exists()
        assert not self.path["foo"].exists()

    def test_copy_no_conflict(
        self, do_setup
    ):  # No need to duplicate the rest of the tests... Let's just test on move
        smart_copy(self.path + "foo", self.path + "baz")
        assert self.path["baz"].exists()
        assert self.path["foo"].exists()

    def test_move_no_conflict_dest_is_dir(self, do_setup):
        smart_move(self.path + 'foo', self.path + 'dir')
        assert self.path['dir']['foo'].exists()
        assert not self.path['foo'].exists()

        smart_move(self.path + "foo", self.path + "dir")
        assert self.path["dir"]["foo"].exists()
        assert not self.path["foo"].exists()

    def test_move_conflict(self, do_setup):
        smart_move(self.path + 'foo', self.path + 'bar')
        assert self.path['[000] bar'].exists()
        assert not self.path['foo'].exists()

        smart_move(self.path + "foo", self.path + "bar")
        assert self.path["[000] bar"].exists()
        assert not self.path["foo"].exists()

    def test_move_conflict_dest_is_dir(self, do_setup):
        smart_move(self.path['foo'], self.path['dir'])
        smart_move(self.path['bar'], self.path['foo'])
        smart_move(self.path['foo'], self.path['dir'])
        assert self.path['dir']['foo'].exists()
        assert self.path['dir']['[000] foo'].exists()
        assert not self.path['foo'].exists()
        assert not self.path['bar'].exists()

        smart_move(self.path["foo"], self.path["dir"])
        smart_move(self.path["bar"], self.path["foo"])
        smart_move(self.path["foo"], self.path["dir"])
        assert self.path["dir"]["foo"].exists()
        assert self.path["dir"]["[000] foo"].exists()
        assert not self.path["foo"].exists()
        assert not self.path["bar"].exists()

    def test_copy_folder(self, tmpdir):
        # smart_copy also works on folders
        path = Path(str(tmpdir))
        path['foo'].mkdir()
        path['bar'].mkdir()
        smart_copy(path['foo'], path['bar']) # no crash
        assert path['[000] bar'].exists()

        path["foo"].mkdir()
        path["bar"].mkdir()
        smart_copy(path["foo"], path["bar"])  # no crash
        assert path["[000] bar"].exists()
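
# A sketch of the smart_move() behavior these tests describe: move into an
# existing destination directory, and fall back to get_conflicted_name() when
# the target name is taken (an illustrative assumption, not the real hscommon
# code; smart_copy would follow the same shape with shutil.copy/copytree):

import os
import shutil


def smart_move(source, dest):
    # Moving a non-directory onto an existing directory means "move into it".
    if os.path.isdir(str(dest)) and not os.path.isdir(str(source)):
        dest = dest[source.name]
    if os.path.exists(str(dest)):
        # Pick "[000] name", "[001] name", ... the way test_move_conflict expects.
        taken = os.listdir(str(dest.parent()))
        dest = dest.parent()[get_conflicted_name(taken, dest.name)]
    shutil.move(str(source), str(dest))
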
@ -1,12 +1,13 @@
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ..testutil import eq_
from ..notify import Broadcaster, Listener, Repeater


class HelloListener(Listener):
    def __init__(self, broadcaster):
        Listener.__init__(self, broadcaster)

@ -15,6 +16,7 @@ class HelloListener(Listener):
    def hello(self):
        self.hello_count += 1


class HelloRepeater(Repeater):
    def __init__(self, broadcaster):
        Repeater.__init__(self, broadcaster)

@ -23,13 +25,15 @@ class HelloRepeater(Repeater):
    def hello(self):
        self.hello_count += 1


def create_pair():
    b = Broadcaster()
    l = HelloListener(b)
    return b, l


def test_disconnect_during_notification():
    # When a listener disconnects another listener the other listener will not receive a
    # When a listener disconnects another listener the other listener will not receive a
    # notification.
    # This whole complication scheme below is because the order of the notification is not
    # guaranteed. We could disconnect everything from self.broadcaster.listeners, but this

@ -38,103 +42,116 @@ def test_disconnect_during_notification():
        def __init__(self, broadcaster):
            Listener.__init__(self, broadcaster)
            self.hello_count = 0

        def hello(self):
            self.hello_count += 1
            self.other.disconnect()

    broadcaster = Broadcaster()
    first = Disconnecter(broadcaster)
    second = Disconnecter(broadcaster)
    first.other, second.other = second, first
    first.connect()
    second.connect()
    broadcaster.notify('hello')
    broadcaster.notify("hello")
    # only one of them was notified
    eq_(first.hello_count + second.hello_count, 1)


def test_disconnect():
    # After a disconnect, the listener doesn't hear anything.
    b, l = create_pair()
    l.connect()
    l.disconnect()
    b.notify('hello')
    b.notify("hello")
    eq_(l.hello_count, 0)


def test_disconnect_when_not_connected():
    # When disconnecting an already disconnected listener, nothing happens.
    b, l = create_pair()
    l.disconnect()


def test_not_connected_on_init():
    # A listener is not initialized connected.
    b, l = create_pair()
    b.notify('hello')
    b.notify("hello")
    eq_(l.hello_count, 0)


def test_notify():
    # The listener listens to the broadcaster.
    b, l = create_pair()
    l.connect()
    b.notify('hello')
    b.notify("hello")
    eq_(l.hello_count, 1)


def test_reconnect():
    # It's possible to reconnect a listener after disconnection.
    b, l = create_pair()
    l.connect()
    l.disconnect()
    l.connect()
    b.notify('hello')
    b.notify("hello")
    eq_(l.hello_count, 1)


def test_repeater():
    b = Broadcaster()
    r = HelloRepeater(b)
    l = HelloListener(r)
    r.connect()
    l.connect()
    b.notify('hello')
    b.notify("hello")
    eq_(r.hello_count, 1)
    eq_(l.hello_count, 1)


def test_repeater_with_repeated_notifications():
    # If REPEATED_NOTIFICATIONS is not empty, only notifs in this set are repeated (but they're
    # still dispatched locally).
    class MyRepeater(HelloRepeater):
        REPEATED_NOTIFICATIONS = set(['hello'])
        REPEATED_NOTIFICATIONS = set(["hello"])

        def __init__(self, broadcaster):
            HelloRepeater.__init__(self, broadcaster)
            self.foo_count = 0

        def foo(self):
            self.foo_count += 1

    b = Broadcaster()
    r = MyRepeater(b)
    l = HelloListener(r)
    r.connect()
    l.connect()
    b.notify('hello')
    b.notify('foo') # if the repeater repeated this notif, we'd get a crash on HelloListener
    b.notify("hello")
    b.notify(
        "foo"
    )  # if the repeater repeated this notif, we'd get a crash on HelloListener
    eq_(r.hello_count, 1)
    eq_(l.hello_count, 1)
    eq_(r.foo_count, 1)


def test_repeater_doesnt_try_to_dispatch_to_self_if_it_cant():
    # if a repeater doesn't handle a particular message, it doesn't crash and simply repeats it.
    b = Broadcaster()
    r = Repeater(b) # doesnt handle hello
    r = Repeater(b)  # doesnt handle hello
    l = HelloListener(r)
    r.connect()
    l.connect()
    b.notify('hello') # no crash
    b.notify("hello")  # no crash
    eq_(l.hello_count, 1)


def test_bind_messages():
    b, l = create_pair()
    l.bind_messages({'foo', 'bar'}, l.hello)
    l.bind_messages({"foo", "bar"}, l.hello)
    l.connect()
    b.notify('foo')
    b.notify('bar')
    b.notify('hello') # Normal dispatching still work
    b.notify("foo")
    b.notify("bar")
    b.notify("hello")  # Normal dispatching still work
    eq_(l.hello_count, 3)
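
# The behavior pinned down above suggests a dispatch-by-method-name scheme. A
# rough sketch of such a Broadcaster/Listener/Repeater trio (an illustrative
# assumption; hscommon.notify's real implementation may differ in details):

class Broadcaster:
    def __init__(self):
        self.listeners = set()

    def notify(self, msg):
        # Iterate over a copy so a listener can disconnect another listener
        # mid-notification; the membership check skips the disconnected one.
        for listener in self.listeners.copy():
            if listener in self.listeners:
                listener.dispatch(msg)


class Listener:
    def __init__(self, broadcaster):
        self.broadcaster = broadcaster
        self._bound_messages = {}

    def bind_messages(self, messages, func):
        # Route several message names to one callback.
        for message in messages:
            self._bound_messages[message] = func

    def connect(self):
        self.broadcaster.listeners.add(self)

    def disconnect(self):
        # discard() makes disconnecting an unconnected listener a no-op.
        self.broadcaster.listeners.discard(self)

    def dispatch(self, msg):
        # Bound callbacks win; otherwise fall back to a method named like the
        # message, and silently ignore messages we can't handle.
        method = self._bound_messages.get(msg, getattr(self, msg, None))
        if method is not None:
            method()


class Repeater(Broadcaster, Listener):
    REPEATED_NOTIFICATIONS = None  # None/empty means "repeat everything"

    def __init__(self, broadcaster):
        Broadcaster.__init__(self)
        Listener.__init__(self, broadcaster)

    def dispatch(self, msg):
        Listener.dispatch(self, msg)  # dispatch locally first
        if not self.REPEATED_NOTIFICATIONS or msg in self.REPEATED_NOTIFICATIONS:
            self.notify(msg)  # then repeat to our own listeners
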
@ -2,8 +2,8 @@
# Created On: 2006/02/21
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import sys

@ -14,33 +14,39 @@ from pytest import raises, mark
from ..path import Path, pathify
from ..testutil import eq_


def pytest_funcarg__force_ossep(request):
    monkeypatch = request.getfuncargvalue('monkeypatch')
    monkeypatch.setattr(os, 'sep', '/')
    monkeypatch = request.getfuncargvalue("monkeypatch")
    monkeypatch.setattr(os, "sep", "/")


def test_empty(force_ossep):
    path = Path('')
    eq_('',str(path))
    eq_(0,len(path))
    path = Path("")
    eq_("", str(path))
    eq_(0, len(path))
    path = Path(())
    eq_('',str(path))
    eq_(0,len(path))
    eq_("", str(path))
    eq_(0, len(path))


def test_single(force_ossep):
    path = Path('foobar')
    eq_('foobar',path)
    eq_(1,len(path))
    path = Path("foobar")
    eq_("foobar", path)
    eq_(1, len(path))


def test_multiple(force_ossep):
    path = Path('foo/bar')
    eq_('foo/bar',path)
    eq_(2,len(path))
    path = Path("foo/bar")
    eq_("foo/bar", path)
    eq_(2, len(path))


def test_init_with_tuple_and_list(force_ossep):
    path = Path(('foo','bar'))
    eq_('foo/bar',path)
    path = Path(['foo','bar'])
    eq_('foo/bar',path)
    path = Path(("foo", "bar"))
    eq_("foo/bar", path)
    path = Path(["foo", "bar"])
    eq_("foo/bar", path)


def test_init_with_invalid_value(force_ossep):
    try:

@ -49,208 +55,236 @@ def test_init_with_invalid_value(force_ossep):
    except TypeError:
        pass


def test_access(force_ossep):
    path = Path('foo/bar/bleh')
    eq_('foo',path[0])
    eq_('foo',path[-3])
    eq_('bar',path[1])
    eq_('bar',path[-2])
    eq_('bleh',path[2])
    eq_('bleh',path[-1])
    path = Path("foo/bar/bleh")
    eq_("foo", path[0])
    eq_("foo", path[-3])
    eq_("bar", path[1])
    eq_("bar", path[-2])
    eq_("bleh", path[2])
    eq_("bleh", path[-1])


def test_slicing(force_ossep):
    path = Path('foo/bar/bleh')
    path = Path("foo/bar/bleh")
    subpath = path[:2]
    eq_('foo/bar',subpath)
    assert isinstance(subpath,Path)

def test_parent(force_ossep):
    path = Path('foo/bar/bleh')
    subpath = path.parent()
    eq_('foo/bar', subpath)
    eq_("foo/bar", subpath)
    assert isinstance(subpath, Path)


def test_parent(force_ossep):
    path = Path("foo/bar/bleh")
    subpath = path.parent()
    eq_("foo/bar", subpath)
    assert isinstance(subpath, Path)


def test_filename(force_ossep):
    path = Path('foo/bar/bleh.ext')
    eq_(path.name, 'bleh.ext')
    path = Path("foo/bar/bleh.ext")
    eq_(path.name, "bleh.ext")


def test_deal_with_empty_components(force_ossep):
    """Keep ONLY a leading space, which means we want a leading slash.
    """
    eq_('foo//bar',str(Path(('foo','','bar'))))
    eq_('/foo/bar',str(Path(('','foo','bar'))))
    eq_('foo/bar',str(Path('foo/bar/')))
    eq_("foo//bar", str(Path(("foo", "", "bar"))))
    eq_("/foo/bar", str(Path(("", "foo", "bar"))))
    eq_("foo/bar", str(Path("foo/bar/")))


def test_old_compare_paths(force_ossep):
    eq_(Path('foobar'),Path('foobar'))
    eq_(Path('foobar/'),Path('foobar\\','\\'))
    eq_(Path('/foobar/'),Path('\\foobar\\','\\'))
    eq_(Path('/foo/bar'),Path('\\foo\\bar','\\'))
    eq_(Path('/foo/bar'),Path('\\foo\\bar\\','\\'))
    assert Path('/foo/bar') != Path('\\foo\\foo','\\')
    #We also have to test __ne__
    assert not (Path('foobar') != Path('foobar'))
    assert Path('/a/b/c.x') != Path('/a/b/c.y')
    eq_(Path("foobar"), Path("foobar"))
    eq_(Path("foobar/"), Path("foobar\\", "\\"))
    eq_(Path("/foobar/"), Path("\\foobar\\", "\\"))
    eq_(Path("/foo/bar"), Path("\\foo\\bar", "\\"))
    eq_(Path("/foo/bar"), Path("\\foo\\bar\\", "\\"))
    assert Path("/foo/bar") != Path("\\foo\\foo", "\\")
    # We also have to test __ne__
    assert not (Path("foobar") != Path("foobar"))
    assert Path("/a/b/c.x") != Path("/a/b/c.y")


def test_old_split_path(force_ossep):
    eq_(Path('foobar'),('foobar',))
    eq_(Path('foo/bar'),('foo','bar'))
    eq_(Path('/foo/bar/'),('','foo','bar'))
    eq_(Path('\\foo\\bar','\\'),('','foo','bar'))
    eq_(Path("foobar"), ("foobar",))
    eq_(Path("foo/bar"), ("foo", "bar"))
    eq_(Path("/foo/bar/"), ("", "foo", "bar"))
    eq_(Path("\\foo\\bar", "\\"), ("", "foo", "bar"))


def test_representation(force_ossep):
    eq_("('foo', 'bar')",repr(Path(('foo','bar'))))
    eq_("('foo', 'bar')", repr(Path(("foo", "bar"))))


def test_add(force_ossep):
    eq_('foo/bar/bar/foo',Path(('foo','bar')) + Path('bar/foo'))
    eq_('foo/bar/bar/foo',Path('foo/bar') + 'bar/foo')
    eq_('foo/bar/bar/foo',Path('foo/bar') + ('bar','foo'))
    eq_('foo/bar/bar/foo',('foo','bar') + Path('bar/foo'))
    eq_('foo/bar/bar/foo','foo/bar' + Path('bar/foo'))
    #Invalid concatenation
    eq_("foo/bar/bar/foo", Path(("foo", "bar")) + Path("bar/foo"))
    eq_("foo/bar/bar/foo", Path("foo/bar") + "bar/foo")
    eq_("foo/bar/bar/foo", Path("foo/bar") + ("bar", "foo"))
    eq_("foo/bar/bar/foo", ("foo", "bar") + Path("bar/foo"))
    eq_("foo/bar/bar/foo", "foo/bar" + Path("bar/foo"))
    # Invalid concatenation
    try:
        Path(('foo','bar')) + 1
        Path(("foo", "bar")) + 1
        assert False
    except TypeError:
        pass


def test_path_slice(force_ossep):
    foo = Path('foo')
    bar = Path('bar')
    foobar = Path('foo/bar')
    eq_('bar',foobar[foo:])
    eq_('foo',foobar[:bar])
    eq_('foo/bar',foobar[bar:])
    eq_('foo/bar',foobar[:foo])
    eq_((),foobar[foobar:])
    eq_((),foobar[:foobar])
    abcd = Path('a/b/c/d')
    a = Path('a')
    b = Path('b')
    c = Path('c')
    d = Path('d')
    z = Path('z')
    eq_('b/c',abcd[a:d])
    eq_('b/c/d',abcd[a:d+z])
    eq_('b/c',abcd[a:z+d])
    eq_('a/b/c/d',abcd[:z])
    foo = Path("foo")
    bar = Path("bar")
    foobar = Path("foo/bar")
    eq_("bar", foobar[foo:])
    eq_("foo", foobar[:bar])
    eq_("foo/bar", foobar[bar:])
    eq_("foo/bar", foobar[:foo])
    eq_((), foobar[foobar:])
    eq_((), foobar[:foobar])
    abcd = Path("a/b/c/d")
    a = Path("a")
    b = Path("b")
    c = Path("c")
    d = Path("d")
    z = Path("z")
    eq_("b/c", abcd[a:d])
    eq_("b/c/d", abcd[a : d + z])
    eq_("b/c", abcd[a : z + d])
    eq_("a/b/c/d", abcd[:z])


def test_add_with_root_path(force_ossep):
    """if I perform /a/b/c + /d/e/f, I want /a/b/c/d/e/f, not /a/b/c//d/e/f
    """
    eq_('/foo/bar',str(Path('/foo') + Path('/bar')))
    eq_("/foo/bar", str(Path("/foo") + Path("/bar")))


def test_create_with_tuple_that_have_slash_inside(force_ossep, monkeypatch):
    eq_(('','foo','bar'), Path(('/foo','bar')))
    monkeypatch.setattr(os, 'sep', '\\')
    eq_(('','foo','bar'), Path(('\\foo','bar')))
    eq_(("", "foo", "bar"), Path(("/foo", "bar")))
    monkeypatch.setattr(os, "sep", "\\")
    eq_(("", "foo", "bar"), Path(("\\foo", "bar")))


def test_auto_decode_os_sep(force_ossep, monkeypatch):
    """Path should decode either / or os.sep, but always encode in os.sep.
    """
    eq_(('foo\\bar','bleh'),Path('foo\\bar/bleh'))
    monkeypatch.setattr(os, 'sep', '\\')
    eq_(('foo','bar/bleh'),Path('foo\\bar/bleh'))
    path = Path('foo/bar')
    eq_(('foo','bar'),path)
    eq_('foo\\bar',str(path))
    eq_(("foo\\bar", "bleh"), Path("foo\\bar/bleh"))
    monkeypatch.setattr(os, "sep", "\\")
    eq_(("foo", "bar/bleh"), Path("foo\\bar/bleh"))
    path = Path("foo/bar")
    eq_(("foo", "bar"), path)
    eq_("foo\\bar", str(path))


def test_contains(force_ossep):
    p = Path(('foo','bar'))
    assert Path(('foo','bar','bleh')) in p
    assert Path(('foo','bar')) in p
    assert 'foo' in p
    assert 'bleh' not in p
    assert Path('foo') not in p
    p = Path(("foo", "bar"))
    assert Path(("foo", "bar", "bleh")) in p
    assert Path(("foo", "bar")) in p
    assert "foo" in p
    assert "bleh" not in p
    assert Path("foo") not in p


def test_is_parent_of(force_ossep):
    assert Path(('foo','bar')).is_parent_of(Path(('foo','bar','bleh')))
    assert not Path(('foo','bar')).is_parent_of(Path(('foo','baz')))
    assert not Path(('foo','bar')).is_parent_of(Path(('foo','bar')))
    assert Path(("foo", "bar")).is_parent_of(Path(("foo", "bar", "bleh")))
    assert not Path(("foo", "bar")).is_parent_of(Path(("foo", "baz")))
    assert not Path(("foo", "bar")).is_parent_of(Path(("foo", "bar")))


def test_windows_drive_letter(force_ossep):
    p = Path(('c:',))
    eq_('c:\\',str(p))
    p = Path(("c:",))
    eq_("c:\\", str(p))


def test_root_path(force_ossep):
    p = Path('/')
    eq_('/',str(p))
    p = Path("/")
    eq_("/", str(p))


def test_str_encodes_unicode_to_getfilesystemencoding(force_ossep):
    p = Path(('foo','bar\u00e9'))
    eq_('foo/bar\u00e9'.encode(sys.getfilesystemencoding()), p.tobytes())
    p = Path(("foo", "bar\u00e9"))
    eq_("foo/bar\u00e9".encode(sys.getfilesystemencoding()), p.tobytes())


def test_unicode(force_ossep):
    p = Path(('foo','bar\u00e9'))
    eq_('foo/bar\u00e9',str(p))
    p = Path(("foo", "bar\u00e9"))
    eq_("foo/bar\u00e9", str(p))


def test_str_repr_of_mix_between_non_ascii_str_and_unicode(force_ossep):
    u = 'foo\u00e9'
    u = "foo\u00e9"
    encoded = u.encode(sys.getfilesystemencoding())
    p = Path((encoded,'bar'))
    p = Path((encoded, "bar"))
    print(repr(tuple(p)))
    eq_('foo\u00e9/bar'.encode(sys.getfilesystemencoding()), p.tobytes())
    eq_("foo\u00e9/bar".encode(sys.getfilesystemencoding()), p.tobytes())


def test_Path_of_a_Path_returns_self(force_ossep):
    #if Path() is called with a path as value, just return value.
    p = Path('foo/bar')
    # if Path() is called with a path as value, just return value.
    p = Path("foo/bar")
    assert Path(p) is p


def test_getitem_str(force_ossep):
    # path['something'] returns the child path corresponding to the name
    p = Path('/foo/bar')
    eq_(p['baz'], Path('/foo/bar/baz'))
    p = Path("/foo/bar")
    eq_(p["baz"], Path("/foo/bar/baz"))


def test_getitem_path(force_ossep):
    # path[Path('something')] returns the child path corresponding to the name (or subpath)
    p = Path('/foo/bar')
    eq_(p[Path('baz/bleh')], Path('/foo/bar/baz/bleh'))
    p = Path("/foo/bar")
    eq_(p[Path("baz/bleh")], Path("/foo/bar/baz/bleh"))


@mark.xfail(reason="pytest's capture mechanism is flaky, I have to investigate")
def test_log_unicode_errors(force_ossep, monkeypatch, capsys):
    # When there's a UnicodeDecodeError on path creation, log it so it's possible
    # to debug the cause of it.
    monkeypatch.setattr(sys, 'getfilesystemencoding', lambda: 'ascii')
    monkeypatch.setattr(sys, "getfilesystemencoding", lambda: "ascii")
    with raises(UnicodeDecodeError):
        Path(['', b'foo\xe9'])
        Path(["", b"foo\xe9"])
    out, err = capsys.readouterr()
    assert repr(b'foo\xe9') in err
    assert repr(b"foo\xe9") in err


def test_has_drive_letter(monkeypatch):
    monkeypatch.setattr(os, 'sep', '\\')
    p = Path('foo\\bar')
    monkeypatch.setattr(os, "sep", "\\")
    p = Path("foo\\bar")
    assert not p.has_drive_letter()
    p = Path('C:\\')
    p = Path("C:\\")
    assert p.has_drive_letter()
    p = Path('z:\\foo')
    p = Path("z:\\foo")
    assert p.has_drive_letter()


def test_remove_drive_letter(monkeypatch):
    monkeypatch.setattr(os, 'sep', '\\')
    p = Path('foo\\bar')
    eq_(p.remove_drive_letter(), Path('foo\\bar'))
    p = Path('C:\\')
    eq_(p.remove_drive_letter(), Path(''))
    p = Path('z:\\foo')
    eq_(p.remove_drive_letter(), Path('foo'))
    monkeypatch.setattr(os, "sep", "\\")
    p = Path("foo\\bar")
    eq_(p.remove_drive_letter(), Path("foo\\bar"))
    p = Path("C:\\")
    eq_(p.remove_drive_letter(), Path(""))
    p = Path("z:\\foo")
    eq_(p.remove_drive_letter(), Path("foo"))


def test_pathify():
    @pathify
    def foo(a: Path, b, c:Path):
    def foo(a: Path, b, c: Path):
        return a, b, c

    a, b, c = foo('foo', 0, c=Path('bar'))

    a, b, c = foo("foo", 0, c=Path("bar"))
    assert isinstance(a, Path)
    assert a == Path('foo')
    assert a == Path("foo")
    assert b == 0
    assert isinstance(c, Path)
    assert c == Path('bar')
    assert c == Path("bar")


def test_pathify_preserve_none():
    # @pathify preserves None value and doesn't try to return a Path
    @pathify
    def foo(a: Path):
        return a

    a = foo(None)
    assert a is None
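
# A plausible sketch of the @pathify decorator exercised above: convert every
# argument annotated with `Path` to a Path, leaving None alone (this is an
# illustrative guess at the mechanism, not hscommon's actual code; it assumes
# `Path` from ..path is in scope):

import inspect
from functools import wraps


def pathify(func):
    sig = inspect.signature(func)

    @wraps(func)
    def wrapped(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        for argname, value in bound.arguments.items():
            annotation = sig.parameters[argname].annotation
            if annotation is Path and value is not None:
                # Path(Path(...)) returns the value itself, as tested above.
                bound.arguments[argname] = Path(value)
        return func(*bound.args, **bound.kwargs)

    return wrapped
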
@ -1,14 +1,15 @@
# Created By: Virgil Dupras
# Created On: 2011-09-06
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ..testutil import eq_, callcounter, CallLogger
from ..gui.selectable_list import SelectableList, GUISelectableList


def test_in():
    # When a SelectableList is in a list, doing "in list" with another instance returns false, even
    # if they're the same as lists.

@ -16,50 +17,56 @@ def test_in():
    some_list = [sl]
    assert SelectableList() not in some_list


def test_selection_range():
    # selection is correctly adjusted on deletion
    sl = SelectableList(['foo', 'bar', 'baz'])
    sl = SelectableList(["foo", "bar", "baz"])
    sl.selected_index = 3
    eq_(sl.selected_index, 2)
    del sl[2]
    eq_(sl.selected_index, 1)


def test_update_selection_called():
    # _update_selection is called after a change in selection. However, we only do so on select()
    # calls. I follow the old behavior of the Table class. At the moment, I don't quite remember
    # why there was a specific select() method for triggering _update_selection(), but I think I
    # remember there was a reason, so I keep it that way.
    sl = SelectableList(['foo', 'bar'])
    sl = SelectableList(["foo", "bar"])
    sl._update_selection = callcounter()
    sl.select(1)
    eq_(sl._update_selection.callcount, 1)
    sl.selected_index = 0
    eq_(sl._update_selection.callcount, 1) # no call
    eq_(sl._update_selection.callcount, 1)  # no call


def test_guicalls():
    # A GUISelectableList appropriately calls its view.
    sl = GUISelectableList(['foo', 'bar'])
    sl = GUISelectableList(["foo", "bar"])
    sl.view = CallLogger()
    sl.view.check_gui_calls(['refresh']) # Upon setting the view, we get a call to refresh()
    sl[1] = 'baz'
    sl.view.check_gui_calls(['refresh'])
    sl.append('foo')
    sl.view.check_gui_calls(['refresh'])
    sl.view.check_gui_calls(
        ["refresh"]
    )  # Upon setting the view, we get a call to refresh()
    sl[1] = "baz"
    sl.view.check_gui_calls(["refresh"])
    sl.append("foo")
    sl.view.check_gui_calls(["refresh"])
    del sl[2]
    sl.view.check_gui_calls(['refresh'])
    sl.remove('baz')
    sl.view.check_gui_calls(['refresh'])
    sl.insert(0, 'foo')
    sl.view.check_gui_calls(['refresh'])
    sl.view.check_gui_calls(["refresh"])
    sl.remove("baz")
    sl.view.check_gui_calls(["refresh"])
    sl.insert(0, "foo")
    sl.view.check_gui_calls(["refresh"])
    sl.select(1)
    sl.view.check_gui_calls(['update_selection'])
    sl.view.check_gui_calls(["update_selection"])
    # XXX We have to give up on this for now because of a breakage it causes in the tables.
    # sl.select(1) # don't update when selection stays the same
    # gui.check_gui_calls([])


def test_search_by_prefix():
    sl = SelectableList(['foo', 'bAr', 'baZ'])
    eq_(sl.search_by_prefix('b'), 1)
    eq_(sl.search_by_prefix('BA'), 1)
    eq_(sl.search_by_prefix('BAZ'), 2)
    eq_(sl.search_by_prefix('BAZZ'), -1)
    sl = SelectableList(["foo", "bAr", "baZ"])
    eq_(sl.search_by_prefix("b"), 1)
    eq_(sl.search_by_prefix("BA"), 1)
    eq_(sl.search_by_prefix("BAZ"), 2)
    eq_(sl.search_by_prefix("BAZZ"), -1)
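
# The prefix search tested above is case-insensitive and returns -1 when no
# item matches. A minimal sketch of such a method, assuming SelectableList
# subclasses a list of strings (an assumption, not the actual implementation):

def search_by_prefix(self, prefix):
    prefix = prefix.lower()
    for index, item in enumerate(self):
        if item.lower().startswith(prefix):
            return index
    return -1
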
@ -2,8 +2,8 @@
# Created On: 2007/05/19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import time

@ -19,69 +19,75 @@ from ..sqlite import ThreadedConn
# Threading is hard to test. In a lot of those tests, a failure means that the test run will
# hang forever. Well... I don't know a better alternative.


def test_can_access_from_multiple_threads():
    def run():
        con.execute('insert into foo(bar) values(\'baz\')')

    con = ThreadedConn(':memory:', True)
    con.execute('create table foo(bar TEXT)')
        con.execute("insert into foo(bar) values('baz')")

    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    t = threading.Thread(target=run)
    t.start()
    t.join()
    result = con.execute('select * from foo')
    result = con.execute("select * from foo")
    eq_(1, len(result))
    eq_('baz', result[0][0])
    eq_("baz", result[0][0])


def test_exception_during_query():
    con = ThreadedConn(':memory:', True)
    con.execute('create table foo(bar TEXT)')
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    with raises(sqlite.OperationalError):
        con.execute('select * from bleh')
        con.execute("select * from bleh")


def test_not_autocommit(tmpdir):
    dbpath = str(tmpdir.join('foo.db'))
    dbpath = str(tmpdir.join("foo.db"))
    con = ThreadedConn(dbpath, False)
    con.execute('create table foo(bar TEXT)')
    con.execute('insert into foo(bar) values(\'baz\')')
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz')")
    del con
    #The data shouldn't have been inserted
    # The data shouldn't have been inserted
    con = ThreadedConn(dbpath, False)
    result = con.execute('select * from foo')
    result = con.execute("select * from foo")
    eq_(0, len(result))
    con.execute('insert into foo(bar) values(\'baz\')')
    con.execute("insert into foo(bar) values('baz')")
    con.commit()
    del con
    # Now the data should be there
    con = ThreadedConn(dbpath, False)
    result = con.execute('select * from foo')
    result = con.execute("select * from foo")
    eq_(1, len(result))


def test_rollback():
    con = ThreadedConn(':memory:', False)
    con.execute('create table foo(bar TEXT)')
    con.execute('insert into foo(bar) values(\'baz\')')
    con = ThreadedConn(":memory:", False)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz')")
    con.rollback()
    result = con.execute('select * from foo')
    result = con.execute("select * from foo")
    eq_(0, len(result))


def test_query_palceholders():
    con = ThreadedConn(':memory:', True)
    con.execute('create table foo(bar TEXT)')
    con.execute('insert into foo(bar) values(?)', ['baz'])
    result = con.execute('select * from foo')
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values(?)", ["baz"])
    result = con.execute("select * from foo")
    eq_(1, len(result))
    eq_('baz', result[0][0])
    eq_("baz", result[0][0])


def test_make_sure_theres_no_messup_between_queries():
    def run(expected_rowid):
        time.sleep(0.1)
        result = con.execute('select rowid from foo where rowid = ?', [expected_rowid])
        result = con.execute("select rowid from foo where rowid = ?", [expected_rowid])
        assert expected_rowid == result[0][0]

    con = ThreadedConn(':memory:', True)
    con.execute('create table foo(bar TEXT)')

    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    for i in range(100):
        con.execute('insert into foo(bar) values(\'baz\')')
        con.execute("insert into foo(bar) values('baz')")
    threads = []
    for i in range(1, 101):
        t = threading.Thread(target=run, args=(i,))

@ -91,36 +97,41 @@ def test_make_sure_theres_no_messup_between_queries():
        time.sleep(0.1)
        threads = [t for t in threads if t.isAlive()]


def test_query_after_close():
    con = ThreadedConn(':memory:', True)
    con = ThreadedConn(":memory:", True)
    con.close()
    con.execute('select 1')
    con.execute("select 1")


def test_lastrowid():
    # It's not possible to return a cursor because of the threading, but lastrowid should be
    # fetchable from the connection itself
    con = ThreadedConn(':memory:', True)
    con.execute('create table foo(bar TEXT)')
    con.execute('insert into foo(bar) values(\'baz\')')
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz')")
    eq_(1, con.lastrowid)


def test_add_fetchone_fetchall_interface_to_results():
    con = ThreadedConn(':memory:', True)
    con.execute('create table foo(bar TEXT)')
    con.execute('insert into foo(bar) values(\'baz1\')')
    con.execute('insert into foo(bar) values(\'baz2\')')
    result = con.execute('select * from foo')
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz1')")
    con.execute("insert into foo(bar) values('baz2')")
    result = con.execute("select * from foo")
    ref = result[:]
    eq_(ref, result.fetchall())
    eq_(ref[0], result.fetchone())
    eq_(ref[1], result.fetchone())
    assert result.fetchone() is None


def test_non_ascii_dbname(tmpdir):
    ThreadedConn(str(tmpdir.join('foo\u00e9.db')), True)
    ThreadedConn(str(tmpdir.join("foo\u00e9.db")), True)


def test_non_ascii_dbdir(tmpdir):
    # when this test fails, it doesn't fail gracefully, it brings the whole test suite with it.
    dbdir = tmpdir.join('foo\u00e9')
    dbdir = tmpdir.join("foo\u00e9")
    os.mkdir(str(dbdir))
    ThreadedConn(str(dbdir.join('foo.db')), True)
    ThreadedConn(str(dbdir.join("foo.db")), True)
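
# An sqlite3 connection must be used from the thread that created it, so
# ThreadedConn's natural shape is a single worker thread that owns the
# connection and serializes queries sent through a queue. A stripped-down
# sketch of that pattern (an assumption for illustration; the real class also
# offers commit/rollback/lastrowid and a richer result object):

import queue
import sqlite3
import threading


class MiniThreadedConn:
    def __init__(self, dbname, autocommit):
        self._queue = queue.Queue()
        self._closed = False
        self._thread = threading.Thread(
            target=self._run, args=(dbname, autocommit), daemon=True
        )
        self._thread.start()

    def _run(self, dbname, autocommit):
        # The connection lives and dies in this thread only.
        con = sqlite3.connect(dbname, isolation_level=None if autocommit else "")
        while True:
            sql, values, result_box, done = self._queue.get()
            if sql is None:  # close sentinel
                con.close()
                done.set()
                break
            try:
                result_box.append(con.execute(sql, values).fetchall())
            except Exception as e:
                result_box.append(e)  # re-raised in the calling thread
            done.set()

    def execute(self, sql, values=()):
        if self._closed:
            return None  # queries after close() are silently ignored
        result_box, done = [], threading.Event()
        self._queue.put((sql, values, result_box, done))
        done.wait()
        result = result_box[0]
        if isinstance(result, Exception):
            raise result
        return result

    def close(self):
        done = threading.Event()
        self._queue.put((None, None, None, done))
        done.wait()
        self._closed = True
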
@ -9,6 +9,7 @@
from ..testutil import CallLogger, eq_
from ..gui.table import Table, GUITable, Row


class TestRow(Row):
    def __init__(self, table, index, is_new=False):
        Row.__init__(self, table)

@ -55,6 +56,7 @@ def table_with_footer():
    table.footer = footer
    return table, footer


def table_with_header():
    table = Table()
    table.append(TestRow(table, 1))

@ -62,24 +64,28 @@ def table_with_header():
    table.header = header
    return table, header

#--- Tests

# --- Tests
def test_allow_edit_when_attr_is_property_with_fset():
    # When a row has a property that has a fset, by default, make that cell editable.
    class TestRow(Row):
        @property
        def foo(self):
            pass

        @property
        def bar(self):
            pass

        @bar.setter
        def bar(self, value):
            pass

    row = TestRow(Table())
    assert row.can_edit_cell('bar')
    assert not row.can_edit_cell('foo')
    assert not row.can_edit_cell('baz') # doesn't exist, can't edit
    assert row.can_edit_cell("bar")
    assert not row.can_edit_cell("foo")
    assert not row.can_edit_cell("baz")  # doesn't exist, can't edit


def test_can_edit_prop_has_priority_over_fset_checks():
    # When a row has a can_edit_* property, it's the result of that property that is used, not the

@ -88,13 +94,16 @@ def test_can_edit_prop_has_priority_over_fset_checks():
        @property
        def bar(self):
            pass

        @bar.setter
        def bar(self, value):
            pass

        can_edit_bar = False

    row = TestRow(Table())
    assert not row.can_edit_cell('bar')
    assert not row.can_edit_cell("bar")


def test_in():
    # When a table is in a list, doing "in list" with another instance returns false, even if

@ -103,12 +112,14 @@ def test_in():
    some_list = [table]
    assert Table() not in some_list


def test_footer_del_all():
    # Removing all rows doesn't crash when doing the footer check.
    table, footer = table_with_footer()
    del table[:]
    assert table.footer is None


def test_footer_del_row():
    # Removing the footer row sets it to None
    table, footer = table_with_footer()

@ -116,18 +127,21 @@ def test_footer_del_row():
    assert table.footer is None
    eq_(len(table), 1)


def test_footer_is_appened_to_table():
    # A footer is appended at the table's bottom
    table, footer = table_with_footer()
    eq_(len(table), 2)
    assert table[1] is footer


def test_footer_remove():
    # remove() on footer sets it to None
    table, footer = table_with_footer()
    table.remove(footer)
    assert table.footer is None


def test_footer_replaces_old_footer():
    table, footer = table_with_footer()
    other = Row(table)

@ -136,18 +150,21 @@ def test_footer_replaces_old_footer():
    eq_(len(table), 2)
    assert table[1] is other


def test_footer_rows_and_row_count():
    # rows() and row_count() ignore footer.
    table, footer = table_with_footer()
    eq_(table.row_count, 1)
    eq_(table.rows, table[:-1])


def test_footer_setting_to_none_removes_old_one():
    table, footer = table_with_footer()
    table.footer = None
    assert table.footer is None
    eq_(len(table), 1)


def test_footer_stays_there_on_append():
    # Appending another row puts it above the footer
    table, footer = table_with_footer()

@ -155,6 +172,7 @@ def test_footer_stays_there_on_append():
    eq_(len(table), 3)
    assert table[2] is footer


def test_footer_stays_there_on_insert():
    # Inserting another row puts it above the footer
    table, footer = table_with_footer()

@ -162,12 +180,14 @@ def test_footer_stays_there_on_insert():
    eq_(len(table), 3)
    assert table[2] is footer


def test_header_del_all():
    # Removing all rows doesn't crash when doing the header check.
    table, header = table_with_header()
    del table[:]
    assert table.header is None


def test_header_del_row():
    # Removing the header row sets it to None
    table, header = table_with_header()

@ -175,18 +195,21 @@ def test_header_del_row():
    assert table.header is None
    eq_(len(table), 1)


def test_header_is_inserted_in_table():
    # A header is inserted at the table's top
    table, header = table_with_header()
    eq_(len(table), 2)
    assert table[0] is header


def test_header_remove():
    # remove() on header sets it to None
    table, header = table_with_header()
    table.remove(header)
    assert table.header is None


def test_header_replaces_old_header():
    table, header = table_with_header()
    other = Row(table)

@ -195,18 +218,21 @@ def test_header_replaces_old_header():
    eq_(len(table), 2)
    assert table[0] is other


def test_header_rows_and_row_count():
    # rows() and row_count() ignore header.
    table, header = table_with_header()
    eq_(table.row_count, 1)
    eq_(table.rows, table[1:])


def test_header_setting_to_none_removes_old_one():
    table, header = table_with_header()
    table.header = None
    assert table.header is None
    eq_(len(table), 1)


def test_header_stays_there_on_insert():
    # Inserting another row at the top puts it below the header
    table, header = table_with_header()

@ -214,21 +240,24 @@ def test_header_stays_there_on_insert():
    eq_(len(table), 3)
    assert table[0] is header


def test_refresh_view_on_refresh():
    # If refresh_view is not False, we refresh the table's view on refresh()
    table = TestGUITable(1)
    table.refresh()
    table.view.check_gui_calls(['refresh'])
    table.view.check_gui_calls(["refresh"])
    table.view.clear_calls()
    table.refresh(refresh_view=False)
    table.view.check_gui_calls([])


def test_restore_selection():
    # By default, after a refresh, selection goes on the last row
    table = TestGUITable(10)
    table.refresh()
    eq_(table.selected_indexes, [9])


def test_restore_selection_after_cancel_edits():
    # _restore_selection() is called after cancel_edits(). Previously, only _update_selection would
    # be called.

@ -242,6 +271,7 @@ def test_restore_selection_after_cancel_edits():
    table.cancel_edits()
    eq_(table.selected_indexes, [6])


def test_restore_selection_with_previous_selection():
    # By default, we try to restore the selection that was there before a refresh
    table = TestGUITable(10)

@ -250,6 +280,7 @@ def test_restore_selection_with_previous_selection():
    table.refresh()
    eq_(table.selected_indexes, [2, 4])


def test_restore_selection_custom():
    # After a _fill() call, the virtual _restore_selection() is called so that it's possible for a
    # GUITable subclass to customize its post-refresh selection behavior.

@ -261,58 +292,64 @@ def test_restore_selection_custom():
    table.refresh()
    eq_(table.selected_indexes, [6])


def test_row_cell_value():
    # *_cell_value() correctly mangles attrnames that are Python reserved words.
    row = Row(Table())
    row.from_ = 'foo'
    eq_(row.get_cell_value('from'), 'foo')
    row.set_cell_value('from', 'bar')
    eq_(row.get_cell_value('from'), 'bar')
    row.from_ = "foo"
    eq_(row.get_cell_value("from"), "foo")
    row.set_cell_value("from", "bar")
    eq_(row.get_cell_value("from"), "bar")


def test_sort_table_also_tries_attributes_without_underscores():
    # When determining a sort key, after having unsuccessfully tried the attribute with the
    # underscore, try the one without one.
    table = Table()
    row1 = Row(table)
    row1._foo = 'a' # underscored attr must be checked first
    row1.foo = 'b'
    row1.bar = 'c'
    row1._foo = "a"  # underscored attr must be checked first
    row1.foo = "b"
    row1.bar = "c"
    row2 = Row(table)
    row2._foo = 'b'
    row2.foo = 'a'
    row2.bar = 'b'
    row2._foo = "b"
    row2.foo = "a"
    row2.bar = "b"
    table.append(row1)
    table.append(row2)
    table.sort_by('foo')
    table.sort_by("foo")
    assert table[0] is row1
    assert table[1] is row2
    table.sort_by('bar')
    table.sort_by("bar")
    assert table[0] is row2
    assert table[1] is row1


def test_sort_table_updates_selection():
    table = TestGUITable(10)
    table.refresh()
    table.select([2, 4])
    table.sort_by('index', desc=True)
    table.sort_by("index", desc=True)
    # Now, the updated rows should be 7 and 5
    eq_(len(table.updated_rows), 2)
    r1, r2 = table.updated_rows
    eq_(r1.index, 7)
    eq_(r2.index, 5)


def test_sort_table_with_footer():
    # Sorting a table with a footer keeps it at the bottom
    table, footer = table_with_footer()
    table.sort_by('index', desc=True)
    table.sort_by("index", desc=True)
    assert table[-1] is footer


def test_sort_table_with_header():
    # Sorting a table with a header keeps it at the top
    table, header = table_with_header()
    table.sort_by('index', desc=True)
    table.sort_by("index", desc=True)
    assert table[0] is header


def test_add_with_view_that_saves_during_refresh():
    # Calling save_edits during refresh() called by add() is ignored.
    class TableView(CallLogger):

@ -321,5 +358,4 @@ def test_add_with_view_that_saves_during_refresh():

    table = TestGUITable(10, viewclass=TableView)
    table.add()
    assert table.edited is not None # still in edit mode

    assert table.edited is not None  # still in edit mode
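
# The sort tests above imply that sort_by() detaches the header and footer
# rows, sorts what's left, then reattaches them at the top and bottom. A
# sketch of that idea, assuming Table subclasses list and that a
# sort_key_for_column() helper exists (that helper name is hypothetical):

def sort_by(self, column_name, desc=False):
    header, footer = self.header, self.footer
    rows = [row for row in self if row is not header and row is not footer]
    # sort_key_for_column() would try row._column_name first, then row.column_name.
    rows.sort(key=lambda row: row.sort_key_for_column(column_name), reverse=desc)
    if header is not None:
        rows.insert(0, header)
    if footer is not None:
        rows.append(footer)
    self[:] = rows
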
@ -1,23 +1,25 @@
# Created By: Virgil Dupras
# Created On: 2010-02-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ..testutil import eq_
from ..gui.tree import Tree, Node


def tree_with_some_nodes():
    t = Tree()
    t.append(Node('foo'))
    t.append(Node('bar'))
    t.append(Node('baz'))
    t[0].append(Node('sub1'))
    t[0].append(Node('sub2'))
    t.append(Node("foo"))
    t.append(Node("bar"))
    t.append(Node("baz"))
    t[0].append(Node("sub1"))
    t[0].append(Node("sub2"))
    return t


def test_selection():
    t = tree_with_some_nodes()
    assert t.selected_node is None

@ -25,6 +27,7 @@ def test_selection():
    assert t.selected_path is None
    eq_(t.selected_paths, [])


def test_select_one_node():
    t = tree_with_some_nodes()
    t.selected_node = t[0][0]

@ -33,33 +36,39 @@ def test_select_one_node():
    eq_(t.selected_path, [0, 0])
    eq_(t.selected_paths, [[0, 0]])


def test_select_one_path():
    t = tree_with_some_nodes()
    t.selected_path = [0, 1]
    assert t.selected_node is t[0][1]


def test_select_multiple_nodes():
    t = tree_with_some_nodes()
    t.selected_nodes = [t[0], t[1]]
    eq_(t.selected_paths, [[0], [1]])


def test_select_multiple_paths():
    t = tree_with_some_nodes()
    t.selected_paths = [[0], [1]]
    eq_(t.selected_nodes, [t[0], t[1]])


def test_select_none_path():
    # setting selected_path to None clears the selection
    t = Tree()
    t.selected_path = None
    assert t.selected_path is None


def test_select_none_node():
    # setting selected_node to None clears the selection
    t = Tree()
    t.selected_node = None
    eq_(t.selected_nodes, [])


def test_clear_removes_selection():
    # When clearing a tree, we want to clear the selection as well or else we end up with a crash
    # when calling selected_paths.

@ -68,15 +77,16 @@ def test_clear_removes_selection():
    t.clear()
    assert t.selected_node is None


def test_selection_override():
    # All selection changes pass through the _select_node() method so it's easy for subclasses to
    # customize the tree's behavior.
    class MyTree(Tree):
        called = False

        def _select_nodes(self, nodes):
            self.called = True

    t = MyTree()
    t.selected_paths = []
    assert t.called

@ -84,26 +94,32 @@ def test_selection_override():
    t.selected_node = None
    assert t.called


def test_findall():
    t = tree_with_some_nodes()
    r = t.findall(lambda n: n.name.startswith('sub'))
    r = t.findall(lambda n: n.name.startswith("sub"))
    eq_(set(r), set([t[0][0], t[0][1]]))


def test_findall_dont_include_self():
    # When calling findall with include_self=False, the node itself is never evaluated.
    t = tree_with_some_nodes()
    del t._name # so that if the predicate is called on `t`, we crash
    r = t.findall(lambda n: not n.name.startswith('sub'), include_self=False) # no crash
    del t._name  # so that if the predicate is called on `t`, we crash
    r = t.findall(
        lambda n: not n.name.startswith("sub"), include_self=False
    )  # no crash
    eq_(set(r), set([t[0], t[1], t[2]]))


def test_find_dont_include_self():
    # When calling find with include_self=False, the node itself is never evaluated.
    t = tree_with_some_nodes()
    del t._name # so that if the predicate is called on `t`, we crash
    r = t.find(lambda n: not n.name.startswith('sub'), include_self=False) # no crash
    del t._name  # so that if the predicate is called on `t`, we crash
    r = t.find(lambda n: not n.name.startswith("sub"), include_self=False)  # no crash
    assert r is t[0]


def test_find_none():
    # when find() yields no result, return None
    t = Tree()
    assert t.find(lambda n: False) is None # no StopIteration exception
    assert t.find(lambda n: False) is None  # no StopIteration exception
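
# findall()/find() walk the tree recursively, and include_self=False skips
# evaluating the root node, which is exactly what the del-t._name tests rely
# on. A minimal sketch, assuming Node/Tree are list-like containers of child
# nodes (an assumption, not the actual hscommon code):

def findall(node, predicate, include_self=True):
    found = []
    if include_self and predicate(node):
        found.append(node)
    for child in node:
        found += findall(child, predicate, include_self=True)
    return found


def find(node, predicate, include_self=True):
    # Like findall(), but return only the first match; None when nothing matches.
    return next(iter(findall(node, predicate, include_self)), None)
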
@ -14,43 +14,53 @@ from ..testutil import eq_
from ..path import Path
from ..util import *


def test_nonone():
    eq_('foo', nonone('foo', 'bar'))
    eq_('bar', nonone(None, 'bar'))
    eq_("foo", nonone("foo", "bar"))
    eq_("bar", nonone(None, "bar"))


def test_tryint():
    eq_(42,tryint('42'))
    eq_(0,tryint('abc'))
    eq_(0,tryint(None))
    eq_(42,tryint(None, 42))
    eq_(42, tryint("42"))
    eq_(0, tryint("abc"))
    eq_(0, tryint(None))
    eq_(42, tryint(None, 42))


def test_minmax():
    eq_(minmax(2, 1, 3), 2)
    eq_(minmax(0, 1, 3), 1)
    eq_(minmax(4, 1, 3), 3)

#--- Sequence

# --- Sequence


def test_first():
    eq_(first([3, 2, 1]), 3)
    eq_(first(i for i in [3, 2, 1] if i < 3), 2)


def test_flatten():
    eq_([1,2,3,4],flatten([[1,2],[3,4]]))
    eq_([],flatten([]))
    eq_([1, 2, 3, 4], flatten([[1, 2], [3, 4]]))
    eq_([], flatten([]))


def test_dedupe():
    reflist = [0,7,1,2,3,4,4,5,6,7,1,2,3]
    eq_(dedupe(reflist),[0,7,1,2,3,4,5,6])
    reflist = [0, 7, 1, 2, 3, 4, 4, 5, 6, 7, 1, 2, 3]
    eq_(dedupe(reflist), [0, 7, 1, 2, 3, 4, 5, 6])


def test_stripfalse():
    eq_([1, 2, 3], stripfalse([None, 0, 1, 2, 3, None]))


def test_extract():
    wheat, shaft = extract(lambda n: n % 2 == 0, list(range(10)))
    eq_(wheat, [0, 2, 4, 6, 8])
    eq_(shaft, [1, 3, 5, 7, 9])


def test_allsame():
    assert allsame([42, 42, 42])
    assert not allsame([42, 43, 42])

@ -58,25 +68,32 @@ def test_allsame():
    # Works on non-sequence as well
    assert allsame(iter([42, 42, 42]))


def test_trailiter():
    eq_(list(trailiter([])), [])
    eq_(list(trailiter(['foo'])), [(None, 'foo')])
    eq_(list(trailiter(['foo', 'bar'])), [(None, 'foo'), ('foo', 'bar')])
    eq_(list(trailiter(['foo', 'bar'], skipfirst=True)), [('foo', 'bar')])
    eq_(list(trailiter([], skipfirst=True)), []) # no crash
    eq_(list(trailiter(["foo"])), [(None, "foo")])
    eq_(list(trailiter(["foo", "bar"])), [(None, "foo"), ("foo", "bar")])
    eq_(list(trailiter(["foo", "bar"], skipfirst=True)), [("foo", "bar")])
    eq_(list(trailiter([], skipfirst=True)), [])  # no crash


def test_iterconsume():
    # We just want to make sure that we return *all* items and that we're not mistakenly skipping
    # one.
    eq_(list(range(2500)), list(iterconsume(list(range(2500)))))
    eq_(list(reversed(range(2500))), list(iterconsume(list(range(2500)), reverse=False)))
    eq_(
        list(reversed(range(2500))), list(iterconsume(list(range(2500)), reverse=False))
    )


# --- String

#--- String

def test_escape():
    eq_('f\\o\\ob\\ar', escape('foobar', 'oa'))
    eq_('f*o*ob*ar', escape('foobar', 'oa', '*'))
    eq_('f*o*ob*ar', escape('foobar', set('oa'), '*'))
    eq_("f\\o\\ob\\ar", escape("foobar", "oa"))
    eq_("f*o*ob*ar", escape("foobar", "oa", "*"))
    eq_("f*o*ob*ar", escape("foobar", set("oa"), "*"))


def test_get_file_ext():
    eq_(get_file_ext("foobar"), "")

@ -84,146 +101,155 @@ def test_get_file_ext():
    eq_(get_file_ext("foobar."), "")
    eq_(get_file_ext(".foobar"), "foobar")


def test_rem_file_ext():
    eq_(rem_file_ext("foobar"), "foobar")
    eq_(rem_file_ext("foo.bar"), "foo")
    eq_(rem_file_ext("foobar."), "foobar")
    eq_(rem_file_ext(".foobar"), "")


def test_pluralize():
    eq_('0 song', pluralize(0,'song'))
    eq_('1 song', pluralize(1,'song'))
    eq_('2 songs', pluralize(2,'song'))
    eq_('1 song', pluralize(1.1,'song'))
    eq_('2 songs', pluralize(1.5,'song'))
    eq_('1.1 songs', pluralize(1.1,'song',1))
    eq_('1.5 songs', pluralize(1.5,'song',1))
    eq_('2 entries', pluralize(2,'entry', plural_word='entries'))
    eq_("0 song", pluralize(0, "song"))
    eq_("1 song", pluralize(1, "song"))
    eq_("2 songs", pluralize(2, "song"))
    eq_("1 song", pluralize(1.1, "song"))
    eq_("2 songs", pluralize(1.5, "song"))
    eq_("1.1 songs", pluralize(1.1, "song", 1))
    eq_("1.5 songs", pluralize(1.5, "song", 1))
    eq_("2 entries", pluralize(2, "entry", plural_word="entries"))
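
# pluralize() rounds to the requested number of decimals before picking the
# word form, which is why 1.1 gives "1 song" while 1.5 gives "2 songs" above.
# A sketch of that logic (an assumption, not the actual hscommon code):

def pluralize(number, word, decimals=0, plural_word=None):
    number = round(number, decimals)
    if decimals == 0:
        number = int(number)
    if number > 1:  # 0 and 1 both stay singular, as the tests show
        word = plural_word if plural_word is not None else word + "s"
    return "%s %s" % (number, word)
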
def test_format_time():
    eq_(format_time(0),'00:00:00')
    eq_(format_time(1),'00:00:01')
    eq_(format_time(23),'00:00:23')
    eq_(format_time(60),'00:01:00')
    eq_(format_time(101),'00:01:41')
    eq_(format_time(683),'00:11:23')
    eq_(format_time(3600),'01:00:00')
    eq_(format_time(3754),'01:02:34')
    eq_(format_time(36000),'10:00:00')
    eq_(format_time(366666),'101:51:06')
    eq_(format_time(0, with_hours=False),'00:00')
    eq_(format_time(1, with_hours=False),'00:01')
    eq_(format_time(23, with_hours=False),'00:23')
    eq_(format_time(60, with_hours=False),'01:00')
    eq_(format_time(101, with_hours=False),'01:41')
    eq_(format_time(683, with_hours=False),'11:23')
    eq_(format_time(3600, with_hours=False),'60:00')
    eq_(format_time(6036, with_hours=False),'100:36')
    eq_(format_time(60360, with_hours=False),'1006:00')
    eq_(format_time(0), "00:00:00")
    eq_(format_time(1), "00:00:01")
    eq_(format_time(23), "00:00:23")
    eq_(format_time(60), "00:01:00")
    eq_(format_time(101), "00:01:41")
    eq_(format_time(683), "00:11:23")
    eq_(format_time(3600), "01:00:00")
    eq_(format_time(3754), "01:02:34")
    eq_(format_time(36000), "10:00:00")
    eq_(format_time(366666), "101:51:06")
    eq_(format_time(0, with_hours=False), "00:00")
    eq_(format_time(1, with_hours=False), "00:01")
    eq_(format_time(23, with_hours=False), "00:23")
    eq_(format_time(60, with_hours=False), "01:00")
    eq_(format_time(101, with_hours=False), "01:41")
    eq_(format_time(683, with_hours=False), "11:23")
    eq_(format_time(3600, with_hours=False), "60:00")
    eq_(format_time(6036, with_hours=False), "100:36")
    eq_(format_time(60360, with_hours=False), "1006:00")


def test_format_time_decimal():
    eq_(format_time_decimal(0), '0.0 second')
    eq_(format_time_decimal(1), '1.0 second')
    eq_(format_time_decimal(23), '23.0 seconds')
    eq_(format_time_decimal(60), '1.0 minute')
    eq_(format_time_decimal(101), '1.7 minutes')
    eq_(format_time_decimal(683), '11.4 minutes')
    eq_(format_time_decimal(3600), '1.0 hour')
    eq_(format_time_decimal(6036), '1.7 hours')
    eq_(format_time_decimal(86400), '1.0 day')
    eq_(format_time_decimal(160360), '1.9 days')
    eq_(format_time_decimal(0), "0.0 second")
    eq_(format_time_decimal(1), "1.0 second")
    eq_(format_time_decimal(23), "23.0 seconds")
    eq_(format_time_decimal(60), "1.0 minute")
    eq_(format_time_decimal(101), "1.7 minutes")
    eq_(format_time_decimal(683), "11.4 minutes")
    eq_(format_time_decimal(3600), "1.0 hour")
    eq_(format_time_decimal(6036), "1.7 hours")
    eq_(format_time_decimal(86400), "1.0 day")
    eq_(format_time_decimal(160360), "1.9 days")


def test_format_size():
    eq_(format_size(1024), '1 KB')
    eq_(format_size(1024,2), '1.00 KB')
    eq_(format_size(1024,0,2), '1 MB')
    eq_(format_size(1024,2,2), '0.01 MB')
    eq_(format_size(1024,3,2), '0.001 MB')
    eq_(format_size(1024,3,2,False), '0.001')
    eq_(format_size(1023), '1023 B')
    eq_(format_size(1023,0,1), '1 KB')
    eq_(format_size(511,0,1), '1 KB')
    eq_(format_size(9), '9 B')
    eq_(format_size(99), '99 B')
    eq_(format_size(999), '999 B')
    eq_(format_size(9999), '10 KB')
    eq_(format_size(99999), '98 KB')
    eq_(format_size(999999), '977 KB')
    eq_(format_size(9999999), '10 MB')
    eq_(format_size(99999999), '96 MB')
    eq_(format_size(999999999), '954 MB')
    eq_(format_size(9999999999), '10 GB')
    eq_(format_size(99999999999), '94 GB')
    eq_(format_size(999999999999), '932 GB')
    eq_(format_size(9999999999999), '10 TB')
    eq_(format_size(99999999999999), '91 TB')
    eq_(format_size(999999999999999), '910 TB')
    eq_(format_size(9999999999999999), '9 PB')
    eq_(format_size(99999999999999999), '89 PB')
    eq_(format_size(999999999999999999), '889 PB')
    eq_(format_size(9999999999999999999), '9 EB')
    eq_(format_size(99999999999999999999), '87 EB')
    eq_(format_size(999999999999999999999), '868 EB')
    eq_(format_size(9999999999999999999999), '9 ZB')
    eq_(format_size(99999999999999999999999), '85 ZB')
    eq_(format_size(999999999999999999999999), '848 ZB')
    eq_(format_size(1024), "1 KB")
    eq_(format_size(1024, 2), "1.00 KB")
    eq_(format_size(1024, 0, 2), "1 MB")
    eq_(format_size(1024, 2, 2), "0.01 MB")
    eq_(format_size(1024, 3, 2), "0.001 MB")
    eq_(format_size(1024, 3, 2, False), "0.001")
    eq_(format_size(1023), "1023 B")
    eq_(format_size(1023, 0, 1), "1 KB")
    eq_(format_size(511, 0, 1), "1 KB")
    eq_(format_size(9), "9 B")
    eq_(format_size(99), "99 B")
    eq_(format_size(999), "999 B")
    eq_(format_size(9999), "10 KB")
    eq_(format_size(99999), "98 KB")
    eq_(format_size(999999), "977 KB")
    eq_(format_size(9999999), "10 MB")
    eq_(format_size(99999999), "96 MB")
    eq_(format_size(999999999), "954 MB")
    eq_(format_size(9999999999), "10 GB")
    eq_(format_size(99999999999), "94 GB")
    eq_(format_size(999999999999), "932 GB")
    eq_(format_size(9999999999999), "10 TB")
    eq_(format_size(99999999999999), "91 TB")
    eq_(format_size(999999999999999), "910 TB")
    eq_(format_size(9999999999999999), "9 PB")
    eq_(format_size(99999999999999999), "89 PB")
    eq_(format_size(999999999999999999), "889 PB")
    eq_(format_size(9999999999999999999), "9 EB")
    eq_(format_size(99999999999999999999), "87 EB")
    eq_(format_size(999999999999999999999), "868 EB")
    eq_(format_size(9999999999999999999999), "9 ZB")
    eq_(format_size(99999999999999999999999), "85 ZB")
    eq_(format_size(999999999999999999999999), "848 ZB")


def test_remove_invalid_xml():
    eq_(remove_invalid_xml('foo\0bar\x0bbaz'), 'foo bar baz')
    eq_(remove_invalid_xml("foo\0bar\x0bbaz"), "foo bar baz")
    # surrogate blocks have to be replaced, but not the rest
    eq_(remove_invalid_xml('foo\ud800bar\udfffbaz\ue000'), 'foo bar baz\ue000')
    eq_(remove_invalid_xml("foo\ud800bar\udfffbaz\ue000"), "foo bar baz\ue000")
    # replace with something else
    eq_(remove_invalid_xml('foo\0baz', replace_with='bar'), 'foobarbaz')
    eq_(remove_invalid_xml("foo\0baz", replace_with="bar"), "foobarbaz")


def test_multi_replace():
    eq_('136',multi_replace('123456',('2','45')))
    eq_('1 3 6',multi_replace('123456',('2','45'),' '))
    eq_('1 3 6',multi_replace('123456','245',' '))
    eq_('173896',multi_replace('123456','245','789'))
    eq_('173896',multi_replace('123456','245',('7','8','9')))
    eq_('17386',multi_replace('123456',('2','45'),'78'))
    eq_('17386',multi_replace('123456',('2','45'),('7','8')))
    eq_("136", multi_replace("123456", ("2", "45")))
    eq_("1 3 6", multi_replace("123456", ("2", "45"), " "))
    eq_("1 3 6", multi_replace("123456", "245", " "))
    eq_("173896", multi_replace("123456", "245", "789"))
    eq_("173896", multi_replace("123456", "245", ("7", "8", "9")))
    eq_("17386", multi_replace("123456", ("2", "45"), "78"))
    eq_("17386", multi_replace("123456", ("2", "45"), ("7", "8")))
|
||||
with raises(ValueError):
|
||||
multi_replace('123456',('2','45'),('7','8','9'))
|
||||
eq_('17346',multi_replace('12346',('2','45'),'78'))
|
||||
multi_replace("123456", ("2", "45"), ("7", "8", "9"))
|
||||
eq_("17346", multi_replace("12346", ("2", "45"), "78"))
|
||||
|
||||
|
||||
# --- Files
|
||||
|
||||
#--- Files
|
||||
|
||||
class TestCase_modified_after:
def test_first_is_modified_after(self, monkeyplus):
monkeyplus.patch_osstat('first', st_mtime=42)
monkeyplus.patch_osstat('second', st_mtime=41)
assert modified_after('first', 'second')
monkeyplus.patch_osstat("first", st_mtime=42)
monkeyplus.patch_osstat("second", st_mtime=41)
assert modified_after("first", "second")

def test_second_is_modified_after(self, monkeyplus):
monkeyplus.patch_osstat('first', st_mtime=42)
monkeyplus.patch_osstat('second', st_mtime=43)
assert not modified_after('first', 'second')
monkeyplus.patch_osstat("first", st_mtime=42)
monkeyplus.patch_osstat("second", st_mtime=43)
assert not modified_after("first", "second")

def test_same_mtime(self, monkeyplus):
monkeyplus.patch_osstat('first', st_mtime=42)
monkeyplus.patch_osstat('second', st_mtime=42)
assert not modified_after('first', 'second')
monkeyplus.patch_osstat("first", st_mtime=42)
monkeyplus.patch_osstat("second", st_mtime=42)
assert not modified_after("first", "second")

def test_first_file_does_not_exist(self, monkeyplus):
# when the first file doesn't exist, we return False
monkeyplus.patch_osstat('second', st_mtime=42)
assert not modified_after('does_not_exist', 'second') # no crash
monkeyplus.patch_osstat("second", st_mtime=42)
assert not modified_after("does_not_exist", "second")  # no crash

def test_second_file_does_not_exist(self, monkeyplus):
# when the second file doesn't exist, we return True
monkeyplus.patch_osstat('first', st_mtime=42)
assert modified_after('first', 'does_not_exist') # no crash
monkeyplus.patch_osstat("first", st_mtime=42)
assert modified_after("first", "does_not_exist")  # no crash

def test_first_file_is_none(self, monkeyplus):
# when the first file is None, we return False
monkeyplus.patch_osstat('second', st_mtime=42)
assert not modified_after(None, 'second') # no crash
monkeyplus.patch_osstat("second", st_mtime=42)
assert not modified_after(None, "second")  # no crash

def test_second_file_is_none(self, monkeyplus):
# when the second file is None, we return True
monkeyplus.patch_osstat('first', st_mtime=42)
assert modified_after('first', None) # no crash
monkeyplus.patch_osstat("first", st_mtime=42)
assert modified_after("first", None)  # no crash
class TestCase_delete_if_empty:
@ -234,92 +260,91 @@ class TestCase_delete_if_empty:

def test_not_empty(self, tmpdir):
testpath = Path(str(tmpdir))
testpath['foo'].mkdir()
testpath["foo"].mkdir()
assert not delete_if_empty(testpath)
assert testpath.exists()

def test_with_files_to_delete(self, tmpdir):
testpath = Path(str(tmpdir))
testpath['foo'].open('w')
testpath['bar'].open('w')
assert delete_if_empty(testpath, ['foo', 'bar'])
testpath["foo"].open("w")
testpath["bar"].open("w")
assert delete_if_empty(testpath, ["foo", "bar"])
assert not testpath.exists()

def test_directory_in_files_to_delete(self, tmpdir):
testpath = Path(str(tmpdir))
testpath['foo'].mkdir()
assert not delete_if_empty(testpath, ['foo'])
testpath["foo"].mkdir()
assert not delete_if_empty(testpath, ["foo"])
assert testpath.exists()

def test_delete_files_to_delete_only_if_dir_is_empty(self, tmpdir):
testpath = Path(str(tmpdir))
testpath['foo'].open('w')
testpath['bar'].open('w')
assert not delete_if_empty(testpath, ['foo'])
testpath["foo"].open("w")
testpath["bar"].open("w")
assert not delete_if_empty(testpath, ["foo"])
assert testpath.exists()
assert testpath['foo'].exists()
assert testpath["foo"].exists()

def test_doesnt_exist(self):
# When the 'path' doesn't exist, just do nothing.
delete_if_empty(Path('does_not_exist')) # no crash
delete_if_empty(Path("does_not_exist"))  # no crash

def test_is_file(self, tmpdir):
# When 'path' is a file, do nothing.
p = Path(str(tmpdir)) + 'filename'
p.open('w').close()
delete_if_empty(p) # no crash
p = Path(str(tmpdir)) + "filename"
p.open("w").close()
delete_if_empty(p)  # no crash

def test_ioerror(self, tmpdir, monkeypatch):
# if an IO error happens during the operation, ignore it.
def do_raise(*args, **kw):
raise OSError()

monkeypatch.setattr(Path, 'rmdir', do_raise)
delete_if_empty(Path(str(tmpdir))) # no crash
monkeypatch.setattr(Path, "rmdir", do_raise)
delete_if_empty(Path(str(tmpdir)))  # no crash
class TestCase_open_if_filename:
def test_file_name(self, tmpdir):
filepath = str(tmpdir.join('test.txt'))
open(filepath, 'wb').write(b'test_data')
filepath = str(tmpdir.join("test.txt"))
open(filepath, "wb").write(b"test_data")
file, close = open_if_filename(filepath)
assert close
eq_(b'test_data', file.read())
eq_(b"test_data", file.read())
file.close()

def test_opened_file(self):
sio = StringIO()
sio.write('test_data')
sio.write("test_data")
sio.seek(0)
file, close = open_if_filename(sio)
assert not close
eq_('test_data', file.read())
eq_("test_data", file.read())

def test_mode_is_passed_to_open(self, tmpdir):
filepath = str(tmpdir.join('test.txt'))
open(filepath, 'w').close()
file, close = open_if_filename(filepath, 'a')
eq_('a', file.mode)
filepath = str(tmpdir.join("test.txt"))
open(filepath, "w").close()
file, close = open_if_filename(filepath, "a")
eq_("a", file.mode)
file.close()
class TestCase_FileOrPath:
def test_path(self, tmpdir):
filepath = str(tmpdir.join('test.txt'))
open(filepath, 'wb').write(b'test_data')
filepath = str(tmpdir.join("test.txt"))
open(filepath, "wb").write(b"test_data")
with FileOrPath(filepath) as fp:
eq_(b'test_data', fp.read())
eq_(b"test_data", fp.read())

def test_opened_file(self):
sio = StringIO()
sio.write('test_data')
sio.write("test_data")
sio.seek(0)
with FileOrPath(sio) as fp:
eq_('test_data', fp.read())
eq_("test_data", fp.read())

def test_mode_is_passed_to_open(self, tmpdir):
filepath = str(tmpdir.join('test.txt'))
open(filepath, 'w').close()
with FileOrPath(filepath, 'a') as fp:
eq_('a', fp.mode)
filepath = str(tmpdir.join("test.txt"))
open(filepath, "w").close()
with FileOrPath(filepath, "a") as fp:
eq_("a", fp.mode)
@ -9,10 +9,12 @@
import threading
import py.path


def eq_(a, b, msg=None):
__tracebackhide__ = True
assert a == b, msg or "%r != %r" % (a, b)


def eq_sorted(a, b, msg=None):
"""If both a and b are iterable sort them and compare using eq_, otherwise just pass them through to eq_ anyway."""
try:
@ -20,10 +22,12 @@ def eq_sorted(a, b, msg=None):
except TypeError:
eq_(a, b, msg)


def assert_almost_equal(a, b, places=7):
__tracebackhide__ = True
assert round(a, ndigits=places) == round(b, ndigits=places)


def callcounter():
def f(*args, **kwargs):
f.callcount += 1
@ -31,6 +35,7 @@ def callcounter():
f.callcount = 0
return f


class TestData:
def __init__(self, datadirpath):
self.datadirpath = py.path.local(datadirpath)
@ -53,12 +58,14 @@ class CallLogger:

It is used to simulate the GUI layer.
"""

def __init__(self):
self.calls = []

def __getattr__(self, func_name):
def func(*args, **kw):
self.calls.append(func_name)

return func

def clear_calls(self):
@ -77,7 +84,9 @@ class CallLogger:
eq_(set(self.calls), set(expected))
self.clear_calls()

def check_gui_calls_partial(self, expected=None, not_expected=None, verify_order=False):
def check_gui_calls_partial(
self, expected=None, not_expected=None, verify_order=False
):
"""Checks that the expected calls have been made to 'self', then clears the log.

`expected` is an iterable of strings representing method names. Order doesn't matter.
@ -88,17 +97,25 @@ class CallLogger:
__tracebackhide__ = True
if expected is not None:
not_called = set(expected) - set(self.calls)
assert not not_called, "These calls haven't been made: {0}".format(not_called)
assert not not_called, "These calls haven't been made: {0}".format(
not_called
)
if verify_order:
max_index = 0
for call in expected:
index = self.calls.index(call)
if index < max_index:
raise AssertionError("The call {0} hasn't been made in the correct order".format(call))
raise AssertionError(
"The call {0} hasn't been made in the correct order".format(
call
)
)
max_index = index
if not_expected is not None:
called = set(not_expected) & set(self.calls)
assert not called, "These calls shouldn't have been made: {0}".format(called)
assert not called, "These calls shouldn't have been made: {0}".format(
called
)
self.clear_calls()
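
# A minimal sketch of how CallLogger doubles as a fake GUI view in tests
# (a sketch, not from the diff; the method names here are hypothetical --
# __getattr__ records any attribute access as a call):
from hscommon.testutil import CallLogger

view = CallLogger()
view.refresh()
view.show_selected()
view.check_gui_calls_partial(expected=["refresh"])  # passes, then clears the log
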
@ -124,7 +141,7 @@ class TestApp:
parent = self.default_parent
if holder is None:
holder = self
setattr(holder, '{0}_gui'.format(name), view)
setattr(holder, "{0}_gui".format(name), view)
gui = class_(parent)
gui.view = view
setattr(holder, name, gui)
@ -136,38 +153,44 @@ def with_app(setupfunc):
def decorator(func):
func.setupfunc = setupfunc
return func

return decorator


def pytest_funcarg__app(request):
setupfunc = request.function.setupfunc
if hasattr(setupfunc, '__code__'):
argnames = setupfunc.__code__.co_varnames[:setupfunc.__code__.co_argcount]
if hasattr(setupfunc, "__code__"):
argnames = setupfunc.__code__.co_varnames[: setupfunc.__code__.co_argcount]

def getarg(name):
if name == 'self':
if name == "self":
return request.function.__self__
else:
return request.getfixturevalue(name)

args = [getarg(argname) for argname in argnames]
else:
args = []
app = setupfunc(*args)
return app


def jointhreads():
"""Join all threads to the main thread"""
for thread in threading.enumerate():
if hasattr(thread, 'BUGGY'):
if hasattr(thread, "BUGGY"):
continue
if thread.getName() != 'MainThread' and thread.isAlive():
if hasattr(thread, 'close'):
if thread.getName() != "MainThread" and thread.isAlive():
if hasattr(thread, "close"):
thread.close()
thread.join(1)
if thread.isAlive():
print("Thread problem. Some thread doesn't want to stop.")
thread.BUGGY = True


def _unify_args(func, args, kwargs, args_to_ignore=None):
''' Unify args and kwargs in the same dictionary.
""" Unify args and kwargs in the same dictionary.

The result is kwargs with args added to it. func.func_code.co_varnames is used to determine
under what key each elements of arg will be mapped in kwargs.
@ -181,36 +204,40 @@ def _unify_args(func, args, kwargs, args_to_ignore=None):
def foo(bar, baz)
_unifyArgs(foo, (42,), {'baz': 23}) --> {'bar': 42, 'baz': 23}
_unifyArgs(foo, (42,), {'baz': 23}, ['bar']) --> {'baz': 23}
'''
"""
result = kwargs.copy()
if hasattr(func, '__code__'): # built-in functions don't have func_code
if hasattr(func, "__code__"):  # built-in functions don't have func_code
args = list(args)
if getattr(func, '__self__', None) is not None: # bound method, we have to add self to args list
if (
getattr(func, "__self__", None) is not None
):  # bound method, we have to add self to args list
args = [func.__self__] + args
defaults = list(func.__defaults__) if func.__defaults__ is not None else []
arg_count = func.__code__.co_argcount
arg_names = list(func.__code__.co_varnames)
if len(args) < arg_count: # We have default values
if len(args) < arg_count:  # We have default values
required_arg_count = arg_count - len(args)
args = args + defaults[-required_arg_count:]
for arg_name, arg in zip(arg_names, args):
# setdefault is used because if the arg is already in kwargs, we don't want to use default values
result.setdefault(arg_name, arg)
else:
#'func' has a *args argument
result['args'] = args
# 'func' has a *args argument
result["args"] = args
if args_to_ignore:
for kw in args_to_ignore:
del result[kw]
return result


def log_calls(func):
''' Logs all func calls' arguments under func.calls.
""" Logs all func calls' arguments under func.calls.

func.calls is a list of _unify_args() result (dict).

Mostly used for unit testing.
'''
"""

def wrapper(*args, **kwargs):
unifiedArgs = _unify_args(func, args, kwargs)
wrapper.calls.append(unifiedArgs)
@ -218,4 +245,3 @@ def log_calls(func):

wrapper.calls = []
return wrapper
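
# A small usage sketch for log_calls (assumes hscommon.testutil is importable):
from hscommon.testutil import log_calls

@log_calls
def add(a, b):
    return a + b

add(1, b=2)
assert add.calls == [{"a": 1, "b": 2}]  # args and kwargs unified into one dict
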
@ -19,6 +19,7 @@ _trfunc = None
_trget = None
installed_lang = None


def tr(s, context=None):
if _trfunc is None:
return s
@ -28,6 +29,7 @@ def tr(s, context=None):
else:
return _trfunc(s)


def trget(domain):
# Returns a tr() function for the specified domain.
if _trget is None:
@ -35,57 +37,61 @@ def trget(domain):
else:
return _trget(domain)


def set_tr(new_tr, new_trget=None):
global _trfunc, _trget
_trfunc = new_tr
if new_trget is not None:
_trget = new_trget
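
# A minimal sketch of the tr()/set_tr() plumbing (assumes hscommon.trans is
# importable in a fresh process; the upper-casing "translator" is hypothetical):
from hscommon.trans import tr, set_tr

assert tr("hello") == "hello"  # no translator installed yet: identity
set_tr(lambda s: s.upper())
assert tr("hello") == "HELLO"  # now routed through the installed function
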
def get_locale_name(lang):
if ISWINDOWS:
# http://msdn.microsoft.com/en-us/library/39cwe7zf(vs.71).aspx
LANG2LOCALENAME = {
'cs': 'czy',
'de': 'deu',
'el': 'grc',
'es': 'esn',
'fr': 'fra',
'it': 'ita',
'ko': 'korean',
'nl': 'nld',
'pl_PL': 'polish_poland',
'pt_BR': 'ptb',
'ru': 'rus',
'zh_CN': 'chs',
"cs": "czy",
"de": "deu",
"el": "grc",
"es": "esn",
"fr": "fra",
"it": "ita",
"ko": "korean",
"nl": "nld",
"pl_PL": "polish_poland",
"pt_BR": "ptb",
"ru": "rus",
"zh_CN": "chs",
}
else:
LANG2LOCALENAME = {
'cs': 'cs_CZ',
'de': 'de_DE',
'el': 'el_GR',
'es': 'es_ES',
'fr': 'fr_FR',
'it': 'it_IT',
'nl': 'nl_NL',
'hy': 'hy_AM',
'ko': 'ko_KR',
'pl_PL': 'pl_PL',
'pt_BR': 'pt_BR',
'ru': 'ru_RU',
'uk': 'uk_UA',
'vi': 'vi_VN',
'zh_CN': 'zh_CN',
"cs": "cs_CZ",
"de": "de_DE",
"el": "el_GR",
"es": "es_ES",
"fr": "fr_FR",
"it": "it_IT",
"nl": "nl_NL",
"hy": "hy_AM",
"ko": "ko_KR",
"pl_PL": "pl_PL",
"pt_BR": "pt_BR",
"ru": "ru_RU",
"uk": "uk_UA",
"vi": "vi_VN",
"zh_CN": "zh_CN",
}
if lang not in LANG2LOCALENAME:
return None
result = LANG2LOCALENAME[lang]
if ISLINUX:
result += '.UTF-8'
result += ".UTF-8"
return result
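
# Behavior sketch: the mapping above is platform-dependent, and unknown
# languages yield None.
from hscommon.trans import get_locale_name

print(get_locale_name("fr"))  # "fra" on Windows, "fr_FR.UTF-8" on Linux
print(get_locale_name("xx"))  # None: not in LANG2LOCALENAME
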
#--- Qt
# --- Qt
def install_qt_trans(lang=None):
from PyQt5.QtCore import QCoreApplication, QTranslator, QLocale

if not lang:
lang = str(QLocale.system().name())[:2]
localename = get_locale_name(lang)
@ -95,54 +101,66 @@ def install_qt_trans(lang=None):
except locale.Error:
logging.warning("Couldn't set locale %s", localename)
else:
lang = 'en'
lang = "en"
qtr1 = QTranslator(QCoreApplication.instance())
qtr1.load(':/qt_%s' % lang)
qtr1.load(":/qt_%s" % lang)
QCoreApplication.installTranslator(qtr1)
qtr2 = QTranslator(QCoreApplication.instance())
qtr2.load(':/%s' % lang)
qtr2.load(":/%s" % lang)
QCoreApplication.installTranslator(qtr2)
def qt_tr(s, context='core'):

def qt_tr(s, context="core"):
return str(QCoreApplication.translate(context, s, None))

set_tr(qt_tr)

#--- gettext

# --- gettext
def install_gettext_trans(base_folder, lang):
import gettext

def gettext_trget(domain):
if not lang:
return lambda s: s
try:
return gettext.translation(domain, localedir=base_folder, languages=[lang]).gettext
return gettext.translation(
domain, localedir=base_folder, languages=[lang]
).gettext
except IOError:
return lambda s: s

default_gettext = gettext_trget('core')
default_gettext = gettext_trget("core")

def gettext_tr(s, context=None):
if not context:
return default_gettext(s)
else:
trfunc = gettext_trget(context)
return trfunc(s)

set_tr(gettext_tr, gettext_trget)
global installed_lang
installed_lang = lang


def install_gettext_trans_under_cocoa():
from cocoa import proxy

resFolder = proxy.getResourcePath()
baseFolder = op.join(resFolder, 'locale')
baseFolder = op.join(resFolder, "locale")
currentLang = proxy.systemLang()
install_gettext_trans(baseFolder, currentLang)
localename = get_locale_name(currentLang)
if localename is not None:
locale.setlocale(locale.LC_ALL, localename)


def install_gettext_trans_under_qt(base_folder, lang=None):
# So, we install the gettext locale, great, but we also should try to install qt_*.qm if
# available so that strings that are inside Qt itself over which I have no control are in the
# right language.
from PyQt5.QtCore import QCoreApplication, QTranslator, QLocale, QLibraryInfo

if not lang:
lang = str(QLocale.system().name())[:2]
localename = get_locale_name(lang)
@ -151,7 +169,7 @@ def install_gettext_trans_under_qt(base_folder, lang=None):
locale.setlocale(locale.LC_ALL, localename)
except locale.Error:
logging.warning("Couldn't set locale %s", localename)
qmname = 'qt_%s' % lang
qmname = "qt_%s" % lang
if ISLINUX:
# Under linux, a full Qt installation is already available in the system, we didn't bundle
# up the qm files in our package, so we have to load translations from the system.
113
hscommon/util.py
@ -17,6 +17,7 @@ from datetime import timedelta

from .path import Path, pathify, log_io_error


def nonone(value, replace_value):
"""Returns ``value`` if ``value`` is not ``None``. Returns ``replace_value`` otherwise.
"""
@ -25,6 +26,7 @@ def nonone(value, replace_value):
else:
return value


def tryint(value, default=0):
"""Tries to convert ``value`` to in ``int`` and returns ``default`` if it fails.
"""
@ -33,12 +35,15 @@ def tryint(value, default=0):
except (TypeError, ValueError):
return default


def minmax(value, min_value, max_value):
"""Returns `value` or one of the min/max bounds if `value` is not between them.
"""
return min(max(value, min_value), max_value)

#--- Sequence related

# --- Sequence related


def dedupe(iterable):
"""Returns a list of elements in ``iterable`` with all dupes removed.
@ -54,6 +59,7 @@ def dedupe(iterable):
result.append(item)
return result


def flatten(iterables, start_with=None):
"""Takes a list of lists ``iterables`` and returns a list containing elements of every list.

@ -67,6 +73,7 @@ def flatten(iterables, start_with=None):
result.extend(iterable)
return result


def first(iterable):
"""Returns the first item of ``iterable``.
"""
@ -75,11 +82,13 @@ def first(iterable):
except StopIteration:
return None


def stripfalse(seq):
"""Returns a sequence with all false elements stripped out of seq.
"""
return [x for x in seq if x]


def extract(predicate, iterable):
"""Separates the wheat from the shaft (`predicate` defines what's the wheat), and returns both.
"""
@ -92,6 +101,7 @@ def extract(predicate, iterable):
shaft.append(item)
return wheat, shaft


def allsame(iterable):
"""Returns whether all elements of 'iterable' are the same.
"""
@ -102,6 +112,7 @@ def allsame(iterable):
raise ValueError("iterable cannot be empty")
return all(element == first_item for element in it)


def trailiter(iterable, skipfirst=False):
"""Yields (prev_element, element), starting with (None, first_element).

@ -120,6 +131,7 @@ def trailiter(iterable, skipfirst=False):
yield prev, item
prev = item


def iterconsume(seq, reverse=True):
"""Iterate over ``seq`` and pops yielded objects.

@ -135,31 +147,36 @@ def iterconsume(seq, reverse=True):
while seq:
yield seq.pop()

#--- String related

def escape(s, to_escape, escape_with='\\'):
# --- String related


def escape(s, to_escape, escape_with="\\"):
"""Returns ``s`` with characters in ``to_escape`` all prepended with ``escape_with``.
"""
return ''.join((escape_with + c if c in to_escape else c) for c in s)
return "".join((escape_with + c if c in to_escape else c) for c in s)


def get_file_ext(filename):
"""Returns the lowercase extension part of filename, without the dot.
"""
pos = filename.rfind('.')
pos = filename.rfind(".")
if pos > -1:
return filename[pos + 1:].lower()
return filename[pos + 1 :].lower()
else:
return ''
return ""


def rem_file_ext(filename):
"""Returns the filename without extension.
"""
pos = filename.rfind('.')
pos = filename.rfind(".")
if pos > -1:
return filename[:pos]
else:
return filename


def pluralize(number, word, decimals=0, plural_word=None):
"""Returns a pluralized string with ``number`` in front of ``word``.

@ -173,11 +190,12 @@ def pluralize(number, word, decimals=0, plural_word=None):
format = "%%1.%df %%s" % decimals
if number > 1:
if plural_word is None:
word += 's'
word += "s"
else:
word = plural_word
return format % (number, word)
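
# Grounded examples of pluralize(), as exercised by format_time_decimal() below:
from hscommon.util import pluralize

assert pluralize(1, "second", 1) == "1.0 second"
assert pluralize(23, "second", 1) == "23.0 seconds"
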
def format_time(seconds, with_hours=True):
"""Transforms seconds in a hh:mm:ss string.

@ -189,14 +207,15 @@ def format_time(seconds, with_hours=True):
m, s = divmod(seconds, 60)
if with_hours:
h, m = divmod(m, 60)
r = '%02d:%02d:%02d' % (h, m, s)
r = "%02d:%02d:%02d" % (h, m, s)
else:
r = '%02d:%02d' % (m,s)
r = "%02d:%02d" % (m, s)
if minus:
return '-' + r
return "-" + r
else:
return r
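
# Usage sketch, mirroring test_format_time above (assumes hscommon.util importable):
from hscommon.util import format_time

assert format_time(3754) == "01:02:34"
assert format_time(3600, with_hours=False) == "60:00"  # minutes aren't capped at 59
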
def format_time_decimal(seconds):
"""Transforms seconds in a strings like '3.4 minutes'.
"""
@ -204,20 +223,23 @@ def format_time_decimal(seconds):
if minus:
seconds *= -1
if seconds < 60:
r = pluralize(seconds, 'second', 1)
r = pluralize(seconds, "second", 1)
elif seconds < 3600:
r = pluralize(seconds / 60.0, 'minute', 1)
r = pluralize(seconds / 60.0, "minute", 1)
elif seconds < 86400:
r = pluralize(seconds / 3600.0, 'hour', 1)
r = pluralize(seconds / 3600.0, "hour", 1)
else:
r = pluralize(seconds / 86400.0, 'day', 1)
r = pluralize(seconds / 86400.0, "day", 1)
if minus:
return '-' + r
return "-" + r
else:
return r

SIZE_DESC = ('B','KB','MB','GB','TB','PB','EB','ZB','YB')
SIZE_VALS = tuple(1024 ** i for i in range(1,9))

SIZE_DESC = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
SIZE_VALS = tuple(1024 ** i for i in range(1, 9))


def format_size(size, decimal=0, forcepower=-1, showdesc=True):
"""Transform a byte count in a formatted string (KB, MB etc..).

@ -238,12 +260,12 @@ def format_size(size, decimal=0, forcepower=-1, showdesc=True):
else:
i = forcepower
if i > 0:
div = SIZE_VALS[i-1]
div = SIZE_VALS[i - 1]
else:
div = 1
format = '%%%d.%df' % (decimal,decimal)
format = "%%%d.%df" % (decimal, decimal)
negative = size < 0
divided_size = ((0.0 + abs(size)) / div)
divided_size = (0.0 + abs(size)) / div
if decimal == 0:
divided_size = ceil(divided_size)
else:
@ -252,18 +274,21 @@ def format_size(size, decimal=0, forcepower=-1, showdesc=True):
divided_size *= -1
result = format % divided_size
if showdesc:
result += ' ' + SIZE_DESC[i]
result += " " + SIZE_DESC[i]
return result
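
# Examples taken from test_format_size above: decimal sets precision,
# forcepower pins the unit, showdesc toggles the suffix.
from hscommon.util import format_size

assert format_size(1024) == "1 KB"
assert format_size(1024, 2, 2) == "0.01 MB"
assert format_size(1024, 3, 2, False) == "0.001"
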
_valid_xml_range = '\x09\x0A\x0D\x20-\uD7FF\uE000-\uFFFD'
if sys.maxunicode > 0x10000:
_valid_xml_range += '%s-%s' % (chr(0x10000), chr(min(sys.maxunicode, 0x10FFFF)))
RE_INVALID_XML_SUB = re.compile('[^%s]' % _valid_xml_range, re.U).sub

def remove_invalid_xml(s, replace_with=' '):
_valid_xml_range = "\x09\x0A\x0D\x20-\uD7FF\uE000-\uFFFD"
if sys.maxunicode > 0x10000:
_valid_xml_range += "%s-%s" % (chr(0x10000), chr(min(sys.maxunicode, 0x10FFFF)))
RE_INVALID_XML_SUB = re.compile("[^%s]" % _valid_xml_range, re.U).sub


def remove_invalid_xml(s, replace_with=" "):
return RE_INVALID_XML_SUB(replace_with, s)
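
# Behavior sketch, matching test_remove_invalid_xml above: characters outside
# the valid XML range are substituted.
from hscommon.util import remove_invalid_xml

assert remove_invalid_xml("foo\0bar\x0bbaz") == "foo bar baz"
assert remove_invalid_xml("foo\0baz", replace_with="bar") == "foobarbaz"
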
def multi_replace(s, replace_from, replace_to=''):
def multi_replace(s, replace_from, replace_to=""):
"""A function like str.replace() with multiple replacements.

``replace_from`` is a list of things you want to replace. Ex: ['a','bc','d']
@ -280,17 +305,20 @@ def multi_replace(s, replace_from, replace_to=''):
if isinstance(replace_to, str) and (len(replace_from) != len(replace_to)):
replace_to = [replace_to for r in replace_from]
if len(replace_from) != len(replace_to):
raise ValueError('len(replace_from) must be equal to len(replace_to)')
raise ValueError("len(replace_from) must be equal to len(replace_to)")
replace = list(zip(replace_from, replace_to))
for r_from, r_to in [r for r in replace if r[0] in s]:
s = s.replace(r_from, r_to)
return s
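
# Sketch from test_multi_replace above: replace_from/replace_to can be strings
# or sequences, but their lengths must match.
from hscommon.util import multi_replace

assert multi_replace("123456", ("2", "45"), "78") == "17386"
try:
    multi_replace("123456", ("2", "45"), ("7", "8", "9"))
except ValueError:
    pass  # len(replace_from) must equal len(replace_to)
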
#--- Date related

# --- Date related

# It might seem like needless namespace pollution, but the speedup gained by this constant is
# significant, so it stays.
ONE_DAY = timedelta(1)


def iterdaterange(start, end):
"""Yields every day between ``start`` and ``end``.
"""
@ -299,7 +327,9 @@ def iterdaterange(start, end):
yield date
date += ONE_DAY

#--- Files related

# --- Files related


@pathify
def modified_after(first_path: Path, second_path: Path):
@ -317,19 +347,21 @@ def modified_after(first_path: Path, second_path: Path):
return True
return first_mtime > second_mtime
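
# Behavior sketch (see TestCase_modified_after above): a missing or None second
# path yields True, a missing or None first path yields False. Real files stand
# in here for the patched os.stat used by the tests.
import os, tempfile
from hscommon.util import modified_after

with tempfile.TemporaryDirectory() as d:
    first = os.path.join(d, "first")
    open(first, "w").close()
    assert modified_after(first, os.path.join(d, "missing"))
    assert not modified_after(None, first)
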
def find_in_path(name, paths=None):
"""Search for `name` in all directories of `paths` and return the absolute path of the first
occurrence. If `paths` is None, $PATH is used.
"""
if paths is None:
paths = os.environ['PATH']
if isinstance(paths, str): # if it's not a string, it's already a list
paths = os.environ["PATH"]
if isinstance(paths, str):  # if it's not a string, it's already a list
paths = paths.split(os.pathsep)
for path in paths:
if op.exists(op.join(path, name)):
return op.join(path, name)
return None
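
# Usage sketch: look an executable up on $PATH, or in an explicit directory list.
from hscommon.util import find_in_path

print(find_in_path("ls"))            # e.g. "/bin/ls" on most Unix systems
print(find_in_path("ls", ["/tmp"]))  # None unless /tmp/ls exists
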
@log_io_error
@pathify
def delete_if_empty(path: Path, files_to_delete=[]):
@ -345,7 +377,8 @@ def delete_if_empty(path: Path, files_to_delete=[]):
path.rmdir()
return True

def open_if_filename(infile, mode='rb'):

def open_if_filename(infile, mode="rb"):
"""If ``infile`` is a string, it opens and returns it. If it's already a file object, it simply returns it.

This function returns ``(file, should_close_flag)``. The should_close_flag is True is a file has
@ -364,15 +397,18 @@ def open_if_filename(infile, mode='rb'):
else:
return (infile, False)
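
# Sketch mirroring TestCase_open_if_filename above: an already-open file
# object is passed through with a False close flag.
from io import StringIO
from hscommon.util import open_if_filename

fp, should_close = open_if_filename(StringIO("test_data"))
assert not should_close and fp.read() == "test_data"
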
def ensure_folder(path):
"Create `path` as a folder if it doesn't exist."
if not op.exists(path):
os.makedirs(path)


def ensure_file(path):
"Create `path` as an empty file if it doesn't exist."
if not op.exists(path):
open(path, 'w').close()
open(path, "w").close()


def delete_files_with_pattern(folder_path, pattern, recursive=True):
"""Delete all files (or folders) in `folder_path` that match the glob `pattern`.
@ -389,6 +425,7 @@ def delete_files_with_pattern(folder_path, pattern, recursive=True):
for p in subfolders:
delete_files_with_pattern(p, pattern, True)


class FileOrPath:
"""Does the same as :func:`open_if_filename`, but it can be used with a ``with`` statement.

@ -397,7 +434,8 @@ class FileOrPath:
with FileOrPath(infile):
dostuff()
"""
def __init__(self, file_or_path, mode='rb'):

def __init__(self, file_or_path, mode="rb"):
self.file_or_path = file_or_path
self.mode = mode
self.mustclose = False
@ -410,4 +448,3 @@ class FileOrPath:
def __exit__(self, exc_type, exc_value, traceback):
if self.fp and self.mustclose:
self.fp.close()
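
# Sketch: FileOrPath wraps the open_if_filename() behavior in a context manager.
from io import StringIO
from hscommon.util import FileOrPath

with FileOrPath(StringIO("test_data")) as fp:
    assert fp.read() == "test_data"
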
184
package.py
@ -15,16 +15,23 @@ import platform
import re

from hscommon.build import (
print_and_do, copy_packages, build_debian_changelog,
get_module_version, filereplace, copy, setup_package_argparser,
copy_all
print_and_do,
copy_packages,
build_debian_changelog,
get_module_version,
filereplace,
copy,
setup_package_argparser,
copy_all,
)


def parse_args():
parser = ArgumentParser()
setup_package_argparser(parser)
return parser.parse_args()


def copy_files_to_package(destpath, packages, with_so):
# when with_so is true, we keep .so files in the package, and otherwise, we don't. We need this
# flag because when building debian src pkg, we *don't* want .so files (they're compiled later)
@ -32,126 +39,162 @@ def copy_files_to_package(destpath, packages, with_so):
if op.exists(destpath):
shutil.rmtree(destpath)
os.makedirs(destpath)
shutil.copy('run.py', op.join(destpath, 'run.py'))
extra_ignores = ['*.so'] if not with_so else None
shutil.copy("run.py", op.join(destpath, "run.py"))
extra_ignores = ["*.so"] if not with_so else None
copy_packages(packages, destpath, extra_ignores=extra_ignores)
shutil.copytree(op.join('build', 'help'), op.join(destpath, 'help'))
shutil.copytree(op.join('build', 'locale'), op.join(destpath, 'locale'))
shutil.copytree(op.join("build", "help"), op.join(destpath, "help"))
shutil.copytree(op.join("build", "locale"), op.join(destpath, "locale"))
compileall.compile_dir(destpath)


def package_debian_distribution(distribution):
app_version = get_module_version('core')
version = '{}~{}'.format(app_version, distribution)
destpath = op.join('build', 'dupeguru-{}'.format(version))
srcpath = op.join(destpath, 'src')
packages = [
'hscommon', 'core', 'qtlib', 'qt', 'send2trash', 'hsaudiotag'
]
app_version = get_module_version("core")
version = "{}~{}".format(app_version, distribution)
destpath = op.join("build", "dupeguru-{}".format(version))
srcpath = op.join(destpath, "src")
packages = ["hscommon", "core", "qtlib", "qt", "send2trash", "hsaudiotag"]
copy_files_to_package(srcpath, packages, with_so=False)
os.mkdir(op.join(destpath, 'modules'))
copy_all(op.join('core', 'pe', 'modules', '*.*'), op.join(destpath, 'modules'))
copy(op.join('qt', 'pe', 'modules', 'block.c'), op.join(destpath, 'modules', 'block_qt.c'))
copy(op.join('pkg', 'debian', 'build_pe_modules.py'), op.join(destpath, 'build_pe_modules.py'))
debdest = op.join(destpath, 'debian')
debskel = op.join('pkg', 'debian')
os.makedirs(debdest)
debopts = json.load(open(op.join(debskel, 'dupeguru.json')))
for fn in ['compat', 'copyright', 'dirs', 'rules', 'source']:
copy(op.join(debskel, fn), op.join(debdest, fn))
filereplace(op.join(debskel, 'control'), op.join(debdest, 'control'), **debopts)
filereplace(op.join(debskel, 'Makefile'), op.join(destpath, 'Makefile'), **debopts)
filereplace(op.join(debskel, 'dupeguru.desktop'), op.join(debdest, 'dupeguru.desktop'), **debopts)
changelogpath = op.join('help', 'changelog')
changelog_dest = op.join(debdest, 'changelog')
project_name = debopts['pkgname']
from_version = '2.9.2'
build_debian_changelog(
changelogpath, changelog_dest, project_name, from_version=from_version,
distribution=distribution
os.mkdir(op.join(destpath, "modules"))
copy_all(op.join("core", "pe", "modules", "*.*"), op.join(destpath, "modules"))
copy(
op.join("qt", "pe", "modules", "block.c"),
op.join(destpath, "modules", "block_qt.c"),
)
shutil.copy(op.join('images', 'dgse_logo_128.png'), srcpath)
copy(
op.join("pkg", "debian", "build_pe_modules.py"),
op.join(destpath, "build_pe_modules.py"),
)
debdest = op.join(destpath, "debian")
debskel = op.join("pkg", "debian")
os.makedirs(debdest)
debopts = json.load(open(op.join(debskel, "dupeguru.json")))
for fn in ["compat", "copyright", "dirs", "rules", "source"]:
copy(op.join(debskel, fn), op.join(debdest, fn))
filereplace(op.join(debskel, "control"), op.join(debdest, "control"), **debopts)
filereplace(op.join(debskel, "Makefile"), op.join(destpath, "Makefile"), **debopts)
filereplace(
op.join(debskel, "dupeguru.desktop"),
op.join(debdest, "dupeguru.desktop"),
**debopts
)
changelogpath = op.join("help", "changelog")
changelog_dest = op.join(debdest, "changelog")
project_name = debopts["pkgname"]
from_version = "2.9.2"
build_debian_changelog(
changelogpath,
changelog_dest,
project_name,
from_version=from_version,
distribution=distribution,
)
shutil.copy(op.join("images", "dgse_logo_128.png"), srcpath)
os.chdir(destpath)
cmd = "dpkg-buildpackage -S -us -uc"
os.system(cmd)
os.chdir('../..')
os.chdir("../..")


def package_debian():
print("Packaging for Debian/Ubuntu")
for distribution in ['unstable']:
for distribution in ["unstable"]:
package_debian_distribution(distribution)


def package_arch():
# For now, package_arch() will only copy the source files into build/. It copies less packages
# than package_debian because there are more python packages available in Arch (so we don't
# need to include them).
print("Packaging for Arch")
srcpath = op.join('build', 'dupeguru-arch')
srcpath = op.join("build", "dupeguru-arch")
packages = [
'hscommon', 'core', 'qtlib', 'qt', 'send2trash', 'hsaudiotag',
"hscommon",
"core",
"qtlib",
"qt",
"send2trash",
"hsaudiotag",
]
copy_files_to_package(srcpath, packages, with_so=True)
shutil.copy(op.join('images', 'dgse_logo_128.png'), srcpath)
debopts = json.load(open(op.join('pkg', 'arch', 'dupeguru.json')))
filereplace(op.join('pkg', 'arch', 'dupeguru.desktop'), op.join(srcpath, 'dupeguru.desktop'), **debopts)
shutil.copy(op.join("images", "dgse_logo_128.png"), srcpath)
debopts = json.load(open(op.join("pkg", "arch", "dupeguru.json")))
filereplace(
op.join("pkg", "arch", "dupeguru.desktop"),
op.join(srcpath, "dupeguru.desktop"),
**debopts
)


def package_source_txz():
print("Creating git archive")
app_version = get_module_version('core')
name = 'dupeguru-src-{}.tar'.format(app_version)
app_version = get_module_version("core")
name = "dupeguru-src-{}.tar".format(app_version)
base_path = os.getcwd()
build_path = op.join(base_path, 'build')
build_path = op.join(base_path, "build")
dest = op.join(build_path, name)
print_and_do('git archive -o {} HEAD'.format(dest))
print_and_do("git archive -o {} HEAD".format(dest))
# Now, we need to include submodules
SUBMODULES = ['hscommon', 'qtlib']
SUBMODULES = ["hscommon", "qtlib"]
for submodule in SUBMODULES:
print("Adding submodule {} to archive".format(submodule))
os.chdir(submodule)
archive_path = op.join(build_path, '{}.tar'.format(submodule))
print_and_do('git archive -o {} --prefix {}/ HEAD'.format(archive_path, submodule))
archive_path = op.join(build_path, "{}.tar".format(submodule))
print_and_do(
"git archive -o {} --prefix {}/ HEAD".format(archive_path, submodule)
)
os.chdir(base_path)
print_and_do('tar -A {} -f {}'.format(archive_path, dest))
print_and_do('xz {}'.format(dest))
print_and_do("tar -A {} -f {}".format(archive_path, dest))
print_and_do("xz {}".format(dest))


def package_windows():
app_version = get_module_version('core')
app_version = get_module_version("core")
arch = platform.architecture()[0]
# Information to pass to pyinstaller and NSIS
match = re.search('[0-9]+.[0-9]+.[0-9]+', app_version)
version_array = match.group(0).split('.')
match = re.search('[0-9]+', arch)
match = re.search("[0-9]+.[0-9]+.[0-9]+", app_version)
version_array = match.group(0).split(".")
match = re.search("[0-9]+", arch)
bits = match.group(0)
# include locale files if they are built otherwise exit as it will break
# the localization
if not op.exists('build/locale'):
if not op.exists("build/locale"):
print("Locale files not built, exiting...")
return
# include help files if they are built otherwise exit as they should be included?
if not op.exists('build/help'):
if not op.exists("build/help"):
print("Help files not built, exiting...")
return
# create version information file from template
try:
version_template = open("win_version_info.temp", "r")
version_info = version_template.read()
version_template.close()
version_info_file = open("win_version_info.txt", "w")
version_info_file.write(version_info.format(version_array[0], version_array[1], version_array[2], bits))
version_info_file.write(
version_info.format(
version_array[0], version_array[1], version_array[2], bits
)
)
version_info_file.close()
except Exception:
print("Error creating version info file, exiting...")
return
# run pyinstaller via command line
print_and_do('pyinstaller -w --name=dupeguru-win{0} --icon=images/dgse_logo.ico '
'--add-data "build/locale;locale" --add-data "build/help;help" '
'--version-file win_version_info.txt run.py'.format(bits))
print_and_do(
"pyinstaller -w --name=dupeguru-win{0} --icon=images/dgse_logo.ico "
'--add-data "build/locale;locale" --add-data "build/help;help" '
"--version-file win_version_info.txt run.py".format(bits)
)
# remove version info file
os.remove('win_version_info.txt')
os.remove("win_version_info.txt")
# Call NSIS (TODO update to not use hardcoded path)
cmd = ('"C:\\Program Files (x86)\\NSIS\\Bin\\makensis.exe" '
'/DVERSIONMAJOR={0} /DVERSIONMINOR={1} /DVERSIONPATCH={2} /DBITS={3} setup.nsi')
cmd = (
'"C:\\Program Files (x86)\\NSIS\\Bin\\makensis.exe" '
"/DVERSIONMAJOR={0} /DVERSIONMINOR={1} /DVERSIONPATCH={2} /DBITS={3} setup.nsi"
)
print_and_do(cmd.format(version_array[0], version_array[1], version_array[2], bits))


def main():
args = parse_args()
if args.src_pkg:
@ -159,17 +202,18 @@ def main():
package_source_txz()
return
print("Packaging dupeGuru with UI qt")
if sys.platform == 'win32':
if sys.platform == "win32":
package_windows()
else:
if not args.arch_pkg:
distname, _, _ = platform.dist()
else:
distname = 'arch'
if distname == 'arch':
distname = "arch"
if distname == "arch":
package_arch()
else:
package_debian()

if __name__ == '__main__':

if __name__ == "__main__":
main()
147
qt/app.py
@ -36,11 +36,12 @@ from .me.preferences_dialog import PreferencesDialog as PreferencesDialogMusic
from .pe.preferences_dialog import PreferencesDialog as PreferencesDialogPicture
from .pe.photo import File as PlatSpecificPhoto

tr = trget('ui')
tr = trget("ui")


class DupeGuru(QObject):
LOGO_NAME = 'logo_se'
NAME = 'dupeGuru'
LOGO_NAME = "logo_se"
NAME = "dupeGuru"

def __init__(self, **kwargs):
super().__init__(**kwargs)
@ -49,20 +50,28 @@ class DupeGuru(QObject):
self.model = DupeGuruModel(view=self)
self._setup()

#--- Private
# --- Private
def _setup(self):
core.pe.photo.PLAT_SPECIFIC_PHOTO_CLASS = PlatSpecificPhoto
self._setupActions()
self._update_options()
self.recentResults = Recent(self, 'recentResults')
self.recentResults = Recent(self, "recentResults")
self.recentResults.mustOpenItem.connect(self.model.load_from)
self.resultWindow = None
self.details_dialog = None
self.directories_dialog = DirectoriesDialog(self)
self.progress_window = ProgressWindow(self.directories_dialog, self.model.progress_window)
self.problemDialog = ProblemDialog(parent=self.directories_dialog, model=self.model.problem_dialog)
self.ignoreListDialog = IgnoreListDialog(parent=self.directories_dialog, model=self.model.ignore_list_dialog)
self.deletionOptions = DeletionOptions(parent=self.directories_dialog, model=self.model.deletion_options)
self.progress_window = ProgressWindow(
self.directories_dialog, self.model.progress_window
)
self.problemDialog = ProblemDialog(
parent=self.directories_dialog, model=self.model.problem_dialog
)
self.ignoreListDialog = IgnoreListDialog(
parent=self.directories_dialog, model=self.model.ignore_list_dialog
)
self.deletionOptions = DeletionOptions(
parent=self.directories_dialog, model=self.model.deletion_options
)
self.about_box = AboutBox(self.directories_dialog, self)

self.directories_dialog.show()
@ -80,46 +89,70 @@ class DupeGuru(QObject):
# Setup actions that are common to both the directory dialog and the results window.
# (name, shortcut, icon, desc, func)
ACTIONS = [
('actionQuit', 'Ctrl+Q', '', tr("Quit"), self.quitTriggered),
('actionPreferences', 'Ctrl+P', '', tr("Options"), self.preferencesTriggered),
('actionIgnoreList', '', '', tr("Ignore List"), self.ignoreListTriggered),
('actionClearPictureCache', 'Ctrl+Shift+P', '', tr("Clear Picture Cache"), self.clearPictureCacheTriggered),
('actionShowHelp', 'F1', '', tr("dupeGuru Help"), self.showHelpTriggered),
('actionAbout', '', '', tr("About dupeGuru"), self.showAboutBoxTriggered),
('actionOpenDebugLog', '', '', tr("Open Debug Log"), self.openDebugLogTriggered),
("actionQuit", "Ctrl+Q", "", tr("Quit"), self.quitTriggered),
(
"actionPreferences",
"Ctrl+P",
"",
tr("Options"),
self.preferencesTriggered,
),
("actionIgnoreList", "", "", tr("Ignore List"), self.ignoreListTriggered),
(
"actionClearPictureCache",
"Ctrl+Shift+P",
"",
tr("Clear Picture Cache"),
self.clearPictureCacheTriggered,
),
("actionShowHelp", "F1", "", tr("dupeGuru Help"), self.showHelpTriggered),
("actionAbout", "", "", tr("About dupeGuru"), self.showAboutBoxTriggered),
(
"actionOpenDebugLog",
"",
"",
tr("Open Debug Log"),
self.openDebugLogTriggered,
),
]
createActions(ACTIONS, self)

def _update_options(self):
self.model.options['mix_file_kind'] = self.prefs.mix_file_kind
self.model.options['escape_filter_regexp'] = not self.prefs.use_regexp
self.model.options['clean_empty_dirs'] = self.prefs.remove_empty_folders
self.model.options['ignore_hardlink_matches'] = self.prefs.ignore_hardlink_matches
self.model.options['copymove_dest_type'] = self.prefs.destination_type
self.model.options['scan_type'] = self.prefs.get_scan_type(self.model.app_mode)
self.model.options['min_match_percentage'] = self.prefs.filter_hardness
self.model.options['word_weighting'] = self.prefs.word_weighting
self.model.options['match_similar_words'] = self.prefs.match_similar
threshold = self.prefs.small_file_threshold if self.prefs.ignore_small_files else 0
self.model.options['size_threshold'] = threshold * 1024 # threshold is in KB. the scanner wants bytes
self.model.options["mix_file_kind"] = self.prefs.mix_file_kind
self.model.options["escape_filter_regexp"] = not self.prefs.use_regexp
self.model.options["clean_empty_dirs"] = self.prefs.remove_empty_folders
self.model.options[
"ignore_hardlink_matches"
] = self.prefs.ignore_hardlink_matches
self.model.options["copymove_dest_type"] = self.prefs.destination_type
self.model.options["scan_type"] = self.prefs.get_scan_type(self.model.app_mode)
self.model.options["min_match_percentage"] = self.prefs.filter_hardness
self.model.options["word_weighting"] = self.prefs.word_weighting
self.model.options["match_similar_words"] = self.prefs.match_similar
threshold = (
self.prefs.small_file_threshold if self.prefs.ignore_small_files else 0
)
self.model.options["size_threshold"] = (
threshold * 1024
)  # threshold is in KB. the scanner wants bytes
scanned_tags = set()
if self.prefs.scan_tag_track:
scanned_tags.add('track')
scanned_tags.add("track")
if self.prefs.scan_tag_artist:
scanned_tags.add('artist')
scanned_tags.add("artist")
if self.prefs.scan_tag_album:
scanned_tags.add('album')
scanned_tags.add("album")
if self.prefs.scan_tag_title:
scanned_tags.add('title')
scanned_tags.add("title")
if self.prefs.scan_tag_genre:
scanned_tags.add('genre')
scanned_tags.add("genre")
if self.prefs.scan_tag_year:
scanned_tags.add('year')
self.model.options['scanned_tags'] = scanned_tags
self.model.options['match_scaled'] = self.prefs.match_scaled
self.model.options['picture_cache_type'] = self.prefs.picture_cache_type
scanned_tags.add("year")
self.model.options["scanned_tags"] = scanned_tags
self.model.options["match_scaled"] = self.prefs.match_scaled
self.model.options["picture_cache_type"] = self.prefs.picture_cache_type

#--- Private
# --- Private
def _get_details_dialog_class(self):
if self.model.app_mode == AppMode.Picture:
return DetailsDialogPicture
@ -136,7 +169,7 @@ class DupeGuru(QObject):
else:
return PreferencesDialogStandard

#--- Public
# --- Public
def add_selected_to_ignore_list(self):
self.model.add_selected_to_ignore_list()

@ -166,17 +199,19 @@ class DupeGuru(QObject):
self.model.save()
QApplication.quit()

#--- Signals
# --- Signals
willSavePrefs = pyqtSignal()
SIGTERM = pyqtSignal()

#--- Events
# --- Events
def finishedLaunching(self):
if sys.getfilesystemencoding() == 'ascii':
if sys.getfilesystemencoding() == "ascii":
# No need to localize this, it's a debugging message.
msg = "Something is wrong with the way your system locale is set. If the files you're "\
"scanning have accented letters, you'll probably get a crash. It is advised that "\
msg = (
"Something is wrong with the way your system locale is set. If the files you're "
"scanning have accented letters, you'll probably get a crash. It is advised that "
"you set your system locale properly."
)
QMessageBox.warning(self.directories_dialog, "Wrong Locale", msg)

def clearPictureCacheTriggered(self):
@ -191,11 +226,13 @@ class DupeGuru(QObject):
self.model.ignore_list_dialog.show()

def openDebugLogTriggered(self):
debugLogPath = op.join(self.model.appdata, 'debug.log')
debugLogPath = op.join(self.model.appdata, "debug.log")
desktop.open_path(debugLogPath)

def preferencesTriggered(self):
preferences_dialog = self._get_preferences_dialog_class()(self.directories_dialog, self)
preferences_dialog = self._get_preferences_dialog_class()(
self.directories_dialog, self
)
preferences_dialog.load()
result = preferences_dialog.exec()
if result == QDialog.Accepted:
@ -212,17 +249,17 @@ class DupeGuru(QObject):

def showHelpTriggered(self):
base_path = platform.HELP_PATH
help_path = op.abspath(op.join(base_path, 'index.html'))
help_path = op.abspath(op.join(base_path, "index.html"))
if op.exists(help_path):
url = QUrl.fromLocalFile(help_path)
else:
url = QUrl('https://www.hardcoded.net/dupeguru/help/en/')
url = QUrl("https://www.hardcoded.net/dupeguru/help/en/")
QDesktopServices.openUrl(url)

def handleSIGTERM(self):
self.shutdown()

#--- model --> view
# --- model --> view
def get_default(self, key):
return self.prefs.get_value(key)

@ -231,10 +268,10 @@ class DupeGuru(QObject):

def show_message(self, msg):
window = QApplication.activeWindow()
QMessageBox.information(window, '', msg)
QMessageBox.information(window, "", msg)

def ask_yes_no(self, prompt):
return self.confirm('', prompt)
return self.confirm("", prompt)

def create_results_window(self):
"""Creates resultWindow and details_dialog depending on the selected ``app_mode``.
@ -256,11 +293,13 @@ class DupeGuru(QObject):

def select_dest_folder(self, prompt):
flags = QFileDialog.ShowDirsOnly
return QFileDialog.getExistingDirectory(self.resultWindow, prompt, '', flags)
return QFileDialog.getExistingDirectory(self.resultWindow, prompt, "", flags)

def select_dest_file(self, prompt, extension):
files = tr("{} file (*.{})").format(extension.upper(), extension)
destination, chosen_filter = QFileDialog.getSaveFileName(self.resultWindow, prompt, '', files)
if not destination.endswith('.{}'.format(extension)):
destination = '{}.{}'.format(destination, extension)
destination, chosen_filter = QFileDialog.getSaveFileName(
self.resultWindow, prompt, "", files
)
if not destination.endswith(".{}".format(extension)):
destination = "{}.{}".format(destination, extension)
return destination
|
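The hunks above are mechanical rewrites: string literals are normalized to double quotes, `#---` section markers gain a space, and calls that overflow the 88-column default are exploded one argument per line. A minimal sketch of reproducing one such rewrite through black's Python API (`format_str` with `FileMode`); the snippet is illustrative only and is not part of this commit:

import black

# A pre-black line shaped like the one in preferencesTriggered() above; at
# 90 columns it exceeds black's default 88-column limit, so black splits
# the call and indents the arguments. black only parses the string, so the
# undefined name "self" is fine here.
src = (
    "preferences_dialog = self._get_preferences_dialog_class()("
    "self.directories_dialog, self)\n"
)

# FileMode() carries black's defaults: 88 columns, double-quoted strings.
print(black.format_str(src, mode=black.FileMode()))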
@@ -12,7 +12,8 @@ from PyQt5.QtWidgets import QDialog, QVBoxLayout, QLabel, QCheckBox, QDialogButt
 from hscommon.trans import trget
 from qtlib.radio_box import RadioBox

-tr = trget('ui')
+tr = trget("ui")

+
 class DeletionOptions(QDialog):
     def __init__(self, parent, model, **kwargs):
@@ -41,7 +42,9 @@ class DeletionOptions(QDialog):
         self.linkMessageLabel = QLabel(text)
         self.linkMessageLabel.setWordWrap(True)
         self.verticalLayout.addWidget(self.linkMessageLabel)
-        self.linkTypeRadio = RadioBox(items=[tr("Symlink"), tr("Hardlink")], spread=False)
+        self.linkTypeRadio = RadioBox(
+            items=[tr("Symlink"), tr("Hardlink")], spread=False
+        )
         self.verticalLayout.addWidget(self.linkTypeRadio)
         if not self.model.supports_links():
             self.linkCheckbox.setEnabled(False)
@@ -60,11 +63,11 @@ class DeletionOptions(QDialog):
         self.buttonBox.addButton(tr("Cancel"), QDialogButtonBox.RejectRole)
         self.verticalLayout.addWidget(self.buttonBox)

-    #--- Signals
+    # --- Signals
     def linkCheckboxChanged(self, changed: int):
         self.model.link_deleted = bool(changed)

-    #--- model --> view
+    # --- model --> view
     def update_msg(self, msg: str):
         self.msgLabel.setText(msg)

@@ -80,4 +83,3 @@ class DeletionOptions(QDialog):

     def set_hardlink_option_enabled(self, is_enabled: bool):
         self.linkTypeRadio.setEnabled(is_enabled)
-
@@ -1,9 +1,9 @@
 # Created By: Virgil Dupras
 # Created On: 2010-02-05
 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
-#
-# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
-# which should be included with this package. The terms are also available at
+#
+# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
+# which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

 from PyQt5.QtCore import Qt
@@ -11,6 +11,7 @@ from PyQt5.QtWidgets import QDialog

 from .details_table import DetailsModel

+
 class DetailsDialog(QDialog):
     def __init__(self, parent, app, **kwargs):
         super().__init__(parent, Qt.Tool, **kwargs)
@@ -20,28 +21,27 @@ class DetailsDialog(QDialog):
         # To avoid saving uninitialized geometry on appWillSavePrefs, we track whether our dialog
         # has been shown. If it has, we know that our geometry should be saved.
         self._shown_once = False
-        self.app.prefs.restoreGeometry('DetailsWindowRect', self)
+        self.app.prefs.restoreGeometry("DetailsWindowRect", self)
         self.tableModel = DetailsModel(self.model)
         # tableView is defined in subclasses
         self.tableView.setModel(self.tableModel)
         self.model.view = self
-

         self.app.willSavePrefs.connect(self.appWillSavePrefs)

-    def _setupUi(self): # Virtual
+    def _setupUi(self):  # Virtual
         pass

     def show(self):
         self._shown_once = True
         super().show()

-    #--- Events
+    # --- Events
     def appWillSavePrefs(self):
         if self._shown_once:
-            self.app.prefs.saveGeometry('DetailsWindowRect', self)
-
-    #--- model --> view
+            self.app.prefs.saveGeometry("DetailsWindowRect", self)
+
+    # --- model --> view
     def refresh(self):
         self.tableModel.beginResetModel()
         self.tableModel.endResetModel()
@@ -1,9 +1,9 @@
 # Created By: Virgil Dupras
 # Created On: 2009-05-17
 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
-#
-# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
-# which should be included with this package. The terms are also available at
+#
+# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
+# which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

 from PyQt5.QtCore import Qt, QAbstractTableModel
@@ -11,18 +11,19 @@ from PyQt5.QtWidgets import QHeaderView, QTableView

 from hscommon.trans import trget

-tr = trget('ui')
+tr = trget("ui")

 HEADER = [tr("Attribute"), tr("Selected"), tr("Reference")]

+
 class DetailsModel(QAbstractTableModel):
     def __init__(self, model, **kwargs):
         super().__init__(**kwargs)
         self.model = model

     def columnCount(self, parent):
         return len(HEADER)

     def data(self, index, role):
         if not index.isValid():
             return None
@@ -31,15 +32,19 @@ class DetailsModel(QAbstractTableModel):
         column = index.column()
         row = index.row()
         return self.model.row(row)[column]

     def headerData(self, section, orientation, role):
-        if orientation == Qt.Horizontal and role == Qt.DisplayRole and section < len(HEADER):
+        if (
+            orientation == Qt.Horizontal
+            and role == Qt.DisplayRole
+            and section < len(HEADER)
+        ):
             return HEADER[section]
         return None

     def rowCount(self, parent):
         return self.model.row_count()


 class DetailsTable(QTableView):
     def __init__(self, *args):
@@ -47,7 +52,7 @@ class DetailsTable(QTableView):
         self.setAlternatingRowColors(True)
         self.setSelectionBehavior(QTableView.SelectRows)
         self.setShowGrid(False)
-
+
     def setModel(self, model):
         QTableView.setModel(self, model)
         # The model needs to be set to set header stuff
@@ -61,4 +66,3 @@ class DetailsTable(QTableView):
         vheader = self.verticalHeader()
         vheader.setVisible(False)
         vheader.setDefaultSectionSize(18)
-
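The headerData hunk shows the same mechanism applied to a boolean condition: once the `if` line passes 88 columns, black wraps the condition in parentheses and splits it at the `and` operators. A minimal sketch, again through black's Python API; `Qt` and `HEADER` appear only inside the source string, so nothing beyond black itself needs to be importable:

import black

# Pre-black source echoing DetailsModel.headerData above. The 4-space indent
# pushes the condition past 88 columns, so black parenthesizes it and puts
# one clause per line, matching the "+" lines in the hunk.
src = (
    "def headerData(self, section, orientation, role):\n"
    "    if orientation == Qt.Horizontal and role == Qt.DisplayRole and section < len(HEADER):\n"
    "        return HEADER[section]\n"
)
print(black.format_str(src, mode=black.FileMode()))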