Format files with black

- Format all files with black
- Update tox.ini flake8 arguments to be compatible (see the flake8 sketch below)
- Add black to requirements-extra.txt
- Reduce ignored flake8 rules and fix a few violations
Author: Andrew Senetar, 2019-12-31 20:16:27 -06:00
parent 359d6498f7
commit 7ba8aa3514
Signed by: arsenetar (GPG Key ID: C63300DCE48AB2F1)
141 changed files with 5241 additions and 3648 deletions
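Black formats to an 88-column line length by default and deliberately produces two patterns that a stock flake8 run flags: E203, whitespace before ":", which black inserts in slices (visible in this diff, where p[:len(root)] becomes p[: len(root)]), and W503, line break before a binary operator. Making flake8 compatible therefore means a section roughly like the following; this is a minimal sketch of the usual black-compatible settings, not necessarily the exact arguments this commit puts in tox.ini:

    [flake8]
    # Match black's 88-column default rather than flake8's 79.
    max-line-length = 88
    # E203 and W503 are emitted intentionally by black, so ignore them.
    extend-ignore = E203, W503

With a compatible config in place, running black over the repository produces the kind of mechanical rewrite the diffs below show: single quotes normalized to double quotes, long calls exploded to one argument per line with trailing commas, and two spaces enforced before inline comments.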

build.py (143 changed lines)

@@ -13,129 +13,165 @@ from setuptools import setup, Extension
 from hscommon import sphinxgen
 from hscommon.build import (
-    add_to_pythonpath, print_and_do, move_all, fix_qt_resource_file,
+    add_to_pythonpath,
+    print_and_do,
+    move_all,
+    fix_qt_resource_file,
 )
 from hscommon import loc

 def parse_args():
     usage = "usage: %prog [options]"
     parser = OptionParser(usage=usage)
     parser.add_option(
-        '--clean', action='store_true', dest='clean',
-        help="Clean build folder before building"
+        "--clean",
+        action="store_true",
+        dest="clean",
+        help="Clean build folder before building",
     )
     parser.add_option(
-        '--doc', action='store_true', dest='doc',
-        help="Build only the help file"
+        "--doc", action="store_true", dest="doc", help="Build only the help file"
     )
     parser.add_option(
-        '--loc', action='store_true', dest='loc',
-        help="Build only localization"
+        "--loc", action="store_true", dest="loc", help="Build only localization"
     )
     parser.add_option(
-        '--updatepot', action='store_true', dest='updatepot',
-        help="Generate .pot files from source code."
+        "--updatepot",
+        action="store_true",
+        dest="updatepot",
+        help="Generate .pot files from source code.",
     )
     parser.add_option(
-        '--mergepot', action='store_true', dest='mergepot',
-        help="Update all .po files based on .pot files."
+        "--mergepot",
+        action="store_true",
+        dest="mergepot",
+        help="Update all .po files based on .pot files.",
     )
     parser.add_option(
-        '--normpo', action='store_true', dest='normpo',
-        help="Normalize all PO files (do this before commit)."
+        "--normpo",
+        action="store_true",
+        dest="normpo",
+        help="Normalize all PO files (do this before commit).",
     )
     (options, args) = parser.parse_args()
     return options

 def build_help():
     print("Generating Help")
-    current_path = op.abspath('.')
-    help_basepath = op.join(current_path, 'help', 'en')
-    help_destpath = op.join(current_path, 'build', 'help')
-    changelog_path = op.join(current_path, 'help', 'changelog')
+    current_path = op.abspath(".")
+    help_basepath = op.join(current_path, "help", "en")
+    help_destpath = op.join(current_path, "build", "help")
+    changelog_path = op.join(current_path, "help", "changelog")
     tixurl = "https://github.com/hsoft/dupeguru/issues/{}"
-    confrepl = {'language': 'en'}
-    changelogtmpl = op.join(current_path, 'help', 'changelog.tmpl')
-    conftmpl = op.join(current_path, 'help', 'conf.tmpl')
-    sphinxgen.gen(help_basepath, help_destpath, changelog_path, tixurl, confrepl, conftmpl, changelogtmpl)
+    confrepl = {"language": "en"}
+    changelogtmpl = op.join(current_path, "help", "changelog.tmpl")
+    conftmpl = op.join(current_path, "help", "conf.tmpl")
+    sphinxgen.gen(
+        help_basepath,
+        help_destpath,
+        changelog_path,
+        tixurl,
+        confrepl,
+        conftmpl,
+        changelogtmpl,
+    )

 def build_qt_localizations():
-    loc.compile_all_po(op.join('qtlib', 'locale'))
-    loc.merge_locale_dir(op.join('qtlib', 'locale'), 'locale')
+    loc.compile_all_po(op.join("qtlib", "locale"))
+    loc.merge_locale_dir(op.join("qtlib", "locale"), "locale")

 def build_localizations():
-    loc.compile_all_po('locale')
+    loc.compile_all_po("locale")
     build_qt_localizations()
-    locale_dest = op.join('build', 'locale')
+    locale_dest = op.join("build", "locale")
     if op.exists(locale_dest):
         shutil.rmtree(locale_dest)
-    shutil.copytree('locale', locale_dest, ignore=shutil.ignore_patterns('*.po', '*.pot'))
+    shutil.copytree(
+        "locale", locale_dest, ignore=shutil.ignore_patterns("*.po", "*.pot")
+    )

 def build_updatepot():
     print("Building .pot files from source files")
     print("Building core.pot")
-    loc.generate_pot(['core'], op.join('locale', 'core.pot'), ['tr'])
+    loc.generate_pot(["core"], op.join("locale", "core.pot"), ["tr"])
     print("Building columns.pot")
-    loc.generate_pot(['core'], op.join('locale', 'columns.pot'), ['coltr'])
+    loc.generate_pot(["core"], op.join("locale", "columns.pot"), ["coltr"])
     print("Building ui.pot")
     # When we're not under OS X, we don't want to overwrite ui.pot because it contains Cocoa locs
     # We want to merge the generated pot with the old pot in the most preserving way possible.
-    ui_packages = ['qt', op.join('cocoa', 'inter')]
-    loc.generate_pot(ui_packages, op.join('locale', 'ui.pot'), ['tr'], merge=True)
+    ui_packages = ["qt", op.join("cocoa", "inter")]
+    loc.generate_pot(ui_packages, op.join("locale", "ui.pot"), ["tr"], merge=True)
     print("Building qtlib.pot")
-    loc.generate_pot(['qtlib'], op.join('qtlib', 'locale', 'qtlib.pot'), ['tr'])
+    loc.generate_pot(["qtlib"], op.join("qtlib", "locale", "qtlib.pot"), ["tr"])

 def build_mergepot():
     print("Updating .po files using .pot files")
-    loc.merge_pots_into_pos('locale')
-    loc.merge_pots_into_pos(op.join('qtlib', 'locale'))
-    loc.merge_pots_into_pos(op.join('cocoalib', 'locale'))
+    loc.merge_pots_into_pos("locale")
+    loc.merge_pots_into_pos(op.join("qtlib", "locale"))
+    loc.merge_pots_into_pos(op.join("cocoalib", "locale"))

 def build_normpo():
-    loc.normalize_all_pos('locale')
-    loc.normalize_all_pos(op.join('qtlib', 'locale'))
-    loc.normalize_all_pos(op.join('cocoalib', 'locale'))
+    loc.normalize_all_pos("locale")
+    loc.normalize_all_pos(op.join("qtlib", "locale"))
+    loc.normalize_all_pos(op.join("cocoalib", "locale"))

 def build_pe_modules():
     print("Building PE Modules")
     exts = [
         Extension(
             "_block",
-            [op.join('core', 'pe', 'modules', 'block.c'), op.join('core', 'pe', 'modules', 'common.c')]
+            [
+                op.join("core", "pe", "modules", "block.c"),
+                op.join("core", "pe", "modules", "common.c"),
+            ],
         ),
         Extension(
             "_cache",
-            [op.join('core', 'pe', 'modules', 'cache.c'), op.join('core', 'pe', 'modules', 'common.c')]
+            [
+                op.join("core", "pe", "modules", "cache.c"),
+                op.join("core", "pe", "modules", "common.c"),
+            ],
         ),
     ]
-    exts.append(Extension("_block_qt", [op.join('qt', 'pe', 'modules', 'block.c')]))
+    exts.append(Extension("_block_qt", [op.join("qt", "pe", "modules", "block.c")]))
     setup(
-        script_args=['build_ext', '--inplace'],
-        ext_modules=exts,
+        script_args=["build_ext", "--inplace"], ext_modules=exts,
     )
-    move_all('_block_qt*', op.join('qt', 'pe'))
-    move_all('_block*', op.join('core', 'pe'))
-    move_all('_cache*', op.join('core', 'pe'))
+    move_all("_block_qt*", op.join("qt", "pe"))
+    move_all("_block*", op.join("core", "pe"))
+    move_all("_cache*", op.join("core", "pe"))

 def build_normal():
     print("Building dupeGuru with UI qt")
-    add_to_pythonpath('.')
+    add_to_pythonpath(".")
     print("Building dupeGuru")
     build_pe_modules()
     print("Building localizations")
     build_localizations()
     print("Building Qt stuff")
-    print_and_do("pyrcc5 {0} > {1}".format(op.join('qt', 'dg.qrc'), op.join('qt', 'dg_rc.py')))
-    fix_qt_resource_file(op.join('qt', 'dg_rc.py'))
+    print_and_do(
+        "pyrcc5 {0} > {1}".format(op.join("qt", "dg.qrc"), op.join("qt", "dg_rc.py"))
+    )
+    fix_qt_resource_file(op.join("qt", "dg_rc.py"))
     build_help()

 def main():
     options = parse_args()
     if options.clean:
-        if op.exists('build'):
-            shutil.rmtree('build')
-    if not op.exists('build'):
-        os.mkdir('build')
+        if op.exists("build"):
+            shutil.rmtree("build")
+    if not op.exists("build"):
+        os.mkdir("build")
     if options.doc:
         build_help()
     elif options.loc:
@@ -149,5 +185,6 @@ def main():
     else:
         build_normal()

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()


@@ -1,3 +1,2 @@
-__version__ = '4.0.4'
-__appname__ = 'dupeGuru'
+__version__ = "4.0.4"
+__appname__ = "dupeGuru"


@@ -34,8 +34,8 @@ from .gui.ignore_list_dialog import IgnoreListDialog
 from .gui.problem_dialog import ProblemDialog
 from .gui.stats_label import StatsLabel

-HAD_FIRST_LAUNCH_PREFERENCE = 'HadFirstLaunch'
-DEBUG_MODE_PREFERENCE = 'DebugMode'
+HAD_FIRST_LAUNCH_PREFERENCE = "HadFirstLaunch"
+DEBUG_MODE_PREFERENCE = "DebugMode"

 MSG_NO_MARKED_DUPES = tr("There are no marked duplicates. Nothing has been done.")
 MSG_NO_SELECTED_DUPES = tr("There are no selected duplicates. Nothing has been done.")
@@ -44,23 +44,27 @@ MSG_MANY_FILES_TO_OPEN = tr(
     "files are opened with, doing so can create quite a mess. Continue?"
 )

 class DestType:
     Direct = 0
     Relative = 1
     Absolute = 2

 class JobType:
-    Scan = 'job_scan'
-    Load = 'job_load'
-    Move = 'job_move'
-    Copy = 'job_copy'
-    Delete = 'job_delete'
+    Scan = "job_scan"
+    Load = "job_load"
+    Move = "job_move"
+    Copy = "job_copy"
+    Delete = "job_delete"

 class AppMode:
     Standard = 0
     Music = 1
     Picture = 2

 JOBID2TITLE = {
     JobType.Scan: tr("Scanning for duplicates"),
     JobType.Load: tr("Loading"),
@@ -69,6 +73,7 @@ JOBID2TITLE = {
     JobType.Delete: tr("Sending to Trash"),
 }

 class DupeGuru(Broadcaster):
     """Holds everything together.
@@ -100,7 +105,8 @@ class DupeGuru(Broadcaster):
     Instance of :mod:`meta-gui <core.gui>` table listing the results from :attr:`results`
     """

-    #--- View interface
+
+    # --- View interface
     # get_default(key_name)
     # set_default(key_name, value)
     # show_message(msg)
@@ -116,7 +122,7 @@ class DupeGuru(Broadcaster):
     NAME = PROMPT_NAME = "dupeGuru"

-    PICTURE_CACHE_TYPE = 'sqlite' # set to 'shelve' for a ShelveCache
+    PICTURE_CACHE_TYPE = "sqlite"  # set to 'shelve' for a ShelveCache

     def __init__(self, view):
         if view.get_default(DEBUG_MODE_PREFERENCE):
@@ -124,7 +130,9 @@ class DupeGuru(Broadcaster):
             logging.debug("Debug mode enabled")
         Broadcaster.__init__(self)
         self.view = view
-        self.appdata = desktop.special_folder_path(desktop.SpecialFolder.AppData, appname=self.NAME)
+        self.appdata = desktop.special_folder_path(
+            desktop.SpecialFolder.AppData, appname=self.NAME
+        )
         if not op.exists(self.appdata):
             os.makedirs(self.appdata)
         self.app_mode = AppMode.Standard
@@ -136,11 +144,11 @@ class DupeGuru(Broadcaster):
         # sent to the scanner. They don't have default values because those defaults values are
         # defined in the scanner class.
         self.options = {
-            'escape_filter_regexp': True,
-            'clean_empty_dirs': False,
-            'ignore_hardlink_matches': False,
-            'copymove_dest_type': DestType.Relative,
-            'picture_cache_type': self.PICTURE_CACHE_TYPE
+            "escape_filter_regexp": True,
+            "clean_empty_dirs": False,
+            "ignore_hardlink_matches": False,
+            "copymove_dest_type": DestType.Relative,
+            "picture_cache_type": self.PICTURE_CACHE_TYPE,
         }
         self.selected_dupes = []
         self.details_panel = DetailsPanel(self)
@@ -155,7 +163,7 @@ class DupeGuru(Broadcaster):
         for child in children:
             child.connect()

-    #--- Private
+    # --- Private
     def _recreate_result_table(self):
         if self.result_table is not None:
             self.result_table.disconnect()
@@ -169,26 +177,30 @@ class DupeGuru(Broadcaster):
         self.view.create_results_window()

     def _get_picture_cache_path(self):
-        cache_type = self.options['picture_cache_type']
-        cache_name = 'cached_pictures.shelve' if cache_type == 'shelve' else 'cached_pictures.db'
+        cache_type = self.options["picture_cache_type"]
+        cache_name = (
+            "cached_pictures.shelve" if cache_type == "shelve" else "cached_pictures.db"
+        )
         return op.join(self.appdata, cache_name)

     def _get_dupe_sort_key(self, dupe, get_group, key, delta):
         if self.app_mode in (AppMode.Music, AppMode.Picture):
-            if key == 'folder_path':
-                dupe_folder_path = getattr(dupe, 'display_folder_path', dupe.folder_path)
+            if key == "folder_path":
+                dupe_folder_path = getattr(
+                    dupe, "display_folder_path", dupe.folder_path
+                )
                 return str(dupe_folder_path).lower()
         if self.app_mode == AppMode.Picture:
-            if delta and key == 'dimensions':
+            if delta and key == "dimensions":
                 r = cmp_value(dupe, key)
                 ref_value = cmp_value(get_group().ref, key)
                 return get_delta_dimensions(r, ref_value)
-        if key == 'marked':
+        if key == "marked":
             return self.results.is_marked(dupe)
-        if key == 'percentage':
+        if key == "percentage":
             m = get_group().get_match_of(dupe)
             return m.percentage
-        elif key == 'dupe_count':
+        elif key == "dupe_count":
             return 0
         else:
             result = cmp_value(dupe, key)
@@ -203,21 +215,25 @@ class DupeGuru(Broadcaster):
     def _get_group_sort_key(self, group, key):
         if self.app_mode in (AppMode.Music, AppMode.Picture):
-            if key == 'folder_path':
-                dupe_folder_path = getattr(group.ref, 'display_folder_path', group.ref.folder_path)
+            if key == "folder_path":
+                dupe_folder_path = getattr(
+                    group.ref, "display_folder_path", group.ref.folder_path
+                )
                 return str(dupe_folder_path).lower()
-        if key == 'percentage':
+        if key == "percentage":
             return group.percentage
-        if key == 'dupe_count':
+        if key == "dupe_count":
             return len(group)
-        if key == 'marked':
+        if key == "marked":
             return len([dupe for dupe in group.dupes if self.results.is_marked(dupe)])
         return cmp_value(group.ref, key)

     def _do_delete(self, j, link_deleted, use_hardlinks, direct_deletion):
         def op(dupe):
             j.add_progress()
-            return self._do_delete_dupe(dupe, link_deleted, use_hardlinks, direct_deletion)
+            return self._do_delete_dupe(
+                dupe, link_deleted, use_hardlinks, direct_deletion
+            )

         j.start_job(self.results.mark_count)
         self.results.perform_on_marked(op, True)
@@ -233,7 +249,7 @@ class DupeGuru(Broadcaster):
             else:
                 os.remove(str_path)
         else:
-            send2trash(str_path) # Raises OSError when there's a problem
+            send2trash(str_path)  # Raises OSError when there's a problem
         if link_deleted:
             group = self.results.get_group_of_duplicate(dupe)
             ref = group.ref
@@ -258,8 +274,9 @@ class DupeGuru(Broadcaster):
     def _get_export_data(self):
         columns = [
-            col for col in self.result_table.columns.ordered_columns
-            if col.visible and col.name != 'marked'
+            col
+            for col in self.result_table.columns.ordered_columns
+            if col.visible and col.name != "marked"
         ]
         colnames = [col.display for col in columns]
         rows = []
@@ -273,10 +290,11 @@ class DupeGuru(Broadcaster):
     def _results_changed(self):
         self.selected_dupes = [
-            d for d in self.selected_dupes
+            d
+            for d in self.selected_dupes
             if self.results.get_group_of_duplicate(d) is not None
         ]
-        self.notify('results_changed')
+        self.notify("results_changed")

     def _start_job(self, jobid, func, args=()):
         title = JOBID2TITLE[jobid]
@@ -310,7 +328,9 @@ class DupeGuru(Broadcaster):
             msg = {
                 JobType.Copy: tr("All marked files were copied successfully."),
                 JobType.Move: tr("All marked files were moved successfully."),
-                JobType.Delete: tr("All marked files were successfully sent to Trash."),
+                JobType.Delete: tr(
+                    "All marked files were successfully sent to Trash."
+                ),
             }[jobid]
             self.view.show_message(msg)
@@ -341,9 +361,9 @@ class DupeGuru(Broadcaster):
         if dupes == self.selected_dupes:
             return
         self.selected_dupes = dupes
-        self.notify('dupes_selected')
+        self.notify("dupes_selected")

-    #--- Protected
+    # --- Protected
     def _get_fileclasses(self):
         if self.app_mode == AppMode.Picture:
             return [pe.photo.PLAT_SPECIFIC_PHOTO_CLASS]
@@ -360,7 +380,7 @@ class DupeGuru(Broadcaster):
         else:
             return prioritize.all_categories()

-    #--- Public
+    # --- Public
     def add_directory(self, d):
         """Adds folder ``d`` to :attr:`directories`.
@@ -370,7 +390,7 @@ class DupeGuru(Broadcaster):
         """
         try:
             self.directories.add_path(Path(d))
-            self.notify('directories_changed')
+            self.notify("directories_changed")
         except directories.AlreadyThereError:
             self.view.show_message(tr("'{}' already is in the list.").format(d))
         except directories.InvalidPathError:
@@ -383,7 +403,9 @@ class DupeGuru(Broadcaster):
         if not dupes:
             self.view.show_message(MSG_NO_SELECTED_DUPES)
             return
-        msg = tr("All selected %d matches are going to be ignored in all subsequent scans. Continue?")
+        msg = tr(
+            "All selected %d matches are going to be ignored in all subsequent scans. Continue?"
+        )
         if not self.view.ask_yes_no(msg % len(dupes)):
             return
         for dupe in dupes:
@@ -400,22 +422,22 @@ class DupeGuru(Broadcaster):
         :param str filter: filter to apply
         """
         self.results.apply_filter(None)
-        if self.options['escape_filter_regexp']:
-            filter = escape(filter, set('()[]\\.|+?^'))
-            filter = escape(filter, '*', '.')
+        if self.options["escape_filter_regexp"]:
+            filter = escape(filter, set("()[]\\.|+?^"))
+            filter = escape(filter, "*", ".")
         self.results.apply_filter(filter)
         self._results_changed()

     def clean_empty_dirs(self, path):
-        if self.options['clean_empty_dirs']:
-            while delete_if_empty(path, ['.DS_Store']):
+        if self.options["clean_empty_dirs"]:
+            while delete_if_empty(path, [".DS_Store"]):
                 path = path.parent()

     def clear_picture_cache(self):
         try:
             os.remove(self._get_picture_cache_path())
         except FileNotFoundError:
-            pass # we don't care
+            pass  # we don't care

     def copy_or_move(self, dupe, copy: bool, destination: str, dest_type: DestType):
         source_path = dupe.path
@@ -444,6 +466,7 @@ class DupeGuru(Broadcaster):
         :param bool copy: If True, duplicates will be copied instead of moved
         """
+
         def do(j):
             def op(dupe):
                 j.add_progress()
@@ -459,7 +482,7 @@ class DupeGuru(Broadcaster):
         prompt = tr("Select a directory to {} marked files to").format(opname)
         destination = self.view.select_dest_folder(prompt)
         if destination:
-            desttype = self.options['copymove_dest_type']
+            desttype = self.options["copymove_dest_type"]
             jobid = JobType.Copy if copy else JobType.Move
             self._start_job(jobid, do)
@@ -472,8 +495,9 @@ class DupeGuru(Broadcaster):
         if not self.deletion_options.show(self.results.mark_count):
             return
         args = [
-            self.deletion_options.link_deleted, self.deletion_options.use_hardlinks,
-            self.deletion_options.direct
+            self.deletion_options.link_deleted,
+            self.deletion_options.use_hardlinks,
+            self.deletion_options.direct,
         ]
         logging.debug("Starting deletion job with args %r", args)
         self._start_job(JobType.Delete, self._do_delete, args=args)
@@ -495,7 +519,9 @@ class DupeGuru(Broadcaster):
         The columns and their order in the resulting CSV file is determined in the same way as in
         :meth:`export_to_xhtml`.
         """
-        dest_file = self.view.select_dest_file(tr("Select a destination for your exported CSV"), 'csv')
+        dest_file = self.view.select_dest_file(
+            tr("Select a destination for your exported CSV"), "csv"
+        )
         if dest_file:
             colnames, rows = self._get_export_data()
             try:
@@ -505,13 +531,16 @@ class DupeGuru(Broadcaster):
     def get_display_info(self, dupe, group, delta=False):
         def empty_data():
-            return {c.name: '---' for c in self.result_table.COLUMNS[1:]}
+            return {c.name: "---" for c in self.result_table.COLUMNS[1:]}

         if (dupe is None) or (group is None):
             return empty_data()
         try:
             return dupe.get_display_info(group, delta)
         except Exception as e:
-            logging.warning("Exception on GetDisplayInfo for %s: %s", str(dupe.path), str(e))
+            logging.warning(
+                "Exception on GetDisplayInfo for %s: %s", str(dupe.path), str(e)
+            )
             return empty_data()

     def invoke_custom_command(self):
@@ -521,9 +550,11 @@ class DupeGuru(Broadcaster):
         is replaced with that dupe's ref file. If there's no selection, the command is not invoked.
         If the dupe is a ref, ``%d`` and ``%r`` will be the same.
         """
-        cmd = self.view.get_default('CustomCommand')
+        cmd = self.view.get_default("CustomCommand")
         if not cmd:
-            msg = tr("You have no custom command set up. Set it up in your preferences.")
+            msg = tr(
+                "You have no custom command set up. Set it up in your preferences."
+            )
             self.view.show_message(msg)
             return
         if not self.selected_dupes:
@@ -531,8 +562,8 @@ class DupeGuru(Broadcaster):
         dupe = self.selected_dupes[0]
         group = self.results.get_group_of_duplicate(dupe)
         ref = group.ref
-        cmd = cmd.replace('%d', str(dupe.path))
-        cmd = cmd.replace('%r', str(ref.path))
+        cmd = cmd.replace("%d", str(dupe.path))
+        cmd = cmd.replace("%r", str(ref.path))
         match = re.match(r'"([^"]+)"(.*)', cmd)
         if match is not None:
             # This code here is because subprocess.Popen doesn't seem to accept, under Windows,
@@ -551,9 +582,9 @@ class DupeGuru(Broadcaster):
         is persistent data, is the same as when the last session was closed (when :meth:`save` was
         called).
         """
-        self.directories.load_from_file(op.join(self.appdata, 'last_directories.xml'))
-        self.notify('directories_changed')
-        p = op.join(self.appdata, 'ignore_list.xml')
+        self.directories.load_from_file(op.join(self.appdata, "last_directories.xml"))
+        self.notify("directories_changed")
+        p = op.join(self.appdata, "ignore_list.xml")
         self.ignore_list.load_from_xml(p)
         self.ignore_list_dialog.refresh()
@@ -562,8 +593,10 @@ class DupeGuru(Broadcaster):
         :param str filename: path of the XML file (created with :meth:`save_as`) to load
         """
+
         def do(j):
             self.results.load_from_xml(filename, self._get_file, j)

         self._start_job(JobType.Load, do)

     def make_selected_reference(self):
@@ -588,35 +621,36 @@ class DupeGuru(Broadcaster):
         if not self.result_table.power_marker:
             if changed_groups:
                 self.selected_dupes = [
-                    d for d in self.selected_dupes
+                    d
+                    for d in self.selected_dupes
                     if self.results.get_group_of_duplicate(d).ref is d
                 ]
-            self.notify('results_changed')
+            self.notify("results_changed")
         else:
             # If we're in "Dupes Only" mode (previously called Power Marker), things are a bit
             # different. The refs are not shown in the table, and if our operation is successful,
             # this means that there's no way to follow our dupe selection. Then, the best thing to
             # do is to keep our selection index-wise (different dupe selection, but same index
             # selection).
-            self.notify('results_changed_but_keep_selection')
+            self.notify("results_changed_but_keep_selection")

     def mark_all(self):
         """Set all dupes in the results as marked.
         """
         self.results.mark_all()
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def mark_none(self):
         """Set all dupes in the results as unmarked.
         """
         self.results.mark_none()
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def mark_invert(self):
         """Invert the marked state of all dupes in the results.
         """
         self.results.mark_invert()
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def mark_dupe(self, dupe, marked):
         """Change marked status of ``dupe``.
@@ -629,7 +663,7 @@ class DupeGuru(Broadcaster):
             self.results.mark(dupe)
         else:
             self.results.unmark(dupe)
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def open_selected(self):
         """Open :attr:`selected_dupes` with their associated application.
@@ -656,7 +690,7 @@ class DupeGuru(Broadcaster):
             indexes = sorted(indexes, reverse=True)
             for index in indexes:
                 del self.directories[index]
-            self.notify('directories_changed')
+            self.notify("directories_changed")
         except IndexError:
             pass
@@ -669,7 +703,7 @@ class DupeGuru(Broadcaster):
         :type duplicates: list of :class:`~core.fs.File`
         """
         self.results.remove_duplicates(self.without_ref(duplicates))
-        self.notify('results_changed_but_keep_selection')
+        self.notify("results_changed_but_keep_selection")

     def remove_marked(self):
         """Removed marked duplicates from the results (without touching the files themselves).
@@ -724,7 +758,9 @@ class DupeGuru(Broadcaster):
             if group.prioritize(key_func=sort_key):
                 count += 1
         self._results_changed()
-        msg = tr("{} duplicate groups were changed by the re-prioritization.").format(count)
+        msg = tr("{} duplicate groups were changed by the re-prioritization.").format(
+            count
+        )
         self.view.show_message(msg)

     def reveal_selected(self):
@@ -734,10 +770,10 @@ class DupeGuru(Broadcaster):
     def save(self):
         if not op.exists(self.appdata):
             os.makedirs(self.appdata)
-        self.directories.save_to_file(op.join(self.appdata, 'last_directories.xml'))
-        p = op.join(self.appdata, 'ignore_list.xml')
+        self.directories.save_to_file(op.join(self.appdata, "last_directories.xml"))
+        p = op.join(self.appdata, "ignore_list.xml")
         self.ignore_list.save_to_xml(p)
-        self.notify('save_session')
+        self.notify("save_session")

     def save_as(self, filename):
         """Save results in ``filename``.
@@ -756,7 +792,9 @@ class DupeGuru(Broadcaster):
         """
         scanner = self.SCANNER_CLASS()
         if not self.directories.has_any_file():
-            self.view.show_message(tr("The selected directories contain no scannable file."))
+            self.view.show_message(
+                tr("The selected directories contain no scannable file.")
+            )
             return
         # Send relevant options down to the scanner instance
         for k, v in self.options.items():
@@ -771,12 +809,16 @@ class DupeGuru(Broadcaster):
         def do(j):
             j.set_progress(0, tr("Collecting files to scan"))
             if scanner.scan_type == ScanType.Folders:
-                files = list(self.directories.get_folders(folderclass=se.fs.Folder, j=j))
+                files = list(
+                    self.directories.get_folders(folderclass=se.fs.Folder, j=j)
+                )
             else:
-                files = list(self.directories.get_files(fileclasses=self.fileclasses, j=j))
-            if self.options['ignore_hardlink_matches']:
+                files = list(
+                    self.directories.get_files(fileclasses=self.fileclasses, j=j)
+                )
+            if self.options["ignore_hardlink_matches"]:
                 files = self._remove_hardlink_dupes(files)
-            logging.info('Scanning %d files' % len(files))
+            logging.info("Scanning %d files" % len(files))
             self.results.groups = scanner.get_dupe_groups(files, self.ignore_list, j)
             self.discarded_file_count = scanner.discarded_file_count
@@ -792,12 +834,16 @@ class DupeGuru(Broadcaster):
             markfunc = self.results.mark
         for dupe in selected:
             markfunc(dupe)
-        self.notify('marking_changed')
+        self.notify("marking_changed")

     def without_ref(self, dupes):
         """Returns ``dupes`` with all reference elements removed.
         """
-        return [dupe for dupe in dupes if self.results.get_group_of_duplicate(dupe).ref is not dupe]
+        return [
+            dupe
+            for dupe in dupes
+            if self.results.get_group_of_duplicate(dupe).ref is not dupe
+        ]

     def get_default(self, key, fallback_value=None):
         result = nonone(self.view.get_default(key), fallback_value)
@@ -812,7 +858,7 @@ class DupeGuru(Broadcaster):
     def set_default(self, key, value):
         self.view.set_default(key, value)

-    #--- Properties
+    # --- Properties
     @property
     def stat_line(self):
         result = self.results.stat_line
@@ -836,12 +882,21 @@ class DupeGuru(Broadcaster):
     @property
     def METADATA_TO_READ(self):
         if self.app_mode == AppMode.Picture:
-            return ['size', 'mtime', 'dimensions', 'exif_timestamp']
+            return ["size", "mtime", "dimensions", "exif_timestamp"]
         elif self.app_mode == AppMode.Music:
             return [
-                'size', 'mtime', 'duration', 'bitrate', 'samplerate', 'title', 'artist',
-                'album', 'genre', 'year', 'track', 'comment'
+                "size",
+                "mtime",
+                "duration",
+                "bitrate",
+                "samplerate",
+                "title",
+                "artist",
+                "album",
+                "genre",
+                "year",
+                "track",
+                "comment",
             ]
         else:
-            return ['size', 'mtime']
+            return ["size", "mtime"]


@@ -15,12 +15,13 @@ from hscommon.util import FileOrPath
 from . import fs

 __all__ = [
-    'Directories',
-    'DirectoryState',
-    'AlreadyThereError',
-    'InvalidPathError',
+    "Directories",
+    "DirectoryState",
+    "AlreadyThereError",
+    "InvalidPathError",
 ]

 class DirectoryState:
     """Enum describing how a folder should be considered.
@@ -28,16 +29,20 @@ class DirectoryState:
     * DirectoryState.Reference: Scan files, but make sure never to delete any of them
     * DirectoryState.Excluded: Don't scan this folder
     """

     Normal = 0
     Reference = 1
     Excluded = 2

 class AlreadyThereError(Exception):
     """The path being added is already in the directory list"""

 class InvalidPathError(Exception):
     """The path being added is invalid"""

 class Directories:
     """Holds user folder selection.
@@ -47,7 +52,8 @@ class Directories:
     Then, when the user starts the scan, :meth:`get_files` is called to retrieve all files (wrapped
     in :mod:`core.fs`) that have to be scanned according to the chosen folders/states.
     """

-    #---Override
+
+    # ---Override
     def __init__(self):
         self._dirs = []
         # {path: state}
@@ -68,10 +74,10 @@ class Directories:
     def __len__(self):
         return len(self._dirs)

-    #---Private
+    # ---Private
     def _default_state_for_path(self, path):
         # Override this in subclasses to specify the state of some special folders.
-        if path.name.startswith('.'): # hidden
+        if path.name.startswith("."):  # hidden
             return DirectoryState.Excluded

     def _get_files(self, from_path, fileclasses, j):
@@ -83,11 +89,13 @@ class Directories:
             # Recursively get files from folders with lots of subfolder is expensive. However, there
             # might be a subfolder in this path that is not excluded. What we want to do is to skim
             # through self.states and see if we must continue, or we can stop right here to save time
-            if not any(p[:len(root)] == root for p in self.states):
+            if not any(p[: len(root)] == root for p in self.states):
                 del dirs[:]
             try:
                 if state != DirectoryState.Excluded:
-                    found_files = [fs.get_file(root + f, fileclasses=fileclasses) for f in files]
+                    found_files = [
+                        fs.get_file(root + f, fileclasses=fileclasses) for f in files
+                    ]
                     found_files = [f for f in found_files if f is not None]
                     # In some cases, directories can be considered as files by dupeGuru, which is
                     # why we have this line below. In fact, there only one case: Bundle files under
@@ -97,7 +105,11 @@ class Directories:
                         if f is not None:
                             found_files.append(f)
                             dirs.remove(d)
-                logging.debug("Collected %d files in folder %s", len(found_files), str(from_path))
+                logging.debug(
+                    "Collected %d files in folder %s",
+                    len(found_files),
+                    str(from_path),
+                )
                 for file in found_files:
                     file.is_ref = state == DirectoryState.Reference
                     yield file
@@ -118,7 +130,7 @@ class Directories:
             except (EnvironmentError, fs.InvalidPath):
                 pass

-    #---Public
+    # ---Public
     def add_path(self, path):
         """Adds ``path`` to self, if not already there.
@@ -212,21 +224,21 @@ class Directories:
             root = ET.parse(infile).getroot()
         except Exception:
             return
-        for rdn in root.getiterator('root_directory'):
+        for rdn in root.getiterator("root_directory"):
             attrib = rdn.attrib
-            if 'path' not in attrib:
+            if "path" not in attrib:
                 continue
-            path = attrib['path']
+            path = attrib["path"]
             try:
                 self.add_path(Path(path))
             except (AlreadyThereError, InvalidPathError):
                 pass
-        for sn in root.getiterator('state'):
+        for sn in root.getiterator("state"):
             attrib = sn.attrib
-            if not ('path' in attrib and 'value' in attrib):
+            if not ("path" in attrib and "value" in attrib):
                 continue
-            path = attrib['path']
-            state = attrib['value']
+            path = attrib["path"]
+            state = attrib["value"]
             self.states[Path(path)] = int(state)

     def save_to_file(self, outfile):
@@ -234,17 +246,17 @@ class Directories:
         :param file outfile: path or file pointer to XML file to save to.
         """
-        with FileOrPath(outfile, 'wb') as fp:
-            root = ET.Element('directories')
+        with FileOrPath(outfile, "wb") as fp:
+            root = ET.Element("directories")
             for root_path in self:
-                root_path_node = ET.SubElement(root, 'root_directory')
-                root_path_node.set('path', str(root_path))
+                root_path_node = ET.SubElement(root, "root_directory")
+                root_path_node.set("path", str(root_path))
             for path, state in self.states.items():
-                state_node = ET.SubElement(root, 'state')
-                state_node.set('path', str(path))
-                state_node.set('value', str(state))
+                state_node = ET.SubElement(root, "state")
+                state_node.set("path", str(path))
+                state_node.set("value", str(state))
             tree = ET.ElementTree(root)
-            tree.write(fp, encoding='utf-8')
+            tree.write(fp, encoding="utf-8")

     def set_state(self, path, state):
         """Set the state of folder at ``path``.
@@ -259,4 +271,3 @@ class Directories:
             if path.is_parent_of(iter_path):
                 del self.states[iter_path]
         self.states[path] = state


@ -17,25 +17,26 @@ from hscommon.util import flatten, multi_replace
from hscommon.trans import tr from hscommon.trans import tr
from hscommon.jobprogress import job from hscommon.jobprogress import job
( (WEIGHT_WORDS, MATCH_SIMILAR_WORDS, NO_FIELD_ORDER,) = range(3)
WEIGHT_WORDS,
MATCH_SIMILAR_WORDS,
NO_FIELD_ORDER,
) = range(3)
JOB_REFRESH_RATE = 100 JOB_REFRESH_RATE = 100
def getwords(s): def getwords(s):
# We decompose the string so that ascii letters with accents can be part of the word. # We decompose the string so that ascii letters with accents can be part of the word.
s = normalize('NFD', s) s = normalize("NFD", s)
s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", ' ').lower() s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", " ").lower()
s = ''.join(c for c in s if c in string.ascii_letters + string.digits + string.whitespace) s = "".join(
return [_f for _f in s.split(' ') if _f] # remove empty elements c for c in s if c in string.ascii_letters + string.digits + string.whitespace
)
return [_f for _f in s.split(" ") if _f] # remove empty elements
def getfields(s): def getfields(s):
fields = [getwords(field) for field in s.split(' - ')] fields = [getwords(field) for field in s.split(" - ")]
return [_f for _f in fields if _f] return [_f for _f in fields if _f]
def unpack_fields(fields): def unpack_fields(fields):
result = [] result = []
for field in fields: for field in fields:
@ -45,6 +46,7 @@ def unpack_fields(fields):
result.append(field) result.append(field)
return result return result
def compare(first, second, flags=()): def compare(first, second, flags=()):
"""Returns the % of words that match between ``first`` and ``second`` """Returns the % of words that match between ``first`` and ``second``
@ -55,11 +57,11 @@ def compare(first, second, flags=()):
return 0 return 0
if any(isinstance(element, list) for element in first): if any(isinstance(element, list) for element in first):
return compare_fields(first, second, flags) return compare_fields(first, second, flags)
second = second[:] #We must use a copy of second because we remove items from it second = second[:] # We must use a copy of second because we remove items from it
match_similar = MATCH_SIMILAR_WORDS in flags match_similar = MATCH_SIMILAR_WORDS in flags
weight_words = WEIGHT_WORDS in flags weight_words = WEIGHT_WORDS in flags
joined = first + second joined = first + second
total_count = (sum(len(word) for word in joined) if weight_words else len(joined)) total_count = sum(len(word) for word in joined) if weight_words else len(joined)
match_count = 0 match_count = 0
in_order = True in_order = True
for word in first: for word in first:
@ -71,12 +73,13 @@ def compare(first, second, flags=()):
if second[0] != word: if second[0] != word:
in_order = False in_order = False
second.remove(word) second.remove(word)
match_count += (len(word) if weight_words else 1) match_count += len(word) if weight_words else 1
result = round(((match_count * 2) / total_count) * 100) result = round(((match_count * 2) / total_count) * 100)
if (result == 100) and (not in_order): if (result == 100) and (not in_order):
result = 99 # We cannot consider a match exact unless the ordering is the same result = 99 # We cannot consider a match exact unless the ordering is the same
return result return result
def compare_fields(first, second, flags=()): def compare_fields(first, second, flags=()):
"""Returns the score for the lowest matching :ref:`fields`. """Returns the score for the lowest matching :ref:`fields`.
@ -87,7 +90,7 @@ def compare_fields(first, second, flags=()):
return 0 return 0
if NO_FIELD_ORDER in flags: if NO_FIELD_ORDER in flags:
results = [] results = []
#We don't want to remove field directly in the list. We must work on a copy. # We don't want to remove field directly in the list. We must work on a copy.
second = second[:] second = second[:]
for field1 in first: for field1 in first:
max = 0 max = 0
@ -101,9 +104,12 @@ def compare_fields(first, second, flags=()):
if matched_field: if matched_field:
second.remove(matched_field) second.remove(matched_field)
else: else:
results = [compare(field1, field2, flags) for field1, field2 in zip(first, second)] results = [
compare(field1, field2, flags) for field1, field2 in zip(first, second)
]
return min(results) if results else 0 return min(results) if results else 0
def build_word_dict(objects, j=job.nulljob): def build_word_dict(objects, j=job.nulljob):
"""Returns a dict of objects mapped by their words. """Returns a dict of objects mapped by their words.
@ -113,11 +119,14 @@ def build_word_dict(objects, j=job.nulljob):
The result will be a dict with words as keys, lists of objects as values. The result will be a dict with words as keys, lists of objects as values.
""" """
result = defaultdict(set) result = defaultdict(set)
for object in j.iter_with_progress(objects, 'Prepared %d/%d files', JOB_REFRESH_RATE): for object in j.iter_with_progress(
objects, "Prepared %d/%d files", JOB_REFRESH_RATE
):
for word in unpack_fields(object.words): for word in unpack_fields(object.words):
result[word].add(object) result[word].add(object)
return result return result
def merge_similar_words(word_dict): def merge_similar_words(word_dict):
"""Take all keys in ``word_dict`` that are similar, and merge them together. """Take all keys in ``word_dict`` that are similar, and merge them together.
@ -126,7 +135,7 @@ def merge_similar_words(word_dict):
a word equal to the other. a word equal to the other.
""" """
keys = list(word_dict.keys()) keys = list(word_dict.keys())
keys.sort(key=len)# we want the shortest word to stay keys.sort(key=len) # we want the shortest word to stay
while keys: while keys:
key = keys.pop(0) key = keys.pop(0)
similars = difflib.get_close_matches(key, keys, 100, 0.8) similars = difflib.get_close_matches(key, keys, 100, 0.8)
@ -138,6 +147,7 @@ def merge_similar_words(word_dict):
del word_dict[similar] del word_dict[similar]
keys.remove(similar) keys.remove(similar)
def reduce_common_words(word_dict, threshold): def reduce_common_words(word_dict, threshold):
"""Remove all objects from ``word_dict`` values where the object count >= ``threshold`` """Remove all objects from ``word_dict`` values where the object count >= ``threshold``
@ -146,7 +156,9 @@ def reduce_common_words(word_dict, threshold):
The exception to this removal are the objects where all the words of the object are common. The exception to this removal are the objects where all the words of the object are common.
Because if we remove them, we will miss some duplicates! Because if we remove them, we will miss some duplicates!
""" """
uncommon_words = set(word for word, objects in word_dict.items() if len(objects) < threshold) uncommon_words = set(
word for word, objects in word_dict.items() if len(objects) < threshold
)
for word, objects in list(word_dict.items()): for word, objects in list(word_dict.items()):
if len(objects) < threshold: if len(objects) < threshold:
continue continue
@ -159,11 +171,13 @@ def reduce_common_words(word_dict, threshold):
else: else:
del word_dict[word] del word_dict[word]
# Writing docstrings in a namedtuple is tricky. From Python 3.3, it's possible to set __doc__, but # Writing docstrings in a namedtuple is tricky. From Python 3.3, it's possible to set __doc__, but
# some research allowed me to find a more elegant solution, which is what is done here. See # some research allowed me to find a more elegant solution, which is what is done here. See
# http://stackoverflow.com/questions/1606436/adding-docstrings-to-namedtuples-in-python # http://stackoverflow.com/questions/1606436/adding-docstrings-to-namedtuples-in-python
class Match(namedtuple('Match', 'first second percentage')):
class Match(namedtuple("Match", "first second percentage")):
"""Represents a match between two :class:`~core.fs.File`. """Represents a match between two :class:`~core.fs.File`.
Regarless of the matching method, when two files are determined to match, a Match pair is created, Regarless of the matching method, when two files are determined to match, a Match pair is created,
@ -182,16 +196,24 @@ class Match(namedtuple('Match', 'first second percentage')):
their match level according to the scan method which found the match. int from 1 to 100. For their match level according to the scan method which found the match. int from 1 to 100. For
exact scan methods, such as Contents scans, this will always be 100. exact scan methods, such as Contents scans, this will always be 100.
""" """
__slots__ = () __slots__ = ()
def get_match(first, second, flags=()): def get_match(first, second, flags=()):
#it is assumed here that first and second both have a "words" attribute # it is assumed here that first and second both have a "words" attribute
percentage = compare(first.words, second.words, flags) percentage = compare(first.words, second.words, flags)
return Match(first, second, percentage) return Match(first, second, percentage)
def getmatches( def getmatches(
objects, min_match_percentage=0, match_similar_words=False, weight_words=False, objects,
no_field_order=False, j=job.nulljob): min_match_percentage=0,
match_similar_words=False,
weight_words=False,
no_field_order=False,
j=job.nulljob,
):
"""Returns a list of :class:`Match` within ``objects`` after fuzzily matching their words. """Returns a list of :class:`Match` within ``objects`` after fuzzily matching their words.
:param objects: List of :class:`~core.fs.File` to match. :param objects: List of :class:`~core.fs.File` to match.
@ -206,7 +228,7 @@ def getmatches(
j = j.start_subjob(2) j = j.start_subjob(2)
sj = j.start_subjob(2) sj = j.start_subjob(2)
for o in objects: for o in objects:
if not hasattr(o, 'words'): if not hasattr(o, "words"):
o.words = getwords(o.name) o.words = getwords(o.name)
word_dict = build_word_dict(objects, sj) word_dict = build_word_dict(objects, sj)
reduce_common_words(word_dict, COMMON_WORD_THRESHOLD) reduce_common_words(word_dict, COMMON_WORD_THRESHOLD)
@ -241,11 +263,15 @@ def getmatches(
except MemoryError: except MemoryError:
# This is the place where the memory usage is at its peak during the scan. # This is the place where the memory usage is at its peak during the scan.
# Just continue the process with an incomplete list of matches. # Just continue the process with an incomplete list of matches.
del compared # This should give us enough room to call logging. del compared # This should give us enough room to call logging.
logging.warning('Memory Overflow. Matches: %d. Word dict: %d' % (len(result), len(word_dict))) logging.warning(
"Memory Overflow. Matches: %d. Word dict: %d"
% (len(result), len(word_dict))
)
return result return result
return result return result
def getmatches_by_contents(files, j=job.nulljob): def getmatches_by_contents(files, j=job.nulljob):
"""Returns a list of :class:`Match` within ``files`` if their contents is the same. """Returns a list of :class:`Match` within ``files`` if their contents is the same.
@ -263,13 +289,14 @@ def getmatches_by_contents(files, j=job.nulljob):
for group in possible_matches: for group in possible_matches:
for first, second in itertools.combinations(group, 2): for first, second in itertools.combinations(group, 2):
if first.is_ref and second.is_ref: if first.is_ref and second.is_ref:
continue # Don't spend time comparing two ref pics together. continue # Don't spend time comparing two ref pics together.
if first.md5partial == second.md5partial: if first.md5partial == second.md5partial:
if first.md5 == second.md5: if first.md5 == second.md5:
result.append(Match(first, second, 100)) result.append(Match(first, second, 100))
j.add_progress(desc=tr("%d matches found") % len(result)) j.add_progress(desc=tr("%d matches found") % len(result))
return result return result
class Group: class Group:
"""A group of :class:`~core.fs.File` that match together. """A group of :class:`~core.fs.File` that match together.
@ -297,7 +324,8 @@ class Group:
Average match percentage of match pairs containing :attr:`ref`. Average match percentage of match pairs containing :attr:`ref`.
""" """
#---Override
# ---Override
def __init__(self): def __init__(self):
self._clear() self._clear()
@ -313,7 +341,7 @@ class Group:
def __len__(self): def __len__(self):
return len(self.ordered) return len(self.ordered)
#---Private # ---Private
def _clear(self): def _clear(self):
self._percentage = None self._percentage = None
self._matches_for_ref = None self._matches_for_ref = None
@ -328,7 +356,7 @@ class Group:
self._matches_for_ref = [match for match in self.matches if ref in match] self._matches_for_ref = [match for match in self.matches if ref in match]
return self._matches_for_ref return self._matches_for_ref
#---Public # ---Public
def add_match(self, match): def add_match(self, match):
"""Adds ``match`` to internal match list and possibly add duplicates to the group. """Adds ``match`` to internal match list and possibly add duplicates to the group.
@ -339,6 +367,7 @@ class Group:
:param tuple match: pair of :class:`~core.fs.File` to add :param tuple match: pair of :class:`~core.fs.File` to add
""" """
def add_candidate(item, match): def add_candidate(item, match):
matches = self.candidates[item] matches = self.candidates[item]
matches.add(match) matches.add(match)
@ -362,7 +391,11 @@ class Group:
You can call this after the duplicate scanning process to free a bit of memory. You can call this after the duplicate scanning process to free a bit of memory.
""" """
discarded = set(m for m in self.matches if not all(obj in self.unordered for obj in [m.first, m.second])) discarded = set(
m
for m in self.matches
if not all(obj in self.unordered for obj in [m.first, m.second])
)
self.matches -= discarded self.matches -= discarded
self.candidates = defaultdict(set) self.candidates = defaultdict(set)
return discarded return discarded
@ -409,7 +442,9 @@ class Group:
self.unordered.remove(item) self.unordered.remove(item)
self._percentage = None self._percentage = None
self._matches_for_ref = None self._matches_for_ref = None
if (len(self) > 1) and any(not getattr(item, 'is_ref', False) for item in self): if (len(self) > 1) and any(
not getattr(item, "is_ref", False) for item in self
):
if discard_matches: if discard_matches:
self.matches = set(m for m in self.matches if item not in m) self.matches = set(m for m in self.matches if item not in m)
else: else:
@ -438,7 +473,9 @@ class Group:
if self._percentage is None: if self._percentage is None:
if self.dupes: if self.dupes:
matches = self._get_matches_for_ref() matches = self._get_matches_for_ref()
self._percentage = sum(match.percentage for match in matches) // len(matches) self._percentage = sum(match.percentage for match in matches) // len(
matches
)
else: else:
self._percentage = 0 self._percentage = 0
return self._percentage return self._percentage
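For example (values hypothetical), the integer average above means a ref matched at 100, 99 and 97 reports 98:

    matches = [100, 99, 97]              # hypothetical match percentages for the ref
    print(sum(matches) // len(matches))  # 98 (floor division, as in the code above)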
@ -485,7 +522,7 @@ def get_groups(matches):
del dupe2group del dupe2group
del matches del matches
# should free enough memory to continue # should free enough memory to continue
logging.warning('Memory Overflow. Groups: {0}'.format(len(groups))) logging.warning("Memory Overflow. Groups: {0}".format(len(groups)))
# Now that we have a group, we have to discard groups' matches and see if there are any "orphan" # Now that we have a group, we have to discard groups' matches and see if there are any "orphan"
# matches, that is, matches that were candidates in a group but neither of their two files was # matches, that is, matches that were candidates in a group but neither of their two files was
# accepted in the group. With these orphan matches, it's safe to build additional groups # accepted in the group. With these orphan matches, it's safe to build additional groups
@ -493,9 +530,12 @@ def get_groups(matches):
orphan_matches = [] orphan_matches = []
for group in groups: for group in groups:
orphan_matches += { orphan_matches += {
m for m in group.discard_matches() m
for m in group.discard_matches()
if not any(obj in matched_files for obj in [m.first, m.second]) if not any(obj in matched_files for obj in [m.first, m.second])
} }
if groups and orphan_matches: if groups and orphan_matches:
groups += get_groups(orphan_matches) # no job, as it isn't supposed to take a long time groups += get_groups(
orphan_matches
) # no job, as it isn't supposed to take a long time
return groups return groups
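A small model of the orphan rule (file names invented): a discarded match is orphaned only when neither of its files was accepted anywhere, and only those matches feed the recursive call.

    matched_files = {"A", "B"}               # files accepted into some group
    discarded = [("A", "C"), ("C", "D")]     # matches dropped while building it
    orphan_matches = [
        m for m in discarded
        if not any(obj in matched_files for obj in [m[0], m[1]])
    ]
    print(orphan_matches)  # [('C', 'D')] -- safe input for a second pass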
View File
@ -114,36 +114,42 @@ ROW_TEMPLATE = """
CELL_TEMPLATE = """<td>{value}</td>""" CELL_TEMPLATE = """<td>{value}</td>"""
def export_to_xhtml(colnames, rows): def export_to_xhtml(colnames, rows):
# a row is a list of values with the first value being a flag indicating if the row should be indented # a row is a list of values with the first value being a flag indicating if the row should be indented
if rows: if rows:
assert len(rows[0]) == len(colnames) + 1 # + 1 is for the "indented" flag assert len(rows[0]) == len(colnames) + 1 # + 1 is for the "indented" flag
colheaders = ''.join(COLHEADERS_TEMPLATE.format(name=name) for name in colnames) colheaders = "".join(COLHEADERS_TEMPLATE.format(name=name) for name in colnames)
rendered_rows = [] rendered_rows = []
previous_group_id = None previous_group_id = None
for row in rows: for row in rows:
# [2:] is to remove the indented flag + filename # [2:] is to remove the indented flag + filename
if row[0] != previous_group_id: if row[0] != previous_group_id:
# We've just changed dupe group, which means that this dupe is a ref. We don't indent it. # We've just changed dupe group, which means that this dupe is a ref. We don't indent it.
indented = '' indented = ""
else: else:
indented = 'indented' indented = "indented"
filename = row[1] filename = row[1]
cells = ''.join(CELL_TEMPLATE.format(value=value) for value in row[2:]) cells = "".join(CELL_TEMPLATE.format(value=value) for value in row[2:])
rendered_rows.append(ROW_TEMPLATE.format(indented=indented, filename=filename, cells=cells)) rendered_rows.append(
ROW_TEMPLATE.format(indented=indented, filename=filename, cells=cells)
)
previous_group_id = row[0] previous_group_id = row[0]
rendered_rows = ''.join(rendered_rows) rendered_rows = "".join(rendered_rows)
# The main template can't use format because the css code uses {} # The main template can't use format because the css code uses {}
content = MAIN_TEMPLATE.replace('$colheaders', colheaders).replace('$rows', rendered_rows) content = MAIN_TEMPLATE.replace("$colheaders", colheaders).replace(
"$rows", rendered_rows
)
folder = mkdtemp() folder = mkdtemp()
destpath = op.join(folder, 'export.htm') destpath = op.join(folder, "export.htm")
fp = open(destpath, 'wt', encoding='utf-8') fp = open(destpath, "wt", encoding="utf-8")
fp.write(content) fp.write(content)
fp.close() fp.close()
return destpath return destpath
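A hedged usage sketch of export_to_xhtml (column names and rows invented; assumes the function is importable from this module): each row is (group_id, filename, *cell values), and a repeated group id marks the row as indented.

    colnames = ["Filename", "Size"]
    rows = [
        (1, "photo.jpg", "2 MB"),         # first row of its group: the ref, not indented
        (1, "photo (copy).jpg", "2 MB"),  # same group id as above: indented
        (2, "song.mp3", "5 MB"),          # new group id: not indented
    ]
    print(export_to_xhtml(colnames, rows))  # path to a temporary export.htm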
def export_to_csv(dest, colnames, rows): def export_to_csv(dest, colnames, rows):
writer = csv.writer(open(dest, 'wt', encoding='utf-8')) writer = csv.writer(open(dest, "wt", encoding="utf-8"))
writer.writerow(["Group ID"] + colnames) writer.writerow(["Group ID"] + colnames)
for row in rows: for row in rows:
writer.writerow(row) writer.writerow(row)
View File
@ -17,19 +17,20 @@ import logging
from hscommon.util import nonone, get_file_ext from hscommon.util import nonone, get_file_ext
__all__ = [ __all__ = [
'File', "File",
'Folder', "Folder",
'get_file', "get_file",
'get_files', "get_files",
'FSError', "FSError",
'AlreadyExistsError', "AlreadyExistsError",
'InvalidPath', "InvalidPath",
'InvalidDestinationError', "InvalidDestinationError",
'OperationError', "OperationError",
] ]
NOT_SET = object() NOT_SET = object()
class FSError(Exception): class FSError(Exception):
cls_message = "An error has occured on '{name}' in '{parent}'" cls_message = "An error has occured on '{name}' in '{parent}'"
@ -40,8 +41,8 @@ class FSError(Exception):
elif isinstance(fsobject, File): elif isinstance(fsobject, File):
name = fsobject.name name = fsobject.name
else: else:
name = '' name = ""
parentname = str(parent) if parent is not None else '' parentname = str(parent) if parent is not None else ""
Exception.__init__(self, message.format(name=name, parent=parentname)) Exception.__init__(self, message.format(name=name, parent=parentname))
@ -49,32 +50,39 @@ class AlreadyExistsError(FSError):
"The directory or file name we're trying to add already exists" "The directory or file name we're trying to add already exists"
cls_message = "'{name}' already exists in '{parent}'" cls_message = "'{name}' already exists in '{parent}'"
class InvalidPath(FSError): class InvalidPath(FSError):
"The path of self is invalid, and cannot be worked with." "The path of self is invalid, and cannot be worked with."
cls_message = "'{name}' is invalid." cls_message = "'{name}' is invalid."
class InvalidDestinationError(FSError): class InvalidDestinationError(FSError):
"""A copy/move operation has been called, but the destination is invalid.""" """A copy/move operation has been called, but the destination is invalid."""
cls_message = "'{name}' is an invalid destination for this operation." cls_message = "'{name}' is an invalid destination for this operation."
class OperationError(FSError): class OperationError(FSError):
"""A copy/move/delete operation has been called, but the checkup after the """A copy/move/delete operation has been called, but the checkup after the
operation shows that it didn't work.""" operation shows that it didn't work."""
cls_message = "Operation on '{name}' failed." cls_message = "Operation on '{name}' failed."
class File: class File:
"""Represents a file and holds metadata to be used for scanning. """Represents a file and holds metadata to be used for scanning.
""" """
INITIAL_INFO = { INITIAL_INFO = {
'size': 0, "size": 0,
'mtime': 0, "mtime": 0,
'md5': '', "md5": "",
'md5partial': '', "md5partial": "",
} }
# Slots for File make us save quite a bit of memory. In a memory test I've made with a lot of # Slots for File make us save quite a bit of memory. In a memory test I've made with a lot of
# files, I saved 35% memory usage with "unread" files (no _read_info() call) and gains become # files, I saved 35% memory usage with "unread" files (no _read_info() call) and gains become
# even greater when we take into account read attributes (70%!). Yeah, it's worth it. # even greater when we take into account read attributes (70%!). Yeah, it's worth it.
__slots__ = ('path', 'is_ref', 'words') + tuple(INITIAL_INFO.keys()) __slots__ = ("path", "is_ref", "words") + tuple(INITIAL_INFO.keys())
def __init__(self, path): def __init__(self, path):
self.path = path self.path = path
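The memory claim in the slots comment above comes from dropping the per-instance __dict__; a quick self-contained check (exact numbers vary by Python version):

    import sys

    class WithDict:
        def __init__(self):
            self.a = 1

    class WithSlots:
        __slots__ = ("a",)
        def __init__(self):
            self.a = 1

    print(sys.getsizeof(WithDict()) + sys.getsizeof(WithDict().__dict__))  # object + dict
    print(sys.getsizeof(WithSlots()))  # object only: no __dict__ allocated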
@ -90,25 +98,27 @@ class File:
try: try:
self._read_info(attrname) self._read_info(attrname)
except Exception as e: except Exception as e:
logging.warning("An error '%s' was raised while decoding '%s'", e, repr(self.path)) logging.warning(
"An error '%s' was raised while decoding '%s'", e, repr(self.path)
)
result = object.__getattribute__(self, attrname) result = object.__getattribute__(self, attrname)
if result is NOT_SET: if result is NOT_SET:
result = self.INITIAL_INFO[attrname] result = self.INITIAL_INFO[attrname]
return result return result
#This offset is where we should start reading the file to get a partial md5 # This offset is where we should start reading the file to get a partial md5
#For audio file, it should be where audio data starts # For audio file, it should be where audio data starts
def _get_md5partial_offset_and_size(self): def _get_md5partial_offset_and_size(self):
return (0x4000, 0x4000) #16KB return (0x4000, 0x4000) # 16KB
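A minimal sketch of the contract above (the helper name is ours, not the module's): skip offset bytes, then hash size bytes, so for audio files the header can be jumped over.

    import hashlib

    def partial_md5(path, offset=0x4000, size=0x4000):
        # Hypothetical helper: hash 16 KiB starting 16 KiB into the file.
        with open(path, "rb") as fp:
            fp.seek(offset)
            return hashlib.md5(fp.read(size)).digest()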
def _read_info(self, field): def _read_info(self, field):
if field in ('size', 'mtime'): if field in ("size", "mtime"):
stats = self.path.stat() stats = self.path.stat()
self.size = nonone(stats.st_size, 0) self.size = nonone(stats.st_size, 0)
self.mtime = nonone(stats.st_mtime, 0) self.mtime = nonone(stats.st_mtime, 0)
elif field == 'md5partial': elif field == "md5partial":
try: try:
fp = self.path.open('rb') fp = self.path.open("rb")
offset, size = self._get_md5partial_offset_and_size() offset, size = self._get_md5partial_offset_and_size()
fp.seek(offset) fp.seek(offset)
partialdata = fp.read(size) partialdata = fp.read(size)
@ -117,14 +127,14 @@ class File:
fp.close() fp.close()
except Exception: except Exception:
pass pass
elif field == 'md5': elif field == "md5":
try: try:
fp = self.path.open('rb') fp = self.path.open("rb")
md5 = hashlib.md5() md5 = hashlib.md5()
# The goal here is to not run out of memory on really big files. However, the chunk # The goal here is to not run out of memory on really big files. However, the chunk
# size has to be large enough so that the python loop isn't too costly in terms of # size has to be large enough so that the python loop isn't too costly in terms of
# CPU. # CPU.
CHUNK_SIZE = 1024 * 1024 # 1 MB CHUNK_SIZE = 1024 * 1024 # 1 MB
filedata = fp.read(CHUNK_SIZE) filedata = fp.read(CHUNK_SIZE)
while filedata: while filedata:
md5.update(filedata) md5.update(filedata)
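The same chunked-hashing idea as a standalone sketch (function name is ours): memory stays bounded at one chunk regardless of file size.

    import hashlib

    def file_md5(path, chunk_size=1024 * 1024):
        md5 = hashlib.md5()
        with open(path, "rb") as fp:
            while True:
                chunk = fp.read(chunk_size)  # never more than 1 MB in memory
                if not chunk:
                    break
                md5.update(chunk)
        return md5.digest()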
@ -144,7 +154,7 @@ class File:
for attrname in attrnames: for attrname in attrnames:
getattr(self, attrname) getattr(self, attrname)
#--- Public # --- Public
@classmethod @classmethod
def can_handle(cls, path): def can_handle(cls, path):
"""Returns whether this file wrapper class can handle ``path``. """Returns whether this file wrapper class can handle ``path``.
@ -170,7 +180,7 @@ class File:
""" """
raise NotImplementedError() raise NotImplementedError()
#--- Properties # --- Properties
@property @property
def extension(self): def extension(self):
return get_file_ext(self.name) return get_file_ext(self.name)
@ -189,7 +199,8 @@ class Folder(File):
It has the size/md5 info of a File, but its values are the sum of its subitems. It has the size/md5 info of a File, but its values are the sum of its subitems.
""" """
__slots__ = File.__slots__ + ('_subfolders', )
__slots__ = File.__slots__ + ("_subfolders",)
def __init__(self, path): def __init__(self, path):
File.__init__(self, path) File.__init__(self, path)
@ -201,12 +212,12 @@ class Folder(File):
return folders + files return folders + files
def _read_info(self, field): def _read_info(self, field):
if field in {'size', 'mtime'}: if field in {"size", "mtime"}:
size = sum((f.size for f in self._all_items()), 0) size = sum((f.size for f in self._all_items()), 0)
self.size = size self.size = size
stats = self.path.stat() stats = self.path.stat()
self.mtime = nonone(stats.st_mtime, 0) self.mtime = nonone(stats.st_mtime, 0)
elif field in {'md5', 'md5partial'}: elif field in {"md5", "md5partial"}:
# What's sensitive here is that we must make sure that subfiles' # What's sensitive here is that we must make sure that subfiles'
# md5 are always added up in the same order, but we also want a # md5 are always added up in the same order, but we also want a
# different md5 if a file gets moved in a different subdirectory. # different md5 if a file gets moved in a different subdirectory.
@ -214,7 +225,7 @@ class Folder(File):
items = self._all_items() items = self._all_items()
items.sort(key=lambda f: f.path) items.sort(key=lambda f: f.path)
md5s = [getattr(f, field) for f in items] md5s = [getattr(f, field) for f in items]
return b''.join(md5s) return b"".join(md5s)
md5 = hashlib.md5(get_dir_md5_concat()) md5 = hashlib.md5(get_dir_md5_concat())
digest = md5.digest() digest = md5.digest()
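A toy demonstration of the ordering guarantee described above (paths and digests invented; this simplified model skips the recursion into subfolders that the real code performs): sorting by path makes the concatenated digest deterministic, yet moving a file to another subdirectory still changes it.

    import hashlib

    def folder_digest(files):
        # files: hypothetical mapping of path -> that file's md5 digest.
        md5s = [files[path] for path in sorted(files)]
        return hashlib.md5(b"".join(md5s)).hexdigest()

    a = {"a/1": b"d1", "b/2": b"d2"}
    b = {"b/2": b"d2", "a/1": b"d1"}   # same files, listed in another order
    c = {"c/1": b"d1", "b/2": b"d2"}   # file "1" moved from a/ to c/
    print(folder_digest(a) == folder_digest(b))  # True: sorting by path fixes the order
    print(folder_digest(a) == folder_digest(c))  # False: the move reorders the digests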
@ -223,7 +234,9 @@ class Folder(File):
@property @property
def subfolders(self): def subfolders(self):
if self._subfolders is None: if self._subfolders is None:
subfolders = [p for p in self.path.listdir() if not p.islink() and p.isdir()] subfolders = [
p for p in self.path.listdir() if not p.islink() and p.isdir()
]
self._subfolders = [self.__class__(p) for p in subfolders] self._subfolders = [self.__class__(p) for p in subfolders]
return self._subfolders return self._subfolders
@ -244,6 +257,7 @@ def get_file(path, fileclasses=[File]):
if fileclass.can_handle(path): if fileclass.can_handle(path):
return fileclass(path) return fileclass(path)
def get_files(path, fileclasses=[File]): def get_files(path, fileclasses=[File]):
"""Returns a list of :class:`File` for each file contained in ``path``. """Returns a list of :class:`File` for each file contained in ``path``.
View File
@ -13,4 +13,3 @@ blue, which is supposed to be orange, does the sorting logic, holds selection, e
.. _cross-toolkit: http://www.hardcoded.net/articles/cross-toolkit-software .. _cross-toolkit: http://www.hardcoded.net/articles/cross-toolkit-software
""" """
View File
@ -8,6 +8,7 @@
from hscommon.notify import Listener from hscommon.notify import Listener
class DupeGuruGUIObject(Listener): class DupeGuruGUIObject(Listener):
def __init__(self, app): def __init__(self, app):
Listener.__init__(self, app) Listener.__init__(self, app)
@ -27,4 +28,3 @@ class DupeGuruGUIObject(Listener):
def results_changed_but_keep_selection(self): def results_changed_but_keep_selection(self):
pass pass
View File
@ -1,8 +1,8 @@
# Created On: 2012-05-30 # Created On: 2012-05-30
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import os import os
@ -10,42 +10,46 @@ import os
from hscommon.gui.base import GUIObject from hscommon.gui.base import GUIObject
from hscommon.trans import tr from hscommon.trans import tr
class DeletionOptionsView: class DeletionOptionsView:
"""Expected interface for :class:`DeletionOptions`'s view. """Expected interface for :class:`DeletionOptions`'s view.
*Not actually used in the code. For documentation purposes only.* *Not actually used in the code. For documentation purposes only.*
Our view presents the user with an appropriate way (probably a mix of checkboxes and radio Our view presents the user with an appropriate way (probably a mix of checkboxes and radio
buttons) to set the different flags in :class:`DeletionOptions`. Note that buttons) to set the different flags in :class:`DeletionOptions`. Note that
:attr:`DeletionOptions.use_hardlinks` is only relevant if :attr:`DeletionOptions.link_deleted` :attr:`DeletionOptions.use_hardlinks` is only relevant if :attr:`DeletionOptions.link_deleted`
is true. This is why we toggle the "enabled" state of that flag. is true. This is why we toggle the "enabled" state of that flag.
We expect the view to set :attr:`DeletionOptions.link_deleted` immediately as the user changes We expect the view to set :attr:`DeletionOptions.link_deleted` immediately as the user changes
its value because it will toggle :meth:`set_hardlink_option_enabled`. its value because it will toggle :meth:`set_hardlink_option_enabled`.
Other than the flags, there's also a prompt message which has a dynamic content, defined by Other than the flags, there's also a prompt message which has a dynamic content, defined by
:meth:`update_msg`. :meth:`update_msg`.
""" """
def update_msg(self, msg: str): def update_msg(self, msg: str):
"""Update the dialog's prompt with ``str``. """Update the dialog's prompt with ``str``.
""" """
def show(self): def show(self):
"""Show the dialog in a modal fashion. """Show the dialog in a modal fashion.
Returns whether the dialog was "accepted" (the user pressed OK). Returns whether the dialog was "accepted" (the user pressed OK).
""" """
def set_hardlink_option_enabled(self, is_enabled: bool): def set_hardlink_option_enabled(self, is_enabled: bool):
"""Enable or disable the widget controlling :attr:`DeletionOptions.use_hardlinks`. """Enable or disable the widget controlling :attr:`DeletionOptions.use_hardlinks`.
""" """
class DeletionOptions(GUIObject): class DeletionOptions(GUIObject):
"""Present the user with deletion options before proceeding. """Present the user with deletion options before proceeding.
When the user activates "Send to trash", we present him with a couple of options that change When the user activates "Send to trash", we present him with a couple of options that change
the behavior of that deletion operation. the behavior of that deletion operation.
""" """
def __init__(self): def __init__(self):
GUIObject.__init__(self) GUIObject.__init__(self)
#: Whether symlinks or hardlinks are used when doing :attr:`link_deleted`. #: Whether symlinks or hardlinks are used when doing :attr:`link_deleted`.
@ -54,10 +58,10 @@ class DeletionOptions(GUIObject):
#: Delete dupes directly and don't send to trash. #: Delete dupes directly and don't send to trash.
#: *bool*. *get/set* #: *bool*. *get/set*
self.direct = False self.direct = False
def show(self, mark_count): def show(self, mark_count):
"""Prompt the user with a modal dialog offering our deletion options. """Prompt the user with a modal dialog offering our deletion options.
:param int mark_count: Number of dupes marked for deletion. :param int mark_count: Number of dupes marked for deletion.
:rtype: bool :rtype: bool
:returns: Whether the user accepted the dialog (we cancel deletion if false). :returns: Whether the user accepted the dialog (we cancel deletion if false).
@ -69,7 +73,7 @@ class DeletionOptions(GUIObject):
msg = tr("You are sending {} file(s) to the Trash.").format(mark_count) msg = tr("You are sending {} file(s) to the Trash.").format(mark_count)
self.view.update_msg(msg) self.view.update_msg(msg)
return self.view.show() return self.view.show()
def supports_links(self): def supports_links(self):
"""Returns whether our platform supports symlinks. """Returns whether our platform supports symlinks.
""" """
@ -87,21 +91,19 @@ class DeletionOptions(GUIObject):
except TypeError: except TypeError:
# wrong number of arguments # wrong number of arguments
return True return True
@property @property
def link_deleted(self): def link_deleted(self):
"""Replace deleted dupes with symlinks (or hardlinks) to the dupe group reference. """Replace deleted dupes with symlinks (or hardlinks) to the dupe group reference.
*bool*. *get/set* *bool*. *get/set*
Whether the link is a symlink or hardlink is decided by :attr:`use_hardlinks`. Whether the link is a symlink or hardlink is decided by :attr:`use_hardlinks`.
""" """
return self._link_deleted return self._link_deleted
@link_deleted.setter @link_deleted.setter
def link_deleted(self, value): def link_deleted(self, value):
self._link_deleted = value self._link_deleted = value
hardlinks_enabled = value and self.supports_links() hardlinks_enabled = value and self.supports_links()
self.view.set_hardlink_option_enabled(hardlinks_enabled) self.view.set_hardlink_option_enabled(hardlinks_enabled)
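A short usage sketch tying the setter to the view callback (assumes the console stub above and a platform with symlink support; real view binding goes through the cross-toolkit GUI layer):

    options = DeletionOptions()
    options.view = ConsoleDeletionOptionsView()   # the hypothetical stub above
    options.link_deleted = True    # prints: hardlink option enabled: True
    options.link_deleted = False   # prints: hardlink option enabled: False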
View File
@ -9,6 +9,7 @@
from hscommon.gui.base import GUIObject from hscommon.gui.base import GUIObject
from .base import DupeGuruGUIObject from .base import DupeGuruGUIObject
class DetailsPanel(GUIObject, DupeGuruGUIObject): class DetailsPanel(GUIObject, DupeGuruGUIObject):
def __init__(self, app): def __init__(self, app):
GUIObject.__init__(self, multibind=True) GUIObject.__init__(self, multibind=True)
@ -19,7 +20,7 @@ class DetailsPanel(GUIObject, DupeGuruGUIObject):
self._refresh() self._refresh()
self.view.refresh() self.view.refresh()
#--- Private # --- Private
def _refresh(self): def _refresh(self):
if self.app.selected_dupes: if self.app.selected_dupes:
dupe = self.app.selected_dupes[0] dupe = self.app.selected_dupes[0]
@ -31,18 +32,19 @@ class DetailsPanel(GUIObject, DupeGuruGUIObject):
# we don't want the two sides of the table to display the stats for the same file # we don't want the two sides of the table to display the stats for the same file
ref = group.ref if group is not None and group.ref is not dupe else None ref = group.ref if group is not None and group.ref is not dupe else None
data2 = self.app.get_display_info(ref, group, False) data2 = self.app.get_display_info(ref, group, False)
columns = self.app.result_table.COLUMNS[1:] # first column is the 'marked' column columns = self.app.result_table.COLUMNS[
1:
] # first column is the 'marked' column
self._table = [(c.display, data1[c.name], data2[c.name]) for c in columns] self._table = [(c.display, data1[c.name], data2[c.name]) for c in columns]
#--- Public # --- Public
def row_count(self): def row_count(self):
return len(self._table) return len(self._table)
def row(self, row_index): def row(self, row_index):
return self._table[row_index] return self._table[row_index]
#--- Event Handlers # --- Event Handlers
def dupes_selected(self): def dupes_selected(self):
self._refresh() self._refresh()
self.view.refresh() self.view.refresh()
View File
@ -1,9 +1,9 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-02-06 # Created On: 2010-02-06
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.gui.tree import Tree, Node from hscommon.gui.tree import Tree, Node
@ -13,6 +13,7 @@ from .base import DupeGuruGUIObject
STATE_ORDER = [DirectoryState.Normal, DirectoryState.Reference, DirectoryState.Excluded] STATE_ORDER = [DirectoryState.Normal, DirectoryState.Reference, DirectoryState.Excluded]
# Lazily loads children # Lazily loads children
class DirectoryNode(Node): class DirectoryNode(Node):
def __init__(self, tree, path, name): def __init__(self, tree, path, name):
@ -21,29 +22,31 @@ class DirectoryNode(Node):
self._directory_path = path self._directory_path = path
self._loaded = False self._loaded = False
self._state = STATE_ORDER.index(self._tree.app.directories.get_state(path)) self._state = STATE_ORDER.index(self._tree.app.directories.get_state(path))
def __len__(self): def __len__(self):
if not self._loaded: if not self._loaded:
self._load() self._load()
return Node.__len__(self) return Node.__len__(self)
def _load(self): def _load(self):
self.clear() self.clear()
subpaths = self._tree.app.directories.get_subfolders(self._directory_path) subpaths = self._tree.app.directories.get_subfolders(self._directory_path)
for path in subpaths: for path in subpaths:
self.append(DirectoryNode(self._tree, path, path.name)) self.append(DirectoryNode(self._tree, path, path.name))
self._loaded = True self._loaded = True
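The lazy-children pattern used by DirectoryNode, reduced to a self-contained sketch (names are ours): nothing is loaded until the node's length is first asked for.

    class LazyNode(list):
        def __init__(self, load_children):
            super().__init__()
            self._load_children = load_children
            self._loaded = False

        def __len__(self):
            if not self._loaded:               # first access triggers the load
                self.extend(self._load_children())
                self._loaded = True
            return super().__len__()

    node = LazyNode(lambda: ["child1", "child2"])
    print(len(node))  # 2 -- children materialized only now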
def update_all_states(self): def update_all_states(self):
self._state = STATE_ORDER.index(self._tree.app.directories.get_state(self._directory_path)) self._state = STATE_ORDER.index(
self._tree.app.directories.get_state(self._directory_path)
)
for node in self: for node in self:
node.update_all_states() node.update_all_states()
# The state property is an index into the combobox # The state property is an index into the combobox
@property @property
def state(self): def state(self):
return self._state return self._state
@state.setter @state.setter
def state(self, value): def state(self, value):
if value == self._state: if value == self._state:
@ -52,29 +55,29 @@ class DirectoryNode(Node):
state = STATE_ORDER[value] state = STATE_ORDER[value]
self._tree.app.directories.set_state(self._directory_path, state) self._tree.app.directories.set_state(self._directory_path, state)
self._tree.update_all_states() self._tree.update_all_states()
class DirectoryTree(Tree, DupeGuruGUIObject): class DirectoryTree(Tree, DupeGuruGUIObject):
#--- model -> view calls: # --- model -> view calls:
# refresh() # refresh()
# refresh_states() # when only state labels need to be refreshed # refresh_states() # when only state labels need to be refreshed
# #
def __init__(self, app): def __init__(self, app):
Tree.__init__(self) Tree.__init__(self)
DupeGuruGUIObject.__init__(self, app) DupeGuruGUIObject.__init__(self, app)
def _view_updated(self): def _view_updated(self):
self._refresh() self._refresh()
self.view.refresh() self.view.refresh()
def _refresh(self): def _refresh(self):
self.clear() self.clear()
for path in self.app.directories: for path in self.app.directories:
self.append(DirectoryNode(self, path, str(path))) self.append(DirectoryNode(self, path, str(path)))
def add_directory(self, path): def add_directory(self, path):
self.app.add_directory(path) self.app.add_directory(path)
def remove_selected(self): def remove_selected(self):
selected_paths = self.selected_paths selected_paths = self.selected_paths
if not selected_paths: if not selected_paths:
@ -90,18 +93,17 @@ class DirectoryTree(Tree, DupeGuruGUIObject):
newstate = DirectoryState.Normal newstate = DirectoryState.Normal
for node in nodes: for node in nodes:
node.state = newstate node.state = newstate
def select_all(self): def select_all(self):
self.selected_nodes = list(self) self.selected_nodes = list(self)
self.view.refresh() self.view.refresh()
def update_all_states(self): def update_all_states(self):
for node in self: for node in self:
node.update_all_states() node.update_all_states()
self.view.refresh_states() self.view.refresh_states()
#--- Event Handlers # --- Event Handlers
def directories_changed(self): def directories_changed(self):
self._refresh() self._refresh()
self.view.refresh() self.view.refresh()
View File
@ -8,8 +8,9 @@
from hscommon.trans import tr from hscommon.trans import tr
from .ignore_list_table import IgnoreListTable from .ignore_list_table import IgnoreListTable
class IgnoreListDialog: class IgnoreListDialog:
#--- View interface # --- View interface
# show() # show()
# #
@ -21,7 +22,9 @@ class IgnoreListDialog:
def clear(self): def clear(self):
if not self.ignore_list: if not self.ignore_list:
return return
msg = tr("Do you really want to remove all %d items from the ignore list?") % len(self.ignore_list) msg = tr(
"Do you really want to remove all %d items from the ignore list?"
) % len(self.ignore_list)
if self.app.view.ask_yes_no(msg): if self.app.view.ask_yes_no(msg):
self.ignore_list.Clear() self.ignore_list.Clear()
self.refresh() self.refresh()
@ -36,4 +39,3 @@ class IgnoreListDialog:
def show(self): def show(self):
self.view.show() self.view.show()
View File
@ -1,35 +1,36 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2012-03-13 # Created On: 2012-03-13
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.gui.table import GUITable, Row from hscommon.gui.table import GUITable, Row
from hscommon.gui.column import Column, Columns from hscommon.gui.column import Column, Columns
from hscommon.trans import trget from hscommon.trans import trget
coltr = trget('columns') coltr = trget("columns")
class IgnoreListTable(GUITable): class IgnoreListTable(GUITable):
COLUMNS = [ COLUMNS = [
# the str concat below saves us needless localization. # the str concat below saves us needless localization.
Column('path1', coltr("File Path") + " 1"), Column("path1", coltr("File Path") + " 1"),
Column('path2', coltr("File Path") + " 2"), Column("path2", coltr("File Path") + " 2"),
] ]
def __init__(self, ignore_list_dialog): def __init__(self, ignore_list_dialog):
GUITable.__init__(self) GUITable.__init__(self)
self.columns = Columns(self) self.columns = Columns(self)
self.view = None self.view = None
self.dialog = ignore_list_dialog self.dialog = ignore_list_dialog
#--- Override # --- Override
def _fill(self): def _fill(self):
for path1, path2 in self.dialog.ignore_list: for path1, path2 in self.dialog.ignore_list:
self.append(IgnoreListRow(self, path1, path2)) self.append(IgnoreListRow(self, path1, path2))
class IgnoreListRow(Row): class IgnoreListRow(Row):
def __init__(self, table, path1, path2): def __init__(self, table, path1, path2):
@ -38,4 +39,3 @@ class IgnoreListRow(Row):
self.path2_original = path2 self.path2_original = path2
self.path1 = str(path1) self.path1 = str(path1)
self.path2 = str(path2) self.path2 = str(path2)
View File
@ -9,6 +9,7 @@
from hscommon.gui.base import GUIObject from hscommon.gui.base import GUIObject
from hscommon.gui.selectable_list import GUISelectableList from hscommon.gui.selectable_list import GUISelectableList
class CriterionCategoryList(GUISelectableList): class CriterionCategoryList(GUISelectableList):
def __init__(self, dialog): def __init__(self, dialog):
self.dialog = dialog self.dialog = dialog
@ -18,6 +19,7 @@ class CriterionCategoryList(GUISelectableList):
self.dialog.select_category(self.dialog.categories[self.selected_index]) self.dialog.select_category(self.dialog.categories[self.selected_index])
GUISelectableList._update_selection(self) GUISelectableList._update_selection(self)
class PrioritizationList(GUISelectableList): class PrioritizationList(GUISelectableList):
def __init__(self, dialog): def __init__(self, dialog):
self.dialog = dialog self.dialog = dialog
@ -41,6 +43,7 @@ class PrioritizationList(GUISelectableList):
del prilist[i] del prilist[i]
self._refresh_contents() self._refresh_contents()
class PrioritizeDialog(GUIObject): class PrioritizeDialog(GUIObject):
def __init__(self, app): def __init__(self, app):
GUIObject.__init__(self) GUIObject.__init__(self)
@ -52,15 +55,15 @@ class PrioritizeDialog(GUIObject):
self.prioritizations = [] self.prioritizations = []
self.prioritization_list = PrioritizationList(self) self.prioritization_list = PrioritizationList(self)
#--- Override # --- Override
def _view_updated(self): def _view_updated(self):
self.category_list.select(0) self.category_list.select(0)
#--- Private # --- Private
def _sort_key(self, dupe): def _sort_key(self, dupe):
return tuple(crit.sort_key(dupe) for crit in self.prioritizations) return tuple(crit.sort_key(dupe) for crit in self.prioritizations)
#--- Public # --- Public
def select_category(self, category): def select_category(self, category):
self.criteria = category.criteria_list() self.criteria = category.criteria_list()
self.criteria_list[:] = [c.display_value for c in self.criteria] self.criteria_list[:] = [c.display_value for c in self.criteria]
View File
@ -1,29 +1,29 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-04-12 # Created On: 2010-04-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from hscommon import desktop from hscommon import desktop
from .problem_table import ProblemTable from .problem_table import ProblemTable
class ProblemDialog: class ProblemDialog:
def __init__(self, app): def __init__(self, app):
self.app = app self.app = app
self._selected_dupe = None self._selected_dupe = None
self.problem_table = ProblemTable(self) self.problem_table = ProblemTable(self)
def refresh(self): def refresh(self):
self._selected_dupe = None self._selected_dupe = None
self.problem_table.refresh() self.problem_table.refresh()
def reveal_selected_dupe(self): def reveal_selected_dupe(self):
if self._selected_dupe is not None: if self._selected_dupe is not None:
desktop.reveal_path(self._selected_dupe.path) desktop.reveal_path(self._selected_dupe.path)
def select_dupe(self, dupe): def select_dupe(self, dupe):
self._selected_dupe = dupe self._selected_dupe = dupe
View File
@ -1,39 +1,40 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-04-12 # Created On: 2010-04-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.gui.table import GUITable, Row from hscommon.gui.table import GUITable, Row
from hscommon.gui.column import Column, Columns from hscommon.gui.column import Column, Columns
from hscommon.trans import trget from hscommon.trans import trget
coltr = trget('columns') coltr = trget("columns")
class ProblemTable(GUITable): class ProblemTable(GUITable):
COLUMNS = [ COLUMNS = [
Column('path', coltr("File Path")), Column("path", coltr("File Path")),
Column('msg', coltr("Error Message")), Column("msg", coltr("Error Message")),
] ]
def __init__(self, problem_dialog): def __init__(self, problem_dialog):
GUITable.__init__(self) GUITable.__init__(self)
self.columns = Columns(self) self.columns = Columns(self)
self.dialog = problem_dialog self.dialog = problem_dialog
#--- Override # --- Override
def _update_selection(self): def _update_selection(self):
row = self.selected_row row = self.selected_row
dupe = row.dupe if row is not None else None dupe = row.dupe if row is not None else None
self.dialog.select_dupe(dupe) self.dialog.select_dupe(dupe)
def _fill(self): def _fill(self):
problems = self.dialog.app.results.problems problems = self.dialog.app.results.problems
for dupe, msg in problems: for dupe, msg in problems:
self.append(ProblemRow(self, dupe, msg)) self.append(ProblemRow(self, dupe, msg))
class ProblemRow(Row): class ProblemRow(Row):
def __init__(self, table, dupe, msg): def __init__(self, table, dupe, msg):
@ -41,4 +42,3 @@ class ProblemRow(Row):
self.dupe = dupe self.dupe = dupe
self.msg = msg self.msg = msg
self.path = str(dupe.path) self.path = str(dupe.path)
View File
@ -1,9 +1,9 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-02-11 # Created On: 2010-02-11
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from operator import attrgetter from operator import attrgetter
@ -13,6 +13,7 @@ from hscommon.gui.column import Columns
from .base import DupeGuruGUIObject from .base import DupeGuruGUIObject
class DupeRow(Row): class DupeRow(Row):
def __init__(self, table, group, dupe): def __init__(self, table, group, dupe):
Row.__init__(self, table) Row.__init__(self, table)
@ -22,14 +23,14 @@ class DupeRow(Row):
self._data = None self._data = None
self._data_delta = None self._data_delta = None
self._delta_columns = None self._delta_columns = None
def is_cell_delta(self, column_name): def is_cell_delta(self, column_name):
"""Returns whether a cell is in delta mode (orange color). """Returns whether a cell is in delta mode (orange color).
If the result table is in delta mode, returns True if the column is one of the "delta If the result table is in delta mode, returns True if the column is one of the "delta
columns", that is, one of the columns that display a a differential value rather than an columns", that is, one of the columns that display a a differential value rather than an
absolute value. absolute value.
If not, returns True if the dupe's value is different from its ref value. If not, returns True if the dupe's value is different from its ref value.
""" """
if not self.table.delta_values: if not self.table.delta_values:
@ -42,62 +43,64 @@ class DupeRow(Row):
dupe_info = self.data dupe_info = self.data
ref_info = self._group.ref.get_display_info(group=self._group, delta=False) ref_info = self._group.ref.get_display_info(group=self._group, delta=False)
for key, value in dupe_info.items(): for key, value in dupe_info.items():
if (key not in self._delta_columns) and (ref_info[key].lower() != value.lower()): if (key not in self._delta_columns) and (
ref_info[key].lower() != value.lower()
):
self._delta_columns.add(key) self._delta_columns.add(key)
return column_name in self._delta_columns return column_name in self._delta_columns
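A worked illustration of the non-delta branch above (values invented): the comparison is case-insensitive, so only genuinely different cells get flagged orange.

    dupe_info = {"size": "1 MB", "mtime": "2015-01-01"}
    ref_info = {"size": "1 mb", "mtime": "2014-12-31"}
    delta_columns = {
        key for key, value in dupe_info.items()
        if ref_info[key].lower() != value.lower()
    }
    print(delta_columns)  # {'mtime'} -- "1 MB" vs "1 mb" does not count as a difference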
@property @property
def data(self): def data(self):
if self._data is None: if self._data is None:
self._data = self._app.get_display_info(self._dupe, self._group, False) self._data = self._app.get_display_info(self._dupe, self._group, False)
return self._data return self._data
@property @property
def data_delta(self): def data_delta(self):
if self._data_delta is None: if self._data_delta is None:
self._data_delta = self._app.get_display_info(self._dupe, self._group, True) self._data_delta = self._app.get_display_info(self._dupe, self._group, True)
return self._data_delta return self._data_delta
@property @property
def isref(self): def isref(self):
return self._dupe is self._group.ref return self._dupe is self._group.ref
@property @property
def markable(self): def markable(self):
return self._app.results.is_markable(self._dupe) return self._app.results.is_markable(self._dupe)
@property @property
def marked(self): def marked(self):
return self._app.results.is_marked(self._dupe) return self._app.results.is_marked(self._dupe)
@marked.setter @marked.setter
def marked(self, value): def marked(self, value):
self._app.mark_dupe(self._dupe, value) self._app.mark_dupe(self._dupe, value)
class ResultTable(GUITable, DupeGuruGUIObject): class ResultTable(GUITable, DupeGuruGUIObject):
def __init__(self, app): def __init__(self, app):
GUITable.__init__(self) GUITable.__init__(self)
DupeGuruGUIObject.__init__(self, app) DupeGuruGUIObject.__init__(self, app)
self.columns = Columns(self, prefaccess=app, savename='ResultTable') self.columns = Columns(self, prefaccess=app, savename="ResultTable")
self._power_marker = False self._power_marker = False
self._delta_values = False self._delta_values = False
self._sort_descriptors = ('name', True) self._sort_descriptors = ("name", True)
#--- Override # --- Override
def _view_updated(self): def _view_updated(self):
self._refresh_with_view() self._refresh_with_view()
def _restore_selection(self, previous_selection): def _restore_selection(self, previous_selection):
if self.app.selected_dupes: if self.app.selected_dupes:
to_find = set(self.app.selected_dupes) to_find = set(self.app.selected_dupes)
indexes = [i for i, r in enumerate(self) if r._dupe in to_find] indexes = [i for i, r in enumerate(self) if r._dupe in to_find]
self.selected_indexes = indexes self.selected_indexes = indexes
def _update_selection(self): def _update_selection(self):
rows = self.selected_rows rows = self.selected_rows
self.app._select_dupes(list(map(attrgetter('_dupe'), rows))) self.app._select_dupes(list(map(attrgetter("_dupe"), rows)))
def _fill(self): def _fill(self):
if not self.power_marker: if not self.power_marker:
for group in self.app.results.groups: for group in self.app.results.groups:
@ -108,22 +111,22 @@ class ResultTable(GUITable, DupeGuruGUIObject):
for dupe in self.app.results.dupes: for dupe in self.app.results.dupes:
group = self.app.results.get_group_of_duplicate(dupe) group = self.app.results.get_group_of_duplicate(dupe)
self.append(DupeRow(self, group, dupe)) self.append(DupeRow(self, group, dupe))
def _refresh_with_view(self): def _refresh_with_view(self):
self.refresh() self.refresh()
self.view.show_selected_row() self.view.show_selected_row()
#--- Public # --- Public
def get_row_value(self, index, column): def get_row_value(self, index, column):
try: try:
row = self[index] row = self[index]
except IndexError: except IndexError:
return '---' return "---"
if self.delta_values: if self.delta_values:
return row.data_delta[column] return row.data_delta[column]
else: else:
return row.data[column] return row.data[column]
def rename_selected(self, newname): def rename_selected(self, newname):
row = self.selected_row row = self.selected_row
if row is None: if row is None:
@ -133,7 +136,7 @@ class ResultTable(GUITable, DupeGuruGUIObject):
row._data = None row._data = None
row._data_delta = None row._data_delta = None
return self.app.rename_selected(newname) return self.app.rename_selected(newname)
def sort(self, key, asc): def sort(self, key, asc):
if self.power_marker: if self.power_marker:
self.app.results.sort_dupes(key, asc, self.delta_values) self.app.results.sort_dupes(key, asc, self.delta_values)
@ -141,12 +144,12 @@ class ResultTable(GUITable, DupeGuruGUIObject):
self.app.results.sort_groups(key, asc) self.app.results.sort_groups(key, asc)
self._sort_descriptors = (key, asc) self._sort_descriptors = (key, asc)
self._refresh_with_view() self._refresh_with_view()
#--- Properties # --- Properties
@property @property
def power_marker(self): def power_marker(self):
return self._power_marker return self._power_marker
@power_marker.setter @power_marker.setter
def power_marker(self, value): def power_marker(self, value):
if value == self._power_marker: if value == self._power_marker:
@ -155,29 +158,29 @@ class ResultTable(GUITable, DupeGuruGUIObject):
key, asc = self._sort_descriptors key, asc = self._sort_descriptors
self.sort(key, asc) self.sort(key, asc)
# no need to refresh, it has happened in sort() # no need to refresh, it has happened in sort()
@property @property
def delta_values(self): def delta_values(self):
return self._delta_values return self._delta_values
@delta_values.setter @delta_values.setter
def delta_values(self, value): def delta_values(self, value):
if value == self._delta_values: if value == self._delta_values:
return return
self._delta_values = value self._delta_values = value
self.refresh() self.refresh()
@property @property
def selected_dupe_count(self): def selected_dupe_count(self):
return sum(1 for row in self.selected_rows if not row.isref) return sum(1 for row in self.selected_rows if not row.isref)
#--- Event Handlers # --- Event Handlers
def marking_changed(self): def marking_changed(self):
self.view.invalidate_markings() self.view.invalidate_markings()
def results_changed(self): def results_changed(self):
self._refresh_with_view() self._refresh_with_view()
def results_changed_but_keep_selection(self): def results_changed_but_keep_selection(self):
# What we want to do here is that instead of restoring selected *dupes* after refresh, we # What we want to do here is that instead of restoring selected *dupes* after refresh, we
# restore selected *paths*. # restore selected *paths*.
@ -185,7 +188,6 @@ class ResultTable(GUITable, DupeGuruGUIObject):
self.refresh(refresh_view=False) self.refresh(refresh_view=False)
self.select(indexes) self.select(indexes)
self.view.refresh() self.view.refresh()
def save_session(self): def save_session(self):
self.columns.save_columns() self.columns.save_columns()
View File
@ -1,21 +1,23 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-02-11 # Created On: 2010-02-11
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from .base import DupeGuruGUIObject from .base import DupeGuruGUIObject
class StatsLabel(DupeGuruGUIObject): class StatsLabel(DupeGuruGUIObject):
def _view_updated(self): def _view_updated(self):
self.view.refresh() self.view.refresh()
@property @property
def display(self): def display(self):
return self.app.stat_line return self.app.stat_line
def results_changed(self): def results_changed(self):
self.view.refresh() self.view.refresh()
marking_changed = results_changed marking_changed = results_changed
View File
@ -10,13 +10,15 @@ from xml.etree import ElementTree as ET
from hscommon.util import FileOrPath from hscommon.util import FileOrPath
class IgnoreList: class IgnoreList:
"""An ignore list implementation that is iterable, filterable and exportable to XML. """An ignore list implementation that is iterable, filterable and exportable to XML.
Call Ignore to add an ignore list entry, and AreIgnored to check if 2 items are in the list. Call Ignore to add an ignore list entry, and AreIgnored to check if 2 items are in the list.
When iterated, 2-sized tuples are returned, each containing 2 items ignored together. When iterated, 2-sized tuples are returned, each containing 2 items ignored together.
""" """
#---Override
# ---Override
def __init__(self): def __init__(self):
self._ignored = {} self._ignored = {}
self._count = 0 self._count = 0
@ -29,7 +31,7 @@ class IgnoreList:
def __len__(self): def __len__(self):
return self._count return self._count
#---Public # ---Public
def AreIgnored(self, first, second): def AreIgnored(self, first, second):
def do_check(first, second): def do_check(first, second):
try: try:
@ -99,14 +101,14 @@ class IgnoreList:
root = ET.parse(infile).getroot() root = ET.parse(infile).getroot()
except Exception: except Exception:
return return
file_elems = (e for e in root if e.tag == 'file') file_elems = (e for e in root if e.tag == "file")
for fn in file_elems: for fn in file_elems:
file_path = fn.get('path') file_path = fn.get("path")
if not file_path: if not file_path:
continue continue
subfile_elems = (e for e in fn if e.tag == 'file') subfile_elems = (e for e in fn if e.tag == "file")
for sfn in subfile_elems: for sfn in subfile_elems:
subfile_path = sfn.get('path') subfile_path = sfn.get("path")
if subfile_path: if subfile_path:
self.Ignore(file_path, subfile_path) self.Ignore(file_path, subfile_path)
@ -115,15 +117,13 @@ class IgnoreList:
outfile can be a file object or a filename. outfile can be a file object or a filename.
""" """
root = ET.Element('ignore_list') root = ET.Element("ignore_list")
for filename, subfiles in self._ignored.items(): for filename, subfiles in self._ignored.items():
file_node = ET.SubElement(root, 'file') file_node = ET.SubElement(root, "file")
file_node.set('path', filename) file_node.set("path", filename)
for subfilename in subfiles: for subfilename in subfiles:
subfile_node = ET.SubElement(file_node, 'file') subfile_node = ET.SubElement(file_node, "file")
subfile_node.set('path', subfilename) subfile_node.set("path", subfilename)
tree = ET.ElementTree(root) tree = ET.ElementTree(root)
with FileOrPath(outfile, 'wb') as fp: with FileOrPath(outfile, "wb") as fp:
tree.write(fp, encoding='utf-8') tree.write(fp, encoding="utf-8")
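A hedged usage sketch of the whole IgnoreList round trip (paths invented; assumes the class is imported from this module):

    il = IgnoreList()
    il.Ignore("/photos/a.jpg", "/photos/b.jpg")
    print(il.AreIgnored("/photos/a.jpg", "/photos/b.jpg"))  # True
    for first, second in il:        # iteration yields 2-sized tuples
        print(first, second)
    il.save_to_xml("ignore.xml")    # writes the <ignore_list> XML built above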
View File
@ -2,40 +2,41 @@
# Created On: 2006/02/23 # Created On: 2006/02/23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
class Markable: class Markable:
def __init__(self): def __init__(self):
self.__marked = set() self.__marked = set()
self.__inverted = False self.__inverted = False
#---Virtual # ---Virtual
#About did_mark and did_unmark: They only happen when an object is actually added/removed # About did_mark and did_unmark: They only happen when an object is actually added/removed
# in self.__marked, and is not affected by __inverted. Thus, self.mark while __inverted # in self.__marked, and is not affected by __inverted. Thus, self.mark while __inverted
#is True will launch _did_unmark. # is True will launch _did_unmark.
def _did_mark(self, o): def _did_mark(self, o):
pass pass
def _did_unmark(self, o): def _did_unmark(self, o):
pass pass
def _get_markable_count(self): def _get_markable_count(self):
return 0 return 0
def _is_markable(self, o): def _is_markable(self, o):
return True return True
#---Protected # ---Protected
def _remove_mark_flag(self, o): def _remove_mark_flag(self, o):
try: try:
self.__marked.remove(o) self.__marked.remove(o)
self._did_unmark(o) self._did_unmark(o)
except KeyError: except KeyError:
pass pass
#---Public # ---Public
def is_marked(self, o): def is_marked(self, o):
if not self._is_markable(o): if not self._is_markable(o):
return False return False
@ -43,31 +44,31 @@ class Markable:
if self.__inverted: if self.__inverted:
is_marked = not is_marked is_marked = not is_marked
return is_marked return is_marked
def mark(self, o): def mark(self, o):
if self.is_marked(o): if self.is_marked(o):
return False return False
if not self._is_markable(o): if not self._is_markable(o):
return False return False
return self.mark_toggle(o) return self.mark_toggle(o)
def mark_multiple(self, objects): def mark_multiple(self, objects):
for o in objects: for o in objects:
self.mark(o) self.mark(o)
def mark_all(self): def mark_all(self):
self.mark_none() self.mark_none()
self.__inverted = True self.__inverted = True
def mark_invert(self): def mark_invert(self):
self.__inverted = not self.__inverted self.__inverted = not self.__inverted
def mark_none(self): def mark_none(self):
for o in self.__marked: for o in self.__marked:
self._did_unmark(o) self._did_unmark(o)
self.__marked = set() self.__marked = set()
self.__inverted = False self.__inverted = False
def mark_toggle(self, o): def mark_toggle(self, o):
try: try:
self.__marked.remove(o) self.__marked.remove(o)
@ -78,32 +79,33 @@ class Markable:
self.__marked.add(o) self.__marked.add(o)
self._did_mark(o) self._did_mark(o)
return True return True
def mark_toggle_multiple(self, objects): def mark_toggle_multiple(self, objects):
for o in objects: for o in objects:
self.mark_toggle(o) self.mark_toggle(o)
def unmark(self, o): def unmark(self, o):
if not self.is_marked(o): if not self.is_marked(o):
return False return False
return self.mark_toggle(o) return self.mark_toggle(o)
def unmark_multiple(self, objects): def unmark_multiple(self, objects):
for o in objects: for o in objects:
self.unmark(o) self.unmark(o)
#--- Properties # --- Properties
@property @property
def mark_count(self): def mark_count(self):
if self.__inverted: if self.__inverted:
return self._get_markable_count() - len(self.__marked) return self._get_markable_count() - len(self.__marked)
else: else:
return len(self.__marked) return len(self.__marked)
@property @property
def mark_inverted(self): def mark_inverted(self):
return self.__inverted return self.__inverted
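The inversion trick above, reduced to a sketch (state and names are ours): mark_all() only flips a flag, and membership answers are negated against the explicit set instead of copying every object into it.

    marked, inverted = set(), True      # state right after mark_all()

    def is_marked(obj):
        result = obj in marked
        return (not result) if inverted else result

    print(is_marked("any object"))      # True: implicitly marked via inversion
    # mark_count in this state is markable_count - len(marked), as above.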
class MarkableList(list, Markable): class MarkableList(list, Markable):
def __init__(self): def __init__(self):
list.__init__(self) list.__init__(self)
View File
@ -1 +1 @@
from . import fs, prioritize, result_table, scanner # noqa from . import fs, prioritize, result_table, scanner # noqa
View File
@ -13,25 +13,37 @@ from core.util import format_timestamp, format_perc, format_words, format_dupe_c
from core import fs from core import fs
TAG_FIELDS = { TAG_FIELDS = {
'audiosize', 'duration', 'bitrate', 'samplerate', 'title', 'artist', "audiosize",
'album', 'genre', 'year', 'track', 'comment' "duration",
"bitrate",
"samplerate",
"title",
"artist",
"album",
"genre",
"year",
"track",
"comment",
} }
class MusicFile(fs.File): class MusicFile(fs.File):
INITIAL_INFO = fs.File.INITIAL_INFO.copy() INITIAL_INFO = fs.File.INITIAL_INFO.copy()
INITIAL_INFO.update({ INITIAL_INFO.update(
'audiosize': 0, {
'bitrate': 0, "audiosize": 0,
'duration': 0, "bitrate": 0,
'samplerate': 0, "duration": 0,
'artist': '', "samplerate": 0,
'album': '', "artist": "",
'title': '', "album": "",
'genre': '', "title": "",
'comment': '', "genre": "",
'year': '', "comment": "",
'track': 0, "year": "",
}) "track": 0,
}
)
__slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys()) __slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys())
@classmethod @classmethod
@ -60,26 +72,26 @@ class MusicFile(fs.File):
else: else:
percentage = group.percentage percentage = group.percentage
dupe_count = len(group.dupes) dupe_count = len(group.dupes)
dupe_folder_path = getattr(self, 'display_folder_path', self.folder_path) dupe_folder_path = getattr(self, "display_folder_path", self.folder_path)
return { return {
'name': self.name, "name": self.name,
'folder_path': str(dupe_folder_path), "folder_path": str(dupe_folder_path),
'size': format_size(size, 2, 2, False), "size": format_size(size, 2, 2, False),
'duration': format_time(duration, with_hours=False), "duration": format_time(duration, with_hours=False),
'bitrate': str(bitrate), "bitrate": str(bitrate),
'samplerate': str(samplerate), "samplerate": str(samplerate),
'extension': self.extension, "extension": self.extension,
'mtime': format_timestamp(mtime, delta and m), "mtime": format_timestamp(mtime, delta and m),
'title': self.title, "title": self.title,
'artist': self.artist, "artist": self.artist,
'album': self.album, "album": self.album,
'genre': self.genre, "genre": self.genre,
'year': self.year, "year": self.year,
'track': str(self.track), "track": str(self.track),
'comment': self.comment, "comment": self.comment,
'percentage': format_perc(percentage), "percentage": format_perc(percentage),
'words': format_words(self.words) if hasattr(self, 'words') else '', "words": format_words(self.words) if hasattr(self, "words") else "",
'dupe_count': format_dupe_count(dupe_count), "dupe_count": format_dupe_count(dupe_count),
} }
def _get_md5partial_offset_and_size(self): def _get_md5partial_offset_and_size(self):
@ -101,4 +113,3 @@ class MusicFile(fs.File):
self.comment = f.comment self.comment = f.comment
self.year = f.year self.year = f.year
self.track = f.track self.track = f.track
View File
@ -8,11 +8,16 @@
from hscommon.trans import trget from hscommon.trans import trget
from core.prioritize import ( from core.prioritize import (
KindCategory, FolderCategory, FilenameCategory, NumericalCategory, KindCategory,
SizeCategory, MtimeCategory FolderCategory,
FilenameCategory,
NumericalCategory,
SizeCategory,
MtimeCategory,
) )
coltr = trget('columns') coltr = trget("columns")
class DurationCategory(NumericalCategory): class DurationCategory(NumericalCategory):
NAME = coltr("Duration") NAME = coltr("Duration")
@@ -20,21 +25,29 @@ class DurationCategory(NumericalCategory):
def extract_value(self, dupe): def extract_value(self, dupe):
return dupe.duration return dupe.duration
class BitrateCategory(NumericalCategory): class BitrateCategory(NumericalCategory):
NAME = coltr("Bitrate") NAME = coltr("Bitrate")
def extract_value(self, dupe): def extract_value(self, dupe):
return dupe.bitrate return dupe.bitrate
class SamplerateCategory(NumericalCategory): class SamplerateCategory(NumericalCategory):
NAME = coltr("Samplerate") NAME = coltr("Samplerate")
def extract_value(self, dupe): def extract_value(self, dupe):
return dupe.samplerate return dupe.samplerate
def all_categories(): def all_categories():
return [ return [
KindCategory, FolderCategory, FilenameCategory, SizeCategory, DurationCategory, KindCategory,
BitrateCategory, SamplerateCategory, MtimeCategory FolderCategory,
FilenameCategory,
SizeCategory,
DurationCategory,
BitrateCategory,
SamplerateCategory,
MtimeCategory,
] ]


@@ -1,8 +1,8 @@
# Created On: 2011-11-27 # Created On: 2011-11-27
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.gui.column import Column from hscommon.gui.column import Column
@@ -10,28 +10,29 @@ from hscommon.trans import trget
from core.gui.result_table import ResultTable as ResultTableBase from core.gui.result_table import ResultTable as ResultTableBase
coltr = trget('columns') coltr = trget("columns")
class ResultTable(ResultTableBase): class ResultTable(ResultTableBase):
COLUMNS = [ COLUMNS = [
Column('marked', ''), Column("marked", ""),
Column('name', coltr("Filename")), Column("name", coltr("Filename")),
Column('folder_path', coltr("Folder"), visible=False, optional=True), Column("folder_path", coltr("Folder"), visible=False, optional=True),
Column('size', coltr("Size (MB)"), optional=True), Column("size", coltr("Size (MB)"), optional=True),
Column('duration', coltr("Time"), optional=True), Column("duration", coltr("Time"), optional=True),
Column('bitrate', coltr("Bitrate"), optional=True), Column("bitrate", coltr("Bitrate"), optional=True),
Column('samplerate', coltr("Sample Rate"), visible=False, optional=True), Column("samplerate", coltr("Sample Rate"), visible=False, optional=True),
Column('extension', coltr("Kind"), optional=True), Column("extension", coltr("Kind"), optional=True),
Column('mtime', coltr("Modification"), visible=False, optional=True), Column("mtime", coltr("Modification"), visible=False, optional=True),
Column('title', coltr("Title"), visible=False, optional=True), Column("title", coltr("Title"), visible=False, optional=True),
Column('artist', coltr("Artist"), visible=False, optional=True), Column("artist", coltr("Artist"), visible=False, optional=True),
Column('album', coltr("Album"), visible=False, optional=True), Column("album", coltr("Album"), visible=False, optional=True),
Column('genre', coltr("Genre"), visible=False, optional=True), Column("genre", coltr("Genre"), visible=False, optional=True),
Column('year', coltr("Year"), visible=False, optional=True), Column("year", coltr("Year"), visible=False, optional=True),
Column('track', coltr("Track Number"), visible=False, optional=True), Column("track", coltr("Track Number"), visible=False, optional=True),
Column('comment', coltr("Comment"), visible=False, optional=True), Column("comment", coltr("Comment"), visible=False, optional=True),
Column('percentage', coltr("Match %"), optional=True), Column("percentage", coltr("Match %"), optional=True),
Column('words', coltr("Words Used"), visible=False, optional=True), Column("words", coltr("Words Used"), visible=False, optional=True),
Column('dupe_count', coltr("Dupe Count"), visible=False, optional=True), Column("dupe_count", coltr("Dupe Count"), visible=False, optional=True),
] ]
DELTA_COLUMNS = {'size', 'duration', 'bitrate', 'samplerate', 'mtime'} DELTA_COLUMNS = {"size", "duration", "bitrate", "samplerate", "mtime"}


@@ -8,6 +8,7 @@ from hscommon.trans import tr
from core.scanner import Scanner as ScannerBase, ScanOption, ScanType from core.scanner import Scanner as ScannerBase, ScanOption, ScanType
class ScannerME(ScannerBase): class ScannerME(ScannerBase):
@staticmethod @staticmethod
def _key_func(dupe): def _key_func(dupe):
@@ -22,5 +23,3 @@ class ScannerME(ScannerBase):
ScanOption(ScanType.Tag, tr("Tags")), ScanOption(ScanType.Tag, tr("Tags")),
ScanOption(ScanType.Contents, tr("Contents")), ScanOption(ScanType.Contents, tr("Contents")),
] ]


@@ -1 +1,12 @@
from . import block, cache, exif, iphoto_plist, matchblock, matchexif, photo, prioritize, result_table, scanner # noqa from . import ( # noqa
block,
cache,
exif,
iphoto_plist,
matchblock,
matchexif,
photo,
prioritize,
result_table,
scanner,
)


@@ -6,7 +6,7 @@
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from ._block import NoBlocksError, DifferentBlockCountError, avgdiff, getblocks2 # NOQA from ._block import NoBlocksError, DifferentBlockCountError, avgdiff, getblocks2 # NOQA
# Converted to C # Converted to C
# def getblock(image): # def getblock(image):


@@ -4,7 +4,8 @@
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from ._cache import string_to_colors # noqa from ._cache import string_to_colors # noqa
def colors_to_string(colors): def colors_to_string(colors):
"""Transform the 3 sized tuples 'colors' into a hex string. """Transform the 3 sized tuples 'colors' into a hex string.
@@ -12,7 +13,8 @@ def colors_to_string(colors):
[(0,100,255)] --> 0064ff [(0,100,255)] --> 0064ff
[(1,2,3),(4,5,6)] --> 010203040506 [(1,2,3),(4,5,6)] --> 010203040506
""" """
return ''.join('%02x%02x%02x' % (r, g, b) for r, g, b in colors) return "".join("%02x%02x%02x" % (r, g, b) for r, g, b in colors)
# This function is an important bottleneck of dupeGuru PE. It has been converted to C. # This function is an important bottleneck of dupeGuru PE. It has been converted to C.
# def string_to_colors(s): # def string_to_colors(s):
@@ -23,4 +25,3 @@ def colors_to_string(colors):
# number = int(s[i:i+6], 16) # number = int(s[i:i+6], 16)
# result.append((number >> 16, (number >> 8) & 0xff, number & 0xff)) # result.append((number >> 16, (number >> 8) & 0xff, number & 0xff))
# return result # return result
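
A minimal sketch of the round-trip these two helpers form: colors_to_string as committed above, plus a pure-Python string_to_colors reconstructed from the commented-out code (the shipped version is the C function imported from ._cache).

    def colors_to_string(colors):
        # Each (r, g, b) tuple becomes six hex digits, concatenated.
        return "".join("%02x%02x%02x" % (r, g, b) for r, g, b in colors)

    def string_to_colors(s):
        # Pure-Python stand-in for the C function sketched in the comments.
        result = []
        for i in range(0, len(s), 6):
            number = int(s[i:i + 6], 16)
            result.append((number >> 16, (number >> 8) & 0xFF, number & 0xFF))
        return result

    assert colors_to_string([(0, 100, 255)]) == "0064ff"
    assert string_to_colors("010203040506") == [(1, 2, 3), (4, 5, 6)]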


@@ -12,29 +12,36 @@ from collections import namedtuple
from .cache import string_to_colors, colors_to_string from .cache import string_to_colors, colors_to_string
def wrap_path(path): def wrap_path(path):
return 'path:{}'.format(path) return "path:{}".format(path)
def unwrap_path(key): def unwrap_path(key):
return key[5:] return key[5:]
def wrap_id(path): def wrap_id(path):
return 'id:{}'.format(path) return "id:{}".format(path)
def unwrap_id(key): def unwrap_id(key):
return int(key[3:]) return int(key[3:])
CacheRow = namedtuple('CacheRow', 'id path blocks mtime')
CacheRow = namedtuple("CacheRow", "id path blocks mtime")
class ShelveCache: class ShelveCache:
"""A class to cache picture blocks in a shelve backend. """A class to cache picture blocks in a shelve backend.
""" """
def __init__(self, db=None, readonly=False): def __init__(self, db=None, readonly=False):
self.istmp = db is None self.istmp = db is None
if self.istmp: if self.istmp:
self.dtmp = tempfile.mkdtemp() self.dtmp = tempfile.mkdtemp()
self.ftmp = db = op.join(self.dtmp, 'tmpdb') self.ftmp = db = op.join(self.dtmp, "tmpdb")
flag = 'r' if readonly else 'c' flag = "r" if readonly else "c"
self.shelve = shelve.open(db, flag) self.shelve = shelve.open(db, flag)
self.maxid = self._compute_maxid() self.maxid = self._compute_maxid()
@@ -54,10 +61,10 @@ class ShelveCache:
return string_to_colors(self.shelve[skey].blocks) return string_to_colors(self.shelve[skey].blocks)
def __iter__(self): def __iter__(self):
return (unwrap_path(k) for k in self.shelve if k.startswith('path:')) return (unwrap_path(k) for k in self.shelve if k.startswith("path:"))
def __len__(self): def __len__(self):
return sum(1 for k in self.shelve if k.startswith('path:')) return sum(1 for k in self.shelve if k.startswith("path:"))
def __setitem__(self, path_str, blocks): def __setitem__(self, path_str, blocks):
blocks = colors_to_string(blocks) blocks = colors_to_string(blocks)
@@ -74,7 +81,9 @@ class ShelveCache:
self.shelve[wrap_id(rowid)] = wrap_path(path_str) self.shelve[wrap_id(rowid)] = wrap_path(path_str)
def _compute_maxid(self): def _compute_maxid(self):
return max((unwrap_id(k) for k in self.shelve if k.startswith('id:')), default=1) return max(
(unwrap_id(k) for k in self.shelve if k.startswith("id:")), default=1
)
def _get_new_id(self): def _get_new_id(self):
self.maxid += 1 self.maxid += 1
@@ -133,4 +142,3 @@ class ShelveCache:
# #402 and #439. I don't think it hurts to silently ignore the error, so that's # #402 and #439. I don't think it hurts to silently ignore the error, so that's
# what we do # what we do
pass pass
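
A minimal standalone sketch of the key-wrapping scheme above: ShelveCache keeps two namespaces ("path:" and "id:" keys) in a single shelve, and the unwrap helpers strip the prefixes back off. A plain dict stands in for the shelve here.

    def wrap_path(path):
        return "path:{}".format(path)

    def unwrap_path(key):
        return key[5:]  # drop the 5-character "path:" prefix

    def wrap_id(rowid):
        return "id:{}".format(rowid)

    def unwrap_id(key):
        return int(key[3:])  # drop the 3-character "id:" prefix

    # A plain dict standing in for the shelve backend.
    fake_shelve = {wrap_path("/pics/a.jpg"): "row", wrap_id(42): "path:/pics/a.jpg"}
    paths = [unwrap_path(k) for k in fake_shelve if k.startswith("path:")]
    ids = [unwrap_id(k) for k in fake_shelve if k.startswith("id:")]
    assert paths == ["/pics/a.jpg"] and ids == [42]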


@@ -11,10 +11,12 @@ import sqlite3 as sqlite
from .cache import string_to_colors, colors_to_string from .cache import string_to_colors, colors_to_string
class SqliteCache: class SqliteCache:
"""A class to cache picture blocks in a sqlite backend. """A class to cache picture blocks in a sqlite backend.
""" """
def __init__(self, db=':memory:', readonly=False):
def __init__(self, db=":memory:", readonly=False):
# readonly is not used in the sqlite version of the cache # readonly is not used in the sqlite version of the cache
self.dbname = db self.dbname = db
self.con = None self.con = None
@@ -67,34 +69,40 @@ class SqliteCache:
try: try:
self.con.execute(sql, [blocks, mtime, path_str]) self.con.execute(sql, [blocks, mtime, path_str])
except sqlite.OperationalError: except sqlite.OperationalError:
logging.warning('Picture cache could not set value for key %r', path_str) logging.warning("Picture cache could not set value for key %r", path_str)
except sqlite.DatabaseError as e: except sqlite.DatabaseError as e:
logging.warning('DatabaseError while setting value for key %r: %s', path_str, str(e)) logging.warning(
"DatabaseError while setting value for key %r: %s", path_str, str(e)
)
def _create_con(self, second_try=False): def _create_con(self, second_try=False):
def create_tables(): def create_tables():
logging.debug("Creating picture cache tables.") logging.debug("Creating picture cache tables.")
self.con.execute("drop table if exists pictures") self.con.execute("drop table if exists pictures")
self.con.execute("drop index if exists idx_path") self.con.execute("drop index if exists idx_path")
self.con.execute("create table pictures(path TEXT, mtime INTEGER, blocks TEXT)") self.con.execute(
"create table pictures(path TEXT, mtime INTEGER, blocks TEXT)"
)
self.con.execute("create index idx_path on pictures (path)") self.con.execute("create index idx_path on pictures (path)")
self.con = sqlite.connect(self.dbname, isolation_level=None) self.con = sqlite.connect(self.dbname, isolation_level=None)
try: try:
self.con.execute("select path, mtime, blocks from pictures where 1=2") self.con.execute("select path, mtime, blocks from pictures where 1=2")
except sqlite.OperationalError: # new db except sqlite.OperationalError: # new db
create_tables() create_tables()
except sqlite.DatabaseError as e: # corrupted db except sqlite.DatabaseError as e: # corrupted db
if second_try: if second_try:
raise # Something really strange is happening raise # Something really strange is happening
logging.warning('Could not create picture cache because of an error: %s', str(e)) logging.warning(
"Could not create picture cache because of an error: %s", str(e)
)
self.con.close() self.con.close()
os.remove(self.dbname) os.remove(self.dbname)
self._create_con(second_try=True) self._create_con(second_try=True)
def clear(self): def clear(self):
self.close() self.close()
if self.dbname != ':memory:': if self.dbname != ":memory:":
os.remove(self.dbname) os.remove(self.dbname)
self._create_con() self._create_con()
@@ -117,7 +125,9 @@ class SqliteCache:
raise ValueError(path) raise ValueError(path)
def get_multiple(self, rowids): def get_multiple(self, rowids):
sql = "select rowid, blocks from pictures where rowid in (%s)" % ','.join(map(str, rowids)) sql = "select rowid, blocks from pictures where rowid in (%s)" % ",".join(
map(str, rowids)
)
cur = self.con.execute(sql) cur = self.con.execute(sql)
return ((rowid, string_to_colors(blocks)) for rowid, blocks in cur) return ((rowid, string_to_colors(blocks)) for rowid, blocks in cur)
@@ -138,6 +148,7 @@ class SqliteCache:
continue continue
todelete.append(rowid) todelete.append(rowid)
if todelete: if todelete:
sql = "delete from pictures where rowid in (%s)" % ','.join(map(str, todelete)) sql = "delete from pictures where rowid in (%s)" % ",".join(
map(str, todelete)
)
self.con.execute(sql) self.con.execute(sql)


@@ -83,17 +83,17 @@ EXIF_TAGS = {
0xA003: "PixelYDimension", 0xA003: "PixelYDimension",
0xA004: "RelatedSoundFile", 0xA004: "RelatedSoundFile",
0xA005: "InteroperabilityIFDPointer", 0xA005: "InteroperabilityIFDPointer",
0xA20B: "FlashEnergy", # 0x920B in TIFF/EP 0xA20B: "FlashEnergy", # 0x920B in TIFF/EP
0xA20C: "SpatialFrequencyResponse", # 0x920C - - 0xA20C: "SpatialFrequencyResponse", # 0x920C - -
0xA20E: "FocalPlaneXResolution", # 0x920E - - 0xA20E: "FocalPlaneXResolution", # 0x920E - -
0xA20F: "FocalPlaneYResolution", # 0x920F - - 0xA20F: "FocalPlaneYResolution", # 0x920F - -
0xA210: "FocalPlaneResolutionUnit", # 0x9210 - - 0xA210: "FocalPlaneResolutionUnit", # 0x9210 - -
0xA214: "SubjectLocation", # 0x9214 - - 0xA214: "SubjectLocation", # 0x9214 - -
0xA215: "ExposureIndex", # 0x9215 - - 0xA215: "ExposureIndex", # 0x9215 - -
0xA217: "SensingMethod", # 0x9217 - - 0xA217: "SensingMethod", # 0x9217 - -
0xA300: "FileSource", 0xA300: "FileSource",
0xA301: "SceneType", 0xA301: "SceneType",
0xA302: "CFAPattern", # 0x828E in TIFF/EP 0xA302: "CFAPattern", # 0x828E in TIFF/EP
0xA401: "CustomRendered", 0xA401: "CustomRendered",
0xA402: "ExposureMode", 0xA402: "ExposureMode",
0xA403: "WhiteBalance", 0xA403: "WhiteBalance",
@@ -148,17 +148,18 @@ GPS_TA0GS = {
0x1B: "GPSProcessingMethod", 0x1B: "GPSProcessingMethod",
0x1C: "GPSAreaInformation", 0x1C: "GPSAreaInformation",
0x1D: "GPSDateStamp", 0x1D: "GPSDateStamp",
0x1E: "GPSDifferential" 0x1E: "GPSDifferential",
} }
INTEL_ENDIAN = ord('I') INTEL_ENDIAN = ord("I")
MOTOROLA_ENDIAN = ord('M') MOTOROLA_ENDIAN = ord("M")
# About MAX_COUNT: It's possible to have corrupted exif tags where the entry count is way too high # About MAX_COUNT: It's possible to have corrupted exif tags where the entry count is way too high
# and thus makes us loop, not endlessly, but for heck of a long time for nothing. Therefore, we put # and thus makes us loop, not endlessly, but for heck of a long time for nothing. Therefore, we put
# an arbitrary limit on the entry count we'll allow ourselves to read and any IFD reporting more # an arbitrary limit on the entry count we'll allow ourselves to read and any IFD reporting more
# entries than that will be considered corrupt. # entries than that will be considered corrupt.
MAX_COUNT = 0xffff MAX_COUNT = 0xFFFF
def s2n_motorola(bytes): def s2n_motorola(bytes):
x = 0 x = 0
@@ -166,6 +167,7 @@ def s2n_motorola(bytes):
x = (x << 8) | c x = (x << 8) | c
return x return x
def s2n_intel(bytes): def s2n_intel(bytes):
x = 0 x = 0
y = 0 y = 0
@@ -174,13 +176,14 @@ def s2n_intel(bytes):
y = y + 8 y = y + 8
return x return x
class Fraction: class Fraction:
def __init__(self, num, den): def __init__(self, num, den):
self.num = num self.num = num
self.den = den self.den = den
def __repr__(self): def __repr__(self):
return '%d/%d' % (self.num, self.den) return "%d/%d" % (self.num, self.den)
class TIFF_file: class TIFF_file:
@@ -190,16 +193,22 @@ class TIFF_file:
self.s2nfunc = s2n_intel if self.endian == INTEL_ENDIAN else s2n_motorola self.s2nfunc = s2n_intel if self.endian == INTEL_ENDIAN else s2n_motorola
def s2n(self, offset, length, signed=0, debug=False): def s2n(self, offset, length, signed=0, debug=False):
slice = self.data[offset:offset+length] slice = self.data[offset : offset + length]
val = self.s2nfunc(slice) val = self.s2nfunc(slice)
# Sign extension ? # Sign extension ?
if signed: if signed:
msb = 1 << (8*length - 1) msb = 1 << (8 * length - 1)
if val & msb: if val & msb:
val = val - (msb << 1) val = val - (msb << 1)
if debug: if debug:
logging.debug(self.endian) logging.debug(self.endian)
logging.debug("Slice for offset %d length %d: %r and value: %d", offset, length, slice, val) logging.debug(
"Slice for offset %d length %d: %r and value: %d",
offset,
length,
slice,
val,
)
return val return val
def first_IFD(self): def first_IFD(self):
@@ -225,30 +234,31 @@ class TIFF_file:
return [] return []
a = [] a = []
for i in range(entries): for i in range(entries):
entry = ifd + 2 + 12*i entry = ifd + 2 + 12 * i
tag = self.s2n(entry, 2) tag = self.s2n(entry, 2)
type = self.s2n(entry+2, 2) type = self.s2n(entry + 2, 2)
if not 1 <= type <= 10: if not 1 <= type <= 10:
continue # not handled continue # not handled
typelen = [1, 1, 2, 4, 8, 1, 1, 2, 4, 8][type-1] typelen = [1, 1, 2, 4, 8, 1, 1, 2, 4, 8][type - 1]
count = self.s2n(entry+4, 4) count = self.s2n(entry + 4, 4)
if count > MAX_COUNT: if count > MAX_COUNT:
logging.debug("Probably corrupt. Aborting.") logging.debug("Probably corrupt. Aborting.")
return [] return []
offset = entry+8 offset = entry + 8
if count*typelen > 4: if count * typelen > 4:
offset = self.s2n(offset, 4) offset = self.s2n(offset, 4)
if type == 2: if type == 2:
# Special case: nul-terminated ASCII string # Special case: nul-terminated ASCII string
values = str(self.data[offset:offset+count-1], encoding='latin-1') values = str(self.data[offset : offset + count - 1], encoding="latin-1")
else: else:
values = [] values = []
signed = (type == 6 or type >= 8) signed = type == 6 or type >= 8
for j in range(count): for j in range(count):
if type in {5, 10}: if type in {5, 10}:
# The type is either 5 or 10 # The type is either 5 or 10
value_j = Fraction(self.s2n(offset, 4, signed), value_j = Fraction(
self.s2n(offset+4, 4, signed)) self.s2n(offset, 4, signed), self.s2n(offset + 4, 4, signed)
)
else: else:
# Not a fraction # Not a fraction
value_j = self.s2n(offset, typelen, signed) value_j = self.s2n(offset, typelen, signed)
@@ -258,32 +268,37 @@ class TIFF_file:
a.append((tag, type, values)) a.append((tag, type, values))
return a return a
def read_exif_header(fp): def read_exif_header(fp):
# If `fp`'s first bytes are not exif, it tries to find it in the next 4kb # If `fp`'s first bytes are not exif, it tries to find it in the next 4kb
def isexif(data): def isexif(data):
return data[0:4] == b'\377\330\377\341' and data[6:10] == b'Exif' return data[0:4] == b"\377\330\377\341" and data[6:10] == b"Exif"
data = fp.read(12) data = fp.read(12)
if isexif(data): if isexif(data):
return data return data
# ok, not exif, try to find it # ok, not exif, try to find it
large_data = fp.read(4096) large_data = fp.read(4096)
try: try:
index = large_data.index(b'Exif') index = large_data.index(b"Exif")
data = large_data[index-6:index+6] data = large_data[index - 6 : index + 6]
# large_data omits the first 12 bytes, and the index is at the middle of the header, so we # large_data omits the first 12 bytes, and the index is at the middle of the header, so we
# must seek index + 18 # must seek index + 18
fp.seek(index+18) fp.seek(index + 18)
return data return data
except ValueError: except ValueError:
raise ValueError("Not an Exif file") raise ValueError("Not an Exif file")
def get_fields(fp): def get_fields(fp):
data = read_exif_header(fp) data = read_exif_header(fp)
length = data[4] * 256 + data[5] length = data[4] * 256 + data[5]
logging.debug("Exif header length: %d bytes", length) logging.debug("Exif header length: %d bytes", length)
data = fp.read(length-8) data = fp.read(length - 8)
data_format = data[0] data_format = data[0]
logging.debug("%s format", {INTEL_ENDIAN: 'Intel', MOTOROLA_ENDIAN: 'Motorola'}[data_format]) logging.debug(
"%s format", {INTEL_ENDIAN: "Intel", MOTOROLA_ENDIAN: "Motorola"}[data_format]
)
T = TIFF_file(data) T = TIFF_file(data)
# There may be more than one IFD per file, but we only read the first one because others are # There may be more than one IFD per file, but we only read the first one because others are
# most likely thumbnails. # most likely thumbnails.
@@ -294,9 +309,9 @@ def get_fields(fp):
try: try:
stag = EXIF_TAGS[tag] stag = EXIF_TAGS[tag]
except KeyError: except KeyError:
stag = '0x%04X' % tag stag = "0x%04X" % tag
if stag in result: if stag in result:
return # don't overwrite data return # don't overwrite data
result[stag] = values result[stag] = values
logging.debug("IFD at offset %d", main_IFD_offset) logging.debug("IFD at offset %d", main_IFD_offset)


@@ -1,24 +1,26 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2014-03-15 # Created On: 2014-03-15
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import plistlib import plistlib
class IPhotoPlistParser(plistlib._PlistParser): class IPhotoPlistParser(plistlib._PlistParser):
"""A parser for iPhoto plists. """A parser for iPhoto plists.
iPhoto plists tend to be malformed, so we have to subclass the built-in parser to be a bit more iPhoto plists tend to be malformed, so we have to subclass the built-in parser to be a bit more
lenient. lenient.
""" """
def __init__(self): def __init__(self):
plistlib._PlistParser.__init__(self, use_builtin_types=True, dict_type=dict) plistlib._PlistParser.__init__(self, use_builtin_types=True, dict_type=dict)
# For debugging purposes, we remember the last bit of data to be analyzed so that we can # For debugging purposes, we remember the last bit of data to be analyzed so that we can
# log it in case of an exception # log it in case of an exception
self.lastdata = '' self.lastdata = ""
def get_data(self): def get_data(self):
self.lastdata = plistlib._PlistParser.get_data(self) self.lastdata = plistlib._PlistParser.get_data(self)


@@ -48,14 +48,18 @@ except Exception:
logging.warning("Had problems to determine cpu count on launch.") logging.warning("Had problems to determine cpu count on launch.")
RESULTS_QUEUE_LIMIT = 8 RESULTS_QUEUE_LIMIT = 8
def get_cache(cache_path, readonly=False): def get_cache(cache_path, readonly=False):
if cache_path.endswith('shelve'): if cache_path.endswith("shelve"):
from .cache_shelve import ShelveCache from .cache_shelve import ShelveCache
return ShelveCache(cache_path, readonly=readonly) return ShelveCache(cache_path, readonly=readonly)
else: else:
from .cache_sqlite import SqliteCache from .cache_sqlite import SqliteCache
return SqliteCache(cache_path, readonly=readonly) return SqliteCache(cache_path, readonly=readonly)
def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob): def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
# The MemoryError handlers in there use logging without first caring about whether or not # The MemoryError handlers in there use logging without first caring about whether or not
# there is enough memory left to carry on the operation because it is assumed that the # there is enough memory left to carry on the operation because it is assumed that the
@@ -63,7 +67,7 @@ def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
# time that MemoryError is raised. # time that MemoryError is raised.
cache = get_cache(cache_path) cache = get_cache(cache_path)
cache.purge_outdated() cache.purge_outdated()
prepared = [] # only pictures for which there was no error getting blocks prepared = [] # only pictures for which there was no error getting blocks
try: try:
for picture in j.iter_with_progress(pictures, tr("Analyzed %d/%d pictures")): for picture in j.iter_with_progress(pictures, tr("Analyzed %d/%d pictures")):
if not picture.path: if not picture.path:
@@ -77,7 +81,7 @@ def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
picture.unicode_path = str(picture.path) picture.unicode_path = str(picture.path)
logging.debug("Analyzing picture at %s", picture.unicode_path) logging.debug("Analyzing picture at %s", picture.unicode_path)
if with_dimensions: if with_dimensions:
picture.dimensions # pre-read dimensions picture.dimensions # pre-read dimensions
try: try:
if picture.unicode_path not in cache: if picture.unicode_path not in cache:
blocks = picture.get_blocks(BLOCK_COUNT_PER_SIDE) blocks = picture.get_blocks(BLOCK_COUNT_PER_SIDE)
@@ -86,32 +90,45 @@ def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
except (IOError, ValueError) as e: except (IOError, ValueError) as e:
logging.warning(str(e)) logging.warning(str(e))
except MemoryError: except MemoryError:
logging.warning("Ran out of memory while reading %s of size %d", picture.unicode_path, picture.size) logging.warning(
if picture.size < 10 * 1024 * 1024: # We're really running out of memory "Ran out of memory while reading %s of size %d",
picture.unicode_path,
picture.size,
)
if (
picture.size < 10 * 1024 * 1024
): # We're really running out of memory
raise raise
except MemoryError: except MemoryError:
logging.warning('Ran out of memory while preparing pictures') logging.warning("Ran out of memory while preparing pictures")
cache.close() cache.close()
return prepared return prepared
def get_chunks(pictures): def get_chunks(pictures):
min_chunk_count = multiprocessing.cpu_count() * 2 # have enough chunks to feed all subprocesses min_chunk_count = (
multiprocessing.cpu_count() * 2
) # have enough chunks to feed all subprocesses
chunk_count = len(pictures) // DEFAULT_CHUNK_SIZE chunk_count = len(pictures) // DEFAULT_CHUNK_SIZE
chunk_count = max(min_chunk_count, chunk_count) chunk_count = max(min_chunk_count, chunk_count)
chunk_size = (len(pictures) // chunk_count) + 1 chunk_size = (len(pictures) // chunk_count) + 1
chunk_size = max(MIN_CHUNK_SIZE, chunk_size) chunk_size = max(MIN_CHUNK_SIZE, chunk_size)
logging.info( logging.info(
"Creating %d chunks with a chunk size of %d for %d pictures", chunk_count, "Creating %d chunks with a chunk size of %d for %d pictures",
chunk_size, len(pictures) chunk_count,
chunk_size,
len(pictures),
) )
chunks = [pictures[i:i+chunk_size] for i in range(0, len(pictures), chunk_size)] chunks = [pictures[i : i + chunk_size] for i in range(0, len(pictures), chunk_size)]
return chunks return chunks
def get_match(first, second, percentage): def get_match(first, second, percentage):
if percentage < 0: if percentage < 0:
percentage = 0 percentage = 0
return Match(first, second, percentage) return Match(first, second, percentage)
def async_compare(ref_ids, other_ids, dbname, threshold, picinfo): def async_compare(ref_ids, other_ids, dbname, threshold, picinfo):
# The list of ids in ref_ids have to be compared to the list of ids in other_ids. other_ids # The list of ids in ref_ids have to be compared to the list of ids in other_ids. other_ids
# can be None. In this case, ref_ids has to be compared with itself # can be None. In this case, ref_ids has to be compared with itself
@@ -142,6 +159,7 @@ def async_compare(ref_ids, other_ids, dbname, threshold, picinfo):
cache.close() cache.close()
return results return results
def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob): def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob):
def get_picinfo(p): def get_picinfo(p):
if match_scaled: if match_scaled:
@@ -160,11 +178,16 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljo
async_results.remove(result) async_results.remove(result)
comparison_count += 1 comparison_count += 1
# About the NOQA below: I think there's a bug in pyflakes. To investigate... # About the NOQA below: I think there's a bug in pyflakes. To investigate...
progress_msg = tr("Performed %d/%d chunk matches") % (comparison_count, len(comparisons_to_do)) # NOQA progress_msg = tr("Performed %d/%d chunk matches") % (
comparison_count,
len(comparisons_to_do),
) # NOQA
j.set_progress(comparison_count, progress_msg) j.set_progress(comparison_count, progress_msg)
j = j.start_subjob([3, 7]) j = j.start_subjob([3, 7])
pictures = prepare_pictures(pictures, cache_path, with_dimensions=not match_scaled, j=j) pictures = prepare_pictures(
pictures, cache_path, with_dimensions=not match_scaled, j=j
)
j = j.start_subjob([9, 1], tr("Preparing for matching")) j = j.start_subjob([9, 1], tr("Preparing for matching"))
cache = get_cache(cache_path) cache = get_cache(cache_path)
id2picture = {} id2picture = {}
@@ -175,7 +198,7 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljo
except ValueError: except ValueError:
pass pass
cache.close() cache.close()
pictures = [p for p in pictures if hasattr(p, 'cache_id')] pictures = [p for p in pictures if hasattr(p, "cache_id")]
pool = multiprocessing.Pool() pool = multiprocessing.Pool()
async_results = [] async_results = []
matches = [] matches = []
@@ -203,9 +226,17 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljo
# some wiggle room, log about the incident, and stop matching right here. We then process # some wiggle room, log about the incident, and stop matching right here. We then process
# the matches we have. The rest of the process doesn't allocate much and we should be # the matches we have. The rest of the process doesn't allocate much and we should be
# alright. # alright.
del comparisons_to_do, chunks, pictures # some wiggle room for the next statements del (
logging.warning("Ran out of memory when scanning! We had %d matches.", len(matches)) comparisons_to_do,
del matches[-len(matches)//3:] # some wiggle room to ensure we don't run out of memory again. chunks,
pictures,
) # some wiggle room for the next statements
logging.warning(
"Ran out of memory when scanning! We had %d matches.", len(matches)
)
del matches[
-len(matches) // 3 :
] # some wiggle room to ensure we don't run out of memory again.
pool.close() pool.close()
result = [] result = []
myiter = j.iter_with_progress( myiter = j.iter_with_progress(
@@ -220,10 +251,10 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljo
if percentage == 100 and ref.md5 != other.md5: if percentage == 100 and ref.md5 != other.md5:
percentage = 99 percentage = 99
if percentage >= threshold: if percentage >= threshold:
ref.dimensions # pre-read dimensions for display in results ref.dimensions # pre-read dimensions for display in results
other.dimensions other.dimensions
result.append(get_match(ref, other, percentage)) result.append(get_match(ref, other, percentage))
return result return result
multiprocessing.freeze_support()
multiprocessing.freeze_support()
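
A sketch of the chunk-sizing arithmetic in get_chunks(); the two constants here are hypothetical stand-ins for the module's DEFAULT_CHUNK_SIZE and MIN_CHUNK_SIZE, whose real values are not shown in this diff.

    import multiprocessing

    DEFAULT_CHUNK_SIZE = 1000  # hypothetical values for illustration
    MIN_CHUNK_SIZE = 100

    def get_chunks(pictures):
        # At least two chunks per CPU so every worker in the pool stays busy.
        min_chunk_count = multiprocessing.cpu_count() * 2
        chunk_count = max(min_chunk_count, len(pictures) // DEFAULT_CHUNK_SIZE)
        chunk_size = max(MIN_CHUNK_SIZE, (len(pictures) // chunk_count) + 1)
        return [
            pictures[i:i + chunk_size] for i in range(0, len(pictures), chunk_size)
        ]

    chunks = get_chunks(list(range(2500)))
    assert sum(len(c) for c in chunks) == 2500  # chunking never drops a picture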


@@ -13,14 +13,15 @@ from hscommon.trans import tr
from core.engine import Match from core.engine import Match
def getmatches(files, match_scaled, j): def getmatches(files, match_scaled, j):
timestamp2pic = defaultdict(set) timestamp2pic = defaultdict(set)
for picture in j.iter_with_progress(files, tr("Read EXIF of %d/%d pictures")): for picture in j.iter_with_progress(files, tr("Read EXIF of %d/%d pictures")):
timestamp = picture.exif_timestamp timestamp = picture.exif_timestamp
if timestamp: if timestamp:
timestamp2pic[timestamp].add(picture) timestamp2pic[timestamp].add(picture)
if '0000:00:00 00:00:00' in timestamp2pic: # very likely false matches if "0000:00:00 00:00:00" in timestamp2pic: # very likely false matches
del timestamp2pic['0000:00:00 00:00:00'] del timestamp2pic["0000:00:00 00:00:00"]
matches = [] matches = []
for pictures in timestamp2pic.values(): for pictures in timestamp2pic.values():
for p1, p2 in combinations(pictures, 2): for p1, p2 in combinations(pictures, 2):
@@ -28,4 +29,3 @@ def getmatches(files, match_scaled, j):
continue continue
matches.append(Match(p1, p2, 100)) matches.append(Match(p1, p2, 100))
return matches return matches
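
A minimal sketch of the timestamp-bucketing strategy above, with (name, timestamp) tuples standing in for picture objects and plain tuples standing in for Match.

    from collections import defaultdict
    from itertools import combinations

    def exif_matches(pictures):  # pictures: iterable of (name, exif_timestamp)
        timestamp2pic = defaultdict(set)
        for name, timestamp in pictures:
            if timestamp:
                timestamp2pic[timestamp].add(name)
        # A zeroed-out timestamp is a camera default, so it would match everything.
        timestamp2pic.pop("0000:00:00 00:00:00", None)
        return [
            (p1, p2, 100)
            for pics in timestamp2pic.values()
            for p1, p2 in combinations(pics, 2)
        ]

    pics = [("a.jpg", "2015:01:01 10:00:00"), ("b.jpg", "2015:01:01 10:00:00"),
            ("c.jpg", "0000:00:00 00:00:00")]
    assert len(exif_matches(pics)) == 1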


@@ -14,23 +14,22 @@ from . import exif
# This global value is set by the platform-specific subclasser of the Photo base class # This global value is set by the platform-specific subclasser of the Photo base class
PLAT_SPECIFIC_PHOTO_CLASS = None PLAT_SPECIFIC_PHOTO_CLASS = None
def format_dimensions(dimensions): def format_dimensions(dimensions):
return '%d x %d' % (dimensions[0], dimensions[1]) return "%d x %d" % (dimensions[0], dimensions[1])
def get_delta_dimensions(value, ref_value): def get_delta_dimensions(value, ref_value):
return (value[0]-ref_value[0], value[1]-ref_value[1]) return (value[0] - ref_value[0], value[1] - ref_value[1])
class Photo(fs.File): class Photo(fs.File):
INITIAL_INFO = fs.File.INITIAL_INFO.copy() INITIAL_INFO = fs.File.INITIAL_INFO.copy()
INITIAL_INFO.update({ INITIAL_INFO.update({"dimensions": (0, 0), "exif_timestamp": ""})
'dimensions': (0, 0),
'exif_timestamp': '',
})
__slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys()) __slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys())
# These extensions are supported on all platforms # These extensions are supported on all platforms
HANDLED_EXTS = {'png', 'jpg', 'jpeg', 'gif', 'bmp', 'tiff', 'tif'} HANDLED_EXTS = {"png", "jpg", "jpeg", "gif", "bmp", "tiff", "tif"}
def _plat_get_dimensions(self): def _plat_get_dimensions(self):
raise NotImplementedError() raise NotImplementedError()
@@ -39,25 +38,25 @@ class Photo(fs.File):
raise NotImplementedError() raise NotImplementedError()
def _get_orientation(self): def _get_orientation(self):
if not hasattr(self, '_cached_orientation'): if not hasattr(self, "_cached_orientation"):
try: try:
with self.path.open('rb') as fp: with self.path.open("rb") as fp:
exifdata = exif.get_fields(fp) exifdata = exif.get_fields(fp)
# the value is a list (probably one-sized) of ints # the value is a list (probably one-sized) of ints
orientations = exifdata['Orientation'] orientations = exifdata["Orientation"]
self._cached_orientation = orientations[0] self._cached_orientation = orientations[0]
except Exception: # Couldn't read EXIF data, no transforms except Exception: # Couldn't read EXIF data, no transforms
self._cached_orientation = 0 self._cached_orientation = 0
return self._cached_orientation return self._cached_orientation
def _get_exif_timestamp(self): def _get_exif_timestamp(self):
try: try:
with self.path.open('rb') as fp: with self.path.open("rb") as fp:
exifdata = exif.get_fields(fp) exifdata = exif.get_fields(fp)
return exifdata['DateTimeOriginal'] return exifdata["DateTimeOriginal"]
except Exception: except Exception:
logging.info("Couldn't read EXIF of picture: %s", self.path) logging.info("Couldn't read EXIF of picture: %s", self.path)
return '' return ""
@classmethod @classmethod
def can_handle(cls, path): def can_handle(cls, path):
@@ -79,28 +78,27 @@ class Photo(fs.File):
else: else:
percentage = group.percentage percentage = group.percentage
dupe_count = len(group.dupes) dupe_count = len(group.dupes)
dupe_folder_path = getattr(self, 'display_folder_path', self.folder_path) dupe_folder_path = getattr(self, "display_folder_path", self.folder_path)
return { return {
'name': self.name, "name": self.name,
'folder_path': str(dupe_folder_path), "folder_path": str(dupe_folder_path),
'size': format_size(size, 0, 1, False), "size": format_size(size, 0, 1, False),
'extension': self.extension, "extension": self.extension,
'dimensions': format_dimensions(dimensions), "dimensions": format_dimensions(dimensions),
'exif_timestamp': self.exif_timestamp, "exif_timestamp": self.exif_timestamp,
'mtime': format_timestamp(mtime, delta and m), "mtime": format_timestamp(mtime, delta and m),
'percentage': format_perc(percentage), "percentage": format_perc(percentage),
'dupe_count': format_dupe_count(dupe_count), "dupe_count": format_dupe_count(dupe_count),
} }
def _read_info(self, field): def _read_info(self, field):
fs.File._read_info(self, field) fs.File._read_info(self, field)
if field == 'dimensions': if field == "dimensions":
self.dimensions = self._plat_get_dimensions() self.dimensions = self._plat_get_dimensions()
if self._get_orientation() in {5, 6, 7, 8}: if self._get_orientation() in {5, 6, 7, 8}:
self.dimensions = (self.dimensions[1], self.dimensions[0]) self.dimensions = (self.dimensions[1], self.dimensions[0])
elif field == 'exif_timestamp': elif field == "exif_timestamp":
self.exif_timestamp = self._get_exif_timestamp() self.exif_timestamp = self._get_exif_timestamp()
def get_blocks(self, block_count_per_side): def get_blocks(self, block_count_per_side):
return self._plat_get_blocks(block_count_per_side, self._get_orientation()) return self._plat_get_blocks(block_count_per_side, self._get_orientation())
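
A small sketch of the dimension swap in _read_info() above: EXIF orientations 5 through 8 denote 90-degree rotations, so width and height trade places for display.

    def display_dimensions(raw_dimensions, orientation):
        # Orientations 5-8 are the rotated variants in the EXIF spec.
        if orientation in {5, 6, 7, 8}:
            return (raw_dimensions[1], raw_dimensions[0])
        return raw_dimensions

    assert display_dimensions((4000, 3000), 6) == (3000, 4000)
    assert display_dimensions((4000, 3000), 1) == (4000, 3000)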


@@ -8,11 +8,16 @@
from hscommon.trans import trget from hscommon.trans import trget
from core.prioritize import ( from core.prioritize import (
KindCategory, FolderCategory, FilenameCategory, NumericalCategory, KindCategory,
SizeCategory, MtimeCategory FolderCategory,
FilenameCategory,
NumericalCategory,
SizeCategory,
MtimeCategory,
) )
coltr = trget('columns') coltr = trget("columns")
class DimensionsCategory(NumericalCategory): class DimensionsCategory(NumericalCategory):
NAME = coltr("Dimensions") NAME = coltr("Dimensions")
@@ -24,8 +29,13 @@ class DimensionsCategory(NumericalCategory):
width, height = value width, height = value
return (-width, -height) return (-width, -height)
def all_categories(): def all_categories():
return [ return [
KindCategory, FolderCategory, FilenameCategory, SizeCategory, DimensionsCategory, KindCategory,
MtimeCategory FolderCategory,
FilenameCategory,
SizeCategory,
DimensionsCategory,
MtimeCategory,
] ]


@@ -1,8 +1,8 @@
# Created On: 2011-11-27 # Created On: 2011-11-27
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.gui.column import Column from hscommon.gui.column import Column
@@ -10,19 +10,20 @@ from hscommon.trans import trget
from core.gui.result_table import ResultTable as ResultTableBase from core.gui.result_table import ResultTable as ResultTableBase
coltr = trget('columns') coltr = trget("columns")
class ResultTable(ResultTableBase): class ResultTable(ResultTableBase):
COLUMNS = [ COLUMNS = [
Column('marked', ''), Column("marked", ""),
Column('name', coltr("Filename")), Column("name", coltr("Filename")),
Column('folder_path', coltr("Folder"), optional=True), Column("folder_path", coltr("Folder"), optional=True),
Column('size', coltr("Size (KB)"), optional=True), Column("size", coltr("Size (KB)"), optional=True),
Column('extension', coltr("Kind"), visible=False, optional=True), Column("extension", coltr("Kind"), visible=False, optional=True),
Column('dimensions', coltr("Dimensions"), optional=True), Column("dimensions", coltr("Dimensions"), optional=True),
Column('exif_timestamp', coltr("EXIF Timestamp"), visible=False, optional=True), Column("exif_timestamp", coltr("EXIF Timestamp"), visible=False, optional=True),
Column('mtime', coltr("Modification"), visible=False, optional=True), Column("mtime", coltr("Modification"), visible=False, optional=True),
Column('percentage', coltr("Match %"), optional=True), Column("percentage", coltr("Match %"), optional=True),
Column('dupe_count', coltr("Dupe Count"), visible=False, optional=True), Column("dupe_count", coltr("Dupe Count"), visible=False, optional=True),
] ]
DELTA_COLUMNS = {'size', 'dimensions', 'mtime'} DELTA_COLUMNS = {"size", "dimensions", "mtime"}


@@ -10,6 +10,7 @@ from core.scanner import Scanner, ScanType, ScanOption
from . import matchblock, matchexif from . import matchblock, matchexif
class ScannerPE(Scanner): class ScannerPE(Scanner):
cache_path = None cache_path = None
match_scaled = False match_scaled = False
@@ -28,10 +29,9 @@ class ScannerPE(Scanner):
cache_path=self.cache_path, cache_path=self.cache_path,
threshold=self.min_match_percentage, threshold=self.min_match_percentage,
match_scaled=self.match_scaled, match_scaled=self.match_scaled,
j=j j=j,
) )
elif self.scan_type == ScanType.ExifTimestamp: elif self.scan_type == ScanType.ExifTimestamp:
return matchexif.getmatches(files, self.match_scaled, j) return matchexif.getmatches(files, self.match_scaled, j)
else: else:
raise Exception("Invalid scan type") raise Exception("Invalid scan type")


@@ -1,48 +1,50 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2011/09/07 # Created On: 2011/09/07
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.util import dedupe, flatten, rem_file_ext from hscommon.util import dedupe, flatten, rem_file_ext
from hscommon.trans import trget, tr from hscommon.trans import trget, tr
coltr = trget('columns') coltr = trget("columns")
class CriterionCategory: class CriterionCategory:
NAME = "Undefined" NAME = "Undefined"
def __init__(self, results): def __init__(self, results):
self.results = results self.results = results
#--- Virtual # --- Virtual
def extract_value(self, dupe): def extract_value(self, dupe):
raise NotImplementedError() raise NotImplementedError()
def format_criterion_value(self, value): def format_criterion_value(self, value):
return value return value
def sort_key(self, dupe, crit_value): def sort_key(self, dupe, crit_value):
raise NotImplementedError() raise NotImplementedError()
def criteria_list(self): def criteria_list(self):
raise NotImplementedError() raise NotImplementedError()
class Criterion: class Criterion:
def __init__(self, category, value): def __init__(self, category, value):
self.category = category self.category = category
self.value = value self.value = value
self.display_value = category.format_criterion_value(value) self.display_value = category.format_criterion_value(value)
def sort_key(self, dupe): def sort_key(self, dupe):
return self.category.sort_key(dupe, self.value) return self.category.sort_key(dupe, self.value)
@property @property
def display(self): def display(self):
return "{} ({})".format(self.category.NAME, self.display_value) return "{} ({})".format(self.category.NAME, self.display_value)
class ValueListCategory(CriterionCategory): class ValueListCategory(CriterionCategory):
def sort_key(self, dupe, crit_value): def sort_key(self, dupe, crit_value):
@@ -52,45 +54,47 @@ class ValueListCategory(CriterionCategory):
return 0 return 0
else: else:
return 1 return 1
def criteria_list(self): def criteria_list(self):
dupes = flatten(g[:] for g in self.results.groups) dupes = flatten(g[:] for g in self.results.groups)
values = sorted(dedupe(self.extract_value(d) for d in dupes)) values = sorted(dedupe(self.extract_value(d) for d in dupes))
return [Criterion(self, value) for value in values] return [Criterion(self, value) for value in values]
class KindCategory(ValueListCategory): class KindCategory(ValueListCategory):
NAME = coltr("Kind") NAME = coltr("Kind")
def extract_value(self, dupe): def extract_value(self, dupe):
value = dupe.extension value = dupe.extension
if not value: if not value:
value = tr("None") value = tr("None")
return value return value
class FolderCategory(ValueListCategory): class FolderCategory(ValueListCategory):
NAME = coltr("Folder") NAME = coltr("Folder")
def extract_value(self, dupe): def extract_value(self, dupe):
return dupe.folder_path return dupe.folder_path
def format_criterion_value(self, value): def format_criterion_value(self, value):
return str(value) return str(value)
def sort_key(self, dupe, crit_value): def sort_key(self, dupe, crit_value):
value = self.extract_value(dupe) value = self.extract_value(dupe)
if value[:len(crit_value)] == crit_value: if value[: len(crit_value)] == crit_value:
return 0 return 0
else: else:
return 1 return 1
class FilenameCategory(CriterionCategory): class FilenameCategory(CriterionCategory):
NAME = coltr("Filename") NAME = coltr("Filename")
ENDS_WITH_NUMBER = 0 ENDS_WITH_NUMBER = 0
DOESNT_END_WITH_NUMBER = 1 DOESNT_END_WITH_NUMBER = 1
LONGEST = 2 LONGEST = 2
SHORTEST = 3 SHORTEST = 3
def format_criterion_value(self, value): def format_criterion_value(self, value):
return { return {
self.ENDS_WITH_NUMBER: tr("Ends with number"), self.ENDS_WITH_NUMBER: tr("Ends with number"),
@@ -98,10 +102,10 @@ class FilenameCategory(CriterionCategory):
self.LONGEST: tr("Longest"), self.LONGEST: tr("Longest"),
self.SHORTEST: tr("Shortest"), self.SHORTEST: tr("Shortest"),
}[value] }[value]
def extract_value(self, dupe): def extract_value(self, dupe):
return rem_file_ext(dupe.name) return rem_file_ext(dupe.name)
def sort_key(self, dupe, crit_value): def sort_key(self, dupe, crit_value):
value = self.extract_value(dupe) value = self.extract_value(dupe)
if crit_value in {self.ENDS_WITH_NUMBER, self.DOESNT_END_WITH_NUMBER}: if crit_value in {self.ENDS_WITH_NUMBER, self.DOESNT_END_WITH_NUMBER}:
@@ -113,50 +117,57 @@ class FilenameCategory(CriterionCategory):
else: else:
value = len(value) value = len(value)
if crit_value == self.LONGEST: if crit_value == self.LONGEST:
value *= -1 # We want the biggest values on top value *= -1 # We want the biggest values on top
return value return value
def criteria_list(self): def criteria_list(self):
return [Criterion(self, crit_value) for crit_value in [ return [
self.ENDS_WITH_NUMBER, Criterion(self, crit_value)
self.DOESNT_END_WITH_NUMBER, for crit_value in [
self.LONGEST, self.ENDS_WITH_NUMBER,
self.SHORTEST, self.DOESNT_END_WITH_NUMBER,
]] self.LONGEST,
self.SHORTEST,
]
]
class NumericalCategory(CriterionCategory): class NumericalCategory(CriterionCategory):
HIGHEST = 0 HIGHEST = 0
LOWEST = 1 LOWEST = 1
def format_criterion_value(self, value): def format_criterion_value(self, value):
return tr("Highest") if value == self.HIGHEST else tr("Lowest") return tr("Highest") if value == self.HIGHEST else tr("Lowest")
def invert_numerical_value(self, value): # Virtual def invert_numerical_value(self, value): # Virtual
return value * -1 return value * -1
def sort_key(self, dupe, crit_value): def sort_key(self, dupe, crit_value):
value = self.extract_value(dupe) value = self.extract_value(dupe)
if crit_value == self.HIGHEST: # we want highest values on top if crit_value == self.HIGHEST: # we want highest values on top
value = self.invert_numerical_value(value) value = self.invert_numerical_value(value)
return value return value
def criteria_list(self): def criteria_list(self):
return [Criterion(self, self.HIGHEST), Criterion(self, self.LOWEST)] return [Criterion(self, self.HIGHEST), Criterion(self, self.LOWEST)]
class SizeCategory(NumericalCategory): class SizeCategory(NumericalCategory):
NAME = coltr("Size") NAME = coltr("Size")
def extract_value(self, dupe): def extract_value(self, dupe):
return dupe.size return dupe.size
class MtimeCategory(NumericalCategory): class MtimeCategory(NumericalCategory):
NAME = coltr("Modification") NAME = coltr("Modification")
def extract_value(self, dupe): def extract_value(self, dupe):
return dupe.mtime return dupe.mtime
def format_criterion_value(self, value): def format_criterion_value(self, value):
return tr("Newest") if value == self.HIGHEST else tr("Oldest") return tr("Newest") if value == self.HIGHEST else tr("Oldest")
def all_categories(): def all_categories():
return [KindCategory, FolderCategory, FilenameCategory, SizeCategory, MtimeCategory] return [KindCategory, FolderCategory, FilenameCategory, SizeCategory, MtimeCategory]
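
A standalone sketch of how a NumericalCategory criterion orders duplicates, as in sort_key() above: HIGHEST negates the extracted value so that a plain ascending sort puts the largest first. Dicts stand in for core.fs.File objects.

    class SizeCriterion:
        HIGHEST, LOWEST = 0, 1

        def __init__(self, crit_value):
            self.crit_value = crit_value

        def sort_key(self, dupe):
            value = dupe["size"]
            if self.crit_value == self.HIGHEST:
                value = -value  # invert so the highest values sort first
            return value

    dupes = [{"name": "a", "size": 10}, {"name": "b", "size": 30}]
    crit = SizeCriterion(SizeCriterion.HIGHEST)
    assert sorted(dupes, key=crit.sort_key)[0]["name"] == "b"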


@@ -20,6 +20,7 @@ from hscommon.trans import tr
from . import engine from . import engine
from .markable import Markable from .markable import Markable
class Results(Markable): class Results(Markable):
"""Manages a collection of duplicate :class:`~core.engine.Group`. """Manages a collection of duplicate :class:`~core.engine.Group`.
@@ -34,21 +35,22 @@ class Results(Markable):
A list of all duplicates (:class:`~core.fs.File` instances), without ref, contained in the A list of all duplicates (:class:`~core.fs.File` instances), without ref, contained in the
currently managed :attr:`groups`. currently managed :attr:`groups`.
""" """
#---Override
# ---Override
def __init__(self, app): def __init__(self, app):
Markable.__init__(self) Markable.__init__(self)
self.__groups = [] self.__groups = []
self.__group_of_duplicate = {} self.__group_of_duplicate = {}
self.__groups_sort_descriptor = None # This is a tuple (key, asc) self.__groups_sort_descriptor = None # This is a tuple (key, asc)
self.__dupes = None self.__dupes = None
self.__dupes_sort_descriptor = None # This is a tuple (key, asc, delta) self.__dupes_sort_descriptor = None # This is a tuple (key, asc, delta)
self.__filters = None self.__filters = None
self.__filtered_dupes = None self.__filtered_dupes = None
self.__filtered_groups = None self.__filtered_groups = None
self.__recalculate_stats() self.__recalculate_stats()
self.__marked_size = 0 self.__marked_size = 0
self.app = app self.app = app
self.problems = [] # (dupe, error_msg) self.problems = [] # (dupe, error_msg)
self.is_modified = False self.is_modified = False
def _did_mark(self, dupe): def _did_mark(self, dupe):
@@ -90,7 +92,7 @@ class Results(Markable):
else: else:
Markable.mark_none(self) Markable.mark_none(self)
#---Private # ---Private
def __get_dupe_list(self): def __get_dupe_list(self):
if self.__dupes is None: if self.__dupes is None:
self.__dupes = flatten(group.dupes for group in self.groups) self.__dupes = flatten(group.dupes for group in self.groups)
@@ -98,10 +100,13 @@ class Results(Markable):
# This is debug logging to try to figure out #44 # This is debug logging to try to figure out #44
logging.warning( logging.warning(
"There is a None value in the Results' dupe list. dupes: %r groups: %r", "There is a None value in the Results' dupe list. dupes: %r groups: %r",
self.__dupes, self.groups self.__dupes,
self.groups,
) )
if self.__filtered_dupes: if self.__filtered_dupes:
self.__dupes = [dupe for dupe in self.__dupes if dupe in self.__filtered_dupes] self.__dupes = [
dupe for dupe in self.__dupes if dupe in self.__filtered_dupes
]
sd = self.__dupes_sort_descriptor sd = self.__dupes_sort_descriptor
if sd: if sd:
self.sort_dupes(sd[0], sd[1], sd[2]) self.sort_dupes(sd[0], sd[1], sd[2])
@@ -120,10 +125,18 @@ class Results(Markable):
total_count = self.__total_count total_count = self.__total_count
total_size = self.__total_size total_size = self.__total_size
else: else:
mark_count = len([dupe for dupe in self.__filtered_dupes if self.is_marked(dupe)]) mark_count = len(
marked_size = sum(dupe.size for dupe in self.__filtered_dupes if self.is_marked(dupe)) [dupe for dupe in self.__filtered_dupes if self.is_marked(dupe)]
total_count = len([dupe for dupe in self.__filtered_dupes if self.is_markable(dupe)]) )
total_size = sum(dupe.size for dupe in self.__filtered_dupes if self.is_markable(dupe)) marked_size = sum(
dupe.size for dupe in self.__filtered_dupes if self.is_marked(dupe)
)
total_count = len(
[dupe for dupe in self.__filtered_dupes if self.is_markable(dupe)]
)
total_size = sum(
dupe.size for dupe in self.__filtered_dupes if self.is_markable(dupe)
)
if self.mark_inverted: if self.mark_inverted:
marked_size = self.__total_size - marked_size marked_size = self.__total_size - marked_size
result = tr("%d / %d (%s / %s) duplicates marked.") % ( result = tr("%d / %d (%s / %s) duplicates marked.") % (
@@ -133,7 +146,7 @@ class Results(Markable):
format_size(total_size, 2), format_size(total_size, 2),
) )
if self.__filters: if self.__filters:
result += tr(" filter: %s") % ' --> '.join(self.__filters) result += tr(" filter: %s") % " --> ".join(self.__filters)
return result return result
def __recalculate_stats(self): def __recalculate_stats(self):
@@ -151,7 +164,7 @@ class Results(Markable):
for g in self.__groups: for g in self.__groups:
for dupe in g: for dupe in g:
self.__group_of_duplicate[dupe] = g self.__group_of_duplicate[dupe] = g
if not hasattr(dupe, 'is_ref'): if not hasattr(dupe, "is_ref"):
dupe.is_ref = False dupe.is_ref = False
self.is_modified = bool(self.__groups) self.is_modified = bool(self.__groups)
old_filters = nonone(self.__filters, []) old_filters = nonone(self.__filters, [])
@@ -159,7 +172,7 @@ class Results(Markable):
for filter_str in old_filters: for filter_str in old_filters:
self.apply_filter(filter_str) self.apply_filter(filter_str)
#---Public # ---Public
def apply_filter(self, filter_str): def apply_filter(self, filter_str):
"""Applies a filter ``filter_str`` to :attr:`groups` """Applies a filter ``filter_str`` to :attr:`groups`
@@ -182,11 +195,15 @@ class Results(Markable):
try: try:
filter_re = re.compile(filter_str, re.IGNORECASE) filter_re = re.compile(filter_str, re.IGNORECASE)
except re.error: except re.error:
return # don't apply this filter. return # don't apply this filter.
self.__filters.append(filter_str) self.__filters.append(filter_str)
if self.__filtered_dupes is None: if self.__filtered_dupes is None:
self.__filtered_dupes = flatten(g[:] for g in self.groups) self.__filtered_dupes = flatten(g[:] for g in self.groups)
self.__filtered_dupes = set(dupe for dupe in self.__filtered_dupes if filter_re.search(str(dupe.path))) self.__filtered_dupes = set(
dupe
for dupe in self.__filtered_dupes
if filter_re.search(str(dupe.path))
)
filtered_groups = set() filtered_groups = set()
for dupe in self.__filtered_dupes: for dupe in self.__filtered_dupes:
filtered_groups.add(self.get_group_of_duplicate(dupe)) filtered_groups.add(self.get_group_of_duplicate(dupe))
@@ -214,6 +231,7 @@ class Results(Markable):
:param get_file: a function f(path) returning a :class:`~core.fs.File` wrapping the path. :param get_file: a function f(path) returning a :class:`~core.fs.File` wrapping the path.
:param j: A :ref:`job progress instance <jobs>`. :param j: A :ref:`job progress instance <jobs>`.
""" """
def do_match(ref_file, other_files, group): def do_match(ref_file, other_files, group):
if not other_files: if not other_files:
return return
@@ -223,31 +241,31 @@ class Results(Markable):
self.apply_filter(None) self.apply_filter(None)
root = ET.parse(infile).getroot() root = ET.parse(infile).getroot()
group_elems = list(root.getiterator('group')) group_elems = list(root.getiterator("group"))
groups = [] groups = []
marked = set() marked = set()
for group_elem in j.iter_with_progress(group_elems, every=100): for group_elem in j.iter_with_progress(group_elems, every=100):
group = engine.Group() group = engine.Group()
dupes = [] dupes = []
for file_elem in group_elem.getiterator('file'): for file_elem in group_elem.getiterator("file"):
path = file_elem.get('path') path = file_elem.get("path")
words = file_elem.get('words', '') words = file_elem.get("words", "")
if not path: if not path:
continue continue
file = get_file(path) file = get_file(path)
if file is None: if file is None:
continue continue
file.words = words.split(',') file.words = words.split(",")
file.is_ref = file_elem.get('is_ref') == 'y' file.is_ref = file_elem.get("is_ref") == "y"
dupes.append(file) dupes.append(file)
if file_elem.get('marked') == 'y': if file_elem.get("marked") == "y":
marked.add(file) marked.add(file)
for match_elem in group_elem.getiterator('match'): for match_elem in group_elem.getiterator("match"):
try: try:
attrs = match_elem.attrib attrs = match_elem.attrib
first_file = dupes[int(attrs['first'])] first_file = dupes[int(attrs["first"])]
second_file = dupes[int(attrs['second'])] second_file = dupes[int(attrs["second"])]
percentage = int(attrs['percentage']) percentage = int(attrs["percentage"])
group.add_match(engine.Match(first_file, second_file, percentage)) group.add_match(engine.Match(first_file, second_file, percentage))
except (IndexError, KeyError, ValueError): except (IndexError, KeyError, ValueError):
# Covers missing attr, non-int values and indexes out of bounds # Covers missing attr, non-int values and indexes out of bounds
@ -339,9 +357,9 @@ class Results(Markable):
:param outfile: file object or path. :param outfile: file object or path.
""" """
self.apply_filter(None) self.apply_filter(None)
root = ET.Element('results') root = ET.Element("results")
for g in self.groups: for g in self.groups:
group_elem = ET.SubElement(root, 'group') group_elem = ET.SubElement(root, "group")
dupe2index = {} dupe2index = {}
for index, d in enumerate(g): for index, d in enumerate(g):
dupe2index[d] = index dupe2index[d] = index
@ -349,24 +367,24 @@ class Results(Markable):
words = engine.unpack_fields(d.words) words = engine.unpack_fields(d.words)
except AttributeError: except AttributeError:
words = () words = ()
file_elem = ET.SubElement(group_elem, 'file') file_elem = ET.SubElement(group_elem, "file")
try: try:
file_elem.set('path', str(d.path)) file_elem.set("path", str(d.path))
file_elem.set('words', ','.join(words)) file_elem.set("words", ",".join(words))
except ValueError: # If there's an invalid character, just skip the file except ValueError: # If there's an invalid character, just skip the file
file_elem.set('path', '') file_elem.set("path", "")
file_elem.set('is_ref', ('y' if d.is_ref else 'n')) file_elem.set("is_ref", ("y" if d.is_ref else "n"))
file_elem.set('marked', ('y' if self.is_marked(d) else 'n')) file_elem.set("marked", ("y" if self.is_marked(d) else "n"))
for match in g.matches: for match in g.matches:
match_elem = ET.SubElement(group_elem, 'match') match_elem = ET.SubElement(group_elem, "match")
match_elem.set('first', str(dupe2index[match.first])) match_elem.set("first", str(dupe2index[match.first]))
match_elem.set('second', str(dupe2index[match.second])) match_elem.set("second", str(dupe2index[match.second]))
match_elem.set('percentage', str(int(match.percentage))) match_elem.set("percentage", str(int(match.percentage)))
tree = ET.ElementTree(root) tree = ET.ElementTree(root)
def do_write(outfile): def do_write(outfile):
with FileOrPath(outfile, 'wb') as fp: with FileOrPath(outfile, "wb") as fp:
tree.write(fp, encoding='utf-8') tree.write(fp, encoding="utf-8")
try: try:
do_write(outfile) do_write(outfile)
@ -392,7 +410,9 @@ class Results(Markable):
""" """
if not self.__dupes: if not self.__dupes:
self.__get_dupe_list() self.__get_dupe_list()
keyfunc = lambda d: self.app._get_dupe_sort_key(d, lambda: self.get_group_of_duplicate(d), key, delta) keyfunc = lambda d: self.app._get_dupe_sort_key(
d, lambda: self.get_group_of_duplicate(d), key, delta
)
self.__dupes.sort(key=keyfunc, reverse=not asc) self.__dupes.sort(key=keyfunc, reverse=not asc)
self.__dupes_sort_descriptor = (key, asc, delta) self.__dupes_sort_descriptor = (key, asc, delta)
@ -408,8 +428,7 @@ class Results(Markable):
self.groups.sort(key=keyfunc, reverse=not asc) self.groups.sort(key=keyfunc, reverse=not asc)
self.__groups_sort_descriptor = (key, asc) self.__groups_sort_descriptor = (key, asc)
#---Properties # ---Properties
dupes = property(__get_dupe_list) dupes = property(__get_dupe_list)
groups = property(__get_groups, __set_groups) groups = property(__get_groups, __set_groups)
stat_line = property(__get_stat_line) stat_line = property(__get_stat_line)
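
For orientation, the XML document that the load hunk above parses and the save hunk emits looks roughly like this. This is a hand-written sample: the element and attribute names come straight from the ET calls in the diff, but the paths and values are invented.

<results>
    <group>
        <file path="/photos/a.jpg" words="a" is_ref="y" marked="n"/>
        <file path="/photos/a copy.jpg" words="a,copy" is_ref="n" marked="y"/>
        <match first="0" second="1" percentage="100"/>
    </group>
</results>

The "first" and "second" attributes are indexes into the group's file elements, which is why save_to_xml builds the dupe2index mapping before writing matches.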


@@ -19,6 +19,7 @@ from . import engine
 # there will be some nasty bugs popping up (ScanType is used in core when it should exclusively be
 # used in core_*). One day I'll clean this up.

+
 class ScanType:
     Filename = 0
     Fields = 1
@@ -27,23 +28,26 @@ class ScanType:
     Folders = 4
     Contents = 5

-    #PE
+    # PE
     FuzzyBlock = 10
     ExifTimestamp = 11

-ScanOption = namedtuple('ScanOption', 'scan_type label')

-SCANNABLE_TAGS = ['track', 'artist', 'album', 'title', 'genre', 'year']
+ScanOption = namedtuple("ScanOption", "scan_type label")

-RE_DIGIT_ENDING = re.compile(r'\d+|\(\d+\)|\[\d+\]|{\d+}')
+SCANNABLE_TAGS = ["track", "artist", "album", "title", "genre", "year"]
+
+RE_DIGIT_ENDING = re.compile(r"\d+|\(\d+\)|\[\d+\]|{\d+}")
+

 def is_same_with_digit(name, refname):
     # Returns True if name is the same as refname, but with digits (with brackets or not) at the end
     if not name.startswith(refname):
         return False
-    end = name[len(refname):].strip()
+    end = name[len(refname) :].strip()
     return RE_DIGIT_ENDING.match(end) is not None

+
 def remove_dupe_paths(files):
     # Returns files with duplicates-by-path removed. Files with the exact same path are considered
     # duplicates and only the first file to have a path is kept. In certain cases, we have files
@@ -57,25 +61,29 @@ def remove_dupe_paths(files):
         if normalized in path2file:
             try:
                 if op.samefile(normalized, str(path2file[normalized].path)):
-                    continue # same file, it's a dupe
+                    continue  # same file, it's a dupe
                 else:
-                    pass # We don't treat them as dupes
+                    pass  # We don't treat them as dupes
             except OSError:
-                continue # File doesn't exist? Well, treat them as dupes
+                continue  # File doesn't exist? Well, treat them as dupes
         else:
             path2file[normalized] = f
         result.append(f)
     return result

+
 class Scanner:
     def __init__(self):
         self.discarded_file_count = 0

     def _getmatches(self, files, j):
-        if self.size_threshold or self.scan_type in {ScanType.Contents, ScanType.Folders}:
+        if self.size_threshold or self.scan_type in {
+            ScanType.Contents,
+            ScanType.Folders,
+        }:
             j = j.start_subjob([2, 8])
             for f in j.iter_with_progress(files, tr("Read size of %d/%d files")):
-                f.size # pre-read, makes a smoother progress if read here (especially for bundles)
+                f.size  # pre-read, makes a smoother progress if read here (especially for bundles)
         if self.size_threshold:
             files = [f for f in files if f.size >= self.size_threshold]
         if self.scan_type in {ScanType.Contents, ScanType.Folders}:
@@ -83,12 +91,12 @@ class Scanner:
         else:
             j = j.start_subjob([2, 8])
             kw = {}
-            kw['match_similar_words'] = self.match_similar_words
-            kw['weight_words'] = self.word_weighting
-            kw['min_match_percentage'] = self.min_match_percentage
+            kw["match_similar_words"] = self.match_similar_words
+            kw["weight_words"] = self.word_weighting
+            kw["min_match_percentage"] = self.min_match_percentage
             if self.scan_type == ScanType.FieldsNoOrder:
                 self.scan_type = ScanType.Fields
-                kw['no_field_order'] = True
+                kw["no_field_order"] = True
             func = {
                 ScanType.Filename: lambda f: engine.getwords(rem_file_ext(f.name)),
                 ScanType.Fields: lambda f: engine.getfields(rem_file_ext(f.name)),
@@ -111,9 +119,9 @@ class Scanner:
     def _tie_breaker(ref, dupe):
         refname = rem_file_ext(ref.name).lower()
         dupename = rem_file_ext(dupe.name).lower()
-        if 'copy' in dupename:
+        if "copy" in dupename:
             return False
-        if 'copy' in refname:
+        if "copy" in refname:
             return True
         if is_same_with_digit(dupename, refname):
             return False
@@ -130,12 +138,12 @@ class Scanner:
         raise NotImplementedError()

     def get_dupe_groups(self, files, ignore_list=None, j=job.nulljob):
-        for f in (f for f in files if not hasattr(f, 'is_ref')):
+        for f in (f for f in files if not hasattr(f, "is_ref")):
             f.is_ref = False
         files = remove_dupe_paths(files)
         logging.info("Getting matches. Scan type: %d", self.scan_type)
         matches = self._getmatches(files, j)
-        logging.info('Found %d matches' % len(matches))
+        logging.info("Found %d matches" % len(matches))
         j.set_progress(100, tr("Almost done! Fiddling with results..."))
         # In removing what we call here "false matches", we first want to remove, if we scan by
         # folders, we want to remove folder matches for which the parent is also in a match (they're
@@ -153,20 +161,38 @@ class Scanner:
                     toremove.add(p)
                 else:
                     last_parent_path = p
-            matches = [m for m in matches if m.first.path not in toremove or m.second.path not in toremove]
+            matches = [
+                m
+                for m in matches
+                if m.first.path not in toremove or m.second.path not in toremove
+            ]
         if not self.mix_file_kind:
-            matches = [m for m in matches if get_file_ext(m.first.name) == get_file_ext(m.second.name)]
-        matches = [m for m in matches if m.first.path.exists() and m.second.path.exists()]
+            matches = [
+                m
+                for m in matches
+                if get_file_ext(m.first.name) == get_file_ext(m.second.name)
+            ]
+        matches = [
+            m for m in matches if m.first.path.exists() and m.second.path.exists()
+        ]
         matches = [m for m in matches if not (m.first.is_ref and m.second.is_ref)]
         if ignore_list:
             matches = [
-                m for m in matches
+                m
+                for m in matches
                 if not ignore_list.AreIgnored(str(m.first.path), str(m.second.path))
             ]
-        logging.info('Grouping matches')
+        logging.info("Grouping matches")
         groups = engine.get_groups(matches)
-        if self.scan_type in {ScanType.Filename, ScanType.Fields, ScanType.FieldsNoOrder, ScanType.Tag}:
-            matched_files = dedupe([m.first for m in matches] + [m.second for m in matches])
+        if self.scan_type in {
+            ScanType.Filename,
+            ScanType.Fields,
+            ScanType.FieldsNoOrder,
+            ScanType.Tag,
+        }:
+            matched_files = dedupe(
+                [m.first for m in matches] + [m.second for m in matches]
+            )
             self.discarded_file_count = len(matched_files) - sum(len(g) for g in groups)
         else:
             # Ticket #195
@@ -181,7 +207,7 @@ class Scanner:
             # reporting discarded matches.
             self.discarded_file_count = 0
         groups = [g for g in groups if any(not f.is_ref for f in g)]
-        logging.info('Created %d groups' % len(groups))
+        logging.info("Created %d groups" % len(groups))
         for g in groups:
             g.prioritize(self._key_func, self._tie_breaker)
         return groups
@@ -190,7 +216,6 @@ class Scanner:
     min_match_percentage = 80
     mix_file_kind = True
     scan_type = ScanType.Filename
-    scanned_tags = {'artist', 'title'}
+    scanned_tags = {"artist", "title"}
     size_threshold = 0
     word_weighting = False
-
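
A minimal sketch of driving this class end to end, assuming the SE edition's concrete subclass (its import path is a guess) and a list of core.fs.File objects in a `files` placeholder; the attribute names and the nulljob default come from the code above.

from hscommon import job
from core.scanner import ScanType
from core_se.scanner import ScannerSE  # assumed import path for the SE subclass

scanner = ScannerSE()
scanner.scan_type = ScanType.Contents  # one of the options get_scan_options() lists
scanner.size_threshold = 1024          # skip files smaller than 1 KiB
groups = scanner.get_dupe_groups(files, ignore_list=None, j=job.nulljob)
for group in groups:
    print(group.ref.name, [dupe.name for dupe in group.dupes])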


@@ -1 +1 @@
-from . import fs, result_table, scanner # noqa
+from . import fs, result_table, scanner  # noqa


@@ -11,6 +11,7 @@ from hscommon.util import format_size
 from core import fs
 from core.util import format_timestamp, format_perc, format_words, format_dupe_count

+
 def get_display_info(dupe, group, delta):
     size = dupe.size
     mtime = dupe.mtime
@@ -26,16 +27,17 @@ def get_display_info(dupe, group, delta):
         percentage = group.percentage
         dupe_count = len(group.dupes)
     return {
-        'name': dupe.name,
-        'folder_path': str(dupe.folder_path),
-        'size': format_size(size, 0, 1, False),
-        'extension': dupe.extension,
-        'mtime': format_timestamp(mtime, delta and m),
-        'percentage': format_perc(percentage),
-        'words': format_words(dupe.words) if hasattr(dupe, 'words') else '',
-        'dupe_count': format_dupe_count(dupe_count),
+        "name": dupe.name,
+        "folder_path": str(dupe.folder_path),
+        "size": format_size(size, 0, 1, False),
+        "extension": dupe.extension,
+        "mtime": format_timestamp(mtime, delta and m),
+        "percentage": format_perc(percentage),
+        "words": format_words(dupe.words) if hasattr(dupe, "words") else "",
+        "dupe_count": format_dupe_count(dupe_count),
     }

+
 class File(fs.File):
     def get_display_info(self, group, delta):
         return get_display_info(self, group, delta)
@@ -44,4 +46,3 @@ class File(fs.File):

 class Folder(fs.Folder):
     def get_display_info(self, group, delta):
         return get_display_info(self, group, delta)
-
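
Concretely, get_display_info returns a dict of pre-formatted display strings. A sketch of its shape for one dupe, with every value invented for illustration (the exact formatting is delegated to the format_* helpers imported above):

{
    "name": "holiday.jpg",
    "folder_path": "/home/user/Pictures",
    "size": "1024",        # whatever format_size(size, 0, 1, False) produces
    "extension": "jpg",
    "mtime": "2019-12-31 20:16:27",
    "percentage": "100",
    "words": "",           # empty when the dupe has no `words` attribute
    "dupe_count": "---",
}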


@@ -1,8 +1,8 @@
 # Created On: 2011-11-27
 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
 #
 # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
 # which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

 from hscommon.gui.column import Column
@@ -10,18 +10,19 @@ from hscommon.trans import trget
 from core.gui.result_table import ResultTable as ResultTableBase

-coltr = trget('columns')
+coltr = trget("columns")
+

 class ResultTable(ResultTableBase):
     COLUMNS = [
-        Column('marked', ''),
-        Column('name', coltr("Filename")),
-        Column('folder_path', coltr("Folder"), optional=True),
-        Column('size', coltr("Size (KB)"), optional=True),
-        Column('extension', coltr("Kind"), visible=False, optional=True),
-        Column('mtime', coltr("Modification"), visible=False, optional=True),
-        Column('percentage', coltr("Match %"), optional=True),
-        Column('words', coltr("Words Used"), visible=False, optional=True),
-        Column('dupe_count', coltr("Dupe Count"), visible=False, optional=True),
+        Column("marked", ""),
+        Column("name", coltr("Filename")),
+        Column("folder_path", coltr("Folder"), optional=True),
+        Column("size", coltr("Size (KB)"), optional=True),
+        Column("extension", coltr("Kind"), visible=False, optional=True),
+        Column("mtime", coltr("Modification"), visible=False, optional=True),
+        Column("percentage", coltr("Match %"), optional=True),
+        Column("words", coltr("Words Used"), visible=False, optional=True),
+        Column("dupe_count", coltr("Dupe Count"), visible=False, optional=True),
     ]
-    DELTA_COLUMNS = {'size', 'mtime'}
+    DELTA_COLUMNS = {"size", "mtime"}
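
DELTA_COLUMNS marks which columns can switch to delta mode, where a dupe's value is shown relative to its group's reference file instead of absolutely; the NamedObject test double later in this diff implements the size case as a plain subtraction. A toy illustration with invented numbers:

ref_size, dupe_size = 1048576, 1047552   # bytes, invented values
delta = dupe_size - ref_size             # -1024; delta mode displays this
                                         # difference rather than dupe_size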


@@ -8,6 +8,7 @@ from hscommon.trans import tr

 from core.scanner import Scanner as ScannerBase, ScanOption, ScanType

+
 class ScannerSE(ScannerBase):
     @staticmethod
     def get_scan_options():
@@ -16,4 +17,3 @@ class ScannerSE(ScannerBase):
             ScanOption(ScanType.Contents, tr("Contents")),
             ScanOption(ScanType.Folders, tr("Folders")),
         ]
-


@@ -20,93 +20,106 @@ from .results_test import GetTestGroups
 from .. import app, fs, engine
 from ..scanner import ScanType

+
 def add_fake_files_to_directories(directories, files):
     directories.get_files = lambda j=None: iter(files)
-    directories._dirs.append('this is just so Scan() doesnt return 3')
+    directories._dirs.append("this is just so Scan() doesnt return 3")
+

 class TestCaseDupeGuru:
     def test_apply_filter_calls_results_apply_filter(self, monkeypatch):
         dgapp = TestApp().app
-        monkeypatch.setattr(dgapp.results, 'apply_filter', log_calls(dgapp.results.apply_filter))
-        dgapp.apply_filter('foo')
+        monkeypatch.setattr(
+            dgapp.results, "apply_filter", log_calls(dgapp.results.apply_filter)
+        )
+        dgapp.apply_filter("foo")
         eq_(2, len(dgapp.results.apply_filter.calls))
         call = dgapp.results.apply_filter.calls[0]
-        assert call['filter_str'] is None
+        assert call["filter_str"] is None
         call = dgapp.results.apply_filter.calls[1]
-        eq_('foo', call['filter_str'])
+        eq_("foo", call["filter_str"])

     def test_apply_filter_escapes_regexp(self, monkeypatch):
         dgapp = TestApp().app
-        monkeypatch.setattr(dgapp.results, 'apply_filter', log_calls(dgapp.results.apply_filter))
-        dgapp.apply_filter('()[]\\.|+?^abc')
+        monkeypatch.setattr(
+            dgapp.results, "apply_filter", log_calls(dgapp.results.apply_filter)
+        )
+        dgapp.apply_filter("()[]\\.|+?^abc")
         call = dgapp.results.apply_filter.calls[1]
-        eq_('\\(\\)\\[\\]\\\\\\.\\|\\+\\?\\^abc', call['filter_str'])
-        dgapp.apply_filter('(*)') # In "simple mode", we want the * to behave as a wildcard
+        eq_("\\(\\)\\[\\]\\\\\\.\\|\\+\\?\\^abc", call["filter_str"])
+        dgapp.apply_filter(
+            "(*)"
+        )  # In "simple mode", we want the * to behave as a wildcard
         call = dgapp.results.apply_filter.calls[3]
-        eq_(r'\(.*\)', call['filter_str'])
-        dgapp.options['escape_filter_regexp'] = False
-        dgapp.apply_filter('(abc)')
+        eq_(r"\(.*\)", call["filter_str"])
+        dgapp.options["escape_filter_regexp"] = False
+        dgapp.apply_filter("(abc)")
         call = dgapp.results.apply_filter.calls[5]
-        eq_('(abc)', call['filter_str'])
+        eq_("(abc)", call["filter_str"])

     def test_copy_or_move(self, tmpdir, monkeypatch):
         # The goal here is just to have a test for a previous blowup I had. I know my test coverage
         # for this unit is pathetic. What's done is done. My approach now is to add tests for
         # every change I want to make. The blowup was caused by a missing import.
         p = Path(str(tmpdir))
-        p['foo'].open('w').close()
-        monkeypatch.setattr(hscommon.conflict, 'smart_copy', log_calls(lambda source_path, dest_path: None))
+        p["foo"].open("w").close()
+        monkeypatch.setattr(
+            hscommon.conflict,
+            "smart_copy",
+            log_calls(lambda source_path, dest_path: None),
+        )
         # XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
-        monkeypatch.setattr(app, 'smart_copy', hscommon.conflict.smart_copy)
-        monkeypatch.setattr(os, 'makedirs', lambda path: None) # We don't want the test to create that fake directory
+        monkeypatch.setattr(app, "smart_copy", hscommon.conflict.smart_copy)
+        monkeypatch.setattr(
+            os, "makedirs", lambda path: None
+        )  # We don't want the test to create that fake directory
         dgapp = TestApp().app
         dgapp.directories.add_path(p)
         [f] = dgapp.directories.get_files()
-        dgapp.copy_or_move(f, True, 'some_destination', 0)
+        dgapp.copy_or_move(f, True, "some_destination", 0)
         eq_(1, len(hscommon.conflict.smart_copy.calls))
         call = hscommon.conflict.smart_copy.calls[0]
-        eq_(call['dest_path'], op.join('some_destination', 'foo'))
-        eq_(call['source_path'], f.path)
+        eq_(call["dest_path"], op.join("some_destination", "foo"))
+        eq_(call["source_path"], f.path)

     def test_copy_or_move_clean_empty_dirs(self, tmpdir, monkeypatch):
         tmppath = Path(str(tmpdir))
-        sourcepath = tmppath['source']
+        sourcepath = tmppath["source"]
         sourcepath.mkdir()
-        sourcepath['myfile'].open('w')
+        sourcepath["myfile"].open("w")
         app = TestApp().app
         app.directories.add_path(tmppath)
         [myfile] = app.directories.get_files()
-        monkeypatch.setattr(app, 'clean_empty_dirs', log_calls(lambda path: None))
-        app.copy_or_move(myfile, False, tmppath['dest'], 0)
+        monkeypatch.setattr(app, "clean_empty_dirs", log_calls(lambda path: None))
+        app.copy_or_move(myfile, False, tmppath["dest"], 0)
         calls = app.clean_empty_dirs.calls
         eq_(1, len(calls))
-        eq_(sourcepath, calls[0]['path'])
+        eq_(sourcepath, calls[0]["path"])

     def test_Scan_with_objects_evaluating_to_false(self):
         class FakeFile(fs.File):
             def __bool__(self):
                 return False

         # At some point, any() was used in a wrong way that made Scan() wrongly return 1
         app = TestApp().app
-        f1, f2 = [FakeFile('foo') for i in range(2)]
+        f1, f2 = [FakeFile("foo") for i in range(2)]
         f1.is_ref, f2.is_ref = (False, False)
         assert not (bool(f1) and bool(f2))
         add_fake_files_to_directories(app.directories, [f1, f2])
-        app.start_scanning() # no exception
+        app.start_scanning()  # no exception

     @mark.skipif("not hasattr(os, 'link')")
     def test_ignore_hardlink_matches(self, tmpdir):
         # If the ignore_hardlink_matches option is set, don't match files hardlinking to the same
         # inode.
         tmppath = Path(str(tmpdir))
-        tmppath['myfile'].open('w').write('foo')
-        os.link(str(tmppath['myfile']), str(tmppath['hardlink']))
+        tmppath["myfile"].open("w").write("foo")
+        os.link(str(tmppath["myfile"]), str(tmppath["hardlink"]))
         app = TestApp().app
         app.directories.add_path(tmppath)
-        app.options['scan_type'] = ScanType.Contents
-        app.options['ignore_hardlink_matches'] = True
+        app.options["scan_type"] = ScanType.Contents
+        app.options["ignore_hardlink_matches"] = True
         app.start_scanning()
         eq_(len(app.results.groups), 0)
@@ -116,27 +129,32 @@ class TestCaseDupeGuru:
         # making the selected row None. Don't crash when it happens.
         dgapp = TestApp().app
         # selected_row is None because there's no result.
-        assert not dgapp.result_table.rename_selected('foo') # no crash
+        assert not dgapp.result_table.rename_selected("foo")  # no crash
+

 class TestCaseDupeGuru_clean_empty_dirs:
     def pytest_funcarg__do_setup(self, request):
-        monkeypatch = request.getfuncargvalue('monkeypatch')
-        monkeypatch.setattr(hscommon.util, 'delete_if_empty', log_calls(lambda path, files_to_delete=[]: None))
+        monkeypatch = request.getfuncargvalue("monkeypatch")
+        monkeypatch.setattr(
+            hscommon.util,
+            "delete_if_empty",
+            log_calls(lambda path, files_to_delete=[]: None),
+        )
         # XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
-        monkeypatch.setattr(app, 'delete_if_empty', hscommon.util.delete_if_empty)
+        monkeypatch.setattr(app, "delete_if_empty", hscommon.util.delete_if_empty)
         self.app = TestApp().app

     def test_option_off(self, do_setup):
-        self.app.clean_empty_dirs(Path('/foo/bar'))
+        self.app.clean_empty_dirs(Path("/foo/bar"))
         eq_(0, len(hscommon.util.delete_if_empty.calls))

     def test_option_on(self, do_setup):
-        self.app.options['clean_empty_dirs'] = True
-        self.app.clean_empty_dirs(Path('/foo/bar'))
+        self.app.options["clean_empty_dirs"] = True
+        self.app.clean_empty_dirs(Path("/foo/bar"))
         calls = hscommon.util.delete_if_empty.calls
         eq_(1, len(calls))
-        eq_(Path('/foo/bar'), calls[0]['path'])
-        eq_(['.DS_Store'], calls[0]['files_to_delete'])
+        eq_(Path("/foo/bar"), calls[0]["path"])
+        eq_([".DS_Store"], calls[0]["files_to_delete"])

     def test_recurse_up(self, do_setup, monkeypatch):
         # delete_if_empty must be recursively called up in the path until it returns False
@@ -144,16 +162,16 @@ class TestCaseDupeGuru_clean_empty_dirs:
         def mock_delete_if_empty(path, files_to_delete=[]):
             return len(path) > 1

-        monkeypatch.setattr(hscommon.util, 'delete_if_empty', mock_delete_if_empty)
+        monkeypatch.setattr(hscommon.util, "delete_if_empty", mock_delete_if_empty)
         # XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
-        monkeypatch.setattr(app, 'delete_if_empty', mock_delete_if_empty)
-        self.app.options['clean_empty_dirs'] = True
-        self.app.clean_empty_dirs(Path('not-empty/empty/empty'))
+        monkeypatch.setattr(app, "delete_if_empty", mock_delete_if_empty)
+        self.app.options["clean_empty_dirs"] = True
+        self.app.clean_empty_dirs(Path("not-empty/empty/empty"))
         calls = hscommon.util.delete_if_empty.calls
         eq_(3, len(calls))
-        eq_(Path('not-empty/empty/empty'), calls[0]['path'])
-        eq_(Path('not-empty/empty'), calls[1]['path'])
-        eq_(Path('not-empty'), calls[2]['path'])
+        eq_(Path("not-empty/empty/empty"), calls[0]["path"])
+        eq_(Path("not-empty/empty"), calls[1]["path"])
+        eq_(Path("not-empty"), calls[2]["path"])


 class TestCaseDupeGuruWithResults:
@@ -166,10 +184,10 @@ class TestCaseDupeGuruWithResults:
         self.dtree = app.dtree
         self.rtable = app.rtable
         self.rtable.refresh()
-        tmpdir = request.getfuncargvalue('tmpdir')
+        tmpdir = request.getfuncargvalue("tmpdir")
         tmppath = Path(str(tmpdir))
-        tmppath['foo'].mkdir()
-        tmppath['bar'].mkdir()
+        tmppath["foo"].mkdir()
+        tmppath["bar"].mkdir()
         self.app.directories.add_path(tmppath)

     def test_GetObjects(self, do_setup):
@@ -187,8 +205,8 @@ class TestCaseDupeGuruWithResults:
     def test_GetObjects_after_sort(self, do_setup):
         objects = self.objects
-        groups = self.groups[:] # we need an un-sorted reference
-        self.rtable.sort('name', False)
+        groups = self.groups[:]  # we need an un-sorted reference
+        self.rtable.sort("name", False)
         r = self.rtable[1]
         assert r._group is groups[1]
         assert r._dupe is objects[4]
@@ -198,7 +216,7 @@ class TestCaseDupeGuruWithResults:
         self.rtable.select([1, 2, 3])
         self.app.remove_selected()
         # The first 2 dupes have been removed. The 3rd one is a ref. it stays there, in first pos.
-        eq_(self.rtable.selected_indexes, [1]) # no exception
+        eq_(self.rtable.selected_indexes, [1])  # no exception

     def test_selectResultNodePaths(self, do_setup):
         app = self.app
@@ -220,9 +238,9 @@ class TestCaseDupeGuruWithResults:
     def test_selectResultNodePaths_after_sort(self, do_setup):
         app = self.app
         objects = self.objects
-        groups = self.groups[:] #To keep the old order in memory
-        self.rtable.sort('name', False) #0
-        #Now, the group order is supposed to be reversed
+        groups = self.groups[:]  # To keep the old order in memory
+        self.rtable.sort("name", False)  # 0
+        # Now, the group order is supposed to be reversed
         self.rtable.select([1, 2, 3])
         eq_(len(app.selected_dupes), 3)
         assert app.selected_dupes[0] is objects[4]
@@ -242,13 +260,13 @@ class TestCaseDupeGuruWithResults:
         self.rtable.power_marker = True
         self.rtable.select([0, 1, 2])
         app.remove_selected()
-        eq_(self.rtable.selected_indexes, []) # no exception
+        eq_(self.rtable.selected_indexes, [])  # no exception

     def test_selectPowerMarkerRows_after_sort(self, do_setup):
         app = self.app
         objects = self.objects
         self.rtable.power_marker = True
-        self.rtable.sort('name', False)
+        self.rtable.sort("name", False)
         self.rtable.select([0, 1, 2])
         eq_(len(app.selected_dupes), 3)
         assert app.selected_dupes[0] is objects[4]
@@ -285,11 +303,11 @@ class TestCaseDupeGuruWithResults:
     def test_refreshDetailsWithSelected(self, do_setup):
         self.rtable.select([1, 4])
-        eq_(self.dpanel.row(0), ('Filename', 'bar bleh', 'foo bar'))
-        self.dpanel.view.check_gui_calls(['refresh'])
+        eq_(self.dpanel.row(0), ("Filename", "bar bleh", "foo bar"))
+        self.dpanel.view.check_gui_calls(["refresh"])
         self.rtable.select([])
-        eq_(self.dpanel.row(0), ('Filename', '---', '---'))
-        self.dpanel.view.check_gui_calls(['refresh'])
+        eq_(self.dpanel.row(0), ("Filename", "---", "---"))
+        self.dpanel.view.check_gui_calls(["refresh"])

     def test_makeSelectedReference(self, do_setup):
         app = self.app
@@ -300,12 +318,14 @@ class TestCaseDupeGuruWithResults:
         assert groups[0].ref is objects[1]
         assert groups[1].ref is objects[4]

-    def test_makeSelectedReference_by_selecting_two_dupes_in_the_same_group(self, do_setup):
+    def test_makeSelectedReference_by_selecting_two_dupes_in_the_same_group(
+        self, do_setup
+    ):
         app = self.app
         objects = self.objects
         groups = self.groups
         self.rtable.select([1, 2, 4])
-        #Only [0, 0] and [1, 0] must go ref, not [0, 1] because it is a part of the same group
+        # Only [0, 0] and [1, 0] must go ref, not [0, 1] because it is a part of the same group
         app.make_selected_reference()
         assert groups[0].ref is objects[1]
         assert groups[1].ref is objects[4]
@@ -314,7 +334,7 @@ class TestCaseDupeGuruWithResults:
         app = self.app
         self.rtable.select([1, 4])
         app.remove_selected()
-        eq_(len(app.results.dupes), 1) # the first path is now selected
+        eq_(len(app.results.dupes), 1)  # the first path is now selected
         app.remove_selected()
         eq_(len(app.results.dupes), 0)
@@ -336,27 +356,27 @@ class TestCaseDupeGuruWithResults:
     def test_addDirectory_does_not_exist(self, do_setup):
         app = self.app
-        app.add_directory('/does_not_exist')
+        app.add_directory("/does_not_exist")
         eq_(len(app.view.messages), 1)
         assert "exist" in app.view.messages[0]

     def test_ignore(self, do_setup):
         app = self.app
-        self.rtable.select([4]) #The dupe of the second, 2 sized group
+        self.rtable.select([4])  # The dupe of the second, 2 sized group
         app.add_selected_to_ignore_list()
         eq_(len(app.ignore_list), 1)
-        self.rtable.select([1]) #first dupe of the 3 dupes group
+        self.rtable.select([1])  # first dupe of the 3 dupes group
         app.add_selected_to_ignore_list()
-        #BOTH the ref and the other dupe should have been added
+        # BOTH the ref and the other dupe should have been added
         eq_(len(app.ignore_list), 3)

     def test_purgeIgnoreList(self, do_setup, tmpdir):
         app = self.app
-        p1 = str(tmpdir.join('file1'))
-        p2 = str(tmpdir.join('file2'))
-        open(p1, 'w').close()
-        open(p2, 'w').close()
-        dne = '/does_not_exist'
+        p1 = str(tmpdir.join("file1"))
+        p2 = str(tmpdir.join("file2"))
+        open(p1, "w").close()
+        open(p2, "w").close()
+        dne = "/does_not_exist"
         app.ignore_list.Ignore(dne, p1)
         app.ignore_list.Ignore(p2, dne)
         app.ignore_list.Ignore(p1, p2)
@@ -381,9 +401,11 @@ class TestCaseDupeGuruWithResults:
         # When doing a scan with results being present prior to the scan, correctly invalidate the
         # results table.
         app = self.app
-        app.JOB = Job(1, lambda *args, **kw: False) # Cancels the task
-        add_fake_files_to_directories(app.directories, self.objects) # We want the scan to at least start
-        app.start_scanning() # will be cancelled immediately
+        app.JOB = Job(1, lambda *args, **kw: False)  # Cancels the task
+        add_fake_files_to_directories(
+            app.directories, self.objects
+        )  # We want the scan to at least start
+        app.start_scanning()  # will be cancelled immediately
         eq_(len(app.result_table), 0)

     def test_selected_dupes_after_removal(self, do_setup):
@@ -401,21 +423,21 @@ class TestCaseDupeGuruWithResults:
         # Ref #238
         self.rtable.delta_values = True
         self.rtable.power_marker = True
-        self.rtable.sort('dupe_count', False)
+        self.rtable.sort("dupe_count", False)
         # don't crash
-        self.rtable.sort('percentage', False)
+        self.rtable.sort("percentage", False)
         # don't crash

+
 class TestCaseDupeGuru_renameSelected:
     def pytest_funcarg__do_setup(self, request):
-        tmpdir = request.getfuncargvalue('tmpdir')
+        tmpdir = request.getfuncargvalue("tmpdir")
         p = Path(str(tmpdir))
-        fp = open(str(p['foo bar 1']), mode='w')
+        fp = open(str(p["foo bar 1"]), mode="w")
         fp.close()
-        fp = open(str(p['foo bar 2']), mode='w')
+        fp = open(str(p["foo bar 2"]), mode="w")
         fp.close()
-        fp = open(str(p['foo bar 3']), mode='w')
+        fp = open(str(p["foo bar 3"]), mode="w")
         fp.close()
         files = fs.get_files(p)
         for f in files:
@@ -437,46 +459,46 @@ class TestCaseDupeGuru_renameSelected:
         app = self.app
         g = self.groups[0]
         self.rtable.select([1])
-        assert app.rename_selected('renamed')
+        assert app.rename_selected("renamed")
         names = [p.name for p in self.p.listdir()]
-        assert 'renamed' in names
-        assert 'foo bar 2' not in names
-        eq_(g.dupes[0].name, 'renamed')
+        assert "renamed" in names
+        assert "foo bar 2" not in names
+        eq_(g.dupes[0].name, "renamed")

     def test_none_selected(self, do_setup, monkeypatch):
         app = self.app
         g = self.groups[0]
         self.rtable.select([])
-        monkeypatch.setattr(logging, 'warning', log_calls(lambda msg: None))
-        assert not app.rename_selected('renamed')
-        msg = logging.warning.calls[0]['msg']
-        eq_('dupeGuru Warning: list index out of range', msg)
+        monkeypatch.setattr(logging, "warning", log_calls(lambda msg: None))
+        assert not app.rename_selected("renamed")
+        msg = logging.warning.calls[0]["msg"]
+        eq_("dupeGuru Warning: list index out of range", msg)
         names = [p.name for p in self.p.listdir()]
-        assert 'renamed' not in names
-        assert 'foo bar 2' in names
-        eq_(g.dupes[0].name, 'foo bar 2')
+        assert "renamed" not in names
+        assert "foo bar 2" in names
+        eq_(g.dupes[0].name, "foo bar 2")

     def test_name_already_exists(self, do_setup, monkeypatch):
         app = self.app
         g = self.groups[0]
         self.rtable.select([1])
-        monkeypatch.setattr(logging, 'warning', log_calls(lambda msg: None))
-        assert not app.rename_selected('foo bar 1')
-        msg = logging.warning.calls[0]['msg']
-        assert msg.startswith('dupeGuru Warning: \'foo bar 1\' already exists in')
+        monkeypatch.setattr(logging, "warning", log_calls(lambda msg: None))
+        assert not app.rename_selected("foo bar 1")
+        msg = logging.warning.calls[0]["msg"]
+        assert msg.startswith("dupeGuru Warning: 'foo bar 1' already exists in")
         names = [p.name for p in self.p.listdir()]
-        assert 'foo bar 1' in names
-        assert 'foo bar 2' in names
-        eq_(g.dupes[0].name, 'foo bar 2')
+        assert "foo bar 1" in names
+        assert "foo bar 2" in names
+        eq_(g.dupes[0].name, "foo bar 2")

+
 class TestAppWithDirectoriesInTree:
     def pytest_funcarg__do_setup(self, request):
-        tmpdir = request.getfuncargvalue('tmpdir')
+        tmpdir = request.getfuncargvalue("tmpdir")
         p = Path(str(tmpdir))
-        p['sub1'].mkdir()
-        p['sub2'].mkdir()
-        p['sub3'].mkdir()
+        p["sub1"].mkdir()
+        p["sub2"].mkdir()
+        p["sub3"].mkdir()
         app = TestApp()
         self.app = app.app
         self.dtree = app.dtree
@@ -487,12 +509,11 @@ class TestAppWithDirectoriesInTree:
         # Setting a node state to something also affects subnodes. These subnodes must be correctly
         # refreshed.
         node = self.dtree[0]
-        eq_(len(node), 3) # a len() call is required for subnodes to be loaded
+        eq_(len(node), 3)  # a len() call is required for subnodes to be loaded
         subnode = node[0]
-        node.state = 1 # the state property is a state index
+        node.state = 1  # the state property is a state index
         node = self.dtree[0]
         eq_(len(node), 3)
         subnode = node[0]
         eq_(subnode.state, 1)
-        self.dtree.view.check_gui_calls(['refresh_states'])
+        self.dtree.view.check_gui_calls(["refresh_states"])
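
These tests lean heavily on hscommon.testutil.log_calls, which wraps a callable and records the arguments of every call so the tests can read them back via func.calls[i]["argname"]. A rough pure-Python sketch of the idea, not hscommon's actual implementation:

import inspect

def log_calls(func):
    # Record each call's bound arguments on the wrapper, keyed by parameter
    # name, matching how the tests index into `calls`.
    def wrapper(*args, **kwargs):
        bound = inspect.signature(func).bind(*args, **kwargs)
        bound.apply_defaults()
        wrapper.calls.append(dict(bound.arguments))
        return func(*args, **kwargs)

    wrapper.calls = []
    return wrapper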


@@ -4,7 +4,7 @@
 # which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

-from hscommon.testutil import TestApp as TestAppBase, CallLogger, eq_, with_app # noqa
+from hscommon.testutil import TestApp as TestAppBase, CallLogger, eq_, with_app  # noqa
 from hscommon.path import Path
 from hscommon.util import get_file_ext, format_size
 from hscommon.gui.column import Column
@@ -17,6 +17,7 @@ from ..app import DupeGuru as DupeGuruBase
 from ..gui.result_table import ResultTable as ResultTableBase
 from ..gui.prioritize_dialog import PrioritizeDialog

+
 class DupeGuruView:
     JOB = nulljob
@@ -39,28 +40,32 @@ class DupeGuruView:
         self.messages.append(msg)

     def ask_yes_no(self, prompt):
-        return True # always answer yes
+        return True  # always answer yes

     def create_results_window(self):
         pass

+
 class ResultTable(ResultTableBase):
     COLUMNS = [
-        Column('marked', ''),
-        Column('name', 'Filename'),
-        Column('folder_path', 'Directory'),
-        Column('size', 'Size (KB)'),
-        Column('extension', 'Kind'),
+        Column("marked", ""),
+        Column("name", "Filename"),
+        Column("folder_path", "Directory"),
+        Column("size", "Size (KB)"),
+        Column("extension", "Kind"),
     ]
-    DELTA_COLUMNS = {'size', }
+    DELTA_COLUMNS = {
+        "size",
+    }
+

 class DupeGuru(DupeGuruBase):
-    NAME = 'dupeGuru'
-    METADATA_TO_READ = ['size']
+    NAME = "dupeGuru"
+    METADATA_TO_READ = ["size"]

     def __init__(self):
         DupeGuruBase.__init__(self, DupeGuruView())
-        self.appdata = '/tmp'
+        self.appdata = "/tmp"
         self._recreate_result_table()

     def _prioritization_categories(self):
@@ -78,7 +83,7 @@ class NamedObject:
     def __init__(self, name="foobar", with_words=False, size=1, folder=None):
         self.name = name
         if folder is None:
-            folder = 'basepath'
+            folder = "basepath"
         self._folder = Path(folder)
         self.size = size
         self.md5partial = name
@@ -88,7 +93,7 @@ class NamedObject:
         self.is_ref = False

     def __bool__(self):
-        return False #Make sure that operations are made correctly when the bool value of files is false.
+        return False  # Make sure that operations are made correctly when the bool value of files is false.

     def get_display_info(self, group, delta):
         size = self.size
@@ -97,10 +102,10 @@ class NamedObject:
             r = group.ref
             size -= r.size
         return {
-            'name': self.name,
-            'folder_path': str(self.folder_path),
-            'size': format_size(size, 0, 1, False),
-            'extension': self.extension if hasattr(self, 'extension') else '---',
+            "name": self.name,
+            "folder_path": str(self.folder_path),
+            "size": format_size(size, 0, 1, False),
+            "extension": self.extension if hasattr(self, "extension") else "---",
         }

     @property
@@ -115,6 +120,7 @@ class NamedObject:
     def extension(self):
         return get_file_ext(self.name)

+
 # Returns a group set that looks like that:
 # "foo bar" (1)
 # "bar bleh" (1024)
@@ -127,21 +133,24 @@ def GetTestGroups():
         NamedObject("bar bleh"),
         NamedObject("foo bleh"),
         NamedObject("ibabtu"),
-        NamedObject("ibabtu")
+        NamedObject("ibabtu"),
     ]
     objects[1].size = 1024
-    matches = engine.getmatches(objects) #we should have 5 matches
-    groups = engine.get_groups(matches) #We should have 2 groups
+    matches = engine.getmatches(objects)  # we should have 5 matches
+    groups = engine.get_groups(matches)  # We should have 2 groups
     for g in groups:
-        g.prioritize(lambda x: objects.index(x)) #We want the dupes to be in the same order as the list is
-    groups.sort(key=len, reverse=True) # We want the group with 3 members to be first.
+        g.prioritize(
+            lambda x: objects.index(x)
+        )  # We want the dupes to be in the same order as the list is
+    groups.sort(key=len, reverse=True)  # We want the group with 3 members to be first.
     return (objects, matches, groups)

+
 class TestApp(TestAppBase):
     def __init__(self):
         def link_gui(gui):
             gui.view = self.make_logger()
-            if hasattr(gui, 'columns'): # tables
+            if hasattr(gui, "columns"):  # tables
                 gui.columns.view = self.make_logger()
             return gui
@@ -166,7 +175,7 @@ class TestApp(TestAppBase):
         # rtable is a property because its instance can be replaced during execution
         return self.app.result_table

-    #--- Helpers
+    # --- Helpers
     def select_pri_criterion(self, name):
         # Select a main prioritize criterion by name instead of by index. Makes tests more
         # maintainable.
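
Spelled out, the fixture GetTestGroups builds has roughly this shape; the names come from the code above and the group layout follows the comment block over the function:

objects, matches, groups = GetTestGroups()
# groups[0]: "foo bar" (ref), "bar bleh", "foo bleh"   -- the 3-member group
# groups[1]: "ibabtu" (ref), "ibabtu"                  -- the 2-member group

This is why tests above can assert things like `app.selected_dupes[0] is objects[4]` after a name sort: the group order flips, but the objects list keeps its original order.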


@@ -13,13 +13,18 @@ try:
 except ImportError:
     skip("Can't import the block module, probably hasn't been compiled.")

-def my_avgdiff(first, second, limit=768, min_iter=3): # this is so I don't have to re-write every call
+
+def my_avgdiff(
+    first, second, limit=768, min_iter=3
+):  # this is so I don't have to re-write every call
     return avgdiff(first, second, limit, min_iter)

+
 BLACK = (0, 0, 0)
-RED = (0xff, 0, 0)
-GREEN = (0, 0xff, 0)
-BLUE = (0, 0, 0xff)
+RED = (0xFF, 0, 0)
+GREEN = (0, 0xFF, 0)
+BLUE = (0, 0, 0xFF)
+

 class FakeImage:
     def __init__(self, size, data):
@@ -37,16 +42,20 @@ class FakeImage:
                 pixels.append(pixel)
         return FakeImage((box[2] - box[0], box[3] - box[1]), pixels)

+
 def empty():
     return FakeImage((0, 0), [])

-def single_pixel(): #one red pixel
-    return FakeImage((1, 1), [(0xff, 0, 0)])
+
+def single_pixel():  # one red pixel
+    return FakeImage((1, 1), [(0xFF, 0, 0)])
+

 def four_pixels():
-    pixels = [RED, (0, 0x80, 0xff), (0x80, 0, 0), (0, 0x40, 0x80)]
+    pixels = [RED, (0, 0x80, 0xFF), (0x80, 0, 0), (0, 0x40, 0x80)]
     return FakeImage((2, 2), pixels)

+
 class TestCasegetblock:
     def test_single_pixel(self):
         im = single_pixel()
@@ -60,9 +69,9 @@ class TestCasegetblock:
     def test_four_pixels(self):
         im = four_pixels()
         [b] = getblocks2(im, 1)
-        meanred = (0xff + 0x80) // 4
+        meanred = (0xFF + 0x80) // 4
         meangreen = (0x80 + 0x40) // 4
-        meanblue = (0xff + 0x80) // 4
+        meanblue = (0xFF + 0x80) // 4
         eq_((meanred, meangreen, meanblue), b)
@@ -158,6 +167,7 @@ class TestCasegetblock:
     #     eq_(BLACK, blocks[3])
     #

+
 class TestCasegetblocks2:
     def test_empty_image(self):
         im = empty()
@@ -169,9 +179,9 @@ class TestCasegetblocks2:
         blocks = getblocks2(im, 1)
         eq_(1, len(blocks))
         block = blocks[0]
-        meanred = (0xff + 0x80) // 4
+        meanred = (0xFF + 0x80) // 4
         meangreen = (0x80 + 0x40) // 4
-        meanblue = (0xff + 0x80) // 4
+        meanblue = (0xFF + 0x80) // 4
         eq_((meanred, meangreen, meanblue), block)

     def test_four_blocks_all_black(self):
@@ -225,25 +235,25 @@ class TestCaseavgdiff:
             my_avgdiff([b, b], [b])

     def test_first_arg_is_empty_but_not_second(self):
-        #Don't return 0 (as when the 2 lists are empty), raise!
+        # Don't return 0 (as when the 2 lists are empty), raise!
         b = (0, 0, 0)
         with raises(DifferentBlockCountError):
             my_avgdiff([], [b])

     def test_limit(self):
         ref = (0, 0, 0)
-        b1 = (10, 10, 10) #avg 30
-        b2 = (20, 20, 20) #avg 45
-        b3 = (30, 30, 30) #avg 60
+        b1 = (10, 10, 10)  # avg 30
+        b2 = (20, 20, 20)  # avg 45
+        b3 = (30, 30, 30)  # avg 60
         blocks1 = [ref, ref, ref]
         blocks2 = [b1, b2, b3]
         eq_(45, my_avgdiff(blocks1, blocks2, 44))

     def test_min_iterations(self):
         ref = (0, 0, 0)
-        b1 = (10, 10, 10) #avg 30
-        b2 = (20, 20, 20) #avg 45
-        b3 = (10, 10, 10) #avg 40
+        b1 = (10, 10, 10)  # avg 30
+        b2 = (20, 20, 20)  # avg 45
+        b3 = (10, 10, 10)  # avg 40
         blocks1 = [ref, ref, ref]
         blocks2 = [b1, b2, b3]
         eq_(40, my_avgdiff(blocks1, blocks2, 45 - 1, 3))
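
Reading the two tests together, the intended semantics of limit and min_iter appear to be the following (a reading of the tests, not documentation of the compiled module):

# Per-block difference against ref (0, 0, 0) sums the channel deltas:
# b1 -> 10 + 10 + 10 = 30; b2 -> 60; b3 -> 90 (or 30 in test_min_iterations).
# The "#avg" comments track the running average after each block:
# 30 / 1 = 30, (30 + 60) / 2 = 45, (30 + 60 + 90) / 3 = 60.
# test_limit: with limit=44, scanning stops as soon as the running average
# (45) exceeds the limit, so 45 is returned without visiting b3.
# test_min_iterations: with min_iter=3, that early exit is suppressed until
# three blocks are seen, so the final average (30 + 60 + 30) / 3 = 40 wins.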


@@ -16,34 +16,35 @@ try:
 except ImportError:
     skip("Can't import the cache module, probably hasn't been compiled.")

+
 class TestCasecolors_to_string:
     def test_no_color(self):
-        eq_('', colors_to_string([]))
+        eq_("", colors_to_string([]))

     def test_single_color(self):
-        eq_('000000', colors_to_string([(0, 0, 0)]))
-        eq_('010101', colors_to_string([(1, 1, 1)]))
-        eq_('0a141e', colors_to_string([(10, 20, 30)]))
+        eq_("000000", colors_to_string([(0, 0, 0)]))
+        eq_("010101", colors_to_string([(1, 1, 1)]))
+        eq_("0a141e", colors_to_string([(10, 20, 30)]))

     def test_two_colors(self):
-        eq_('000102030405', colors_to_string([(0, 1, 2), (3, 4, 5)]))
+        eq_("000102030405", colors_to_string([(0, 1, 2), (3, 4, 5)]))
+

 class TestCasestring_to_colors:
     def test_empty(self):
-        eq_([], string_to_colors(''))
+        eq_([], string_to_colors(""))

     def test_single_color(self):
-        eq_([(0, 0, 0)], string_to_colors('000000'))
-        eq_([(2, 3, 4)], string_to_colors('020304'))
-        eq_([(10, 20, 30)], string_to_colors('0a141e'))
+        eq_([(0, 0, 0)], string_to_colors("000000"))
+        eq_([(2, 3, 4)], string_to_colors("020304"))
+        eq_([(10, 20, 30)], string_to_colors("0a141e"))

     def test_two_colors(self):
-        eq_([(10, 20, 30), (40, 50, 60)], string_to_colors('0a141e28323c'))
+        eq_([(10, 20, 30), (40, 50, 60)], string_to_colors("0a141e28323c"))

     def test_incomplete_color(self):
         # don't return anything if it's not a complete color
-        eq_([], string_to_colors('102'))
+        eq_([], string_to_colors("102"))
+

 class BaseTestCaseCache:
@@ -54,58 +55,58 @@ class BaseTestCaseCache:
         c = self.get_cache()
         eq_(0, len(c))
         with raises(KeyError):
-            c['foo']
+            c["foo"]

     def test_set_then_retrieve_blocks(self):
         c = self.get_cache()
         b = [(0, 0, 0), (1, 2, 3)]
-        c['foo'] = b
-        eq_(b, c['foo'])
+        c["foo"] = b
+        eq_(b, c["foo"])

     def test_delitem(self):
         c = self.get_cache()
-        c['foo'] = ''
-        del c['foo']
-        assert 'foo' not in c
+        c["foo"] = ""
+        del c["foo"]
+        assert "foo" not in c
         with raises(KeyError):
-            del c['foo']
+            del c["foo"]

     def test_persistance(self, tmpdir):
-        DBNAME = tmpdir.join('hstest.db')
+        DBNAME = tmpdir.join("hstest.db")
         c = self.get_cache(str(DBNAME))
-        c['foo'] = [(1, 2, 3)]
+        c["foo"] = [(1, 2, 3)]
         del c
         c = self.get_cache(str(DBNAME))
-        eq_([(1, 2, 3)], c['foo'])
+        eq_([(1, 2, 3)], c["foo"])

     def test_filter(self):
         c = self.get_cache()
-        c['foo'] = ''
-        c['bar'] = ''
-        c['baz'] = ''
-        c.filter(lambda p: p != 'bar') #only 'bar' is removed
+        c["foo"] = ""
+        c["bar"] = ""
+        c["baz"] = ""
+        c.filter(lambda p: p != "bar")  # only 'bar' is removed
         eq_(2, len(c))
-        assert 'foo' in c
-        assert 'baz' in c
-        assert 'bar' not in c
+        assert "foo" in c
+        assert "baz" in c
+        assert "bar" not in c

     def test_clear(self):
         c = self.get_cache()
-        c['foo'] = ''
-        c['bar'] = ''
-        c['baz'] = ''
+        c["foo"] = ""
+        c["bar"] = ""
+        c["baz"] = ""
         c.clear()
         eq_(0, len(c))
-        assert 'foo' not in c
-        assert 'baz' not in c
-        assert 'bar' not in c
+        assert "foo" not in c
+        assert "baz" not in c
+        assert "bar" not in c

     def test_by_id(self):
         # it's possible to use the cache by referring to the files by their row_id
         c = self.get_cache()
         b = [(0, 0, 0), (1, 2, 3)]
-        c['foo'] = b
-        foo_id = c.get_id('foo')
+        c["foo"] = b
+        foo_id = c.get_id("foo")
         eq_(c[foo_id], b)
@@ -120,16 +121,16 @@ class TestCaseSqliteCache(BaseTestCaseCache):
         # If we don't do this monkeypatching, we get a weird exception about trying to flush a
         # closed file. I've tried setting logging level and stuff, but nothing worked. So, there we
         # go, a dirty monkeypatch.
-        monkeypatch.setattr(logging, 'warning', lambda *args, **kw: None)
-        dbname = str(tmpdir.join('foo.db'))
-        fp = open(dbname, 'w')
-        fp.write('invalid sqlite content')
+        monkeypatch.setattr(logging, "warning", lambda *args, **kw: None)
+        dbname = str(tmpdir.join("foo.db"))
+        fp = open(dbname, "w")
+        fp.write("invalid sqlite content")
         fp.close()
-        c = self.get_cache(dbname) # should not raise a DatabaseError
-        c['foo'] = [(1, 2, 3)]
+        c = self.get_cache(dbname)  # should not raise a DatabaseError
+        c["foo"] = [(1, 2, 3)]
         del c
         c = self.get_cache(dbname)
-        eq_(c['foo'], [(1, 2, 3)])
+        eq_(c["foo"], [(1, 2, 3)])


 class TestCaseShelveCache(BaseTestCaseCache):
@@ -161,4 +162,3 @@ class TestCaseCacheSQLEscape:
             del c["foo'bar"]
         except KeyError:
             assert False
-


@ -1 +1 @@
from hscommon.testutil import pytest_funcarg__app # noqa from hscommon.testutil import pytest_funcarg__app # noqa


@ -14,91 +14,105 @@ from hscommon.path import Path
from hscommon.testutil import eq_ from hscommon.testutil import eq_
from ..fs import File from ..fs import File
from ..directories import Directories, DirectoryState, AlreadyThereError, InvalidPathError from ..directories import (
Directories,
DirectoryState,
AlreadyThereError,
InvalidPathError,
)
def create_fake_fs(rootpath): def create_fake_fs(rootpath):
# We have it as a separate function because other units are using it. # We have it as a separate function because other units are using it.
rootpath = rootpath['fs'] rootpath = rootpath["fs"]
rootpath.mkdir() rootpath.mkdir()
rootpath['dir1'].mkdir() rootpath["dir1"].mkdir()
rootpath['dir2'].mkdir() rootpath["dir2"].mkdir()
rootpath['dir3'].mkdir() rootpath["dir3"].mkdir()
fp = rootpath['file1.test'].open('w') fp = rootpath["file1.test"].open("w")
fp.write('1') fp.write("1")
fp.close() fp.close()
fp = rootpath['file2.test'].open('w') fp = rootpath["file2.test"].open("w")
fp.write('12') fp.write("12")
fp.close() fp.close()
fp = rootpath['file3.test'].open('w') fp = rootpath["file3.test"].open("w")
fp.write('123') fp.write("123")
fp.close() fp.close()
fp = rootpath['dir1']['file1.test'].open('w') fp = rootpath["dir1"]["file1.test"].open("w")
fp.write('1') fp.write("1")
fp.close() fp.close()
fp = rootpath['dir2']['file2.test'].open('w') fp = rootpath["dir2"]["file2.test"].open("w")
fp.write('12') fp.write("12")
fp.close() fp.close()
fp = rootpath['dir3']['file3.test'].open('w') fp = rootpath["dir3"]["file3.test"].open("w")
fp.write('123') fp.write("123")
fp.close() fp.close()
return rootpath return rootpath
testpath = None testpath = None
def setup_module(module): def setup_module(module):
# In this unit, we have tests depending on two directory structures. One with only one file in it # In this unit, we have tests depending on two directory structures. One with only one file in it
# and another with a more complex structure. # and another with a more complex structure.
testpath = Path(tempfile.mkdtemp()) testpath = Path(tempfile.mkdtemp())
module.testpath = testpath module.testpath = testpath
rootpath = testpath['onefile'] rootpath = testpath["onefile"]
rootpath.mkdir() rootpath.mkdir()
fp = rootpath['test.txt'].open('w') fp = rootpath["test.txt"].open("w")
fp.write('test_data') fp.write("test_data")
fp.close() fp.close()
create_fake_fs(testpath) create_fake_fs(testpath)
def teardown_module(module): def teardown_module(module):
shutil.rmtree(str(module.testpath)) shutil.rmtree(str(module.testpath))
def test_empty(): def test_empty():
d = Directories() d = Directories()
eq_(len(d), 0) eq_(len(d), 0)
assert 'foobar' not in d assert "foobar" not in d
def test_add_path(): def test_add_path():
d = Directories() d = Directories()
p = testpath['onefile'] p = testpath["onefile"]
d.add_path(p) d.add_path(p)
eq_(1, len(d)) eq_(1, len(d))
assert p in d assert p in d
assert (p['foobar']) in d assert (p["foobar"]) in d
assert p.parent() not in d assert p.parent() not in d
p = testpath['fs'] p = testpath["fs"]
d.add_path(p) d.add_path(p)
eq_(2, len(d)) eq_(2, len(d))
assert p in d assert p in d
def test_AddPath_when_path_is_already_there(): def test_AddPath_when_path_is_already_there():
d = Directories() d = Directories()
p = testpath['onefile'] p = testpath["onefile"]
d.add_path(p) d.add_path(p)
with raises(AlreadyThereError): with raises(AlreadyThereError):
d.add_path(p) d.add_path(p)
with raises(AlreadyThereError): with raises(AlreadyThereError):
d.add_path(p['foobar']) d.add_path(p["foobar"])
eq_(1, len(d)) eq_(1, len(d))
def test_add_path_containing_paths_already_there(): def test_add_path_containing_paths_already_there():
d = Directories() d = Directories()
d.add_path(testpath['onefile']) d.add_path(testpath["onefile"])
eq_(1, len(d)) eq_(1, len(d))
d.add_path(testpath) d.add_path(testpath)
eq_(len(d), 1) eq_(len(d), 1)
eq_(d[0], testpath) eq_(d[0], testpath)
def test_AddPath_non_latin(tmpdir): def test_AddPath_non_latin(tmpdir):
p = Path(str(tmpdir)) p = Path(str(tmpdir))
to_add = p['unicode\u201a'] to_add = p["unicode\u201a"]
os.mkdir(str(to_add)) os.mkdir(str(to_add))
d = Directories() d = Directories()
try: try:
@ -106,63 +120,69 @@ def test_AddPath_non_latin(tmpdir):
except UnicodeDecodeError: except UnicodeDecodeError:
assert False assert False
def test_del(): def test_del():
d = Directories() d = Directories()
d.add_path(testpath['onefile']) d.add_path(testpath["onefile"])
try: try:
del d[1] del d[1]
assert False assert False
except IndexError: except IndexError:
pass pass
d.add_path(testpath['fs']) d.add_path(testpath["fs"])
del d[1] del d[1]
eq_(1, len(d)) eq_(1, len(d))
def test_states(): def test_states():
d = Directories() d = Directories()
p = testpath['onefile'] p = testpath["onefile"]
d.add_path(p) d.add_path(p)
eq_(DirectoryState.Normal, d.get_state(p)) eq_(DirectoryState.Normal, d.get_state(p))
d.set_state(p, DirectoryState.Reference) d.set_state(p, DirectoryState.Reference)
eq_(DirectoryState.Reference, d.get_state(p)) eq_(DirectoryState.Reference, d.get_state(p))
eq_(DirectoryState.Reference, d.get_state(p['dir1'])) eq_(DirectoryState.Reference, d.get_state(p["dir1"]))
eq_(1, len(d.states)) eq_(1, len(d.states))
eq_(p, list(d.states.keys())[0]) eq_(p, list(d.states.keys())[0])
eq_(DirectoryState.Reference, d.states[p]) eq_(DirectoryState.Reference, d.states[p])
def test_get_state_with_path_not_there(): def test_get_state_with_path_not_there():
# When the path's not there, just return DirectoryState.Normal # When the path's not there, just return DirectoryState.Normal
d = Directories() d = Directories()
d.add_path(testpath['onefile']) d.add_path(testpath["onefile"])
eq_(d.get_state(testpath), DirectoryState.Normal) eq_(d.get_state(testpath), DirectoryState.Normal)
def test_states_overwritten_when_larger_directory_eat_smaller_ones(): def test_states_overwritten_when_larger_directory_eat_smaller_ones():
# ref #248 # ref #248
# When setting the state of a folder, we overwrite previously set states for subfolders. # When setting the state of a folder, we overwrite previously set states for subfolders.
d = Directories() d = Directories()
p = testpath['onefile'] p = testpath["onefile"]
d.add_path(p) d.add_path(p)
d.set_state(p, DirectoryState.Excluded) d.set_state(p, DirectoryState.Excluded)
d.add_path(testpath) d.add_path(testpath)
d.set_state(testpath, DirectoryState.Reference) d.set_state(testpath, DirectoryState.Reference)
eq_(d.get_state(p), DirectoryState.Reference) eq_(d.get_state(p), DirectoryState.Reference)
eq_(d.get_state(p['dir1']), DirectoryState.Reference) eq_(d.get_state(p["dir1"]), DirectoryState.Reference)
eq_(d.get_state(testpath), DirectoryState.Reference) eq_(d.get_state(testpath), DirectoryState.Reference)
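Taken together, the state tests above pin down an inheritance rule: a path with no explicit state takes the state of its nearest ancestor, and setting a state on a parent discards anything previously set below it. A minimal sketch of that resolution logic, assuming plain string paths and a simple states dict; the names here are illustrative, not dupeGuru's actual internals:

    NORMAL, REFERENCE, EXCLUDED = range(3)

    class StateMap:
        def __init__(self):
            self.states = {}  # path -> explicitly set state

        def set_state(self, path, state):
            # Setting a parent's state overrides states previously set below it.
            self.states = {
                p: s for p, s in self.states.items()
                if not p.startswith(path + "/")
            }
            self.states[path] = state

        def get_state(self, path):
            # Walk up toward the root until an explicitly set state is found.
            while path:
                if path in self.states:
                    return self.states[path]
                path = path.rpartition("/")[0]
            return NORMAL  # default when nothing is set anywhere above

    states = StateMap()
    states.set_state("/photos/2019", EXCLUDED)
    states.set_state("/photos", REFERENCE)  # wipes the /photos/2019 entry
    assert states.get_state("/photos/2019") == REFERENCE
    assert states.get_state("/other") == NORMAL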
def test_get_files(): def test_get_files():
d = Directories() d = Directories()
p = testpath['fs'] p = testpath["fs"]
d.add_path(p) d.add_path(p)
d.set_state(p['dir1'], DirectoryState.Reference) d.set_state(p["dir1"], DirectoryState.Reference)
d.set_state(p['dir2'], DirectoryState.Excluded) d.set_state(p["dir2"], DirectoryState.Excluded)
files = list(d.get_files()) files = list(d.get_files())
eq_(5, len(files)) eq_(5, len(files))
for f in files: for f in files:
if f.path.parent() == p['dir1']: if f.path.parent() == p["dir1"]:
assert f.is_ref assert f.is_ref
else: else:
assert not f.is_ref assert not f.is_ref
def test_get_files_with_folders(): def test_get_files_with_folders():
# When fileclasses handle folders, return them and stop recursing! # When fileclasses handle folders, return them and stop recursing!
class FakeFile(File): class FakeFile(File):
@ -171,106 +191,115 @@ def test_get_files_with_folders():
return True return True
d = Directories() d = Directories()
p = testpath['fs'] p = testpath["fs"]
d.add_path(p) d.add_path(p)
files = list(d.get_files(fileclasses=[FakeFile])) files = list(d.get_files(fileclasses=[FakeFile]))
# We have the 3 root files and the 3 root dirs # We have the 3 root files and the 3 root dirs
eq_(6, len(files)) eq_(6, len(files))
def test_get_folders(): def test_get_folders():
d = Directories() d = Directories()
p = testpath['fs'] p = testpath["fs"]
d.add_path(p) d.add_path(p)
d.set_state(p['dir1'], DirectoryState.Reference) d.set_state(p["dir1"], DirectoryState.Reference)
d.set_state(p['dir2'], DirectoryState.Excluded) d.set_state(p["dir2"], DirectoryState.Excluded)
folders = list(d.get_folders()) folders = list(d.get_folders())
eq_(len(folders), 3) eq_(len(folders), 3)
ref = [f for f in folders if f.is_ref] ref = [f for f in folders if f.is_ref]
not_ref = [f for f in folders if not f.is_ref] not_ref = [f for f in folders if not f.is_ref]
eq_(len(ref), 1) eq_(len(ref), 1)
eq_(ref[0].path, p['dir1']) eq_(ref[0].path, p["dir1"])
eq_(len(not_ref), 2) eq_(len(not_ref), 2)
eq_(ref[0].size, 1) eq_(ref[0].size, 1)
def test_get_files_with_inherited_exclusion(): def test_get_files_with_inherited_exclusion():
d = Directories() d = Directories()
p = testpath['onefile'] p = testpath["onefile"]
d.add_path(p) d.add_path(p)
d.set_state(p, DirectoryState.Excluded) d.set_state(p, DirectoryState.Excluded)
eq_([], list(d.get_files())) eq_([], list(d.get_files()))
def test_save_and_load(tmpdir): def test_save_and_load(tmpdir):
d1 = Directories() d1 = Directories()
d2 = Directories() d2 = Directories()
p1 = Path(str(tmpdir.join('p1'))) p1 = Path(str(tmpdir.join("p1")))
p1.mkdir() p1.mkdir()
p2 = Path(str(tmpdir.join('p2'))) p2 = Path(str(tmpdir.join("p2")))
p2.mkdir() p2.mkdir()
d1.add_path(p1) d1.add_path(p1)
d1.add_path(p2) d1.add_path(p2)
d1.set_state(p1, DirectoryState.Reference) d1.set_state(p1, DirectoryState.Reference)
d1.set_state(p1['dir1'], DirectoryState.Excluded) d1.set_state(p1["dir1"], DirectoryState.Excluded)
tmpxml = str(tmpdir.join('directories_testunit.xml')) tmpxml = str(tmpdir.join("directories_testunit.xml"))
d1.save_to_file(tmpxml) d1.save_to_file(tmpxml)
d2.load_from_file(tmpxml) d2.load_from_file(tmpxml)
eq_(2, len(d2)) eq_(2, len(d2))
eq_(DirectoryState.Reference, d2.get_state(p1)) eq_(DirectoryState.Reference, d2.get_state(p1))
eq_(DirectoryState.Excluded, d2.get_state(p1['dir1'])) eq_(DirectoryState.Excluded, d2.get_state(p1["dir1"]))
def test_invalid_path(): def test_invalid_path():
d = Directories() d = Directories()
p = Path('does_not_exist') p = Path("does_not_exist")
with raises(InvalidPathError): with raises(InvalidPathError):
d.add_path(p) d.add_path(p)
eq_(0, len(d)) eq_(0, len(d))
def test_set_state_on_invalid_path(): def test_set_state_on_invalid_path():
d = Directories() d = Directories()
try: try:
d.set_state(Path('foobar',), DirectoryState.Normal) d.set_state(Path("foobar",), DirectoryState.Normal)
except LookupError: except LookupError:
assert False assert False
def test_load_from_file_with_invalid_path(tmpdir): def test_load_from_file_with_invalid_path(tmpdir):
#This test simulates a load from file resulting in an # This test simulates a load from file resulting in an
#InvalidPath error being raised. Other directories must be loaded. # InvalidPath error being raised. Other directories must be loaded.
d1 = Directories() d1 = Directories()
d1.add_path(testpath['onefile']) d1.add_path(testpath["onefile"])
#Will raise InvalidPath upon loading # Will raise InvalidPath upon loading
p = Path(str(tmpdir.join('toremove'))) p = Path(str(tmpdir.join("toremove")))
p.mkdir() p.mkdir()
d1.add_path(p) d1.add_path(p)
p.rmdir() p.rmdir()
tmpxml = str(tmpdir.join('directories_testunit.xml')) tmpxml = str(tmpdir.join("directories_testunit.xml"))
d1.save_to_file(tmpxml) d1.save_to_file(tmpxml)
d2 = Directories() d2 = Directories()
d2.load_from_file(tmpxml) d2.load_from_file(tmpxml)
eq_(1, len(d2)) eq_(1, len(d2))
def test_unicode_save(tmpdir): def test_unicode_save(tmpdir):
d = Directories() d = Directories()
p1 = Path(str(tmpdir))['hello\xe9'] p1 = Path(str(tmpdir))["hello\xe9"]
p1.mkdir() p1.mkdir()
p1['foo\xe9'].mkdir() p1["foo\xe9"].mkdir()
d.add_path(p1) d.add_path(p1)
d.set_state(p1['foo\xe9'], DirectoryState.Excluded) d.set_state(p1["foo\xe9"], DirectoryState.Excluded)
tmpxml = str(tmpdir.join('directories_testunit.xml')) tmpxml = str(tmpdir.join("directories_testunit.xml"))
try: try:
d.save_to_file(tmpxml) d.save_to_file(tmpxml)
except UnicodeDecodeError: except UnicodeDecodeError:
assert False assert False
def test_get_files_refreshes_its_directories(): def test_get_files_refreshes_its_directories():
d = Directories() d = Directories()
p = testpath['fs'] p = testpath["fs"]
d.add_path(p) d.add_path(p)
files = d.get_files() files = d.get_files()
eq_(6, len(list(files))) eq_(6, len(list(files)))
time.sleep(1) time.sleep(1)
os.remove(str(p['dir1']['file1.test'])) os.remove(str(p["dir1"]["file1.test"]))
files = d.get_files() files = d.get_files()
eq_(5, len(list(files))) eq_(5, len(list(files)))
def test_get_files_does_not_choke_on_non_existing_directories(tmpdir): def test_get_files_does_not_choke_on_non_existing_directories(tmpdir):
d = Directories() d = Directories()
p = Path(str(tmpdir)) p = Path(str(tmpdir))
@ -278,36 +307,37 @@ def test_get_files_does_not_choke_on_non_existing_directories(tmpdir):
p.rmtree() p.rmtree()
eq_([], list(d.get_files())) eq_([], list(d.get_files()))
def test_get_state_returns_excluded_by_default_for_hidden_directories(tmpdir): def test_get_state_returns_excluded_by_default_for_hidden_directories(tmpdir):
d = Directories() d = Directories()
p = Path(str(tmpdir)) p = Path(str(tmpdir))
hidden_dir_path = p['.foo'] hidden_dir_path = p[".foo"]
p['.foo'].mkdir() p[".foo"].mkdir()
d.add_path(p) d.add_path(p)
eq_(d.get_state(hidden_dir_path), DirectoryState.Excluded) eq_(d.get_state(hidden_dir_path), DirectoryState.Excluded)
# But it can be overridden # But it can be overridden
d.set_state(hidden_dir_path, DirectoryState.Normal) d.set_state(hidden_dir_path, DirectoryState.Normal)
eq_(d.get_state(hidden_dir_path), DirectoryState.Normal) eq_(d.get_state(hidden_dir_path), DirectoryState.Normal)
def test_default_path_state_override(tmpdir): def test_default_path_state_override(tmpdir):
# It's possible for a subclass to override the default state of a path # It's possible for a subclass to override the default state of a path
class MyDirectories(Directories): class MyDirectories(Directories):
def _default_state_for_path(self, path): def _default_state_for_path(self, path):
if 'foobar' in path: if "foobar" in path:
return DirectoryState.Excluded return DirectoryState.Excluded
d = MyDirectories() d = MyDirectories()
p1 = Path(str(tmpdir)) p1 = Path(str(tmpdir))
p1['foobar'].mkdir() p1["foobar"].mkdir()
p1['foobar/somefile'].open('w').close() p1["foobar/somefile"].open("w").close()
p1['foobaz'].mkdir() p1["foobaz"].mkdir()
p1['foobaz/somefile'].open('w').close() p1["foobaz/somefile"].open("w").close()
d.add_path(p1) d.add_path(p1)
eq_(d.get_state(p1['foobaz']), DirectoryState.Normal) eq_(d.get_state(p1["foobaz"]), DirectoryState.Normal)
eq_(d.get_state(p1['foobar']), DirectoryState.Excluded) eq_(d.get_state(p1["foobar"]), DirectoryState.Excluded)
eq_(len(list(d.get_files())), 1) # only the 'foobaz' file is there eq_(len(list(d.get_files())), 1) # only the 'foobaz' file is there
# However, the default state can be changed # However, the default state can be changed
d.set_state(p1['foobar'], DirectoryState.Normal) d.set_state(p1["foobar"], DirectoryState.Normal)
eq_(d.get_state(p1['foobar']), DirectoryState.Normal) eq_(d.get_state(p1["foobar"]), DirectoryState.Normal)
eq_(len(list(d.get_files())), 2) eq_(len(list(d.get_files())), 2)


@ -13,13 +13,28 @@ from hscommon.testutil import eq_, log_calls
from .base import NamedObject from .base import NamedObject
from .. import engine from .. import engine
from ..engine import ( from ..engine import (
get_match, getwords, Group, getfields, unpack_fields, compare_fields, compare, WEIGHT_WORDS, get_match,
MATCH_SIMILAR_WORDS, NO_FIELD_ORDER, build_word_dict, get_groups, getmatches, Match, getwords,
getmatches_by_contents, merge_similar_words, reduce_common_words Group,
getfields,
unpack_fields,
compare_fields,
compare,
WEIGHT_WORDS,
MATCH_SIMILAR_WORDS,
NO_FIELD_ORDER,
build_word_dict,
get_groups,
getmatches,
Match,
getmatches_by_contents,
merge_similar_words,
reduce_common_words,
) )
no = NamedObject no = NamedObject
def get_match_triangle(): def get_match_triangle():
o1 = NamedObject(with_words=True) o1 = NamedObject(with_words=True)
o2 = NamedObject(with_words=True) o2 = NamedObject(with_words=True)
@ -29,6 +44,7 @@ def get_match_triangle():
m3 = get_match(o2, o3) m3 = get_match(o2, o3)
return [m1, m2, m3] return [m1, m2, m3]
def get_test_group(): def get_test_group():
m1, m2, m3 = get_match_triangle() m1, m2, m3 = get_match_triangle()
result = Group() result = Group()
@ -37,6 +53,7 @@ def get_test_group():
result.add_match(m3) result.add_match(m3)
return result return result
def assert_match(m, name1, name2): def assert_match(m, name1, name2):
# When testing matches, whether objects are in first or second position very often doesn't # When testing matches, whether objects are in first or second position very often doesn't
# matter. This function makes this test more convenient. # matter. This function makes this test more convenient.
@ -46,53 +63,54 @@ def assert_match(m, name1, name2):
eq_(m.first.name, name2) eq_(m.first.name, name2)
eq_(m.second.name, name1) eq_(m.second.name, name1)
class TestCasegetwords: class TestCasegetwords:
def test_spaces(self): def test_spaces(self):
eq_(['a', 'b', 'c', 'd'], getwords("a b c d")) eq_(["a", "b", "c", "d"], getwords("a b c d"))
eq_(['a', 'b', 'c', 'd'], getwords(" a b c d ")) eq_(["a", "b", "c", "d"], getwords(" a b c d "))
def test_splitter_chars(self): def test_splitter_chars(self):
eq_( eq_(
[chr(i) for i in range(ord('a'), ord('z')+1)], [chr(i) for i in range(ord("a"), ord("z") + 1)],
getwords("a-b_c&d+e(f)g;h\\i[j]k{l}m:n.o,p<q>r/s?t~u!v@w#x$y*z") getwords("a-b_c&d+e(f)g;h\\i[j]k{l}m:n.o,p<q>r/s?t~u!v@w#x$y*z"),
) )
def test_joiner_chars(self): def test_joiner_chars(self):
eq_(["aec"], getwords("a'e\u0301c")) eq_(["aec"], getwords("a'e\u0301c"))
def test_empty(self): def test_empty(self):
eq_([], getwords('')) eq_([], getwords(""))
def test_returns_lowercase(self): def test_returns_lowercase(self):
eq_(['foo', 'bar'], getwords('FOO BAR')) eq_(["foo", "bar"], getwords("FOO BAR"))
def test_decompose_unicode(self): def test_decompose_unicode(self):
eq_(getwords('foo\xe9bar'), ['fooebar']) eq_(getwords("foo\xe9bar"), ["fooebar"])
class TestCasegetfields: class TestCasegetfields:
def test_simple(self): def test_simple(self):
eq_([['a', 'b'], ['c', 'd', 'e']], getfields('a b - c d e')) eq_([["a", "b"], ["c", "d", "e"]], getfields("a b - c d e"))
def test_empty(self): def test_empty(self):
eq_([], getfields('')) eq_([], getfields(""))
def test_cleans_empty_fields(self): def test_cleans_empty_fields(self):
expected = [['a', 'bc', 'def']] expected = [["a", "bc", "def"]]
actual = getfields(' - a bc def') actual = getfields(" - a bc def")
eq_(expected, actual) eq_(expected, actual)
expected = [['bc', 'def']] expected = [["bc", "def"]]
class TestCaseunpack_fields: class TestCaseunpack_fields:
def test_with_fields(self): def test_with_fields(self):
expected = ['a', 'b', 'c', 'd', 'e', 'f'] expected = ["a", "b", "c", "d", "e", "f"]
actual = unpack_fields([['a'], ['b', 'c'], ['d', 'e', 'f']]) actual = unpack_fields([["a"], ["b", "c"], ["d", "e", "f"]])
eq_(expected, actual) eq_(expected, actual)
def test_without_fields(self): def test_without_fields(self):
expected = ['a', 'b', 'c', 'd', 'e', 'f'] expected = ["a", "b", "c", "d", "e", "f"]
actual = unpack_fields(['a', 'b', 'c', 'd', 'e', 'f']) actual = unpack_fields(["a", "b", "c", "d", "e", "f"])
eq_(expected, actual) eq_(expected, actual)
def test_empty(self): def test_empty(self):
@ -101,134 +119,151 @@ class TestCaseunpack_fields:
class TestCaseWordCompare: class TestCaseWordCompare:
def test_list(self): def test_list(self):
eq_(100, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'])) eq_(100, compare(["a", "b", "c", "d"], ["a", "b", "c", "d"]))
eq_(86, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c'])) eq_(86, compare(["a", "b", "c", "d"], ["a", "b", "c"]))
def test_unordered(self): def test_unordered(self):
#Sometimes, users don't want fuzzy matching too much. When they set the slider # Sometimes, users don't want fuzzy matching too much. When they set the slider
#to 100, they don't expect a filename with the same words, but not the same order, to match. # to 100, they don't expect a filename with the same words, but not the same order, to match.
#Thus, we want to return 99 in that case. # Thus, we want to return 99 in that case.
eq_(99, compare(['a', 'b', 'c', 'd'], ['d', 'b', 'c', 'a'])) eq_(99, compare(["a", "b", "c", "d"], ["d", "b", "c", "a"]))
def test_word_occurs_twice(self): def test_word_occurs_twice(self):
#if a word occurs twice in first, but once in second, we want the word to be only counted once # if a word occurs twice in first, but once in second, we want the word to be only counted once
eq_(89, compare(['a', 'b', 'c', 'd', 'a'], ['d', 'b', 'c', 'a'])) eq_(89, compare(["a", "b", "c", "d", "a"], ["d", "b", "c", "a"]))
def test_uses_copy_of_lists(self): def test_uses_copy_of_lists(self):
first = ['foo', 'bar'] first = ["foo", "bar"]
second = ['bar', 'bleh'] second = ["bar", "bleh"]
compare(first, second) compare(first, second)
eq_(['foo', 'bar'], first) eq_(["foo", "bar"], first)
eq_(['bar', 'bleh'], second) eq_(["bar", "bleh"], second)
def test_word_weight(self): def test_word_weight(self):
eq_(int((6.0 / 13.0) * 100), compare(['foo', 'bar'], ['bar', 'bleh'], (WEIGHT_WORDS, ))) eq_(
int((6.0 / 13.0) * 100),
compare(["foo", "bar"], ["bar", "bleh"], (WEIGHT_WORDS,)),
)
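The 6/13 in test_word_weight falls out the same way if, under WEIGHT_WORDS, a matched word contributes its character length on each side instead of a flat count of one. Again a sketch inferred from the expected value, not from engine.py:

    def approx_weighted_compare(first, second):
        # Each shared word weighs len(word) per side it appears on; the
        # denominator is the total character count of both lists.
        shared = set(first) & set(second)
        matched = sum(2 * len(w) for w in shared)    # "bar" -> 3 + 3 = 6
        total = sum(len(w) for w in first + second)  # 3 + 3 + 3 + 4 = 13
        return int(matched / total * 100)

    # same value as int((6.0 / 13.0) * 100) in the test above
    assert approx_weighted_compare(["foo", "bar"], ["bar", "bleh"]) == 46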
def test_similar_words(self): def test_similar_words(self):
eq_(100, compare(['the', 'white', 'stripes'], ['the', 'whites', 'stripe'], (MATCH_SIMILAR_WORDS, ))) eq_(
100,
compare(
["the", "white", "stripes"],
["the", "whites", "stripe"],
(MATCH_SIMILAR_WORDS,),
),
)
def test_empty(self): def test_empty(self):
eq_(0, compare([], [])) eq_(0, compare([], []))
def test_with_fields(self): def test_with_fields(self):
eq_(67, compare([['a', 'b'], ['c', 'd', 'e']], [['a', 'b'], ['c', 'd', 'f']])) eq_(67, compare([["a", "b"], ["c", "d", "e"]], [["a", "b"], ["c", "d", "f"]]))
def test_propagate_flags_with_fields(self, monkeypatch): def test_propagate_flags_with_fields(self, monkeypatch):
def mock_compare(first, second, flags): def mock_compare(first, second, flags):
eq_((0, 1, 2, 3, 5), flags) eq_((0, 1, 2, 3, 5), flags)
monkeypatch.setattr(engine, 'compare_fields', mock_compare) monkeypatch.setattr(engine, "compare_fields", mock_compare)
compare([['a']], [['a']], (0, 1, 2, 3, 5)) compare([["a"]], [["a"]], (0, 1, 2, 3, 5))
class TestCaseWordCompareWithFields: class TestCaseWordCompareWithFields:
def test_simple(self): def test_simple(self):
eq_(67, compare_fields([['a', 'b'], ['c', 'd', 'e']], [['a', 'b'], ['c', 'd', 'f']])) eq_(
67,
compare_fields(
[["a", "b"], ["c", "d", "e"]], [["a", "b"], ["c", "d", "f"]]
),
)
def test_empty(self): def test_empty(self):
eq_(0, compare_fields([], [])) eq_(0, compare_fields([], []))
def test_different_length(self): def test_different_length(self):
eq_(0, compare_fields([['a'], ['b']], [['a'], ['b'], ['c']])) eq_(0, compare_fields([["a"], ["b"]], [["a"], ["b"], ["c"]]))
def test_propagates_flags(self, monkeypatch): def test_propagates_flags(self, monkeypatch):
def mock_compare(first, second, flags): def mock_compare(first, second, flags):
eq_((0, 1, 2, 3, 5), flags) eq_((0, 1, 2, 3, 5), flags)
monkeypatch.setattr(engine, 'compare_fields', mock_compare) monkeypatch.setattr(engine, "compare_fields", mock_compare)
compare_fields([['a']], [['a']], (0, 1, 2, 3, 5)) compare_fields([["a"]], [["a"]], (0, 1, 2, 3, 5))
def test_order(self): def test_order(self):
first = [['a', 'b'], ['c', 'd', 'e']] first = [["a", "b"], ["c", "d", "e"]]
second = [['c', 'd', 'f'], ['a', 'b']] second = [["c", "d", "f"], ["a", "b"]]
eq_(0, compare_fields(first, second)) eq_(0, compare_fields(first, second))
def test_no_order(self): def test_no_order(self):
first = [['a', 'b'], ['c', 'd', 'e']] first = [["a", "b"], ["c", "d", "e"]]
second = [['c', 'd', 'f'], ['a', 'b']] second = [["c", "d", "f"], ["a", "b"]]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, ))) eq_(67, compare_fields(first, second, (NO_FIELD_ORDER,)))
first = [['a', 'b'], ['a', 'b']] #a field can only be matched once. first = [["a", "b"], ["a", "b"]] # a field can only be matched once.
second = [['c', 'd', 'f'], ['a', 'b']] second = [["c", "d", "f"], ["a", "b"]]
eq_(0, compare_fields(first, second, (NO_FIELD_ORDER, ))) eq_(0, compare_fields(first, second, (NO_FIELD_ORDER,)))
first = [['a', 'b'], ['a', 'b', 'c']] first = [["a", "b"], ["a", "b", "c"]]
second = [['c', 'd', 'f'], ['a', 'b']] second = [["c", "d", "f"], ["a", "b"]]
eq_(33, compare_fields(first, second, (NO_FIELD_ORDER, ))) eq_(33, compare_fields(first, second, (NO_FIELD_ORDER,)))
def test_compare_fields_without_order_doesnt_alter_fields(self): def test_compare_fields_without_order_doesnt_alter_fields(self):
#The NO_ORDER comp type altered the fields! # The NO_ORDER comp type altered the fields!
first = [['a', 'b'], ['c', 'd', 'e']] first = [["a", "b"], ["c", "d", "e"]]
second = [['c', 'd', 'f'], ['a', 'b']] second = [["c", "d", "f"], ["a", "b"]]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, ))) eq_(67, compare_fields(first, second, (NO_FIELD_ORDER,)))
eq_([['a', 'b'], ['c', 'd', 'e']], first) eq_([["a", "b"], ["c", "d", "e"]], first)
eq_([['c', 'd', 'f'], ['a', 'b']], second) eq_([["c", "d", "f"], ["a", "b"]], second)
class TestCasebuild_word_dict: class TestCasebuild_word_dict:
def test_with_standard_words(self): def test_with_standard_words(self):
l = [NamedObject('foo bar', True)] itemList = [NamedObject("foo bar", True)]
l.append(NamedObject('bar baz', True)) itemList.append(NamedObject("bar baz", True))
l.append(NamedObject('baz bleh foo', True)) itemList.append(NamedObject("baz bleh foo", True))
d = build_word_dict(l) d = build_word_dict(itemList)
eq_(4, len(d)) eq_(4, len(d))
eq_(2, len(d['foo'])) eq_(2, len(d["foo"]))
assert l[0] in d['foo'] assert itemList[0] in d["foo"]
assert l[2] in d['foo'] assert itemList[2] in d["foo"]
eq_(2, len(d['bar'])) eq_(2, len(d["bar"]))
assert l[0] in d['bar'] assert itemList[0] in d["bar"]
assert l[1] in d['bar'] assert itemList[1] in d["bar"]
eq_(2, len(d['baz'])) eq_(2, len(d["baz"]))
assert l[1] in d['baz'] assert itemList[1] in d["baz"]
assert l[2] in d['baz'] assert itemList[2] in d["baz"]
eq_(1, len(d['bleh'])) eq_(1, len(d["bleh"]))
assert l[2] in d['bleh'] assert itemList[2] in d["bleh"]
def test_unpack_fields(self): def test_unpack_fields(self):
o = NamedObject('') o = NamedObject("")
o.words = [['foo', 'bar'], ['baz']] o.words = [["foo", "bar"], ["baz"]]
d = build_word_dict([o]) d = build_word_dict([o])
eq_(3, len(d)) eq_(3, len(d))
eq_(1, len(d['foo'])) eq_(1, len(d["foo"]))
def test_words_are_unaltered(self): def test_words_are_unaltered(self):
o = NamedObject('') o = NamedObject("")
o.words = [['foo', 'bar'], ['baz']] o.words = [["foo", "bar"], ["baz"]]
build_word_dict([o]) build_word_dict([o])
eq_([['foo', 'bar'], ['baz']], o.words) eq_([["foo", "bar"], ["baz"]], o.words)
def test_object_instances_can_only_be_once_in_words_object_list(self): def test_object_instances_can_only_be_once_in_words_object_list(self):
o = NamedObject('foo foo', True) o = NamedObject("foo foo", True)
d = build_word_dict([o]) d = build_word_dict([o])
eq_(1, len(d['foo'])) eq_(1, len(d["foo"]))
def test_job(self): def test_job(self):
def do_progress(p, d=''): def do_progress(p, d=""):
self.log.append(p) self.log.append(p)
return True return True
j = job.Job(1, do_progress) j = job.Job(1, do_progress)
self.log = [] self.log = []
s = "foo bar" s = "foo bar"
build_word_dict([NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j) build_word_dict(
[NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j
)
# We don't get intermediate log entries because iter_with_progress is called with every > 1 # We don't get intermediate log entries because iter_with_progress is called with every > 1
eq_(0, self.log[0]) eq_(0, self.log[0])
eq_(100, self.log[1]) eq_(100, self.log[1])
@ -237,51 +272,56 @@ class TestCasebuild_word_dict:
class TestCasemerge_similar_words: class TestCasemerge_similar_words:
def test_some_similar_words(self): def test_some_similar_words(self):
d = { d = {
'foobar': set([1]), "foobar": set([1]),
'foobar1': set([2]), "foobar1": set([2]),
'foobar2': set([3]), "foobar2": set([3]),
} }
merge_similar_words(d) merge_similar_words(d)
eq_(1, len(d)) eq_(1, len(d))
eq_(3, len(d['foobar'])) eq_(3, len(d["foobar"]))
class TestCasereduce_common_words: class TestCasereduce_common_words:
def test_typical(self): def test_typical(self):
d = { d = {
'foo': set([NamedObject('foo bar', True) for i in range(50)]), "foo": set([NamedObject("foo bar", True) for i in range(50)]),
'bar': set([NamedObject('foo bar', True) for i in range(49)]) "bar": set([NamedObject("foo bar", True) for i in range(49)]),
} }
reduce_common_words(d, 50) reduce_common_words(d, 50)
assert 'foo' not in d assert "foo" not in d
eq_(49, len(d['bar'])) eq_(49, len(d["bar"]))
def test_dont_remove_objects_with_only_common_words(self): def test_dont_remove_objects_with_only_common_words(self):
d = { d = {
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]), "common": set(
'uncommon': set([NamedObject("common uncommon", True)]) [NamedObject("common uncommon", True) for i in range(50)]
+ [NamedObject("common", True)]
),
"uncommon": set([NamedObject("common uncommon", True)]),
} }
reduce_common_words(d, 50) reduce_common_words(d, 50)
eq_(1, len(d['common'])) eq_(1, len(d["common"]))
eq_(1, len(d['uncommon'])) eq_(1, len(d["uncommon"]))
def test_values_still_are_set_instances(self): def test_values_still_are_set_instances(self):
d = { d = {
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]), "common": set(
'uncommon': set([NamedObject("common uncommon", True)]) [NamedObject("common uncommon", True) for i in range(50)]
+ [NamedObject("common", True)]
),
"uncommon": set([NamedObject("common uncommon", True)]),
} }
reduce_common_words(d, 50) reduce_common_words(d, 50)
assert isinstance(d['common'], set) assert isinstance(d["common"], set)
assert isinstance(d['uncommon'], set) assert isinstance(d["uncommon"], set)
def test_dont_raise_KeyError_when_a_word_has_been_removed(self): def test_dont_raise_KeyError_when_a_word_has_been_removed(self):
#If a word has been removed by the reduce, an object in a subsequent common word that # If a word has been removed by the reduce, an object in a subsequent common word that
#contains the word that has been removed would cause a KeyError. # contains the word that has been removed would cause a KeyError.
d = { d = {
'foo': set([NamedObject('foo bar baz', True) for i in range(50)]), "foo": set([NamedObject("foo bar baz", True) for i in range(50)]),
'bar': set([NamedObject('foo bar baz', True) for i in range(50)]), "bar": set([NamedObject("foo bar baz", True) for i in range(50)]),
'baz': set([NamedObject('foo bar baz', True) for i in range(49)]) "baz": set([NamedObject("foo bar baz", True) for i in range(49)]),
} }
try: try:
reduce_common_words(d, 50) reduce_common_words(d, 50)
@ -289,35 +329,37 @@ class TestCasereduce_common_words:
self.fail() self.fail()
def test_unpack_fields(self): def test_unpack_fields(self):
#object.words may be fields. # object.words may be fields.
def create_it(): def create_it():
o = NamedObject('') o = NamedObject("")
o.words = [['foo', 'bar'], ['baz']] o.words = [["foo", "bar"], ["baz"]]
return o return o
d = { d = {"foo": set([create_it() for i in range(50)])}
'foo': set([create_it() for i in range(50)])
}
try: try:
reduce_common_words(d, 50) reduce_common_words(d, 50)
except TypeError: except TypeError:
self.fail("must support fields.") self.fail("must support fields.")
def test_consider_a_reduced_common_word_common_even_after_reduction(self): def test_consider_a_reduced_common_word_common_even_after_reduction(self):
#There was a bug in the code that caused a word that has already been reduced not to # There was a bug in the code that caused a word that has already been reduced not to
#be counted as a common word for subsequent words. For example, if 'foo' is processed # be counted as a common word for subsequent words. For example, if 'foo' is processed
#as a common word, keeping a "foo bar" file in it, and then 'bar' is processed, "foo bar" # as a common word, keeping a "foo bar" file in it, and then 'bar' is processed, "foo bar"
#would not stay in 'bar' because 'foo' is not a common word anymore. # would not stay in 'bar' because 'foo' is not a common word anymore.
only_common = NamedObject('foo bar', True) only_common = NamedObject("foo bar", True)
d = { d = {
'foo': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]), "foo": set(
'bar': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]), [NamedObject("foo bar baz", True) for i in range(49)] + [only_common]
'baz': set([NamedObject('foo bar baz', True) for i in range(49)]) ),
"bar": set(
[NamedObject("foo bar baz", True) for i in range(49)] + [only_common]
),
"baz": set([NamedObject("foo bar baz", True) for i in range(49)]),
} }
reduce_common_words(d, 50) reduce_common_words(d, 50)
eq_(1, len(d['foo'])) eq_(1, len(d["foo"]))
eq_(1, len(d['bar'])) eq_(1, len(d["bar"]))
eq_(49, len(d['baz'])) eq_(49, len(d["baz"]))
class TestCaseget_match: class TestCaseget_match:
@ -326,8 +368,8 @@ class TestCaseget_match:
o2 = NamedObject("bar bleh", True) o2 = NamedObject("bar bleh", True)
m = get_match(o1, o2) m = get_match(o1, o2)
eq_(50, m.percentage) eq_(50, m.percentage)
eq_(['foo', 'bar'], m.first.words) eq_(["foo", "bar"], m.first.words)
eq_(['bar', 'bleh'], m.second.words) eq_(["bar", "bleh"], m.second.words)
assert m.first is o1 assert m.first is o1
assert m.second is o2 assert m.second is o2
@ -340,7 +382,9 @@ class TestCaseget_match:
assert object() not in m assert object() not in m
def test_word_weight(self): def test_word_weight(self):
m = get_match(NamedObject("foo bar", True), NamedObject("bar bleh", True), (WEIGHT_WORDS, )) m = get_match(
NamedObject("foo bar", True), NamedObject("bar bleh", True), (WEIGHT_WORDS,)
)
eq_(m.percentage, int((6.0 / 13.0) * 100)) eq_(m.percentage, int((6.0 / 13.0) * 100))
@ -349,54 +393,59 @@ class TestCaseGetMatches:
eq_(getmatches([]), []) eq_(getmatches([]), [])
def test_simple(self): def test_simple(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")] itemList = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(l) r = getmatches(itemList)
eq_(2, len(r)) eq_(2, len(r))
m = first(m for m in r if m.percentage == 50) #"foo bar" and "bar bleh" m = first(m for m in r if m.percentage == 50) # "foo bar" and "bar bleh"
assert_match(m, 'foo bar', 'bar bleh') assert_match(m, "foo bar", "bar bleh")
m = first(m for m in r if m.percentage == 33) #"foo bar" and "a b c foo" m = first(m for m in r if m.percentage == 33) # "foo bar" and "a b c foo"
assert_match(m, 'foo bar', 'a b c foo') assert_match(m, "foo bar", "a b c foo")
def test_null_and_unrelated_objects(self): def test_null_and_unrelated_objects(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject(""), NamedObject("unrelated object")] itemList = [
r = getmatches(l) NamedObject("foo bar"),
NamedObject("bar bleh"),
NamedObject(""),
NamedObject("unrelated object"),
]
r = getmatches(itemList)
eq_(len(r), 1) eq_(len(r), 1)
m = r[0] m = r[0]
eq_(m.percentage, 50) eq_(m.percentage, 50)
assert_match(m, 'foo bar', 'bar bleh') assert_match(m, "foo bar", "bar bleh")
def test_twice_the_same_word(self): def test_twice_the_same_word(self):
l = [NamedObject("foo foo bar"), NamedObject("bar bleh")] itemList = [NamedObject("foo foo bar"), NamedObject("bar bleh")]
r = getmatches(l) r = getmatches(itemList)
eq_(1, len(r)) eq_(1, len(r))
def test_twice_the_same_word_when_preworded(self): def test_twice_the_same_word_when_preworded(self):
l = [NamedObject("foo foo bar", True), NamedObject("bar bleh", True)] itemList = [NamedObject("foo foo bar", True), NamedObject("bar bleh", True)]
r = getmatches(l) r = getmatches(itemList)
eq_(1, len(r)) eq_(1, len(r))
def test_two_words_match(self): def test_two_words_match(self):
l = [NamedObject("foo bar"), NamedObject("foo bar bleh")] itemList = [NamedObject("foo bar"), NamedObject("foo bar bleh")]
r = getmatches(l) r = getmatches(itemList)
eq_(1, len(r)) eq_(1, len(r))
def test_match_files_with_only_common_words(self): def test_match_files_with_only_common_words(self):
#If a word occurs more than 50 times, it is excluded from the matching process # If a word occurs more than 50 times, it is excluded from the matching process
#The problem with the common_word_threshold is that the files containing only common # The problem with the common_word_threshold is that the files containing only common
#words will never be matched together. We *should* match them. # words will never be matched together. We *should* match them.
# This test assumes that the common word threshold const is 50 # This test assumes that the common word threshold const is 50
l = [NamedObject("foo") for i in range(50)] itemList = [NamedObject("foo") for i in range(50)]
r = getmatches(l) r = getmatches(itemList)
eq_(1225, len(r)) eq_(1225, len(r))
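The 1225 above is plain pair counting: with the threshold defeated, all 50 identical files match each other, and 50 choose 2 = 50 * 49 / 2 = 1225. The same arithmetic explains the "45 matches" comment in test_MemoryError further down (10 choose 2 = 45):

    from math import comb

    assert comb(50, 2) == 50 * 49 // 2 == 1225  # every pair of 50 files
    assert comb(10, 2) == 45                    # every pair of 10 objects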
def test_use_words_already_there_if_there(self): def test_use_words_already_there_if_there(self):
o1 = NamedObject('foo') o1 = NamedObject("foo")
o2 = NamedObject('bar') o2 = NamedObject("bar")
o2.words = ['foo'] o2.words = ["foo"]
eq_(1, len(getmatches([o1, o2]))) eq_(1, len(getmatches([o1, o2])))
def test_job(self): def test_job(self):
def do_progress(p, d=''): def do_progress(p, d=""):
self.log.append(p) self.log.append(p)
return True return True
@ -409,28 +458,28 @@ class TestCaseGetMatches:
eq_(100, self.log[-1]) eq_(100, self.log[-1])
def test_weight_words(self): def test_weight_words(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh")] itemList = [NamedObject("foo bar"), NamedObject("bar bleh")]
m = getmatches(l, weight_words=True)[0] m = getmatches(itemList, weight_words=True)[0]
eq_(int((6.0 / 13.0) * 100), m.percentage) eq_(int((6.0 / 13.0) * 100), m.percentage)
def test_similar_word(self): def test_similar_word(self):
l = [NamedObject("foobar"), NamedObject("foobars")] itemList = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(l, match_similar_words=True)), 1) eq_(len(getmatches(itemList, match_similar_words=True)), 1)
eq_(getmatches(l, match_similar_words=True)[0].percentage, 100) eq_(getmatches(itemList, match_similar_words=True)[0].percentage, 100)
l = [NamedObject("foobar"), NamedObject("foo")] itemList = [NamedObject("foobar"), NamedObject("foo")]
eq_(len(getmatches(l, match_similar_words=True)), 0) #too far eq_(len(getmatches(itemList, match_similar_words=True)), 0) # too far
l = [NamedObject("bizkit"), NamedObject("bizket")] itemList = [NamedObject("bizkit"), NamedObject("bizket")]
eq_(len(getmatches(l, match_similar_words=True)), 1) eq_(len(getmatches(itemList, match_similar_words=True)), 1)
l = [NamedObject("foobar"), NamedObject("foosbar")] itemList = [NamedObject("foobar"), NamedObject("foosbar")]
eq_(len(getmatches(l, match_similar_words=True)), 1) eq_(len(getmatches(itemList, match_similar_words=True)), 1)
def test_single_object_with_similar_words(self): def test_single_object_with_similar_words(self):
l = [NamedObject("foo foos")] itemList = [NamedObject("foo foos")]
eq_(len(getmatches(l, match_similar_words=True)), 0) eq_(len(getmatches(itemList, match_similar_words=True)), 0)
def test_double_words_get_counted_only_once(self): def test_double_words_get_counted_only_once(self):
l = [NamedObject("foo bar foo bleh"), NamedObject("foo bar bleh bar")] itemList = [NamedObject("foo bar foo bleh"), NamedObject("foo bar bleh bar")]
m = getmatches(l)[0] m = getmatches(itemList)[0]
eq_(75, m.percentage) eq_(75, m.percentage)
def test_with_fields(self): def test_with_fields(self):
@ -450,13 +499,13 @@ class TestCaseGetMatches:
eq_(m.percentage, 50) eq_(m.percentage, 50)
def test_only_match_similar_when_the_option_is_set(self): def test_only_match_similar_when_the_option_is_set(self):
l = [NamedObject("foobar"), NamedObject("foobars")] itemList = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(l, match_similar_words=False)), 0) eq_(len(getmatches(itemList, match_similar_words=False)), 0)
def test_dont_recurse_do_match(self): def test_dont_recurse_do_match(self):
# with nosetests, the stack is increased. The number has to be high enough not to fail falsely # with nosetests, the stack is increased. The number has to be high enough not to fail falsely
sys.setrecursionlimit(200) sys.setrecursionlimit(200)
files = [NamedObject('foo bar') for i in range(201)] files = [NamedObject("foo bar") for i in range(201)]
try: try:
getmatches(files) getmatches(files)
except RuntimeError: except RuntimeError:
@ -465,9 +514,9 @@ class TestCaseGetMatches:
sys.setrecursionlimit(1000) sys.setrecursionlimit(1000)
def test_min_match_percentage(self): def test_min_match_percentage(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")] itemList = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(l, min_match_percentage=50) r = getmatches(itemList, min_match_percentage=50)
eq_(1, len(r)) #Only "foo bar" / "bar bleh" should match eq_(1, len(r)) # Only "foo bar" / "bar bleh" should match
def test_MemoryError(self, monkeypatch): def test_MemoryError(self, monkeypatch):
@log_calls @log_calls
@ -476,12 +525,12 @@ class TestCaseGetMatches:
raise MemoryError() raise MemoryError()
return Match(first, second, 0) return Match(first, second, 0)
objects = [NamedObject() for i in range(10)] # results in 45 matches objects = [NamedObject() for i in range(10)] # results in 45 matches
monkeypatch.setattr(engine, 'get_match', mocked_match) monkeypatch.setattr(engine, "get_match", mocked_match)
try: try:
r = getmatches(objects) r = getmatches(objects)
except MemoryError: except MemoryError:
self.fail('MemorryError must be handled') self.fail("MemorryError must be handled")
eq_(42, len(r)) eq_(42, len(r))
@ -599,7 +648,7 @@ class TestCaseGroup:
eq_([o1], g.dupes) eq_([o1], g.dupes)
g.switch_ref(o2) g.switch_ref(o2)
assert o2 is g.ref assert o2 is g.ref
g.switch_ref(NamedObject('', True)) g.switch_ref(NamedObject("", True))
assert o2 is g.ref assert o2 is g.ref
def test_switch_ref_from_ref_dir(self): def test_switch_ref_from_ref_dir(self):
@ -620,11 +669,11 @@ class TestCaseGroup:
m = g.get_match_of(o) m = g.get_match_of(o)
assert g.ref in m assert g.ref in m
assert o in m assert o in m
assert g.get_match_of(NamedObject('', True)) is None assert g.get_match_of(NamedObject("", True)) is None
assert g.get_match_of(g.ref) is None assert g.get_match_of(g.ref) is None
def test_percentage(self): def test_percentage(self):
#percentage should return the avg percentage in relation to the ref # percentage should return the avg percentage in relation to the ref
m1, m2, m3 = get_match_triangle() m1, m2, m3 = get_match_triangle()
m1 = Match(m1[0], m1[1], 100) m1 = Match(m1[0], m1[1], 100)
m2 = Match(m2[0], m2[1], 50) m2 = Match(m2[0], m2[1], 50)
@ -651,9 +700,9 @@ class TestCaseGroup:
o1 = m1.first o1 = m1.first
o2 = m1.second o2 = m1.second
o3 = m2.second o3 = m2.second
o1.name = 'c' o1.name = "c"
o2.name = 'b' o2.name = "b"
o3.name = 'a' o3.name = "a"
g = Group() g = Group()
g.add_match(m1) g.add_match(m1)
g.add_match(m2) g.add_match(m2)
@ -709,9 +758,9 @@ class TestCaseGroup:
def test_prioritize_nothing_changes(self): def test_prioritize_nothing_changes(self):
# prioritize() returns False when nothing changes in the group. # prioritize() returns False when nothing changes in the group.
g = get_test_group() g = get_test_group()
g[0].name = 'a' g[0].name = "a"
g[1].name = 'b' g[1].name = "b"
g[2].name = 'c' g[2].name = "c"
assert not g.prioritize(lambda x: x.name) assert not g.prioritize(lambda x: x.name)
def test_list_like(self): def test_list_like(self):
@ -723,7 +772,11 @@ class TestCaseGroup:
def test_discard_matches(self): def test_discard_matches(self):
g = Group() g = Group()
o1, o2, o3 = (NamedObject("foo", True), NamedObject("bar", True), NamedObject("baz", True)) o1, o2, o3 = (
NamedObject("foo", True),
NamedObject("bar", True),
NamedObject("baz", True),
)
g.add_match(get_match(o1, o2)) g.add_match(get_match(o1, o2))
g.add_match(get_match(o1, o3)) g.add_match(get_match(o1, o3))
g.discard_matches() g.discard_matches()
@ -737,8 +790,8 @@ class TestCaseget_groups:
eq_([], r) eq_([], r)
def test_simple(self): def test_simple(self):
l = [NamedObject("foo bar"), NamedObject("bar bleh")] itemList = [NamedObject("foo bar"), NamedObject("bar bleh")]
matches = getmatches(l) matches = getmatches(itemList)
m = matches[0] m = matches[0]
r = get_groups(matches) r = get_groups(matches)
eq_(1, len(r)) eq_(1, len(r))
@ -747,28 +800,39 @@ class TestCaseget_groups:
eq_([m.second], g.dupes) eq_([m.second], g.dupes)
def test_group_with_multiple_matches(self): def test_group_with_multiple_matches(self):
#This results in 3 matches # This results in 3 matches
l = [NamedObject("foo"), NamedObject("foo"), NamedObject("foo")] itemList = [NamedObject("foo"), NamedObject("foo"), NamedObject("foo")]
matches = getmatches(l) matches = getmatches(itemList)
r = get_groups(matches) r = get_groups(matches)
eq_(1, len(r)) eq_(1, len(r))
g = r[0] g = r[0]
eq_(3, len(g)) eq_(3, len(g))
def test_must_choose_a_group(self): def test_must_choose_a_group(self):
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("b c"), NamedObject("c d"), NamedObject("c d")] itemList = [
#There will be 2 groups here: group "a b" and group "c d" NamedObject("a b"),
#"b c" can go either of them, but not both. NamedObject("a b"),
matches = getmatches(l) NamedObject("b c"),
NamedObject("c d"),
NamedObject("c d"),
]
# There will be 2 groups here: group "a b" and group "c d"
# "b c" can go either of them, but not both.
matches = getmatches(itemList)
r = get_groups(matches) r = get_groups(matches)
eq_(2, len(r)) eq_(2, len(r))
eq_(5, len(r[0])+len(r[1])) eq_(5, len(r[0]) + len(r[1]))
def test_should_all_go_in_the_same_group(self): def test_should_all_go_in_the_same_group(self):
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("a b"), NamedObject("a b")] itemList = [
#There will be 2 groups here: group "a b" and group "c d" NamedObject("a b"),
#"b c" can fit in both, but it must be in only one of them NamedObject("a b"),
matches = getmatches(l) NamedObject("a b"),
NamedObject("a b"),
]
# There will be 2 groups here: group "a b" and group "c d"
# "b c" can fit in both, but it must be in only one of them
matches = getmatches(itemList)
r = get_groups(matches) r = get_groups(matches)
eq_(1, len(r)) eq_(1, len(r))
@ -787,8 +851,8 @@ class TestCaseget_groups:
assert o3 in g assert o3 in g
def test_four_sized_group(self): def test_four_sized_group(self):
l = [NamedObject("foobar") for i in range(4)] itemList = [NamedObject("foobar") for i in range(4)]
m = getmatches(l) m = getmatches(itemList)
r = get_groups(m) r = get_groups(m)
eq_(1, len(r)) eq_(1, len(r))
eq_(4, len(r[0])) eq_(4, len(r[0]))
@ -808,10 +872,12 @@ class TestCaseget_groups:
# (A, B) match is the highest (thus resulting in an (A, B) group), still match C and D # (A, B) match is the highest (thus resulting in an (A, B) group), still match C and D
# in a separate group instead of discarding them. # in a separate group instead of discarding them.
A, B, C, D = [NamedObject() for _ in range(4)] A, B, C, D = [NamedObject() for _ in range(4)]
m1 = Match(A, B, 90) # This is the strongest "A" match m1 = Match(A, B, 90) # This is the strongest "A" match
m2 = Match(A, C, 80) # Because C doesn't match with B, it won't be in the group m2 = Match(A, C, 80) # Because C doesn't match with B, it won't be in the group
m3 = Match(A, D, 80) # Same thing for D m3 = Match(A, D, 80) # Same thing for D
m4 = Match(C, D, 70) # However, because C and D match, they should have their own group. m4 = Match(
C, D, 70
) # However, because C and D match, they should have their own group.
groups = get_groups([m1, m2, m3, m4]) groups = get_groups([m1, m2, m3, m4])
eq_(len(groups), 2) eq_(len(groups), 2)
g1, g2 = groups g1, g2 = groups
@ -819,4 +885,3 @@ class TestCaseget_groups:
assert B in g1 assert B in g1
assert C in g2 assert C in g2
assert D in g2 assert D in g2
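The comments in this last test outline the grouping policy: matches are consumed strongest-first, an object can belong to only one group, and a dupe joins a group only when it matches every current member, so leftover compatible pairs like (C, D) seed their own group. A greedy sketch consistent with that description; it is inferred from the tests in this class, not from engine.get_groups itself:

    from collections import namedtuple

    Match = namedtuple("Match", "first second percentage")

    def group_matches(matches):
        pairs = {frozenset((m.first, m.second)) for m in matches}
        grouped = {}  # object -> its group (each object joins at most one)
        groups = []
        for m in sorted(matches, key=lambda m: m.percentage, reverse=True):
            a, b = m.first, m.second
            if a in grouped and b in grouped:
                continue  # both already placed somewhere
            if a not in grouped and b not in grouped:
                group = {a, b}  # strongest free pair seeds a new group
                groups.append(group)
                grouped[a] = grouped[b] = group
            else:
                newcomer, anchor = (a, b) if b in grouped else (b, a)
                group = grouped[anchor]
                # join only when the newcomer matches every current member
                if all(frozenset((newcomer, m2)) in pairs for m2 in group):
                    group.add(newcomer)
                    grouped[newcomer] = group
        return groups

    A, B, C, D = "A", "B", "C", "D"
    groups = group_matches(
        [Match(A, B, 90), Match(A, C, 80), Match(A, D, 80), Match(C, D, 70)]
    )
    assert sorted(sorted(g) for g in groups) == [["A", "B"], ["C", "D"]]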


@ -1,9 +1,9 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2009-10-23 # Created On: 2009-10-23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import hashlib import hashlib
@ -14,32 +14,35 @@ from core.tests.directories_test import create_fake_fs
from .. import fs from .. import fs
def test_size_aggregates_subfiles(tmpdir): def test_size_aggregates_subfiles(tmpdir):
p = create_fake_fs(Path(str(tmpdir))) p = create_fake_fs(Path(str(tmpdir)))
b = fs.Folder(p) b = fs.Folder(p)
eq_(b.size, 12) eq_(b.size, 12)
def test_md5_aggregate_subfiles_sorted(tmpdir): def test_md5_aggregate_subfiles_sorted(tmpdir):
#dir.allfiles can return children in any order. Thus, bundle.md5 must aggregate # dir.allfiles can return children in any order. Thus, bundle.md5 must aggregate
#all files' md5 it contains, but it must make sure that it does so in the # all files' md5 it contains, but it must make sure that it does so in the
#same order every time. # same order every time.
p = create_fake_fs(Path(str(tmpdir))) p = create_fake_fs(Path(str(tmpdir)))
b = fs.Folder(p) b = fs.Folder(p)
md51 = fs.File(p['dir1']['file1.test']).md5 md51 = fs.File(p["dir1"]["file1.test"]).md5
md52 = fs.File(p['dir2']['file2.test']).md5 md52 = fs.File(p["dir2"]["file2.test"]).md5
md53 = fs.File(p['dir3']['file3.test']).md5 md53 = fs.File(p["dir3"]["file3.test"]).md5
md54 = fs.File(p['file1.test']).md5 md54 = fs.File(p["file1.test"]).md5
md55 = fs.File(p['file2.test']).md5 md55 = fs.File(p["file2.test"]).md5
md56 = fs.File(p['file3.test']).md5 md56 = fs.File(p["file3.test"]).md5
# The expected md5 is the md5 of md5s for folders and the direct md5 for files # The expected md5 is the md5 of md5s for folders and the direct md5 for files
folder_md51 = hashlib.md5(md51).digest() folder_md51 = hashlib.md5(md51).digest()
folder_md52 = hashlib.md5(md52).digest() folder_md52 = hashlib.md5(md52).digest()
folder_md53 = hashlib.md5(md53).digest() folder_md53 = hashlib.md5(md53).digest()
md5 = hashlib.md5(folder_md51+folder_md52+folder_md53+md54+md55+md56) md5 = hashlib.md5(folder_md51 + folder_md52 + folder_md53 + md54 + md55 + md56)
eq_(b.md5, md5.digest()) eq_(b.md5, md5.digest())
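The expected digest above spells out the aggregation rule: a folder's md5 hashes the concatenation of its children's digests in sorted-name order, where a subfolder contributes its own aggregate digest, so the result is stable no matter what order allfiles returns. A self-contained sketch of that recursion over a toy dict tree; the layout is illustrative:

    import hashlib

    def aggregate_md5(entries):
        # entries: name -> bytes (a file's contents) or dict (a subfolder)
        parts = []
        for name in sorted(entries):  # stable order on every run
            value = entries[name]
            if isinstance(value, dict):
                parts.append(aggregate_md5(value))  # subfolder: aggregate md5
            else:
                parts.append(hashlib.md5(value).digest())  # file: content md5
        return hashlib.md5(b"".join(parts)).digest()

    tree = {"dir1": {"file1.test": b"1"}, "file2.test": b"12"}
    assert len(aggregate_md5(tree)) == 16  # a single md5 digest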
def test_has_file_attrs(tmpdir): def test_has_file_attrs(tmpdir):
#a Folder must behave like a file, so it must have mtime attributes # a Folder must behave like a file, so it must have mtime attributes
b = fs.Folder(Path(str(tmpdir))) b = fs.Folder(Path(str(tmpdir)))
assert b.mtime > 0 assert b.mtime > 0
eq_(b.extension, '') eq_(b.extension, "")


@ -12,152 +12,172 @@ from hscommon.testutil import eq_
from ..ignore import IgnoreList from ..ignore import IgnoreList
def test_empty(): def test_empty():
il = IgnoreList() il = IgnoreList()
eq_(0, len(il)) eq_(0, len(il))
assert not il.AreIgnored('foo', 'bar') assert not il.AreIgnored("foo", "bar")
def test_simple(): def test_simple():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
assert il.AreIgnored('foo', 'bar') assert il.AreIgnored("foo", "bar")
assert il.AreIgnored('bar', 'foo') assert il.AreIgnored("bar", "foo")
assert not il.AreIgnored('foo', 'bleh') assert not il.AreIgnored("foo", "bleh")
assert not il.AreIgnored('bleh', 'bar') assert not il.AreIgnored("bleh", "bar")
eq_(1, len(il)) eq_(1, len(il))
def test_multiple(): def test_multiple():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('foo', 'bleh') il.Ignore("foo", "bleh")
il.Ignore('bleh', 'bar') il.Ignore("bleh", "bar")
il.Ignore('aybabtu', 'bleh') il.Ignore("aybabtu", "bleh")
assert il.AreIgnored('foo', 'bar') assert il.AreIgnored("foo", "bar")
assert il.AreIgnored('bar', 'foo') assert il.AreIgnored("bar", "foo")
assert il.AreIgnored('foo', 'bleh') assert il.AreIgnored("foo", "bleh")
assert il.AreIgnored('bleh', 'bar') assert il.AreIgnored("bleh", "bar")
assert not il.AreIgnored('aybabtu', 'bar') assert not il.AreIgnored("aybabtu", "bar")
eq_(4, len(il)) eq_(4, len(il))
def test_clear(): def test_clear():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Clear() il.Clear()
assert not il.AreIgnored('foo', 'bar') assert not il.AreIgnored("foo", "bar")
assert not il.AreIgnored('bar', 'foo') assert not il.AreIgnored("bar", "foo")
eq_(0, len(il)) eq_(0, len(il))
def test_add_same_twice(): def test_add_same_twice():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('bar', 'foo') il.Ignore("bar", "foo")
eq_(1, len(il)) eq_(1, len(il))
def test_save_to_xml(): def test_save_to_xml():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('foo', 'bleh') il.Ignore("foo", "bleh")
il.Ignore('bleh', 'bar') il.Ignore("bleh", "bar")
f = io.BytesIO() f = io.BytesIO()
il.save_to_xml(f) il.save_to_xml(f)
f.seek(0) f.seek(0)
doc = ET.parse(f) doc = ET.parse(f)
root = doc.getroot() root = doc.getroot()
eq_(root.tag, 'ignore_list') eq_(root.tag, "ignore_list")
eq_(len(root), 2) eq_(len(root), 2)
eq_(len([c for c in root if c.tag == 'file']), 2) eq_(len([c for c in root if c.tag == "file"]), 2)
f1, f2 = root[:] f1, f2 = root[:]
subchildren = [c for c in f1 if c.tag == 'file'] + [c for c in f2 if c.tag == 'file'] subchildren = [c for c in f1 if c.tag == "file"] + [
c for c in f2 if c.tag == "file"
]
eq_(len(subchildren), 3) eq_(len(subchildren), 3)
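Decoded, these assertions describe the serialized shape: an ignore_list root with one file element per distinct first path, each holding one nested file element per ignored counterpart, three nested entries in total for the three pairs. Roughly like this; the layout is inferred from the assertions and the path attribute name is an assumption:

    <?xml version="1.0" encoding="utf-8"?>
    <ignore_list>
      <file path="foo">        <!-- 'foo' pairs with 'bar' and 'bleh' -->
        <file path="bar"/>
        <file path="bleh"/>
      </file>
      <file path="bleh">       <!-- the remaining ('bleh', 'bar') pair -->
        <file path="bar"/>
      </file>
    </ignore_list>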
def test_SaveThenLoad(): def test_SaveThenLoad():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('foo', 'bleh') il.Ignore("foo", "bleh")
il.Ignore('bleh', 'bar') il.Ignore("bleh", "bar")
il.Ignore('\u00e9', 'bar') il.Ignore("\u00e9", "bar")
f = io.BytesIO() f = io.BytesIO()
il.save_to_xml(f) il.save_to_xml(f)
f.seek(0) f.seek(0)
il = IgnoreList() il = IgnoreList()
il.load_from_xml(f) il.load_from_xml(f)
eq_(4, len(il)) eq_(4, len(il))
assert il.AreIgnored('\u00e9', 'bar') assert il.AreIgnored("\u00e9", "bar")
def test_LoadXML_with_empty_file_tags(): def test_LoadXML_with_empty_file_tags():
f = io.BytesIO() f = io.BytesIO()
f.write(b'<?xml version="1.0" encoding="utf-8"?><ignore_list><file><file/></file></ignore_list>') f.write(
b'<?xml version="1.0" encoding="utf-8"?><ignore_list><file><file/></file></ignore_list>'
)
f.seek(0) f.seek(0)
il = IgnoreList() il = IgnoreList()
il.load_from_xml(f) il.load_from_xml(f)
eq_(0, len(il)) eq_(0, len(il))
def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else(): def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('bar', 'baz') il.Ignore("bar", "baz")
assert il.AreIgnored('bar', 'foo') assert il.AreIgnored("bar", "foo")
def test_no_dupes_when_a_child_is_a_key_somewhere_else(): def test_no_dupes_when_a_child_is_a_key_somewhere_else():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('bar', 'baz') il.Ignore("bar", "baz")
il.Ignore('bar', 'foo') il.Ignore("bar", "foo")
eq_(2, len(il)) eq_(2, len(il))
def test_iterate(): def test_iterate():
#It must be possible to iterate through the ignore list # It must be possible to iterate through the ignore list
il = IgnoreList() il = IgnoreList()
expected = [('foo', 'bar'), ('bar', 'baz'), ('foo', 'baz')] expected = [("foo", "bar"), ("bar", "baz"), ("foo", "baz")]
for i in expected: for i in expected:
il.Ignore(i[0], i[1]) il.Ignore(i[0], i[1])
for i in il: for i in il:
expected.remove(i) #No exception should be raised expected.remove(i) # No exception should be raised
assert not expected #expected should be empty assert not expected # expected should be empty
def test_filter(): def test_filter():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('bar', 'baz') il.Ignore("bar", "baz")
il.Ignore('foo', 'baz') il.Ignore("foo", "baz")
il.Filter(lambda f, s: f == 'bar') il.Filter(lambda f, s: f == "bar")
eq_(1, len(il)) eq_(1, len(il))
assert not il.AreIgnored('foo', 'bar') assert not il.AreIgnored("foo", "bar")
assert il.AreIgnored('bar', 'baz') assert il.AreIgnored("bar", "baz")
def test_save_with_non_ascii_items(): def test_save_with_non_ascii_items():
il = IgnoreList() il = IgnoreList()
il.Ignore('\xac', '\xbf') il.Ignore("\xac", "\xbf")
f = io.BytesIO() f = io.BytesIO()
try: try:
il.save_to_xml(f) il.save_to_xml(f)
except Exception as e: except Exception as e:
raise AssertionError(str(e)) raise AssertionError(str(e))
def test_len(): def test_len():
il = IgnoreList() il = IgnoreList()
eq_(0, len(il)) eq_(0, len(il))
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
eq_(1, len(il)) eq_(1, len(il))
def test_nonzero(): def test_nonzero():
il = IgnoreList() il = IgnoreList()
assert not il assert not il
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
assert il assert il
def test_remove(): def test_remove():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('foo', 'baz') il.Ignore("foo", "baz")
il.remove('bar', 'foo') il.remove("bar", "foo")
eq_(len(il), 1) eq_(len(il), 1)
assert not il.AreIgnored('foo', 'bar') assert not il.AreIgnored("foo", "bar")
def test_remove_non_existant(): def test_remove_non_existant():
il = IgnoreList() il = IgnoreList()
il.Ignore('foo', 'bar') il.Ignore("foo", "bar")
il.Ignore('foo', 'baz') il.Ignore("foo", "baz")
with raises(ValueError): with raises(ValueError):
il.remove('foo', 'bleh') il.remove("foo", "bleh")
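
Taken together, these tests pin down the IgnoreList contract: pairs are symmetric, re-adding a mirrored pair collapses to one entry, and the list round-trips through XML. A minimal usage sketch follows; the import path is an assumption based on the tests' relative imports, not something the diff states.

# Minimal sketch of the IgnoreList API exercised by the tests above.
# The import path is an assumption inferred from the tests' relative imports.
import io

from core.ignore import IgnoreList

il = IgnoreList()
il.Ignore("foo", "bar")             # record an ignored pair
assert il.AreIgnored("bar", "foo")  # lookups are symmetric
il.Ignore("bar", "foo")             # re-adding the mirrored pair is a no-op
assert len(il) == 1

buf = io.BytesIO()
il.save_to_xml(buf)                 # serialize to an <ignore_list> document
buf.seek(0)
loaded = IgnoreList()
loaded.load_from_xml(buf)
assert loaded.AreIgnored("foo", "bar")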
@@ -8,33 +8,39 @@ from hscommon.testutil import eq_
from ..markable import MarkableList, Markable


def gen():
    ml = MarkableList()
    ml.extend(list(range(10)))
    return ml


def test_unmarked():
    ml = gen()
    for i in ml:
        assert not ml.is_marked(i)


def test_mark():
    ml = gen()
    assert ml.mark(3)
    assert ml.is_marked(3)
    assert not ml.is_marked(2)


def test_unmark():
    ml = gen()
    ml.mark(4)
    assert ml.unmark(4)
    assert not ml.is_marked(4)


def test_unmark_unmarked():
    ml = gen()
    assert not ml.unmark(4)
    assert not ml.is_marked(4)


def test_mark_twice_and_unmark():
    ml = gen()
    assert ml.mark(5)

@@ -42,6 +48,7 @@ def test_mark_twice_and_unmark():
    ml.unmark(5)
    assert not ml.is_marked(5)


def test_mark_toggle():
    ml = gen()
    ml.mark_toggle(6)

@@ -51,22 +58,25 @@ def test_mark_toggle():
    ml.mark_toggle(6)
    assert ml.is_marked(6)


def test_is_markable():
    class Foobar(Markable):
        def _is_markable(self, o):
            return o == "foobar"

    f = Foobar()
    assert not f.is_marked("foobar")
    assert not f.mark("foo")
    assert not f.is_marked("foo")
    f.mark_toggle("foo")
    assert not f.is_marked("foo")
    f.mark("foobar")
    assert f.is_marked("foobar")
    ml = gen()
    ml.mark(11)
    assert not ml.is_marked(11)


def test_change_notifications():
    class Foobar(Markable):
        def _did_mark(self, o):

@@ -77,13 +87,14 @@ def test_change_notifications():
    f = Foobar()
    f.log = []
    f.mark("foo")
    f.mark("foo")
    f.mark_toggle("bar")
    f.unmark("foo")
    f.unmark("foo")
    f.mark_toggle("bar")
    eq_([(True, "foo"), (True, "bar"), (False, "foo"), (False, "bar")], f.log)


def test_mark_count():
    ml = gen()

@@ -93,6 +104,7 @@ def test_mark_count():
    ml.mark(11)
    eq_(1, ml.mark_count)


def test_mark_none():
    log = []
    ml = gen()

@@ -104,6 +116,7 @@ def test_mark_none():
    eq_(0, ml.mark_count)
    eq_([1, 2], log)


def test_mark_all():
    ml = gen()
    eq_(0, ml.mark_count)

@@ -111,6 +124,7 @@ def test_mark_all():
    eq_(10, ml.mark_count)
    assert ml.is_marked(1)


def test_mark_invert():
    ml = gen()
    ml.mark(1)

@@ -118,6 +132,7 @@ def test_mark_invert():
    assert not ml.is_marked(1)
    assert ml.is_marked(2)


def test_mark_while_inverted():
    log = []
    ml = gen()

@@ -134,6 +149,7 @@ def test_mark_while_inverted():
    eq_(7, ml.mark_count)
    eq_([(True, 1), (False, 1), (True, 2), (True, 1), (True, 3)], log)


def test_remove_mark_flag():
    ml = gen()
    ml.mark(1)

@@ -145,10 +161,12 @@ def test_remove_mark_flag():
    ml._remove_mark_flag(1)
    assert ml.is_marked(1)


def test_is_marked_returns_false_if_object_not_markable():
    class MyMarkableList(MarkableList):
        def _is_markable(self, o):
            return o != 4

    ml = MyMarkableList()
    ml.extend(list(range(10)))
    ml.mark_invert()
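
These tests imply a small template-method protocol on Markable: _is_markable() can veto a mark, and notification hooks fire on state changes. The sketch below restates that protocol; the import path and the _did_unmark name are inferences (the tests only show _did_mark before the hunk elision, but the (False, ...) log entries imply an unmark hook), not confirmed source.

# Hedged sketch of the subclass hooks the tests above imply; import path
# and the _did_unmark hook name are assumptions, not the actual source.
from core.markable import Markable

class LoggingMarkable(Markable):
    def _is_markable(self, o):
        return o != "untouchable"  # veto marking for this one object

    def _did_mark(self, o):
        self.log.append((True, o))

    def _did_unmark(self, o):
        self.log.append((False, o))

f = LoggingMarkable()
f.log = []
f.mark("foo")          # fires _did_mark
f.mark("untouchable")  # refused by _is_markable, no log entry
f.unmark("foo")        # fires the unmark hook
assert f.log == [(True, "foo"), (False, "foo")]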
@@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2011/09/07
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import os.path as op

@@ -14,6 +14,7 @@ from ..engine import Group, Match
no = NamedObject


def app_with_dupes(dupes):
    # Creates an app with specified dupes. dupes is a list of lists, each list in the list being
    # a dupe group. We cheat a little bit by creating dupe groups manually instead of running a

@@ -29,57 +30,63 @@ def app_with_dupes(dupes):
    app.app._results_changed()
    return app


# ---
def app_normal_results():
    # Just some results, with different extensions and size, for good measure.
    dupes = [
        [
            no("foo1.ext1", size=1, folder="folder1"),
            no("foo2.ext2", size=2, folder="folder2"),
        ],
    ]
    return app_with_dupes(dupes)


@with_app(app_normal_results)
def test_kind_subcrit(app):
# The subcriteria of the "Kind" criteria is a list of extensions contained in the dupes. # The subcriteria of the "Kind" criteria is a list of extensions contained in the dupes.
app.select_pri_criterion("Kind") app.select_pri_criterion("Kind")
eq_(app.pdialog.criteria_list[:], ['ext1', 'ext2']) eq_(app.pdialog.criteria_list[:], ["ext1", "ext2"])
@with_app(app_normal_results) @with_app(app_normal_results)
def test_kind_reprioritization(app): def test_kind_reprioritization(app):
# Just a simple test of the system as a whole. # Just a simple test of the system as a whole.
# select a criterion, and perform re-prioritization and see if it worked. # select a criterion, and perform re-prioritization and see if it worked.
app.select_pri_criterion("Kind") app.select_pri_criterion("Kind")
app.pdialog.criteria_list.select([1]) # ext2 app.pdialog.criteria_list.select([1]) # ext2
app.pdialog.add_selected() app.pdialog.add_selected()
app.pdialog.perform_reprioritization() app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo2.ext2') eq_(app.rtable[0].data["name"], "foo2.ext2")
@with_app(app_normal_results) @with_app(app_normal_results)
def test_folder_subcrit(app): def test_folder_subcrit(app):
app.select_pri_criterion("Folder") app.select_pri_criterion("Folder")
eq_(app.pdialog.criteria_list[:], ['folder1', 'folder2']) eq_(app.pdialog.criteria_list[:], ["folder1", "folder2"])
@with_app(app_normal_results) @with_app(app_normal_results)
def test_folder_reprioritization(app): def test_folder_reprioritization(app):
app.select_pri_criterion("Folder") app.select_pri_criterion("Folder")
app.pdialog.criteria_list.select([1]) # folder2 app.pdialog.criteria_list.select([1]) # folder2
app.pdialog.add_selected() app.pdialog.add_selected()
app.pdialog.perform_reprioritization() app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo2.ext2') eq_(app.rtable[0].data["name"], "foo2.ext2")
@with_app(app_normal_results) @with_app(app_normal_results)
def test_prilist_display(app): def test_prilist_display(app):
# The prioritization list displays selected criteria correctly. # The prioritization list displays selected criteria correctly.
app.select_pri_criterion("Kind") app.select_pri_criterion("Kind")
app.pdialog.criteria_list.select([1]) # ext2 app.pdialog.criteria_list.select([1]) # ext2
app.pdialog.add_selected() app.pdialog.add_selected()
app.select_pri_criterion("Folder") app.select_pri_criterion("Folder")
app.pdialog.criteria_list.select([1]) # folder2 app.pdialog.criteria_list.select([1]) # folder2
app.pdialog.add_selected() app.pdialog.add_selected()
app.select_pri_criterion("Size") app.select_pri_criterion("Size")
app.pdialog.criteria_list.select([1]) # Lowest app.pdialog.criteria_list.select([1]) # Lowest
app.pdialog.add_selected() app.pdialog.add_selected()
expected = [ expected = [
"Kind (ext2)", "Kind (ext2)",
@ -88,23 +95,26 @@ def test_prilist_display(app):
] ]
eq_(app.pdialog.prioritization_list[:], expected) eq_(app.pdialog.prioritization_list[:], expected)
@with_app(app_normal_results) @with_app(app_normal_results)
def test_size_subcrit(app): def test_size_subcrit(app):
app.select_pri_criterion("Size") app.select_pri_criterion("Size")
eq_(app.pdialog.criteria_list[:], ['Highest', 'Lowest']) eq_(app.pdialog.criteria_list[:], ["Highest", "Lowest"])
@with_app(app_normal_results) @with_app(app_normal_results)
def test_size_reprioritization(app): def test_size_reprioritization(app):
app.select_pri_criterion("Size") app.select_pri_criterion("Size")
app.pdialog.criteria_list.select([0]) # highest app.pdialog.criteria_list.select([0]) # highest
app.pdialog.add_selected() app.pdialog.add_selected()
app.pdialog.perform_reprioritization() app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo2.ext2') eq_(app.rtable[0].data["name"], "foo2.ext2")
@with_app(app_normal_results) @with_app(app_normal_results)
def test_reorder_prioritizations(app): def test_reorder_prioritizations(app):
app.add_pri_criterion("Kind", 0) # ext1 app.add_pri_criterion("Kind", 0) # ext1
app.add_pri_criterion("Kind", 1) # ext2 app.add_pri_criterion("Kind", 1) # ext2
app.pdialog.prioritization_list.move_indexes([1], 0) app.pdialog.prioritization_list.move_indexes([1], 0)
expected = [ expected = [
"Kind (ext2)", "Kind (ext2)",
@ -112,6 +122,7 @@ def test_reorder_prioritizations(app):
] ]
eq_(app.pdialog.prioritization_list[:], expected) eq_(app.pdialog.prioritization_list[:], expected)
@with_app(app_normal_results) @with_app(app_normal_results)
def test_remove_crit_from_list(app): def test_remove_crit_from_list(app):
app.add_pri_criterion("Kind", 0) app.add_pri_criterion("Kind", 0)
@ -123,75 +134,72 @@ def test_remove_crit_from_list(app):
] ]
eq_(app.pdialog.prioritization_list[:], expected) eq_(app.pdialog.prioritization_list[:], expected)
@with_app(app_normal_results) @with_app(app_normal_results)
def test_add_crit_without_selection(app): def test_add_crit_without_selection(app):
# Adding a criterion without having made a selection doesn't cause a crash. # Adding a criterion without having made a selection doesn't cause a crash.
app.pdialog.add_selected() # no crash app.pdialog.add_selected() # no crash
#---
# ---
def app_one_name_ends_with_number(): def app_one_name_ends_with_number():
dupes = [ dupes = [
[ [no("foo.ext"), no("foo1.ext")],
no('foo.ext'),
no('foo1.ext'),
],
] ]
return app_with_dupes(dupes) return app_with_dupes(dupes)
@with_app(app_one_name_ends_with_number) @with_app(app_one_name_ends_with_number)
def test_filename_reprioritization(app): def test_filename_reprioritization(app):
app.add_pri_criterion("Filename", 0) # Ends with a number app.add_pri_criterion("Filename", 0) # Ends with a number
app.pdialog.perform_reprioritization() app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'foo1.ext') eq_(app.rtable[0].data["name"], "foo1.ext")
#---
# ---
def app_with_subfolders(): def app_with_subfolders():
dupes = [ dupes = [
[ [no("foo1", folder="baz"), no("foo2", folder="foo/bar")],
no('foo1', folder='baz'), [no("foo3", folder="baz"), no("foo4", folder="foo")],
no('foo2', folder='foo/bar'),
],
[
no('foo3', folder='baz'),
no('foo4', folder='foo'),
],
] ]
return app_with_dupes(dupes) return app_with_dupes(dupes)
@with_app(app_with_subfolders) @with_app(app_with_subfolders)
def test_folder_crit_is_sorted(app): def test_folder_crit_is_sorted(app):
# Folder subcriteria are sorted. # Folder subcriteria are sorted.
app.select_pri_criterion("Folder") app.select_pri_criterion("Folder")
eq_(app.pdialog.criteria_list[:], ['baz', 'foo', op.join('foo', 'bar')]) eq_(app.pdialog.criteria_list[:], ["baz", "foo", op.join("foo", "bar")])
@with_app(app_with_subfolders) @with_app(app_with_subfolders)
def test_folder_crit_includes_subfolders(app): def test_folder_crit_includes_subfolders(app):
# When selecting a folder crit, dupes in a subfolder are also considered as affected by that # When selecting a folder crit, dupes in a subfolder are also considered as affected by that
# crit. # crit.
app.add_pri_criterion("Folder", 1) # foo app.add_pri_criterion("Folder", 1) # foo
app.pdialog.perform_reprioritization() app.pdialog.perform_reprioritization()
# Both foo and foo/bar dupes will be prioritized # Both foo and foo/bar dupes will be prioritized
eq_(app.rtable[0].data['name'], 'foo2') eq_(app.rtable[0].data["name"], "foo2")
eq_(app.rtable[2].data['name'], 'foo4') eq_(app.rtable[2].data["name"], "foo4")
@with_app(app_with_subfolders) @with_app(app_with_subfolders)
def test_display_something_on_empty_extensions(app): def test_display_something_on_empty_extensions(app):
# When there's no extension, display "None" instead of nothing at all. # When there's no extension, display "None" instead of nothing at all.
app.select_pri_criterion("Kind") app.select_pri_criterion("Kind")
eq_(app.pdialog.criteria_list[:], ['None']) eq_(app.pdialog.criteria_list[:], ["None"])
#---
# ---
def app_one_name_longer_than_the_other(): def app_one_name_longer_than_the_other():
dupes = [ dupes = [
[ [no("shortest.ext"), no("loooongest.ext")],
no('shortest.ext'),
no('loooongest.ext'),
],
] ]
return app_with_dupes(dupes) return app_with_dupes(dupes)
@with_app(app_one_name_longer_than_the_other) @with_app(app_one_name_longer_than_the_other)
def test_longest_filename_prioritization(app): def test_longest_filename_prioritization(app):
app.add_pri_criterion("Filename", 2) # Longest app.add_pri_criterion("Filename", 2) # Longest
app.pdialog.perform_reprioritization() app.pdialog.perform_reprioritization()
eq_(app.rtable[0].data['name'], 'loooongest.ext') eq_(app.rtable[0].data["name"], "loooongest.ext")
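
What perform_reprioritization() is being tested for can be summarized as: apply the selected criteria in order and promote the winning file to ref in each group. The sketch below is only an illustration of that idea under assumed names (reprioritize and the key-function criteria list are invented; only switch_ref appears in the tests), not dupeGuru's actual implementation.

# Illustrative pseudocode of criteria-based reprioritization; names invented.
def reprioritize(groups, criteria):
    # criteria: ordered list of key functions; lower key = higher priority
    for group in groups:
        best = min(group, key=lambda d: tuple(c(d) for c in criteria))
        if best is not group.ref:
            group.switch_ref(best)  # switch_ref() appears in the tests above

# Example: prefer the "ext2" kind first, then the smallest size.
criteria = [
    lambda d: 0 if d.name.endswith(".ext2") else 1,
    lambda d: d.size,
]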
@@ -1,13 +1,14 @@
# Created By: Virgil Dupras
# Created On: 2013-07-28
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import TestApp, GetTestGroups


def app_with_results():
    app = TestApp()
    objects, matches, groups = GetTestGroups()

@@ -15,23 +16,26 @@ def app_with_results():
    app.rtable.refresh()
    return app


def test_delta_flags_delta_mode_off():
    app = app_with_results()
    # When the delta mode is off, we never have delta values flags
    app.rtable.delta_values = False
    # Ref file, always false anyway
    assert not app.rtable[0].is_cell_delta("size")
    # False because delta mode is off
    assert not app.rtable[1].is_cell_delta("size")


def test_delta_flags_delta_mode_on_delta_columns():
    # When the delta mode is on, delta columns always have a delta flag, except for ref rows
    app = app_with_results()
    app.rtable.delta_values = True
    # Ref file, always false anyway
    assert not app.rtable[0].is_cell_delta("size")
    # But for a dupe, the flag is on
    assert app.rtable[1].is_cell_delta("size")


def test_delta_flags_delta_mode_on_non_delta_columns():
    # When the delta mode is on, non-delta columns have a delta flag if their value differs from

@@ -39,11 +43,12 @@ def test_delta_flags_delta_mode_on_non_delta_columns():
    app = app_with_results()
    app.rtable.delta_values = True
    # "bar bleh" != "foo bar", flag on
    assert app.rtable[1].is_cell_delta("name")
    # "ibabtu" row, but it's a ref, flag off
    assert not app.rtable[3].is_cell_delta("name")
    # "ibabtu" == "ibabtu", flag off
    assert not app.rtable[4].is_cell_delta("name")


def test_delta_flags_delta_mode_on_non_delta_columns_case_insensitive():
    # Comparison that occurs for non-numeric columns to check whether they're delta is case

@@ -53,4 +58,4 @@ def test_delta_flags_delta_mode_on_non_delta_columns_case_insensitive():
    app.app.results.groups[1].dupes[0].name = "IBaBTU"
    app.rtable.delta_values = True
    # "ibAbtu" == "IBaBTU", flag off
    assert not app.rtable[4].is_cell_delta("name")
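
The four delta-flag tests above reduce to one decision rule. The function below restates it for clarity; it is not the real is_cell_delta() body, just the behaviour the assertions pin down, and the value/ref_value accessors are hypothetical.

# Hedged restatement of the delta-flag rule; the accessors are hypothetical.
def is_cell_delta(row, column, delta_mode, delta_columns):
    if not delta_mode:
        return False      # delta mode off: never flagged
    if row.is_ref:
        return False      # ref rows: never flagged
    if column in delta_columns:
        return True       # e.g. "size": always flagged on dupes
    own = str(row.value(column)).lower()
    ref = str(row.ref_value(column)).lower()
    return own != ref     # non-delta columns: case-insensitive difference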
@@ -17,6 +17,7 @@ from .. import engine
from .base import NamedObject, GetTestGroups, DupeGuru
from ..results import Results


class TestCaseResultsEmpty:
    def setup_method(self, method):
        self.app = DupeGuru()

@@ -24,8 +25,8 @@ class TestCaseResultsEmpty:
    def test_apply_invalid_filter(self):
        # If the applied filter is an invalid regexp, just ignore the filter.
        self.results.apply_filter("[")  # invalid
        self.test_stat_line()  # make sure that the stats line isn't saying we applied a '[' filter

    def test_stat_line(self):
        eq_("0 / 0 (0.00 B / 0.00 B) duplicates marked.", self.results.stat_line)

@@ -34,7 +35,7 @@ class TestCaseResultsEmpty:
        eq_(0, len(self.results.groups))

    def test_get_group_of_duplicate(self):
        assert self.results.get_group_of_duplicate("foo") is None

    def test_save_to_xml(self):
        f = io.BytesIO()

@@ -42,7 +43,7 @@ class TestCaseResultsEmpty:
        f.seek(0)
        doc = ET.parse(f)
        root = doc.getroot()
        eq_("results", root.tag)

    def test_is_modified(self):
        assert not self.results.is_modified

@@ -59,10 +60,10 @@ class TestCaseResultsEmpty:
        # would have been some kind of feedback to the user, but the work involved for something
        # that simply never happens (I never received a report of this crash, I experienced it
        # while fooling around) is too much. Instead, use standard name conflict resolution.
        folderpath = tmpdir.join("foo")
        folderpath.mkdir()
        self.results.save_to_xml(str(folderpath))  # no crash
        assert tmpdir.join("[000] foo").check()


class TestCaseResultsWithSomeGroups:

@@ -116,18 +117,22 @@ class TestCaseResultsWithSomeGroups:
        assert d is g.ref

    def test_sort_groups(self):
        self.results.make_ref(
            self.objects[1]
        )  # We want the 1024-byte object to become ref.
        g1, g2 = self.groups
        self.results.sort_groups("size")
        assert self.results.groups[0] is g2
        assert self.results.groups[1] is g1
        self.results.sort_groups("size", False)
        assert self.results.groups[0] is g1
        assert self.results.groups[1] is g2

    def test_set_groups_when_sorted(self):
        self.results.make_ref(
            self.objects[1]
        )  # We want the 1024-byte object to become ref.
        self.results.sort_groups("size")
        objects, matches, groups = GetTestGroups()
        g1, g2 = groups
        g1.switch_ref(objects[1])

@@ -158,9 +163,9 @@ class TestCaseResultsWithSomeGroups:
        o3.size = 3
        o4.size = 2
        o5.size = 1
        self.results.sort_dupes("size")
        eq_([o5, o3, o2], self.results.dupes)
        self.results.sort_dupes("size", False)
        eq_([o2, o3, o5], self.results.dupes)

    def test_dupe_list_remember_sort(self):

@@ -170,25 +175,25 @@ class TestCaseResultsWithSomeGroups:
        o3.size = 3
        o4.size = 2
        o5.size = 1
        self.results.sort_dupes("size")
        self.results.make_ref(o2)
        eq_([o5, o3, o1], self.results.dupes)

    def test_dupe_list_sort_delta_values(self):
        o1, o2, o3, o4, o5 = self.objects
        o1.size = 10
        o2.size = 2  # -8
        o3.size = 3  # -7
        o4.size = 20
        o5.size = 1  # -19
        self.results.sort_dupes("size", delta=True)
        eq_([o5, o2, o3], self.results.dupes)

    def test_sort_empty_list(self):
        # There was an infinite loop when sorting an empty list.
        app = DupeGuru()
        r = app.results
        r.sort_dupes("name")
        eq_([], r.dupes)

    def test_dupe_list_update_on_remove_duplicates(self):

@@ -209,7 +214,7 @@ class TestCaseResultsWithSomeGroups:
        f = io.BytesIO()
        self.results.save_to_xml(f)
        assert not self.results.is_modified
        self.results.groups = self.groups  # sets the flag back
        f.seek(0)
        self.results.load_from_xml(f, get_file)
        assert not self.results.is_modified

@@ -236,7 +241,7 @@ class TestCaseResultsWithSomeGroups:
        # "aaa" makes our dupe go first in alphabetical order, but since we have the same value as
        # ref, we're going last.
        g2r.name = g2d1.name = "aaa"
        self.results.sort_dupes("name", delta=True)
        eq_("aaa", self.results.dupes[2].name)

    def test_dupe_list_sort_delta_values_nonnumeric_case_insensitive(self):

@@ -244,9 +249,10 @@ class TestCaseResultsWithSomeGroups:
        g1r, g1d1, g1d2, g2r, g2d1 = self.objects
        g2r.name = "AaA"
        g2d1.name = "aAa"
        self.results.sort_dupes("name", delta=True)
        eq_("aAa", self.results.dupes[2].name)


class TestCaseResultsWithSavedResults:
    def setup_method(self, method):
        self.app = DupeGuru()

@@ -266,7 +272,7 @@ class TestCaseResultsWithSavedResults:
        def get_file(path):
            return [f for f in self.objects if str(f.path) == path][0]

        self.results.groups = self.groups  # sets the flag back
        self.results.load_from_xml(self.f, get_file)
        assert not self.results.is_modified

@@ -299,7 +305,7 @@ class TestCaseResultsMarkings:
        self.results.mark(self.objects[2])
        self.results.mark(self.objects[4])
        eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
        self.results.mark(self.objects[0])  # this is a ref, it can't be counted
        eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
        self.results.groups = self.groups
        eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)

@@ -335,7 +341,7 @@ class TestCaseResultsMarkings:
        def log_object(o):
            log.append(o)
            if o is self.objects[1]:
                raise EnvironmentError("foobar")

        log = []
        self.results.mark_all()

@@ -350,7 +356,7 @@ class TestCaseResultsMarkings:
        eq_(len(self.results.problems), 1)
        dupe, msg = self.results.problems[0]
        assert dupe is self.objects[1]
        eq_(msg, "foobar")

    def test_perform_on_marked_with_ref(self):
        def log_object(o):

@@ -408,20 +414,20 @@ class TestCaseResultsMarkings:
        f.seek(0)
        doc = ET.parse(f)
        root = doc.getroot()
        g1, g2 = root.getiterator("group")
        d1, d2, d3 = g1.getiterator("file")
        eq_("n", d1.get("marked"))
        eq_("n", d2.get("marked"))
        eq_("y", d3.get("marked"))
        d1, d2 = g2.getiterator("file")
        eq_("n", d1.get("marked"))
        eq_("y", d2.get("marked"))

    def test_LoadXML(self):
        def get_file(path):
            return [f for f in self.objects if str(f.path) == path][0]

        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
        self.results.mark(self.objects[1])
        self.results.mark_invert()
        f = io.BytesIO()

@@ -444,51 +450,51 @@ class TestCaseResultsXML:
        self.objects, self.matches, self.groups = GetTestGroups()
        self.results.groups = self.groups

    def get_file(self, path):  # use this as a callback for load_from_xml
        return [o for o in self.objects if o.path == path][0]

    def test_save_to_xml(self):
        self.objects[0].is_ref = True
        self.objects[0].words = [["foo", "bar"]]
        f = io.BytesIO()
        self.results.save_to_xml(f)
        f.seek(0)
        doc = ET.parse(f)
        root = doc.getroot()
        eq_("results", root.tag)
        eq_(2, len(root))
        eq_(2, len([c for c in root if c.tag == "group"]))
        g1, g2 = root
        eq_(6, len(g1))
        eq_(3, len([c for c in g1 if c.tag == "file"]))
        eq_(3, len([c for c in g1 if c.tag == "match"]))
        d1, d2, d3 = [c for c in g1 if c.tag == "file"]
        eq_(op.join("basepath", "foo bar"), d1.get("path"))
        eq_(op.join("basepath", "bar bleh"), d2.get("path"))
        eq_(op.join("basepath", "foo bleh"), d3.get("path"))
        eq_("y", d1.get("is_ref"))
        eq_("n", d2.get("is_ref"))
        eq_("n", d3.get("is_ref"))
        eq_("foo,bar", d1.get("words"))
        eq_("bar,bleh", d2.get("words"))
        eq_("foo,bleh", d3.get("words"))
        eq_(3, len(g2))
        eq_(2, len([c for c in g2 if c.tag == "file"]))
        eq_(1, len([c for c in g2 if c.tag == "match"]))
        d1, d2 = [c for c in g2 if c.tag == "file"]
        eq_(op.join("basepath", "ibabtu"), d1.get("path"))
        eq_(op.join("basepath", "ibabtu"), d2.get("path"))
        eq_("n", d1.get("is_ref"))
        eq_("n", d2.get("is_ref"))
        eq_("ibabtu", d1.get("words"))
        eq_("ibabtu", d2.get("words"))

    def test_LoadXML(self):
        def get_file(path):
            return [f for f in self.objects if str(f.path) == path][0]

        self.objects[0].is_ref = True
        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
        f = io.BytesIO()
        self.results.save_to_xml(f)
        f.seek(0)

@@ -504,23 +510,23 @@ class TestCaseResultsXML:
        assert g1[0] is self.objects[0]
        assert g1[1] is self.objects[1]
        assert g1[2] is self.objects[2]
        eq_(["foo", "bar"], g1[0].words)
        eq_(["bar", "bleh"], g1[1].words)
        eq_(["foo", "bleh"], g1[2].words)
        eq_(2, len(g2))
        assert not g2[0].is_ref
        assert not g2[1].is_ref
        assert g2[0] is self.objects[3]
        assert g2[1] is self.objects[4]
        eq_(["ibabtu"], g2[0].words)
        eq_(["ibabtu"], g2[1].words)

    def test_LoadXML_with_filename(self, tmpdir):
        def get_file(path):
            return [f for f in self.objects if str(f.path) == path][0]

        filename = str(tmpdir.join("dupeguru_results.xml"))
        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
        self.results.save_to_xml(filename)
        app = DupeGuru()
        r = Results(app)

@@ -529,11 +535,11 @@ class TestCaseResultsXML:
    def test_LoadXML_with_some_files_that_dont_exist_anymore(self):
        def get_file(path):
            if path.endswith("ibabtu 2"):
                return None
            return [f for f in self.objects if str(f.path) == path][0]

        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
        f = io.BytesIO()
        self.results.save_to_xml(f)
        f.seek(0)

@@ -547,36 +553,36 @@ class TestCaseResultsXML:
        def get_file(path):
            return [f for f in self.objects if str(f.path) == path][0]

        root = ET.Element("foobar")  # The root element shouldn't matter, really.
        group_node = ET.SubElement(root, "group")
        dupe_node = ET.SubElement(group_node, "file")  # Perfectly correct file
        dupe_node.set("path", op.join("basepath", "foo bar"))
        dupe_node.set("is_ref", "y")
        dupe_node.set("words", "foo, bar")
        dupe_node = ET.SubElement(group_node, "file")  # is_ref missing, default to 'n'
        dupe_node.set("path", op.join("basepath", "foo bleh"))
        dupe_node.set("words", "foo, bleh")
        dupe_node = ET.SubElement(group_node, "file")  # words are missing, valid.
        dupe_node.set("path", op.join("basepath", "bar bleh"))
        dupe_node = ET.SubElement(group_node, "file")  # path is missing, invalid.
        dupe_node.set("words", "foo, bleh")
        dupe_node = ET.SubElement(group_node, "foobar")  # Invalid element name
        dupe_node.set("path", op.join("basepath", "bar bleh"))
        dupe_node.set("is_ref", "y")
        dupe_node.set("words", "bar, bleh")
        match_node = ET.SubElement(group_node, "match")  # match pointing to a bad index
        match_node.set("first", "42")
        match_node.set("second", "45")
        match_node = ET.SubElement(group_node, "match")  # match with missing attrs
        match_node = ET.SubElement(group_node, "match")  # match with non-int values
        match_node.set("first", "foo")
        match_node.set("second", "bar")
        match_node.set("percentage", "baz")
        group_node = ET.SubElement(root, "foobar")  # invalid group
        group_node = ET.SubElement(root, "group")  # empty group
        f = io.BytesIO()
        tree = ET.ElementTree(root)
        tree.write(f, encoding="utf-8")
        f.seek(0)
        app = DupeGuru()
        r = Results(app)

@@ -586,16 +592,18 @@ class TestCaseResultsXML:
    def test_xml_non_ascii(self):
        def get_file(path):
            if path == op.join("basepath", "\xe9foo bar"):
                return objects[0]
            if path == op.join("basepath", "bar bleh"):
                return objects[1]

        objects = [NamedObject("\xe9foo bar", True), NamedObject("bar bleh", True)]
        matches = engine.getmatches(objects)  # we should have 5 matches
        groups = engine.get_groups(matches)  # We should have 2 groups
        for g in groups:
            g.prioritize(
                lambda x: objects.index(x)
            )  # We want the dupes to be in the same order as the list is
        app = DupeGuru()
        results = Results(app)
        results.groups = groups

@@ -607,11 +615,11 @@ class TestCaseResultsXML:
        r.load_from_xml(f, get_file)
        g = r.groups[0]
        eq_("\xe9foo bar", g[0].name)
        eq_(["efoo", "bar"], g[0].words)

    def test_load_invalid_xml(self):
        f = io.BytesIO()
        f.write(b"<this is invalid")
        f.seek(0)
        app = DupeGuru()
        r = Results(app)

@@ -623,7 +631,7 @@ class TestCaseResultsXML:
        app = DupeGuru()
        r = Results(app)
        with raises(IOError):
            r.load_from_xml("does_not_exist.xml", None)
        eq_(0, len(r.groups))

    def test_remember_match_percentage(self):

@@ -643,12 +651,12 @@ class TestCaseResultsXML:
        results.load_from_xml(f, self.get_file)
        group = results.groups[0]
        d1, d2, d3 = group
        match = group.get_match_of(d2)  # d1 - d2
        eq_(42, match[2])
        match = group.get_match_of(d3)  # d1 - d3
        eq_(43, match[2])
        group.switch_ref(d2)
        match = group.get_match_of(d3)  # d2 - d3
        eq_(46, match[2])

    def test_save_and_load(self):

@@ -661,13 +669,13 @@ class TestCaseResultsXML:
    def test_apply_filter_works_on_paths(self):
        # apply_filter() searches on the whole path, not just on the filename.
        self.results.apply_filter("basepath")
        eq_(len(self.results.groups), 2)

    def test_save_xml_with_invalid_characters(self):
        # Don't crash when saving files that have invalid xml characters in their path
        self.objects[0].name = "foo\x19"
        self.results.save_to_xml(io.BytesIO())  # don't crash


class TestCaseResultsFilter:

@@ -676,7 +684,7 @@ class TestCaseResultsFilter:
        self.results = self.app.results
        self.objects, self.matches, self.groups = GetTestGroups()
        self.results.groups = self.groups
        self.results.apply_filter(r"foo")

    def test_groups(self):
        eq_(1, len(self.results.groups))

@@ -694,7 +702,7 @@ class TestCaseResultsFilter:
    def test_dupes_reconstructed_filtered(self):
        # make_ref resets self.__dupes to None. When it's reconstructed, we want it filtered
        dupe = self.results.dupes[0]  # 3rd object
        self.results.make_ref(dupe)
        eq_(1, len(self.results.dupes))
        assert self.results.dupes[0] is self.objects[0]

@@ -702,23 +710,23 @@ class TestCaseResultsFilter:
    def test_include_ref_dupes_in_filter(self):
        # When only the ref of a group matches the filter, include it in the group
        self.results.apply_filter(None)
        self.results.apply_filter(r"foo bar")
        eq_(1, len(self.results.groups))
        eq_(0, len(self.results.dupes))

    def test_filters_build_on_one_another(self):
        self.results.apply_filter(r"bar")
        eq_(1, len(self.results.groups))
        eq_(0, len(self.results.dupes))

    def test_stat_line(self):
        expected = "0 / 1 (0.00 B / 1.00 B) duplicates marked. filter: foo"
        eq_(expected, self.results.stat_line)
        self.results.apply_filter(r"bar")
        expected = "0 / 0 (0.00 B / 0.00 B) duplicates marked. filter: foo --> bar"
        eq_(expected, self.results.stat_line)
        self.results.apply_filter(None)
        expected = "0 / 3 (0.00 B / 1.01 KB) duplicates marked."
        eq_(expected, self.results.stat_line)

    def test_mark_count_is_filtered_as_well(self):

@@ -726,8 +734,8 @@ class TestCaseResultsFilter:
        # We don't want to perform mark_all() because we want the mark list to contain objects
        for dupe in self.results.dupes:
            self.results.mark(dupe)
        self.results.apply_filter(r"foo")
        expected = "1 / 1 (1.00 B / 1.00 B) duplicates marked. filter: foo"
        eq_(expected, self.results.stat_line)

    def test_mark_all_only_affects_filtered_items(self):

@@ -739,22 +747,22 @@ class TestCaseResultsFilter:
    def test_sort_groups(self):
        self.results.apply_filter(None)
        self.results.make_ref(self.objects[1])  # to have the 1024 B object as ref
        g1, g2 = self.groups
        self.results.apply_filter("a")  # Matches both groups
        self.results.sort_groups("size")
        assert self.results.groups[0] is g2
        assert self.results.groups[1] is g1
        self.results.apply_filter(None)
        assert self.results.groups[0] is g2
        assert self.results.groups[1] is g1
        self.results.sort_groups("size", False)
        self.results.apply_filter("a")
        assert self.results.groups[1] is g2
        assert self.results.groups[0] is g1

    def test_set_group(self):
        # We want the new group to be filtered
        self.objects, self.matches, self.groups = GetTestGroups()
        self.results.groups = self.groups
        eq_(1, len(self.results.groups))

@@ -764,12 +772,12 @@ class TestCaseResultsFilter:
        def get_file(path):
            return [f for f in self.objects if str(f.path) == path][0]

        filename = str(tmpdir.join("dupeguru_results.xml"))
        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
        self.results.save_to_xml(filename)
        app = DupeGuru()
        r = Results(app)
        r.apply_filter("foo")
        r.load_from_xml(filename, get_file)
        eq_(2, len(r.groups))

@@ -778,7 +786,7 @@ class TestCaseResultsFilter:
        self.results.apply_filter(None)
        eq_(2, len(self.results.groups))
        eq_(2, len(self.results.dupes))
        self.results.apply_filter("ibabtu")
        self.results.remove_duplicates([self.results.dupes[0]])
        self.results.apply_filter(None)
        eq_(1, len(self.results.groups))

@@ -786,7 +794,7 @@ class TestCaseResultsFilter:
    def test_filter_is_case_insensitive(self):
        self.results.apply_filter(None)
        self.results.apply_filter("FOO")
        eq_(1, len(self.results.dupes))

    def test_make_ref_on_filtered_out_doesnt_mess_stats(self):

@@ -794,13 +802,15 @@ class TestCaseResultsFilter:
        # When calling make_ref on such a dupe, the total size and dupe count stats get messed up
        # because they are *not* counted in the stats in the first place.
        g1, g2 = self.groups
        bar_bleh = g1[1]  # The "bar bleh" dupe is filtered out
        self.results.make_ref(bar_bleh)
        # Now the stats should display *2* markable dupes (instead of 1)
        expected = "0 / 2 (0.00 B / 2.00 B) duplicates marked. filter: foo"
        eq_(expected, self.results.stat_line)
        self.results.apply_filter(
            None
        )  # Now let's make sure our unfiltered results aren't fucked up
        expected = "0 / 3 (0.00 B / 3.00 B) duplicates marked."
        eq_(expected, self.results.stat_line)

@@ -814,6 +824,5 @@ class TestCaseResultsRefFile:
        self.results.groups = self.groups

    def test_stat_line(self):
        expected = "0 / 2 (0.00 B / 2.00 B) duplicates marked."
        eq_(expected, self.results.stat_line)
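
The save/load tests above fix the on-disk schema: a results root, group children, and file/match children whose attributes (path, words, is_ref, marked, first, second, percentage) all appear in the assertions. A small ElementTree sketch of that shape, for illustration only; the concrete values are placeholders.

# Rebuilds the document shape asserted above; all values are placeholders.
import xml.etree.ElementTree as ET

root = ET.Element("results")
group = ET.SubElement(root, "group")
for path, words, is_ref in [("basepath/foo bar", "foo,bar", "y"),
                            ("basepath/bar bleh", "bar,bleh", "n")]:
    file_node = ET.SubElement(group, "file")
    file_node.set("path", path)
    file_node.set("words", words)
    file_node.set("is_ref", is_ref)
    file_node.set("marked", "n")
match = ET.SubElement(group, "match")
match.set("first", "0")         # index of the first file in the group
match.set("second", "1")        # index of the second file
match.set("percentage", "42")   # remembered match percentage
print(ET.tostring(root).decode())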
@@ -14,6 +14,7 @@ from ..ignore import IgnoreList
from ..scanner import Scanner, ScanType
from ..me.scanner import ScannerME


class NamedObject:
    def __init__(self, name="foobar", size=1, path=None):
        if path is None:

@@ -26,22 +27,25 @@ class NamedObject:
        self.words = getwords(name)

    def __repr__(self):
        return "<NamedObject %r %r>" % (self.name, self.path)


no = NamedObject


def pytest_funcarg__fake_fileexists(request):
    # This is a hack to avoid invalidating all previous tests since the scanner started to test
    # for file existence before doing the match grouping.
    monkeypatch = request.getfuncargvalue("monkeypatch")
    monkeypatch.setattr(Path, "exists", lambda _: True)


def test_empty(fake_fileexists):
    s = Scanner()
    r = s.get_dupe_groups([])
    eq_(r, [])


def test_default_settings(fake_fileexists):
    s = Scanner()
    eq_(s.min_match_percentage, 80)

@@ -50,40 +54,54 @@ def test_default_settings(fake_fileexists):
    eq_(s.word_weighting, False)
    eq_(s.match_similar_words, False)


def test_simple_with_default_settings(fake_fileexists):
    s = Scanner()
    f = [no("foo bar", path="p1"), no("foo bar", path="p2"), no("foo bleh")]
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)
    g = r[0]
    # 'foo bleh' cannot be in the group because the default min match % is 80
    eq_(len(g), 2)
    assert g.ref in f[:2]
    assert g.dupes[0] in f[:2]


def test_simple_with_lower_min_match(fake_fileexists):
    s = Scanner()
    s.min_match_percentage = 50
    f = [no("foo bar", path="p1"), no("foo bar", path="p2"), no("foo bleh")]
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)
    g = r[0]
    eq_(len(g), 3)


def test_trim_all_ref_groups(fake_fileexists):
    # When all files of a group are ref, don't include that group in the results, but also don't
    # count the files from that group as discarded.
    s = Scanner()
    f = [
        no("foo", path="p1"),
        no("foo", path="p2"),
        no("bar", path="p1"),
        no("bar", path="p2"),
    ]
    f[2].is_ref = True
    f[3].is_ref = True
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)
    eq_(s.discarded_file_count, 0)


def test_priorize(fake_fileexists):
    s = Scanner()
    f = [
        no("foo", path="p1"),
        no("foo", path="p2"),
        no("bar", path="p1"),
        no("bar", path="p2"),
    ]
    f[1].size = 2
    f[2].size = 3
    f[3].is_ref = True

@@ -94,17 +112,19 @@ def test_priorize(fake_fileexists):
    assert f[3] in (g1.ref, g2.ref)
    assert f[2] in (g1.dupes[0], g2.dupes[0])


def test_content_scan(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents
    f = [no("foo"), no("bar"), no("bleh")]
    f[0].md5 = f[0].md5partial = "foobar"
    f[1].md5 = f[1].md5partial = "foobar"
    f[2].md5 = f[2].md5partial = "bleh"
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)
    eq_(len(r[0]), 2)
    eq_(s.discarded_file_count, 0)  # don't count the different md5 as discarded!


def test_content_scan_compare_sizes_first(fake_fileexists):
    class MyFile(no):

@@ -114,16 +134,17 @@ def test_content_scan_compare_sizes_first(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents
    f = [MyFile("foo", 1), MyFile("bar", 2)]
    eq_(len(s.get_dupe_groups(f)), 0)


def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents
    f = [no("foo"), no("bar"), no("bleh")]
    f[0].md5 = f[0].md5partial = "foobar"
    f[1].md5 = f[1].md5partial = "foobar"
    f[2].md5 = f[2].md5partial = "bleh"
    s.min_match_percentage = 101
    r = s.get_dupe_groups(f)
    eq_(len(r), 1)

@@ -133,157 +154,180 @@ def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
    eq_(len(r), 1)
    eq_(len(r[0]), 2)


def test_content_scan_doesnt_put_md5_in_words_at_the_end(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.Contents
    f = [no("foo"), no("bar")]
    f[0].md5 = f[
        0
    ].md5partial = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    f[1].md5 = f[
        1
    ].md5partial = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    r = s.get_dupe_groups(f)
    r[0]
def test_extension_is_not_counted_in_filename_scan(fake_fileexists): def test_extension_is_not_counted_in_filename_scan(fake_fileexists):
s = Scanner() s = Scanner()
s.min_match_percentage = 100 s.min_match_percentage = 100
f = [no('foo.bar'), no('foo.bleh')] f = [no("foo.bar"), no("foo.bleh")]
r = s.get_dupe_groups(f) r = s.get_dupe_groups(f)
eq_(len(r), 1) eq_(len(r), 1)
eq_(len(r[0]), 2) eq_(len(r[0]), 2)
def test_job(fake_fileexists): def test_job(fake_fileexists):
def do_progress(progress, desc=''): def do_progress(progress, desc=""):
log.append(progress) log.append(progress)
return True return True
s = Scanner() s = Scanner()
log = [] log = []
f = [no('foo bar'), no('foo bar'), no('foo bleh')] f = [no("foo bar"), no("foo bar"), no("foo bleh")]
s.get_dupe_groups(f, j=job.Job(1, do_progress)) s.get_dupe_groups(f, j=job.Job(1, do_progress))
eq_(log[0], 0) eq_(log[0], 0)
eq_(log[-1], 100) eq_(log[-1], 100)
def test_mix_file_kind(fake_fileexists): def test_mix_file_kind(fake_fileexists):
s = Scanner() s = Scanner()
s.mix_file_kind = False s.mix_file_kind = False
f = [no('foo.1'), no('foo.2')] f = [no("foo.1"), no("foo.2")]
r = s.get_dupe_groups(f) r = s.get_dupe_groups(f)
eq_(len(r), 0) eq_(len(r), 0)
def test_word_weighting(fake_fileexists): def test_word_weighting(fake_fileexists):
s = Scanner() s = Scanner()
s.min_match_percentage = 75 s.min_match_percentage = 75
s.word_weighting = True s.word_weighting = True
f = [no('foo bar'), no('foo bar bleh')] f = [no("foo bar"), no("foo bar bleh")]
r = s.get_dupe_groups(f) r = s.get_dupe_groups(f)
eq_(len(r), 1) eq_(len(r), 1)
g = r[0] g = r[0]
m = g.get_match_of(g.dupes[0]) m = g.get_match_of(g.dupes[0])
eq_(m.percentage, 75) # 16 letters, 12 matching eq_(m.percentage, 75) # 16 letters, 12 matching
def test_similar_words(fake_fileexists): def test_similar_words(fake_fileexists):
s = Scanner() s = Scanner()
s.match_similar_words = True s.match_similar_words = True
f = [no('The White Stripes'), no('The Whites Stripe'), no('Limp Bizkit'), no('Limp Bizkitt')] f = [
no("The White Stripes"),
no("The Whites Stripe"),
no("Limp Bizkit"),
no("Limp Bizkitt"),
]
r = s.get_dupe_groups(f) r = s.get_dupe_groups(f)
eq_(len(r), 2) eq_(len(r), 2)
def test_fields(fake_fileexists): def test_fields(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Fields s.scan_type = ScanType.Fields
f = [no('The White Stripes - Little Ghost'), no('The White Stripes - Little Acorn')] f = [no("The White Stripes - Little Ghost"), no("The White Stripes - Little Acorn")]
r = s.get_dupe_groups(f) r = s.get_dupe_groups(f)
eq_(len(r), 0) eq_(len(r), 0)
def test_fields_no_order(fake_fileexists): def test_fields_no_order(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.FieldsNoOrder s.scan_type = ScanType.FieldsNoOrder
f = [no('The White Stripes - Little Ghost'), no('Little Ghost - The White Stripes')] f = [no("The White Stripes - Little Ghost"), no("Little Ghost - The White Stripes")]
r = s.get_dupe_groups(f) r = s.get_dupe_groups(f)
eq_(len(r), 1) eq_(len(r), 1)
def test_tag_scan(fake_fileexists): def test_tag_scan(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Tag s.scan_type = ScanType.Tag
o1 = no('foo') o1 = no("foo")
o2 = no('bar') o2 = no("bar")
o1.artist = 'The White Stripes' o1.artist = "The White Stripes"
o1.title = 'The Air Near My Fingers' o1.title = "The Air Near My Fingers"
o2.artist = 'The White Stripes' o2.artist = "The White Stripes"
o2.title = 'The Air Near My Fingers' o2.title = "The Air Near My Fingers"
r = s.get_dupe_groups([o1, o2]) r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1) eq_(len(r), 1)
def test_tag_with_album_scan(fake_fileexists): def test_tag_with_album_scan(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Tag s.scan_type = ScanType.Tag
s.scanned_tags = set(['artist', 'album', 'title']) s.scanned_tags = set(["artist", "album", "title"])
o1 = no('foo') o1 = no("foo")
o2 = no('bar') o2 = no("bar")
o3 = no('bleh') o3 = no("bleh")
o1.artist = 'The White Stripes' o1.artist = "The White Stripes"
o1.title = 'The Air Near My Fingers' o1.title = "The Air Near My Fingers"
o1.album = 'Elephant' o1.album = "Elephant"
o2.artist = 'The White Stripes' o2.artist = "The White Stripes"
o2.title = 'The Air Near My Fingers' o2.title = "The Air Near My Fingers"
o2.album = 'Elephant' o2.album = "Elephant"
o3.artist = 'The White Stripes' o3.artist = "The White Stripes"
o3.title = 'The Air Near My Fingers' o3.title = "The Air Near My Fingers"
o3.album = 'foobar' o3.album = "foobar"
r = s.get_dupe_groups([o1, o2, o3]) r = s.get_dupe_groups([o1, o2, o3])
eq_(len(r), 1) eq_(len(r), 1)
def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists): def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Tag s.scan_type = ScanType.Tag
s.scanned_tags = set(['artist', 'album', 'title']) s.scanned_tags = set(["artist", "album", "title"])
s.min_match_percentage = 50 s.min_match_percentage = 50
o1 = no('foo') o1 = no("foo")
o2 = no('bar') o2 = no("bar")
o1.artist = 'The White Stripes - a' o1.artist = "The White Stripes - a"
o1.title = 'The Air Near My Fingers - a' o1.title = "The Air Near My Fingers - a"
o1.album = 'Elephant - a' o1.album = "Elephant - a"
o2.artist = 'The White Stripes - b' o2.artist = "The White Stripes - b"
o2.title = 'The Air Near My Fingers - b' o2.title = "The Air Near My Fingers - b"
o2.album = 'Elephant - b' o2.album = "Elephant - b"
r = s.get_dupe_groups([o1, o2]) r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1) eq_(len(r), 1)
def test_tag_scan_with_different_scanned(fake_fileexists): def test_tag_scan_with_different_scanned(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Tag s.scan_type = ScanType.Tag
s.scanned_tags = set(['track', 'year']) s.scanned_tags = set(["track", "year"])
o1 = no('foo') o1 = no("foo")
o2 = no('bar') o2 = no("bar")
o1.artist = 'The White Stripes' o1.artist = "The White Stripes"
o1.title = 'some title' o1.title = "some title"
o1.track = 'foo' o1.track = "foo"
o1.year = 'bar' o1.year = "bar"
o2.artist = 'The White Stripes' o2.artist = "The White Stripes"
o2.title = 'another title' o2.title = "another title"
o2.track = 'foo' o2.track = "foo"
o2.year = 'bar' o2.year = "bar"
r = s.get_dupe_groups([o1, o2]) r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1) eq_(len(r), 1)
def test_tag_scan_only_scans_existing_tags(fake_fileexists): def test_tag_scan_only_scans_existing_tags(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Tag s.scan_type = ScanType.Tag
s.scanned_tags = set(['artist', 'foo']) s.scanned_tags = set(["artist", "foo"])
o1 = no('foo') o1 = no("foo")
o2 = no('bar') o2 = no("bar")
o1.artist = 'The White Stripes' o1.artist = "The White Stripes"
o1.foo = 'foo' o1.foo = "foo"
o2.artist = 'The White Stripes' o2.artist = "The White Stripes"
o2.foo = 'bar' o2.foo = "bar"
r = s.get_dupe_groups([o1, o2]) r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1) # Because 'foo' is not scanned, they match eq_(len(r), 1) # Because 'foo' is not scanned, they match
def test_tag_scan_converts_to_str(fake_fileexists): def test_tag_scan_converts_to_str(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Tag s.scan_type = ScanType.Tag
s.scanned_tags = set(['track']) s.scanned_tags = set(["track"])
o1 = no('foo') o1 = no("foo")
o2 = no('bar') o2 = no("bar")
o1.track = 42 o1.track = 42
o2.track = 42 o2.track = 42
try: try:
@ -292,28 +336,30 @@ def test_tag_scan_converts_to_str(fake_fileexists):
raise AssertionError() raise AssertionError()
eq_(len(r), 1) eq_(len(r), 1)
def test_tag_scan_non_ascii(fake_fileexists): def test_tag_scan_non_ascii(fake_fileexists):
s = Scanner() s = Scanner()
s.scan_type = ScanType.Tag s.scan_type = ScanType.Tag
s.scanned_tags = set(['title']) s.scanned_tags = set(["title"])
o1 = no('foo') o1 = no("foo")
o2 = no('bar') o2 = no("bar")
o1.title = 'foobar\u00e9' o1.title = "foobar\u00e9"
o2.title = 'foobar\u00e9' o2.title = "foobar\u00e9"
try: try:
r = s.get_dupe_groups([o1, o2]) r = s.get_dupe_groups([o1, o2])
except UnicodeEncodeError: except UnicodeEncodeError:
raise AssertionError() raise AssertionError()
eq_(len(r), 1) eq_(len(r), 1)
def test_ignore_list(fake_fileexists): def test_ignore_list(fake_fileexists):
s = Scanner() s = Scanner()
f1 = no('foobar') f1 = no("foobar")
f2 = no('foobar') f2 = no("foobar")
f3 = no('foobar') f3 = no("foobar")
f1.path = Path('dir1/foobar') f1.path = Path("dir1/foobar")
f2.path = Path('dir2/foobar') f2.path = Path("dir2/foobar")
f3.path = Path('dir3/foobar') f3.path = Path("dir3/foobar")
ignore_list = IgnoreList() ignore_list = IgnoreList()
ignore_list.Ignore(str(f1.path), str(f2.path)) ignore_list.Ignore(str(f1.path), str(f2.path))
ignore_list.Ignore(str(f1.path), str(f3.path)) ignore_list.Ignore(str(f1.path), str(f3.path))
@ -327,16 +373,17 @@ def test_ignore_list(fake_fileexists):
# Ignored matches are not counted as discarded # Ignored matches are not counted as discarded
eq_(s.discarded_file_count, 0) eq_(s.discarded_file_count, 0)
def test_ignore_list_checks_for_unicode(fake_fileexists): def test_ignore_list_checks_for_unicode(fake_fileexists):
#scanner was calling path_str for ignore list checks. Since the Path changes, it must # scanner was calling path_str for ignore list checks. Since the Path changes, it must
#be unicode(path) # be unicode(path)
s = Scanner() s = Scanner()
f1 = no('foobar') f1 = no("foobar")
f2 = no('foobar') f2 = no("foobar")
f3 = no('foobar') f3 = no("foobar")
f1.path = Path('foo1\u00e9') f1.path = Path("foo1\u00e9")
f2.path = Path('foo2\u00e9') f2.path = Path("foo2\u00e9")
f3.path = Path('foo3\u00e9') f3.path = Path("foo3\u00e9")
ignore_list = IgnoreList() ignore_list = IgnoreList()
ignore_list.Ignore(str(f1.path), str(f2.path)) ignore_list.Ignore(str(f1.path), str(f2.path))
ignore_list.Ignore(str(f1.path), str(f3.path)) ignore_list.Ignore(str(f1.path), str(f3.path))
@ -348,6 +395,7 @@ def test_ignore_list_checks_for_unicode(fake_fileexists):
assert f2 in g assert f2 in g
assert f3 in g assert f3 in g


def test_file_evaluates_to_false(fake_fileexists):
    # A very wrong way to use any() was added at some point, causing the resulting group list
    # to be empty.
@@ -355,19 +403,19 @@ def test_file_evaluates_to_false(fake_fileexists):
        def __bool__(self):
            return False

    s = Scanner()
    f1 = FalseNamedObject("foobar", path="p1")
    f2 = FalseNamedObject("foobar", path="p2")
    r = s.get_dupe_groups([f1, f2])
    eq_(len(r), 1)


def test_size_threshold(fake_fileexists):
    # Only files equal to or higher than size_threshold in size are scanned
    s = Scanner()
    f1 = no("foo", 1, path="p1")
    f2 = no("foo", 2, path="p2")
    f3 = no("foo", 3, path="p3")
    s.size_threshold = 2
    groups = s.get_dupe_groups([f1, f2, f3])
    eq_(len(groups), 1)
@@ -377,48 +425,52 @@ def test_size_threshold(fake_fileexists):
    assert f2 in group
    assert f3 in group


def test_tie_breaker_path_deepness(fake_fileexists):
    # If there is a tie in prioritization, path deepness is used as a tie breaker
    s = Scanner()
    o1, o2 = no("foo"), no("foo")
    o1.path = Path("foo")
    o2.path = Path("foo/bar")
    [group] = s.get_dupe_groups([o1, o2])
    assert group.ref is o2


def test_tie_breaker_copy(fake_fileexists):
    # if copy is in the words used (even if it has a deeper path), it becomes a dupe
    s = Scanner()
    o1, o2 = no("foo bar Copy"), no("foo bar")
    o1.path = Path("deeper/path")
    o2.path = Path("foo")
    [group] = s.get_dupe_groups([o1, o2])
    assert group.ref is o2


def test_tie_breaker_same_name_plus_digit(fake_fileexists):
    # if ref has the same words as dupe but has just one extra word, which is a digit, it
    # becomes a dupe
    s = Scanner()
    o1 = no("foo bar 42")
    o2 = no("foo bar [42]")
    o3 = no("foo bar (42)")
    o4 = no("foo bar {42}")
    o5 = no("foo bar")
    # all numbered names have deeper paths, so they'll end up ref if the digits aren't correctly
    # used as tie breakers
    o1.path = Path("deeper/path")
    o2.path = Path("deeper/path")
    o3.path = Path("deeper/path")
    o4.path = Path("deeper/path")
    o5.path = Path("foo")
    [group] = s.get_dupe_groups([o1, o2, o3, o4, o5])
    assert group.ref is o5


def test_partial_group_match(fake_fileexists):
    # Count the number of discarded matches (when a file doesn't match all other dupes of the
    # group) in Scanner.discarded_file_count
    s = Scanner()
    o1, o2, o3 = no("a b"), no("a"), no("b")
    s.min_match_percentage = 50
    [group] = s.get_dupe_groups([o1, o2, o3])
    eq_(len(group), 2)
@@ -431,6 +483,7 @@ def test_partial_group_match(fake_fileexists):
    assert o3 in group
    eq_(s.discarded_file_count, 1)


def test_dont_group_files_that_dont_exist(tmpdir):
    # when creating groups, check that files exist first. It's possible that these files have
    # been moved during the scan by the user.
@@ -439,8 +492,8 @@ def test_dont_group_files_that_dont_exist(tmpdir):
    s = Scanner()
    s.scan_type = ScanType.Contents
    p = Path(str(tmpdir))
    p["file1"].open("w").write("foo")
    p["file2"].open("w").write("foo")
    file1, file2 = fs.get_files(p)

    def getmatches(*args, **kw):
@@ -451,6 +504,7 @@ def test_dont_group_files_that_dont_exist(tmpdir):
    assert not s.get_dupe_groups([file1, file2])


def test_folder_scan_exclude_subfolder_matches(fake_fileexists):
    # when doing a Folders scan type, don't include matches for folders whose parent folder already
    # match.
@@ -458,31 +512,33 @@ def test_folder_scan_exclude_subfolder_matches(fake_fileexists):
    s.scan_type = ScanType.Folders
    topf1 = no("top folder 1", size=42)
    topf1.md5 = topf1.md5partial = b"some_md5_1"
    topf1.path = Path("/topf1")
    topf2 = no("top folder 2", size=42)
    topf2.md5 = topf2.md5partial = b"some_md5_1"
    topf2.path = Path("/topf2")
    subf1 = no("sub folder 1", size=41)
    subf1.md5 = subf1.md5partial = b"some_md5_2"
    subf1.path = Path("/topf1/sub")
    subf2 = no("sub folder 2", size=41)
    subf2.md5 = subf2.md5partial = b"some_md5_2"
    subf2.path = Path("/topf2/sub")
    eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2])), 1)  # only top folders
    # however, if another folder matches a subfolder, keep it in the matches
    otherf = no("other folder", size=41)
    otherf.md5 = otherf.md5partial = b"some_md5_2"
    otherf.path = Path("/otherfolder")
    eq_(len(s.get_dupe_groups([topf1, topf2, subf1, subf2, otherf])), 2)


def test_ignore_files_with_same_path(fake_fileexists):
    # It's possible that the scanner is fed with two file instances pointing to the same path. One
    # of these files has to be ignored
    s = Scanner()
    f1 = no("foobar", path="path1/foobar")
    f2 = no("foobar", path="path1/foobar")
    eq_(s.get_dupe_groups([f1, f2]), [])


def test_dont_count_ref_files_as_discarded(fake_fileexists):
    # To speed up the scan, we don't bother comparing contents of files that are both ref files.
    # However, this causes problems in "discarded" counting and we make sure here that we don't
@@ -492,20 +548,20 @@ def test_dont_count_ref_files_as_discarded(fake_fileexists):
    o1 = no("foo", path="p1")
    o2 = no("foo", path="p2")
    o3 = no("foo", path="p3")
    o1.md5 = o1.md5partial = "foobar"
    o2.md5 = o2.md5partial = "foobar"
    o3.md5 = o3.md5partial = "foobar"
    o1.is_ref = True
    o2.is_ref = True
    eq_(len(s.get_dupe_groups([o1, o2, o3])), 1)
    eq_(s.discarded_file_count, 0)


def test_priorize_me(fake_fileexists):
    # in ScannerME, bitrate goes first (right after is_ref) in priorization
    s = ScannerME()
    o1, o2 = no("foo", path="p1"), no("foo", path="p2")
    o1.bitrate = 1
    o2.bitrate = 2
    [group] = s.get_dupe_groups([o1, o2])
    assert group.ref is o2

View File

@@ -8,35 +8,41 @@ import time
from hscommon.util import format_time_decimal


def format_timestamp(t, delta):
    if delta:
        return format_time_decimal(t)
    else:
        if t > 0:
            return time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(t))
        else:
            return "---"


def format_words(w):
    def do_format(w):
        if isinstance(w, list):
            return "(%s)" % ", ".join(do_format(item) for item in w)
        else:
            return w.replace("\n", " ")

    return ", ".join(do_format(item) for item in w)
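
# Illustrative annotation (added by the editor, not part of the original source):
# format_words() flattens a possibly nested word list into one string, e.g.
# format_words([["white", "stripes"], "live"]) -> "(white, stripes), live".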


def format_perc(p):
    return "%0.0f" % p


def format_dupe_count(c):
    return str(c) if c else "---"


def cmp_value(dupe, attrname):
    value = getattr(dupe, attrname, "")
    return value.lower() if isinstance(value, str) else value


def fix_surrogate_encoding(s, encoding="utf-8"):
    # ref #210. It's possible to end up with file paths that, while correct unicode strings, are
    # decoded with the 'surrogateescape' option, which makes the string unencodable to utf-8. We fix
    # these strings here by trying to encode them and, if it fails, we do an encode/decode dance
@@ -49,8 +55,6 @@ def fix_surrogate_encoding(s, encoding='utf-8'):
    try:
        s.encode(encoding)
    except UnicodeEncodeError:
        return s.encode(encoding, "replace").decode(encoding)
    else:
        return s
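
# Illustrative annotation (added by the editor, not part of the original source): a
# string decoded with surrogateescape, e.g. "foo" + "\udce9", raises UnicodeEncodeError
# on .encode("utf-8"); the "replace" dance above turns it into the encodable "foo?".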

View File

@@ -26,7 +26,8 @@ import modulefinder
from setuptools import setup, Extension

from .plat import ISWINDOWS
from .util import ensure_folder, delete_files_with_pattern


def print_and_do(cmd):
    """Prints ``cmd`` and executes it in the shell.
@@ -35,6 +36,7 @@ def print_and_do(cmd):
    p = Popen(cmd, shell=True)
    return p.wait()


def _perform(src, dst, action, actionname):
    if not op.lexists(src):
        print("Copying %s failed: it doesn't exist." % src)
@@ -44,26 +46,32 @@ def _perform(src, dst, action, actionname):
        shutil.rmtree(dst)
    else:
        os.remove(dst)
    print("%s %s --> %s" % (actionname, src, dst))
    action(src, dst)


def copy_file_or_folder(src, dst):
    if op.isdir(src):
        shutil.copytree(src, dst, symlinks=True)
    else:
        shutil.copy(src, dst)


def move(src, dst):
    _perform(src, dst, os.rename, "Moving")


def copy(src, dst):
    _perform(src, dst, copy_file_or_folder, "Copying")


def symlink(src, dst):
    _perform(src, dst, os.symlink, "Symlinking")


def hardlink(src, dst):
    _perform(src, dst, os.link, "Hardlinking")


def _perform_on_all(pattern, dst, action):
    # pattern is a glob pattern, example "folder/foo*". The file is moved directly in dst, no folder
@@ -73,12 +81,15 @@ def _perform_on_all(pattern, dst, action):
        destpath = op.join(dst, op.basename(fn))
        action(fn, destpath)


def move_all(pattern, dst):
    _perform_on_all(pattern, dst, move)


def copy_all(pattern, dst):
    _perform_on_all(pattern, dst, copy)
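
# Illustrative annotation (added by the editor, not part of the original source):
# move_all("build/*.so", "dist") moves every matching file directly into dist/,
# flattening folders, since _perform_on_all() joins dst with each file's basename only.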


def ensure_empty_folder(path):
    """Make sure that the path exists and that it's an empty folder.
    """
@@ -86,43 +97,54 @@ def ensure_empty_folder(path):
        shutil.rmtree(path)
    os.mkdir(path)


def filereplace(filename, outfilename=None, **kwargs):
    """Reads `filename`, replaces all {variables} in kwargs, and writes the result to `outfilename`.
    """
    if outfilename is None:
        outfilename = filename
    fp = open(filename, "rt", encoding="utf-8")
    contents = fp.read()
    fp.close()
    # We can't use str.format() because in some files, there might be {} characters that mess with it.
    for key, item in kwargs.items():
        contents = contents.replace("{{{}}}".format(key), item)
    fp = open(outfilename, "wt", encoding="utf-8")
    fp.write(contents)
    fp.close()
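
# Illustrative annotation (added by the editor, not part of the original source): given
# a template containing "{language}", filereplace("conf.tmpl", "conf.py", language="en")
# writes conf.py with that placeholder replaced by "en".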


def get_module_version(modulename):
    mod = importlib.import_module(modulename)
    return mod.__version__


def setup_package_argparser(parser):
    parser.add_argument(
        "--sign",
        dest="sign_identity",
        help="Sign app under specified identity before packaging (OS X only)",
    )
    parser.add_argument(
        "--nosign",
        action="store_true",
        dest="nosign",
        help="Don't sign the packaged app (OS X only)",
    )
    parser.add_argument(
        "--src-pkg",
        action="store_true",
        dest="src_pkg",
        help="Build a tar.gz of the current source.",
    )
    parser.add_argument(
        "--arch-pkg",
        action="store_true",
        dest="arch_pkg",
        help="Force Arch Linux packaging type, regardless of distro name.",
    )


# `args` come from an ArgumentParser updated with setup_package_argparser()
def package_cocoa_app_in_dmg(app_path, destfolder, args):
    # Rather than signing our app in XCode during the build phase, we sign it during the package
@@ -130,7 +152,9 @@ def package_cocoa_app_in_dmg(app_path, destfolder, args):
    # a valid signature.
    if args.sign_identity:
        sign_identity = "Developer ID Application: {}".format(args.sign_identity)
        result = print_and_do(
            'codesign --force --deep --sign "{}" "{}"'.format(sign_identity, app_path)
        )
        if result != 0:
            print("ERROR: Signing failed. Aborting packaging.")
            return
@@ -139,23 +163,31 @@ def package_cocoa_app_in_dmg(app_path, destfolder, args):
        return
    build_dmg(app_path, destfolder)


def build_dmg(app_path, destfolder):
    """Builds a DMG volume with application at ``app_path`` and puts it in ``dest_path``.

    The name of the resulting DMG volume is determined by the app's name and version.
    """
    print(repr(op.join(app_path, "Contents", "Info.plist")))
    plist = plistlib.readPlist(op.join(app_path, "Contents", "Info.plist"))
    workpath = tempfile.mkdtemp()
    dmgpath = op.join(workpath, plist["CFBundleName"])
    os.mkdir(dmgpath)
    print_and_do('cp -R "%s" "%s"' % (app_path, dmgpath))
    print_and_do('ln -s /Applications "%s"' % op.join(dmgpath, "Applications"))
    dmgname = "%s_osx_%s.dmg" % (
        plist["CFBundleName"].lower().replace(" ", "_"),
        plist["CFBundleVersion"].replace(".", "_"),
    )
    print("Building %s" % dmgname)
    # UDBZ = bzip compression. UDZO (zip compression) was used before, but it compresses much less.
    print_and_do(
        'hdiutil create "%s" -format UDBZ -nocrossdev -srcdir "%s"'
        % (op.join(destfolder, dmgname), dmgpath)
    )
    print("Build Complete")


def copy_sysconfig_files_for_embed(destpath):
    # This normally shouldn't be needed for Python 3.3+.
@@ -163,24 +195,28 @@ def copy_sysconfig_files_for_embed(destpath):
    configh = sysconfig.get_config_h_filename()
    shutil.copy(makefile, destpath)
    shutil.copy(configh, destpath)
    with open(op.join(destpath, "site.py"), "w") as fp:
        fp.write(
            """
import os.path as op
from distutils import sysconfig
sysconfig.get_makefile_filename = lambda: op.join(op.dirname(__file__), 'Makefile')
sysconfig.get_config_h_filename = lambda: op.join(op.dirname(__file__), 'pyconfig.h')
"""
        )


def add_to_pythonpath(path):
    """Adds ``path`` to both ``PYTHONPATH`` env and ``sys.path``.
    """
    abspath = op.abspath(path)
    pythonpath = os.environ.get("PYTHONPATH", "")
    pathsep = ";" if ISWINDOWS else ":"
    pythonpath = pathsep.join([abspath, pythonpath]) if pythonpath else abspath
    os.environ["PYTHONPATH"] = pythonpath
    sys.path.insert(1, abspath)


# This is a method to hack around those freakingly tricky data inclusion/exclusion rules
# in setuptools. We copy the packages *without data* in a build folder and then build the plugin
# from there.
@@ -195,14 +231,16 @@ def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
    create_links = False
    if not extra_ignores:
        extra_ignores = []
    ignore = shutil.ignore_patterns(
        ".hg*", "tests", "testdata", "modules", "docs", "locale", *extra_ignores
    )
    for package_name in packages_names:
        if op.exists(package_name):
            source_path = package_name
        else:
            mod = __import__(package_name)
            source_path = mod.__file__
            if mod.__file__.endswith("__init__.py"):
                source_path = op.dirname(source_path)
        dest_name = op.basename(source_path)
        dest_path = op.join(dest, dest_name)
@@ -220,58 +258,81 @@ def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
        else:
            shutil.copy(source_path, dest_path)


def copy_qt_plugins(folder_names, dest):  # This is only for Windows
    from PyQt5.QtCore import QLibraryInfo

    qt_plugin_dir = QLibraryInfo.location(QLibraryInfo.PluginsPath)

    def ignore(path, names):
        if path == qt_plugin_dir:
            return [n for n in names if n not in folder_names]
        else:
            return [n for n in names if not n.endswith(".dll")]

    shutil.copytree(qt_plugin_dir, dest, ignore=ignore)


def build_debian_changelog(
    changelogpath,
    destfile,
    pkgname,
    from_version=None,
    distribution="precise",
    fix_version=None,
):
    """Builds a debian changelog out of a YAML changelog.

    Use fix_version to patch the top changelog to that version (if, for example, there was a
    packaging error and you need to quickly fix it)
    """

    def desc2list(desc):
        # We take each item, enumerated with the '*' character, and transform it into a list.
        desc = desc.replace("\n", " ")
        desc = desc.replace("  ", " ")
        result = desc.split("*")
        return [s.strip() for s in result if s.strip()]
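
    # Illustrative annotation (added by the editor, not part of the original source):
    # desc2list("* Fixed the scanner\n* Added tests") -> ["Fixed the scanner", "Added tests"]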

    ENTRY_MODEL = (
        "{pkg} ({version}-1) {distribution}; urgency=low\n\n{changes}\n "
        "-- Virgil Dupras <hsoft@hardcoded.net> {date}\n\n"
    )
    CHANGE_MODEL = " * {description}\n"
    changelogs = read_changelog_file(changelogpath)
    if from_version:
        # We only want logs from a particular version
        for index, log in enumerate(changelogs):
            if log["version"] == from_version:
                changelogs = changelogs[: index + 1]
                break
    if fix_version:
        changelogs[0]["version"] = fix_version
    rendered_logs = []
    for log in changelogs:
        version = log["version"]
        logdate = log["date"]
        desc = log["description"]
        rendered_date = logdate.strftime("%a, %d %b %Y 00:00:00 +0000")
        rendered_descs = [CHANGE_MODEL.format(description=d) for d in desc2list(desc)]
        changes = "".join(rendered_descs)
        rendered_log = ENTRY_MODEL.format(
            pkg=pkgname,
            version=version,
            changes=changes,
            date=rendered_date,
            distribution=distribution,
        )
        rendered_logs.append(rendered_log)
    result = "".join(rendered_logs)
    fp = open(destfile, "w")
    fp.write(result)
    fp.close()


re_changelog_header = re.compile(r"=== ([\d.b]*) \(([\d\-]*)\)")


def read_changelog_file(filename):
    def iter_by_three(it):
        while True:
@@ -283,25 +344,31 @@ def read_changelog_file(filename):
                return
            yield version, date, description

    with open(filename, "rt", encoding="utf-8") as fp:
        contents = fp.read()
    splitted = re_changelog_header.split(contents)[1:]  # the first item is empty
    # splitted = [version1, date1, desc1, version2, date2, ...]
    result = []
    for version, date_str, description in iter_by_three(iter(splitted)):
        date = datetime.strptime(date_str, "%Y-%m-%d").date()
        d = {
            "date": date,
            "date_str": date_str,
            "version": version,
            "description": description.strip(),
        }
        result.append(d)
    return result


class OSXAppStructure:
    def __init__(self, dest):
        self.dest = dest
        self.contents = op.join(dest, "Contents")
        self.macos = op.join(self.contents, "MacOS")
        self.resources = op.join(self.contents, "Resources")
        self.frameworks = op.join(self.contents, "Frameworks")
        self.infoplist = op.join(self.contents, "Info.plist")

    def create(self, infoplist):
        ensure_empty_folder(self.dest)
@@ -309,11 +376,11 @@ class OSXAppStructure:
        os.mkdir(self.resources)
        os.mkdir(self.frameworks)
        copy(infoplist, self.infoplist)
        open(op.join(self.contents, "PkgInfo"), "wt").write("APPLxxxx")

    def copy_executable(self, executable):
        info = plistlib.readPlist(self.infoplist)
        self.executablename = info["CFBundleExecutable"]
        self.executablepath = op.join(self.macos, self.executablename)
        copy(executable, self.executablepath)
@@ -329,8 +396,14 @@ class OSXAppStructure:
        copy(path, framework_dest)


def create_osx_app_structure(
    dest,
    executable,
    infoplist,
    resources=None,
    frameworks=None,
    symlink_resources=False,
):
    # `dest`: A path to the destination .app folder
    # `executable`: the path of the executable file that goes in "MacOS"
    # `infoplist`: The path to your Info.plist file.
@@ -343,13 +416,14 @@ def create_osx_app_structure(dest, executable, infoplist, resources=None, framew
    app.copy_resources(*resources, use_symlinks=symlink_resources)
    app.copy_frameworks(*frameworks)


class OSXFrameworkStructure:
    def __init__(self, dest):
        self.dest = dest
        self.contents = op.join(dest, "Versions", "A")
        self.resources = op.join(self.contents, "Resources")
        self.headers = op.join(self.contents, "Headers")
        self.infoplist = op.join(self.resources, "Info.plist")
        self._update_executable_path()

    def _update_executable_path(self):
@@ -357,7 +431,7 @@ class OSXFrameworkStructure:
            self.executablename = self.executablepath = None
            return
        info = plistlib.readPlist(self.infoplist)
        self.executablename = info["CFBundleExecutable"]
        self.executablepath = op.join(self.contents, self.executablename)

    def create(self, infoplist):
@@ -371,10 +445,10 @@ class OSXFrameworkStructure:
    def create_symlinks(self):
        # Only call this after create() and copy_executable()
        rel = lambda path: op.relpath(path, self.dest)
        os.symlink("A", op.join(self.dest, "Versions", "Current"))
        os.symlink(rel(self.executablepath), op.join(self.dest, self.executablename))
        os.symlink(rel(self.headers), op.join(self.dest, "Headers"))
        os.symlink(rel(self.resources), op.join(self.dest, "Resources"))

    def copy_executable(self, executable):
        copy(executable, self.executablepath)
@@ -393,23 +467,28 @@ class OSXFrameworkStructure:


def copy_embeddable_python_dylib(dst):
    runtime = op.join(
        sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"),
        sysconfig.get_config_var("LDLIBRARY"),
    )
    filedest = op.join(dst, "Python")
    shutil.copy(runtime, filedest)
    os.chmod(filedest, 0o774)  # We need write permission to use install_name_tool
    cmd = "install_name_tool -id @rpath/Python %s" % filedest
    print_and_do(cmd)


def collect_stdlib_dependencies(script, dest_folder, extra_deps=None):
    sysprefix = sys.prefix  # could be a virtualenv
    real_lib_prefix = sysconfig.get_config_var("LIBDEST")

    def is_stdlib_path(path):
        # A module path is only a stdlib path if it's in either sys.prefix or
        # sysconfig.get_config_var('prefix') (the 2 are different if we are in a virtualenv) and if
        # there's no "site-package" in the path.
        if not path:
            return False
        if "site-package" in path:
            return False
        if not (path.startswith(sysprefix) or path.startswith(real_lib_prefix)):
            return False
@@ -425,13 +504,17 @@ def collect_stdlib_dependencies(script, dest_folder, extra_deps=None):
            relpath = op.relpath(p, real_lib_prefix)
        elif p.startswith(sysprefix):
            relpath = op.relpath(p, sysprefix)
            assert relpath.startswith(
                "lib/python3."
            )  # we want to get rid of that lib/python3.x part
            relpath = relpath[len("lib/python3.X/") :]
        else:
            raise AssertionError()
        if relpath.startswith(
            "lib-dynload"
        ):  # We copy .so files in lib-dynload directly in our dest
            relpath = relpath[len("lib-dynload/") :]
        if relpath.startswith("encodings") or relpath.startswith("distutils"):
            # We force their inclusion later.
            continue
        dest_path = op.join(dest_folder, relpath)
@@ -440,34 +523,47 @@ def collect_stdlib_dependencies(script, dest_folder, extra_deps=None):
    # stringprep is used by encodings.
    # We use real_lib_prefix with distutils because virtualenv messes with it and we need to refer
    # to the original distutils folder.
    FORCED_INCLUSION = [
        "encodings",
        "stringprep",
        op.join(real_lib_prefix, "distutils"),
    ]
    if extra_deps:
        FORCED_INCLUSION += extra_deps
    copy_packages(FORCED_INCLUSION, dest_folder)
    # There's a couple of rather big exe files in the distutils folder that we absolutely don't
    # need. Remove them.
    delete_files_with_pattern(op.join(dest_folder, "distutils"), "*.exe")
    # And, finally, create an empty "site.py" that Python needs around on startup.
    open(op.join(dest_folder, "site.py"), "w").close()


def fix_qt_resource_file(path):
    # pyrcc5 under Windows, if the locale is non-english, can produce a source file with a date
    # containing accented characters. If it does, the encoding is wrong and it prevents the file
    # from being correctly frozen by cx_freeze. To work around that, we open the file, strip all
    # comments, and save.
    with open(path, "rb") as fp:
        contents = fp.read()
    lines = contents.split(b"\n")
    lines = [l for l in lines if not l.startswith(b"#")]
    with open(path, "wb") as fp:
        fp.write(b"\n".join(lines))


def build_cocoa_ext(
    extname, dest, source_files, extra_frameworks=(), extra_includes=()
):
    extra_link_args = ["-framework", "CoreFoundation", "-framework", "Foundation"]
    for extra in extra_frameworks:
        extra_link_args += ["-framework", extra]
    ext = Extension(
        extname,
        source_files,
        extra_link_args=extra_link_args,
        include_dirs=extra_includes,
    )
    setup(script_args=["build_ext", "--inplace"], ext_modules=[ext])
    # Our problem here is to get the fully qualified filename of the resulting .so but I couldn't
    # find a documented way to do so. The only thing I could find is this below :(
    fn = ext._file_name

View File

@@ -8,26 +8,24 @@ import argparse
from setuptools import setup, Extension


def get_parser():
    parser = argparse.ArgumentParser(description="Build an arbitrary Python extension.")
    parser.add_argument(
        "source_files", nargs="+", help="List of source files to compile"
    )
    parser.add_argument("name", nargs=1, help="Name of the resulting extension")
    return parser


def main():
    args = get_parser().parse_args()
    print("Building {}...".format(args.name[0]))
    ext = Extension(args.name[0], args.source_files)
    setup(
        script_args=["build_ext", "--inplace"], ext_modules=[ext],
    )


if __name__ == "__main__":
    main()

View File

@@ -2,8 +2,8 @@
# Created On: 2008-01-08
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
"""When you have to deal with names that have to be unique and can conflict together, you can use
@@ -16,14 +16,15 @@ import shutil
from .path import Path, pathify

# This matches [123], but not [12] (3 digits being the minimum).
# It also matches [1234] [12345] etc..
# And only at the start of the string
re_conflict = re.compile(r"^\[\d{3}\d*\] ")
def get_conflicted_name(other_names, name): def get_conflicted_name(other_names, name):
"""Returns name with a ``[000]`` number in front of it. """Returns name with a ``[000]`` number in front of it.
The number between brackets depends on how many conflicted filenames The number between brackets depends on how many conflicted filenames
there already are in other_names. there already are in other_names.
""" """
@ -32,23 +33,26 @@ def get_conflicted_name(other_names, name):
return name return name
i = 0 i = 0
while True: while True:
newname = '[%03d] %s' % (i, name) newname = "[%03d] %s" % (i, name)
if newname not in other_names: if newname not in other_names:
return newname return newname
i += 1 i += 1
def get_unconflicted_name(name): def get_unconflicted_name(name):
"""Returns ``name`` without ``[]`` brackets. """Returns ``name`` without ``[]`` brackets.
Brackets which, of course, might have been added by :func:`get_conflicted_name`. Brackets which, of course, might have been added by :func:`get_conflicted_name`.
""" """
return re_conflict.sub('',name,1) return re_conflict.sub("", name, 1)
def is_conflicted(name): def is_conflicted(name):
"""Returns whether ``name`` is prepended with a bracketed number. """Returns whether ``name`` is prepended with a bracketed number.
""" """
return re_conflict.match(name) is not None return re_conflict.match(name) is not None
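Taken together, the three helpers round-trip cleanly; a short usage sketch with hypothetical file names:

names = ["report.txt", "[000] report.txt"]
newname = get_conflicted_name(names, "report.txt")  # "[001] report.txt"
assert is_conflicted(newname)
assert get_unconflicted_name(newname) == "report.txt"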
@pathify @pathify
def _smart_move_or_copy(operation, source_path: Path, dest_path: Path): def _smart_move_or_copy(operation, source_path: Path, dest_path: Path):
"""Use move() or copy() to move and copy file with the conflict management. """Use move() or copy() to move and copy file with the conflict management.
@ -61,19 +65,24 @@ def _smart_move_or_copy(operation, source_path: Path, dest_path: Path):
newname = get_conflicted_name(os.listdir(str(dest_dir_path)), filename) newname = get_conflicted_name(os.listdir(str(dest_dir_path)), filename)
dest_path = dest_dir_path[newname] dest_path = dest_dir_path[newname]
operation(str(source_path), str(dest_path)) operation(str(source_path), str(dest_path))
def smart_move(source_path, dest_path): def smart_move(source_path, dest_path):
"""Same as :func:`smart_copy`, but it moves files instead. """Same as :func:`smart_copy`, but it moves files instead.
""" """
_smart_move_or_copy(shutil.move, source_path, dest_path) _smart_move_or_copy(shutil.move, source_path, dest_path)
def smart_copy(source_path, dest_path): def smart_copy(source_path, dest_path):
"""Copies ``source_path`` to ``dest_path``, recursively and with conflict resolution. """Copies ``source_path`` to ``dest_path``, recursively and with conflict resolution.
""" """
try: try:
_smart_move_or_copy(shutil.copy, source_path, dest_path) _smart_move_or_copy(shutil.copy, source_path, dest_path)
except IOError as e: except IOError as e:
if e.errno in {21, 13}: # it's a directory, code is 21 on OS X / Linux and 13 on Windows if e.errno in {
21,
13,
}: # it's a directory, code is 21 on OS X / Linux and 13 on Windows
_smart_move_or_copy(shutil.copytree, source_path, dest_path) _smart_move_or_copy(shutil.copytree, source_path, dest_path)
else: else:
raise raise
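For illustration, a hedged sketch of that fallback in action (paths are hypothetical; the strings are coerced to ``Path`` by ``@pathify``): a plain file goes through ``shutil.copy``, while a directory raises ``IOError`` with ``errno`` 21 on OS X/Linux and is retried with ``shutil.copytree``.

smart_copy("/tmp/src/report.txt", "/tmp/dest/report.txt")  # file: shutil.copy
smart_copy("/tmp/src/photos", "/tmp/dest/photos")          # dir: retried with copytree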

View File

@ -1,14 +1,15 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2011-04-19 # Created On: 2011-04-19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import sys import sys
import traceback import traceback
# Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/ # Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/
def stacktraces(): def stacktraces():
code = [] code = []
@ -18,5 +19,5 @@ def stacktraces():
code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line: if line:
code.append(" %s" % (line.strip())) code.append(" %s" % (line.strip()))
return "\n".join(code) return "\n".join(code)

View File

@ -9,25 +9,30 @@
import os.path as op import os.path as op
import logging import logging
class SpecialFolder: class SpecialFolder:
AppData = 1 AppData = 1
Cache = 2 Cache = 2
def open_url(url): def open_url(url):
"""Open ``url`` with the default browser. """Open ``url`` with the default browser.
""" """
_open_url(url) _open_url(url)
def open_path(path): def open_path(path):
"""Open ``path`` with its associated application. """Open ``path`` with its associated application.
""" """
_open_path(str(path)) _open_path(str(path))
def reveal_path(path): def reveal_path(path):
"""Open the folder containing ``path`` with the default file browser. """Open the folder containing ``path`` with the default file browser.
""" """
_reveal_path(str(path)) _reveal_path(str(path))
def special_folder_path(special_folder, appname=None): def special_folder_path(special_folder, appname=None):
"""Returns the path of ``special_folder``. """Returns the path of ``special_folder``.
@ -38,12 +43,14 @@ def special_folder_path(special_folder, appname=None):
""" """
return _special_folder_path(special_folder, appname) return _special_folder_path(special_folder, appname)
try: try:
# Normally, we would simply do "from cocoa import proxy", but due to a bug in pytest (currently # Normally, we would simply do "from cocoa import proxy", but due to a bug in pytest (currently
# at v2.4.2), our test suite is broken when we do that. This below is a workaround until that # at v2.4.2), our test suite is broken when we do that. This below is a workaround until that
# bug is fixed. # bug is fixed.
import cocoa import cocoa
if not hasattr(cocoa, 'proxy'):
if not hasattr(cocoa, "proxy"):
raise ImportError() raise ImportError()
proxy = cocoa.proxy proxy = cocoa.proxy
_open_url = proxy.openURL_ _open_url = proxy.openURL_
@ -56,13 +63,15 @@ try:
else: else:
base = proxy.getAppdataPath() base = proxy.getAppdataPath()
if not appname: if not appname:
appname = proxy.bundleInfo_('CFBundleName') appname = proxy.bundleInfo_("CFBundleName")
return op.join(base, appname) return op.join(base, appname)
except ImportError: except ImportError:
try: try:
from PyQt5.QtCore import QUrl, QStandardPaths from PyQt5.QtCore import QUrl, QStandardPaths
from PyQt5.QtGui import QDesktopServices from PyQt5.QtGui import QDesktopServices
def _open_url(url): def _open_url(url):
QDesktopServices.openUrl(QUrl(url)) QDesktopServices.openUrl(QUrl(url))
@ -79,10 +88,12 @@ except ImportError:
else: else:
qtfolder = QStandardPaths.DataLocation qtfolder = QStandardPaths.DataLocation
return QStandardPaths.standardLocations(qtfolder)[0] return QStandardPaths.standardLocations(qtfolder)[0]
except ImportError: except ImportError:
# We're either running tests, and these functions don't matter much, or we're in a really # We're either running tests, and these functions don't matter much, or we're in a really
# weird situation. Let's just have dummy fallbacks. # weird situation. Let's just have dummy fallbacks.
logging.warning("Can't setup desktop functions!") logging.warning("Can't setup desktop functions!")
def _open_path(path): def _open_path(path):
pass pass
@ -90,4 +101,4 @@ except ImportError:
pass pass
def _special_folder_path(special_folder, appname=None): def _special_folder_path(special_folder, appname=None):
return '/tmp' return "/tmp"
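Whichever backend gets installed, callers only ever see the same four module-level functions; for example (the path and appname below are hypothetical):

open_url("https://github.com/hsoft/dupeguru")
reveal_path("/Users/me/Pictures")
appdata = special_folder_path(SpecialFolder.AppData, appname="dupeGuru")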

View File

@ -1,9 +1,9 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2011-08-05 # Created On: 2011-08-05
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from sys import maxsize as INF from sys import maxsize as INF
@ -11,73 +11,74 @@ from math import sqrt
VERY_SMALL = 0.0000001 VERY_SMALL = 0.0000001
class Point: class Point:
def __init__(self, x, y): def __init__(self, x, y):
self.x = x self.x = x
self.y = y self.y = y
def __repr__(self): def __repr__(self):
return '<Point {:2.2f}, {:2.2f}>'.format(*self) return "<Point {:2.2f}, {:2.2f}>".format(*self)
def __iter__(self): def __iter__(self):
yield self.x yield self.x
yield self.y yield self.y
def distance_to(self, other): def distance_to(self, other):
return Line(self, other).length() return Line(self, other).length()
class Line: class Line:
def __init__(self, p1, p2): def __init__(self, p1, p2):
self.p1 = p1 self.p1 = p1
self.p2 = p2 self.p2 = p2
def __repr__(self): def __repr__(self):
return '<Line {}, {}>'.format(*self) return "<Line {}, {}>".format(*self)
def __iter__(self): def __iter__(self):
yield self.p1 yield self.p1
yield self.p2 yield self.p2
def dx(self): def dx(self):
return self.p2.x - self.p1.x return self.p2.x - self.p1.x
def dy(self): def dy(self):
return self.p2.y - self.p1.y return self.p2.y - self.p1.y
def length(self): def length(self):
return sqrt(self.dx() ** 2 + self.dy() ** 2) return sqrt(self.dx() ** 2 + self.dy() ** 2)
def slope(self): def slope(self):
if self.dx() == 0: if self.dx() == 0:
return INF if self.dy() > 0 else -INF return INF if self.dy() > 0 else -INF
else: else:
return self.dy() / self.dx() return self.dy() / self.dx()
def intersection_point(self, other): def intersection_point(self, other):
# with help from http://paulbourke.net/geometry/lineline2d/ # with help from http://paulbourke.net/geometry/lineline2d/
if abs(self.slope() - other.slope()) < VERY_SMALL: if abs(self.slope() - other.slope()) < VERY_SMALL:
# parallel. Even if coincident, we return nothing # parallel. Even if coincident, we return nothing
return None return None
A, B = self A, B = self
C, D = other C, D = other
denom = (D.y-C.y) * (B.x-A.x) - (D.x-C.x) * (B.y-A.y) denom = (D.y - C.y) * (B.x - A.x) - (D.x - C.x) * (B.y - A.y)
if denom == 0: if denom == 0:
return None return None
numera = (D.x-C.x) * (A.y-C.y) - (D.y-C.y) * (A.x-C.x) numera = (D.x - C.x) * (A.y - C.y) - (D.y - C.y) * (A.x - C.x)
numerb = (B.x-A.x) * (A.y-C.y) - (B.y-A.y) * (A.x-C.x) numerb = (B.x - A.x) * (A.y - C.y) - (B.y - A.y) * (A.x - C.x)
mua = numera / denom; mua = numera / denom
mub = numerb / denom; mub = numerb / denom
if (0 <= mua <= 1) and (0 <= mub <= 1): if (0 <= mua <= 1) and (0 <= mub <= 1):
x = A.x + mua * (B.x - A.x) x = A.x + mua * (B.x - A.x)
y = A.y + mua * (B.y - A.y) y = A.y + mua * (B.y - A.y)
return Point(x, y) return Point(x, y)
else: else:
return None return None
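A worked example of the parametric math above: for segments A(0,0)-B(2,2) and C(0,2)-D(2,0), denom = (0-2)*(2-0) - (2-0)*(2-0) = -8 and numera = numerb = -4, so mua = mub = 0.5 and the crossing point is (1, 1).

p = Line(Point(0, 0), Point(2, 2)).intersection_point(Line(Point(0, 2), Point(2, 0)))
# p == Point(1.0, 1.0); parallel or non-overlapping segments return None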
class Rect: class Rect:
def __init__(self, x, y, w, h): def __init__(self, x, y, w, h):
@ -85,43 +86,43 @@ class Rect:
self.y = y self.y = y
self.w = w self.w = w
self.h = h self.h = h
def __iter__(self): def __iter__(self):
yield self.x yield self.x
yield self.y yield self.y
yield self.w yield self.w
yield self.h yield self.h
def __repr__(self): def __repr__(self):
return '<Rect {:2.2f}, {:2.2f}, {:2.2f}, {:2.2f}>'.format(*self) return "<Rect {:2.2f}, {:2.2f}, {:2.2f}, {:2.2f}>".format(*self)
@classmethod @classmethod
def from_center(cls, center, width, height): def from_center(cls, center, width, height):
x = center.x - width / 2 x = center.x - width / 2
y = center.y - height / 2 y = center.y - height / 2
return cls(x, y, width, height) return cls(x, y, width, height)
@classmethod @classmethod
def from_corners(cls, pt1, pt2): def from_corners(cls, pt1, pt2):
x1, y1 = pt1 x1, y1 = pt1
x2, y2 = pt2 x2, y2 = pt2
return cls(min(x1, x2), min(y1, y2), abs(x1-x2), abs(y1-y2)) return cls(min(x1, x2), min(y1, y2), abs(x1 - x2), abs(y1 - y2))
def center(self): def center(self):
return Point(self.x + self.w/2, self.y + self.h/2) return Point(self.x + self.w / 2, self.y + self.h / 2)
def contains_point(self, point): def contains_point(self, point):
x, y = point x, y = point
(x1, y1), (x2, y2) = self.corners() (x1, y1), (x2, y2) = self.corners()
return (x1 <= x <= x2) and (y1 <= y <= y2) return (x1 <= x <= x2) and (y1 <= y <= y2)
def contains_rect(self, rect): def contains_rect(self, rect):
pt1, pt2 = rect.corners() pt1, pt2 = rect.corners()
return self.contains_point(pt1) and self.contains_point(pt2) return self.contains_point(pt1) and self.contains_point(pt2)
def corners(self): def corners(self):
return Point(self.x, self.y), Point(self.x+self.w, self.y+self.h) return Point(self.x, self.y), Point(self.x + self.w, self.y + self.h)
def intersects(self, other): def intersects(self, other):
r1pt1, r1pt2 = self.corners() r1pt1, r1pt2 = self.corners()
r2pt1, r2pt2 = other.corners() r2pt1, r2pt2 = other.corners()
@ -136,7 +137,7 @@ class Rect:
else: else:
yinter = r2pt2.y >= r1pt1.y yinter = r2pt2.y >= r1pt1.y
return yinter return yinter
def lines(self): def lines(self):
pt1, pt4 = self.corners() pt1, pt4 = self.corners()
pt2 = Point(pt4.x, pt1.y) pt2 = Point(pt4.x, pt1.y)
@ -146,7 +147,7 @@ class Rect:
l3 = Line(pt3, pt4) l3 = Line(pt3, pt4)
l4 = Line(pt1, pt3) l4 = Line(pt1, pt3)
return l1, l2, l3, l4 return l1, l2, l3, l4
def scaled_rect(self, dx, dy): def scaled_rect(self, dx, dy):
"""Returns a rect that has the same borders at self, but grown/shrunk by dx/dy on each side. """Returns a rect that has the same borders at self, but grown/shrunk by dx/dy on each side.
""" """
@ -156,7 +157,7 @@ class Rect:
w += dx * 2 w += dx * 2
h += dy * 2 h += dy * 2
return Rect(x, y, w, h) return Rect(x, y, w, h)
def united(self, other): def united(self, other):
"""Returns the bounding rectangle of this rectangle and `other`. """Returns the bounding rectangle of this rectangle and `other`.
""" """
@ -166,53 +167,52 @@ class Rect:
corner1 = Point(min(ulcorner1.x, ulcorner2.x), min(ulcorner1.y, ulcorner2.y)) corner1 = Point(min(ulcorner1.x, ulcorner2.x), min(ulcorner1.y, ulcorner2.y))
corner2 = Point(max(lrcorner1.x, lrcorner2.x), max(lrcorner1.y, lrcorner2.y)) corner2 = Point(max(lrcorner1.x, lrcorner2.x), max(lrcorner1.y, lrcorner2.y))
return Rect.from_corners(corner1, corner2) return Rect.from_corners(corner1, corner2)
#--- Properties # --- Properties
@property @property
def top(self): def top(self):
return self.y return self.y
@top.setter @top.setter
def top(self, value): def top(self, value):
self.y = value self.y = value
@property @property
def bottom(self): def bottom(self):
return self.y + self.h return self.y + self.h
@bottom.setter @bottom.setter
def bottom(self, value): def bottom(self, value):
self.y = value - self.h self.y = value - self.h
@property @property
def left(self): def left(self):
return self.x return self.x
@left.setter @left.setter
def left(self, value): def left(self, value):
self.x = value self.x = value
@property @property
def right(self): def right(self):
return self.x + self.w return self.x + self.w
@right.setter @right.setter
def right(self, value): def right(self, value):
self.x = value - self.w self.x = value - self.w
@property @property
def width(self): def width(self):
return self.w return self.w
@width.setter @width.setter
def width(self, value): def width(self, value):
self.w = value self.w = value
@property @property
def height(self): def height(self):
return self.h return self.h
@height.setter @height.setter
def height(self, value): def height(self, value):
self.h = value self.h = value
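The setters above translate the rectangle rather than resize it, which keeps edge-based layout math simple; a quick sketch:

r = Rect.from_center(Point(5, 5), 4, 2)
# (left, top, right, bottom) == (3.0, 4.0, 7.0, 6.0)
r.right = 10            # moves the rect; width stays 4
assert r.x == 6
assert r.contains_point(Point(8, 5))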

View File

@ -4,13 +4,16 @@
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
def noop(*args, **kwargs): def noop(*args, **kwargs):
pass pass
class NoopGUI: class NoopGUI:
def __getattr__(self, func_name): def __getattr__(self, func_name):
return noop return noop
class GUIObject: class GUIObject:
"""Cross-toolkit "model" representation of a GUI layer object. """Cross-toolkit "model" representation of a GUI layer object.
@ -32,6 +35,7 @@ class GUIObject:
However, sometimes you want to be able to re-bind another view. In this case, set the However, sometimes you want to be able to re-bind another view. In this case, set the
``multibind`` flag to ``True`` and the safeguard will be disabled. ``multibind`` flag to ``True`` and the safeguard will be disabled.
""" """
def __init__(self, multibind=False): def __init__(self, multibind=False):
self._view = None self._view = None
self._multibind = multibind self._multibind = multibind
@ -77,4 +81,3 @@ class GUIObject:
# Instead of None, we put a NoopGUI() there to avoid rogue view callback raising an # Instead of None, we put a NoopGUI() there to avoid rogue view callback raising an
# exception. # exception.
self._view = NoopGUI() self._view = NoopGUI()
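A minimal sketch of the bind handshake described above (class names are hypothetical, and it assumes the standard ``view`` property setter, which triggers :meth:`_view_updated`):

class CounterModel(GUIObject):
    def _view_updated(self):
        self.view.refresh()   # first chance to push state to the widget

class CounterWidget:
    def refresh(self):
        print("redraw")

model = CounterModel()
model.view = CounterWidget()  # binding triggers _view_updated()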

View File

@ -1,21 +1,23 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-07-25 # Created On: 2010-07-25
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import copy import copy
from .base import GUIObject from .base import GUIObject
class Column: class Column:
"""Holds column attributes such as its name, width, visibility, etc. """Holds column attributes such as its name, width, visibility, etc.
These attributes are then used to correctly configure the column on the "view" side. These attributes are then used to correctly configure the column on the "view" side.
""" """
def __init__(self, name, display='', visible=True, optional=False):
def __init__(self, name, display="", visible=True, optional=False):
#: "programmatical" (not for display) name. Used as a reference in a couple of place, such #: "programmatical" (not for display) name. Used as a reference in a couple of place, such
#: as :meth:`Columns.column_by_name`. #: as :meth:`Columns.column_by_name`.
self.name = name self.name = name
@ -39,52 +41,57 @@ class Column:
self.default_visible = visible self.default_visible = visible
#: Whether the column can have :attr:`visible` set to false. #: Whether the column can have :attr:`visible` set to false.
self.optional = optional self.optional = optional
class ColumnsView: class ColumnsView:
"""Expected interface for :class:`Columns`'s view. """Expected interface for :class:`Columns`'s view.
*Not actually used in the code. For documentation purposes only.* *Not actually used in the code. For documentation purposes only.*
Our view, the columns controller of a table or outline, is expected to properly respond to Our view, the columns controller of a table or outline, is expected to properly respond to
callbacks. callbacks.
""" """
def restore_columns(self): def restore_columns(self):
"""Update all columns according to the model. """Update all columns according to the model.
When this is called, our view has to update the title, order and visibility of all When this is called, our view has to update the title, order and visibility of all
columns. columns.
""" """
def set_column_visible(self, colname, visible): def set_column_visible(self, colname, visible):
"""Update visibility of column ``colname``. """Update visibility of column ``colname``.
Called when the user toggles the visibility of a column, we must update the column Called when the user toggles the visibility of a column, we must update the column
``colname``'s visibility status to ``visible``. ``colname``'s visibility status to ``visible``.
""" """
class PrefAccessInterface: class PrefAccessInterface:
"""Expected interface for :class:`Columns`'s prefaccess. """Expected interface for :class:`Columns`'s prefaccess.
*Not actually used in the code. For documentation purposes only.* *Not actually used in the code. For documentation purposes only.*
""" """
def get_default(self, key, fallback_value): def get_default(self, key, fallback_value):
"""Retrieve the value for ``key`` in the currently running app's preference store. """Retrieve the value for ``key`` in the currently running app's preference store.
If the key doesn't exist, return ``fallback_value``. If the key doesn't exist, return ``fallback_value``.
""" """
def set_default(self, key, value): def set_default(self, key, value):
"""Set the value ``value`` for ``key`` in the currently running app's preference store. """Set the value ``value`` for ``key`` in the currently running app's preference store.
""" """
class Columns(GUIObject): class Columns(GUIObject):
"""Cross-toolkit GUI-enabled column set for tables or outlines. """Cross-toolkit GUI-enabled column set for tables or outlines.
Manages a column set's order, visibility and width. We also manage the persistence of these Manages a column set's order, visibility and width. We also manage the persistence of these
attributes so that we can restore them on the next run. attributes so that we can restore them on the next run.
Subclasses :class:`.GUIObject`. Expected view: :class:`ColumnsView`. Subclasses :class:`.GUIObject`. Expected view: :class:`ColumnsView`.
:param table: The table the columns belong to. It's from there that we retrieve our column :param table: The table the columns belong to. It's from there that we retrieve our column
configuration and it must have a ``COLUMNS`` attribute which is a list of configuration and it must have a ``COLUMNS`` attribute which is a list of
:class:`Column`. We also call :meth:`~.GUITable.save_edits` on it from time to :class:`Column`. We also call :meth:`~.GUITable.save_edits` on it from time to
@ -97,6 +104,7 @@ class Columns(GUIObject):
a prefix. Preferences are saved under more than one name, but they will all a prefix. Preferences are saved under more than one name, but they will all
have that same prefix. have that same prefix.
""" """
def __init__(self, table, prefaccess=None, savename=None): def __init__(self, table, prefaccess=None, savename=None):
GUIObject.__init__(self) GUIObject.__init__(self)
self.table = table self.table = table
@ -108,84 +116,88 @@ class Columns(GUIObject):
column.logical_index = i column.logical_index = i
column.ordered_index = i column.ordered_index = i
self.coldata = {col.name: col for col in self.column_list} self.coldata = {col.name: col for col in self.column_list}
#--- Private # --- Private
def _get_colname_attr(self, colname, attrname, default): def _get_colname_attr(self, colname, attrname, default):
try: try:
return getattr(self.coldata[colname], attrname) return getattr(self.coldata[colname], attrname)
except KeyError: except KeyError:
return default return default
def _set_colname_attr(self, colname, attrname, value): def _set_colname_attr(self, colname, attrname, value):
try: try:
col = self.coldata[colname] col = self.coldata[colname]
setattr(col, attrname, value) setattr(col, attrname, value)
except KeyError: except KeyError:
pass pass
def _optional_columns(self): def _optional_columns(self):
return [c for c in self.column_list if c.optional] return [c for c in self.column_list if c.optional]
#--- Override # --- Override
def _view_updated(self): def _view_updated(self):
self.restore_columns() self.restore_columns()
#--- Public # --- Public
def column_by_index(self, index): def column_by_index(self, index):
"""Return the :class:`Column` having the :attr:`~Column.logical_index` ``index``. """Return the :class:`Column` having the :attr:`~Column.logical_index` ``index``.
""" """
return self.column_list[index] return self.column_list[index]
def column_by_name(self, name): def column_by_name(self, name):
"""Return the :class:`Column` having the :attr:`~Column.name` ``name``. """Return the :class:`Column` having the :attr:`~Column.name` ``name``.
""" """
return self.coldata[name] return self.coldata[name]
def columns_count(self): def columns_count(self):
"""Returns the number of columns in our set. """Returns the number of columns in our set.
""" """
return len(self.column_list) return len(self.column_list)
def column_display(self, colname): def column_display(self, colname):
"""Returns display name for column named ``colname``, or ``''`` if there's none. """Returns display name for column named ``colname``, or ``''`` if there's none.
""" """
return self._get_colname_attr(colname, 'display', '') return self._get_colname_attr(colname, "display", "")
def column_is_visible(self, colname): def column_is_visible(self, colname):
"""Returns visibility for column named ``colname``, or ``True`` if there's none. """Returns visibility for column named ``colname``, or ``True`` if there's none.
""" """
return self._get_colname_attr(colname, 'visible', True) return self._get_colname_attr(colname, "visible", True)
def column_width(self, colname): def column_width(self, colname):
"""Returns width for column named ``colname``, or ``0`` if there's none. """Returns width for column named ``colname``, or ``0`` if there's none.
""" """
return self._get_colname_attr(colname, 'width', 0) return self._get_colname_attr(colname, "width", 0)
def columns_to_right(self, colname): def columns_to_right(self, colname):
"""Returns the list of all columns to the right of ``colname``. """Returns the list of all columns to the right of ``colname``.
"right" meaning "having a higher :attr:`Column.ordered_index`" in our left-to-right "right" meaning "having a higher :attr:`Column.ordered_index`" in our left-to-right
civilization. civilization.
""" """
column = self.coldata[colname] column = self.coldata[colname]
index = column.ordered_index index = column.ordered_index
return [col.name for col in self.column_list if (col.visible and col.ordered_index > index)] return [
col.name
for col in self.column_list
if (col.visible and col.ordered_index > index)
]
def menu_items(self): def menu_items(self):
"""Returns a list of items convenient for quick visibility menu generation. """Returns a list of items convenient for quick visibility menu generation.
Returns a list of ``(display_name, is_marked)`` items for each optional column in the Returns a list of ``(display_name, is_marked)`` items for each optional column in the
current view (``is_marked`` means that it's visible). current view (``is_marked`` means that it's visible).
You can use this to generate a menu to let the user toggle the visibility of an optional You can use this to generate a menu to let the user toggle the visibility of an optional
column. That is why we only show optional columns, because the visibility of mandatory column. That is why we only show optional columns, because the visibility of mandatory
columns can't be toggled. columns can't be toggled.
""" """
return [(c.display, c.visible) for c in self._optional_columns()] return [(c.display, c.visible) for c in self._optional_columns()]
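In practice a toolkit layer pairs :meth:`menu_items` with :meth:`toggle_menu_item` by menu position; a hedged sketch against a hypothetical ``menu`` object:

for i, (display, marked) in enumerate(columns.menu_items()):
    menu.add_checkable_item(display, checked=marked,
                            on_click=lambda i=i: columns.toggle_menu_item(i))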
def move_column(self, colname, index): def move_column(self, colname, index):
"""Moves column ``colname`` to ``index``. """Moves column ``colname`` to ``index``.
The column will be placed just in front of the column currently having that index, or to the The column will be placed just in front of the column currently having that index, or to the
end of the list if there's none. end of the list if there's none.
""" """
@ -193,7 +205,7 @@ class Columns(GUIObject):
colnames.remove(colname) colnames.remove(colname)
colnames.insert(index, colname) colnames.insert(index, colname)
self.set_column_order(colnames) self.set_column_order(colnames)
def reset_to_defaults(self): def reset_to_defaults(self):
"""Reset all columns' width and visibility to their default values. """Reset all columns' width and visibility to their default values.
""" """
@ -202,12 +214,12 @@ class Columns(GUIObject):
col.visible = col.default_visible col.visible = col.default_visible
col.width = col.default_width col.width = col.default_width
self.view.restore_columns() self.view.restore_columns()
def resize_column(self, colname, newwidth): def resize_column(self, colname, newwidth):
"""Set column ``colname``'s width to ``newwidth``. """Set column ``colname``'s width to ``newwidth``.
""" """
self._set_colname_attr(colname, 'width', newwidth) self._set_colname_attr(colname, "width", newwidth)
def restore_columns(self): def restore_columns(self):
"""Restore's column persistent attributes from the last :meth:`save_columns`. """Restore's column persistent attributes from the last :meth:`save_columns`.
""" """
@ -218,72 +230,73 @@ class Columns(GUIObject):
self.view.restore_columns() self.view.restore_columns()
return return
for col in self.column_list: for col in self.column_list:
pref_name = '{}.Columns.{}'.format(self.savename, col.name) pref_name = "{}.Columns.{}".format(self.savename, col.name)
coldata = self.prefaccess.get_default(pref_name, fallback_value={}) coldata = self.prefaccess.get_default(pref_name, fallback_value={})
if 'index' in coldata: if "index" in coldata:
col.ordered_index = coldata['index'] col.ordered_index = coldata["index"]
if 'width' in coldata: if "width" in coldata:
col.width = coldata['width'] col.width = coldata["width"]
if col.optional and 'visible' in coldata: if col.optional and "visible" in coldata:
col.visible = coldata['visible'] col.visible = coldata["visible"]
self.view.restore_columns() self.view.restore_columns()
def save_columns(self): def save_columns(self):
"""Save column attributes in persistent storage for restoration in :meth:`restore_columns`. """Save column attributes in persistent storage for restoration in :meth:`restore_columns`.
""" """
if not (self.prefaccess and self.savename and self.coldata): if not (self.prefaccess and self.savename and self.coldata):
return return
for col in self.column_list: for col in self.column_list:
pref_name = '{}.Columns.{}'.format(self.savename, col.name) pref_name = "{}.Columns.{}".format(self.savename, col.name)
coldata = {'index': col.ordered_index, 'width': col.width} coldata = {"index": col.ordered_index, "width": col.width}
if col.optional: if col.optional:
coldata['visible'] = col.visible coldata["visible"] = col.visible
self.prefaccess.set_default(pref_name, coldata) self.prefaccess.set_default(pref_name, coldata)
def set_column_order(self, colnames): def set_column_order(self, colnames):
"""Change the columns order so it matches the order in ``colnames``. """Change the columns order so it matches the order in ``colnames``.
:param colnames: A list of column names in the desired order. :param colnames: A list of column names in the desired order.
""" """
colnames = (name for name in colnames if name in self.coldata) colnames = (name for name in colnames if name in self.coldata)
for i, colname in enumerate(colnames): for i, colname in enumerate(colnames):
col = self.coldata[colname] col = self.coldata[colname]
col.ordered_index = i col.ordered_index = i
def set_column_visible(self, colname, visible): def set_column_visible(self, colname, visible):
"""Set the visibility of column ``colname``. """Set the visibility of column ``colname``.
""" """
self.table.save_edits() # the table on the GUI side will stop editing when the columns change self.table.save_edits() # the table on the GUI side will stop editing when the columns change
self._set_colname_attr(colname, 'visible', visible) self._set_colname_attr(colname, "visible", visible)
self.view.set_column_visible(colname, visible) self.view.set_column_visible(colname, visible)
def set_default_width(self, colname, width): def set_default_width(self, colname, width):
"""Set the default width or column ``colname``. """Set the default width or column ``colname``.
""" """
self._set_colname_attr(colname, 'default_width', width) self._set_colname_attr(colname, "default_width", width)
def toggle_menu_item(self, index): def toggle_menu_item(self, index):
"""Toggles the visibility of an optional column. """Toggles the visibility of an optional column.
You know, that optional column menu you've generated in :meth:`menu_items`? Well, ``index`` You know, that optional column menu you've generated in :meth:`menu_items`? Well, ``index``
is the index of the menu item in *that* menu that the user has clicked on to toggle it. is the index of the menu item in *that* menu that the user has clicked on to toggle it.
Returns whether the column in question ends up being visible or not. Returns whether the column in question ends up being visible or not.
""" """
col = self._optional_columns()[index] col = self._optional_columns()[index]
self.set_column_visible(col.name, not col.visible) self.set_column_visible(col.name, not col.visible)
return col.visible return col.visible
#--- Properties # --- Properties
@property @property
def ordered_columns(self): def ordered_columns(self):
"""List of :class:`Column` in visible order. """List of :class:`Column` in visible order.
""" """
return [col for col in sorted(self.column_list, key=lambda col: col.ordered_index)] return [
col for col in sorted(self.column_list, key=lambda col: col.ordered_index)
]
@property @property
def colnames(self): def colnames(self):
"""List of column names in visible order. """List of column names in visible order.
""" """
return [col.name for col in self.ordered_columns] return [col.name for col in self.ordered_columns]
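End to end, persistence only happens when both ``prefaccess`` and ``savename`` are given; a sketch with a dict-backed store (``DictPrefs`` and ``table`` are hypothetical; the table just needs a ``COLUMNS`` attribute containing a column named "name"):

class DictPrefs:
    def __init__(self):
        self.store = {}
    def get_default(self, key, fallback_value):
        return self.store.get(key, fallback_value)
    def set_default(self, key, value):
        self.store[key] = value

columns = Columns(table, prefaccess=DictPrefs(), savename="ResultTable")
columns.resize_column("name", 180)
columns.save_columns()     # stored under "ResultTable.Columns.name", etc.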

View File

@ -8,6 +8,7 @@ from ..jobprogress.performer import ThreadedJobPerformer
from .base import GUIObject from .base import GUIObject
from .text_field import TextField from .text_field import TextField
class ProgressWindowView: class ProgressWindowView:
"""Expected interface for :class:`ProgressWindow`'s view. """Expected interface for :class:`ProgressWindow`'s view.
@ -18,6 +19,7 @@ class ProgressWindowView:
It's also expected to call :meth:`ProgressWindow.cancel` when the cancel button is clicked. It's also expected to call :meth:`ProgressWindow.cancel` when the cancel button is clicked.
""" """
def show(self): def show(self):
"""Show the dialog. """Show the dialog.
""" """
@ -36,6 +38,7 @@ class ProgressWindowView:
:param int progress: a value between ``0`` and ``100``. :param int progress: a value between ``0`` and ``100``.
""" """
class ProgressWindow(GUIObject, ThreadedJobPerformer): class ProgressWindow(GUIObject, ThreadedJobPerformer):
"""Cross-toolkit GUI-enabled progress window. """Cross-toolkit GUI-enabled progress window.
@ -58,6 +61,7 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
if you want to. If the function returns ``True``, ``finish_func()`` will be if you want to. If the function returns ``True``, ``finish_func()`` will be
called as if the job terminated normally. called as if the job terminated normally.
""" """
def __init__(self, finish_func, error_func=None): def __init__(self, finish_func, error_func=None):
# finish_func(jobid) is the function that is called when a job is completed. # finish_func(jobid) is the function that is called when a job is completed.
GUIObject.__init__(self) GUIObject.__init__(self)
@ -124,10 +128,9 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
# target is a function with its first argument being a Job. It can then be followed by other # target is a function with its first argument being a Job. It can then be followed by other
# arguments which are passed as `args`. # arguments which are passed as `args`.
self.jobid = jobid self.jobid = jobid
self.progressdesc_textfield.text = '' self.progressdesc_textfield.text = ""
j = self.create_job() j = self.create_job()
args = tuple([j] + list(args)) args = tuple([j] + list(args))
self.run_threaded(target, args) self.run_threaded(target, args)
self.jobdesc_textfield.text = title self.jobdesc_textfield.text = title
self.view.show() self.view.show()
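Going by the body above, a launch looks roughly like the sketch below; the ``run(jobid, title, target, args)`` signature is inferred from the names used there, and the ``Job`` API (``set_progress``) is an assumption from ``hscommon.jobprogress``:

def do_scan(j, folder):
    # j is the Job prepended to `args`; folder comes from the caller.
    for i in range(100):
        j.set_progress(i)

pw = ProgressWindow(finish_func=lambda jobid: print("done", jobid))
pw.run(42, "Scanning", do_scan, args=("/some/folder",))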

View File

@ -1,92 +1,96 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2011-09-06 # Created On: 2011-09-06
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from collections import Sequence, MutableSequence from collections import Sequence, MutableSequence
from .base import GUIObject from .base import GUIObject
class Selectable(Sequence): class Selectable(Sequence):
"""Mix-in for a ``Sequence`` that manages its selection status. """Mix-in for a ``Sequence`` that manages its selection status.
When mixed in with a ``Sequence``, we enable it to manage its selection status. The selection When mixed in with a ``Sequence``, we enable it to manage its selection status. The selection
is held as a list of ``int`` indexes. Multiple selection is supported. is held as a list of ``int`` indexes. Multiple selection is supported.
""" """
def __init__(self): def __init__(self):
self._selected_indexes = [] self._selected_indexes = []
#--- Private # --- Private
def _check_selection_range(self): def _check_selection_range(self):
if not self: if not self:
self._selected_indexes = [] self._selected_indexes = []
if not self._selected_indexes: if not self._selected_indexes:
return return
self._selected_indexes = [index for index in self._selected_indexes if index < len(self)] self._selected_indexes = [
index for index in self._selected_indexes if index < len(self)
]
if not self._selected_indexes: if not self._selected_indexes:
self._selected_indexes = [len(self) - 1] self._selected_indexes = [len(self) - 1]
#--- Virtual # --- Virtual
def _update_selection(self): def _update_selection(self):
"""(Virtual) Updates the model's selection appropriately. """(Virtual) Updates the model's selection appropriately.
Called after selection has been updated. Takes the table's selection and does appropriate Called after selection has been updated. Takes the table's selection and does appropriate
updates on the view and/or model. Common sense would dictate that when the selection doesn't updates on the view and/or model. Common sense would dictate that when the selection doesn't
change, we don't update anything (and thus don't call ``_update_selection()`` at all), but change, we don't update anything (and thus don't call ``_update_selection()`` at all), but
there are cases where it's false. For example, if our list updates its items but doesn't there are cases where it's false. For example, if our list updates its items but doesn't
change its selection, we probably want to update the model's selection. change its selection, we probably want to update the model's selection.
By default, does nothing. By default, does nothing.
Important note: This is only called on :meth:`select`, not on changes to Important note: This is only called on :meth:`select`, not on changes to
:attr:`selected_indexes`. :attr:`selected_indexes`.
""" """
# A redesign of how this whole thing works is probably in order, but not now, there's too # A redesign of how this whole thing works is probably in order, but not now, there's too
# much breakage at once involved. # much breakage at once involved.
#--- Public # --- Public
def select(self, indexes): def select(self, indexes):
"""Update selection to ``indexes``. """Update selection to ``indexes``.
:meth:`_update_selection` is called afterwards. :meth:`_update_selection` is called afterwards.
:param list indexes: List of ``int`` that is to become the new selection. :param list indexes: List of ``int`` that is to become the new selection.
""" """
if isinstance(indexes, int): if isinstance(indexes, int):
indexes = [indexes] indexes = [indexes]
self.selected_indexes = indexes self.selected_indexes = indexes
self._update_selection() self._update_selection()
#--- Properties # --- Properties
@property @property
def selected_index(self): def selected_index(self):
"""Points to the first selected index. """Points to the first selected index.
*int*. *get/set*. *int*. *get/set*.
Thin wrapper around :attr:`selected_indexes`. ``None`` if selection is empty. Using this Thin wrapper around :attr:`selected_indexes`. ``None`` if selection is empty. Using this
property only makes sense if your selectable sequence supports single selection only. property only makes sense if your selectable sequence supports single selection only.
""" """
return self._selected_indexes[0] if self._selected_indexes else None return self._selected_indexes[0] if self._selected_indexes else None
@selected_index.setter @selected_index.setter
def selected_index(self, value): def selected_index(self, value):
self.selected_indexes = [value] self.selected_indexes = [value]
@property @property
def selected_indexes(self): def selected_indexes(self):
"""List of selected indexes. """List of selected indexes.
*list of int*. *get/set*. *list of int*. *get/set*.
When setting the value, automatically removes out-of-bounds indexes. The list is kept When setting the value, automatically removes out-of-bounds indexes. The list is kept
sorted. sorted.
""" """
return self._selected_indexes return self._selected_indexes
@selected_indexes.setter @selected_indexes.setter
def selected_indexes(self, value): def selected_indexes(self, value):
self._selected_indexes = value self._selected_indexes = value
@ -96,53 +100,54 @@ class Selectable(Sequence):
class SelectableList(MutableSequence, Selectable): class SelectableList(MutableSequence, Selectable):
"""A list that can manage selection of its items. """A list that can manage selection of its items.
Subclasses :class:`Selectable`. Behaves like a ``list``. Subclasses :class:`Selectable`. Behaves like a ``list``.
""" """
def __init__(self, items=None): def __init__(self, items=None):
Selectable.__init__(self) Selectable.__init__(self)
if items: if items:
self._items = list(items) self._items = list(items)
else: else:
self._items = [] self._items = []
def __delitem__(self, key): def __delitem__(self, key):
self._items.__delitem__(key) self._items.__delitem__(key)
self._check_selection_range() self._check_selection_range()
self._on_change() self._on_change()
def __getitem__(self, key): def __getitem__(self, key):
return self._items.__getitem__(key) return self._items.__getitem__(key)
def __len__(self): def __len__(self):
return len(self._items) return len(self._items)
def __setitem__(self, key, value): def __setitem__(self, key, value):
self._items.__setitem__(key, value) self._items.__setitem__(key, value)
self._on_change() self._on_change()
#--- Override # --- Override
def append(self, item): def append(self, item):
self._items.append(item) self._items.append(item)
self._on_change() self._on_change()
def insert(self, index, item): def insert(self, index, item):
self._items.insert(index, item) self._items.insert(index, item)
self._on_change() self._on_change()
def remove(self, row): def remove(self, row):
self._items.remove(row) self._items.remove(row)
self._check_selection_range() self._check_selection_range()
self._on_change() self._on_change()
#--- Virtual # --- Virtual
def _on_change(self): def _on_change(self):
"""(Virtual) Called whenever the contents of the list changes. """(Virtual) Called whenever the contents of the list changes.
By default, does nothing. By default, does nothing.
""" """
#--- Public # --- Public
def search_by_prefix(self, prefix): def search_by_prefix(self, prefix):
# XXX Why the heck is this method here? # XXX Why the heck is this method here?
prefix = prefix.lower() prefix = prefix.lower()
@ -150,59 +155,62 @@ class SelectableList(MutableSequence, Selectable):
if s.lower().startswith(prefix): if s.lower().startswith(prefix):
return index return index
return -1 return -1
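A quick sketch of the list behaving as described, including the out-of-range trimming from :meth:`_check_selection_range`:

fruits = SelectableList(["apple", "banana", "cherry"])
fruits.select(2)
del fruits[2]                        # selection is clamped to the last item
assert fruits.selected_indexes == [1]
assert fruits.search_by_prefix("BA") == 1   # matching is case-insensitive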
class GUISelectableListView: class GUISelectableListView:
"""Expected interface for :class:`GUISelectableList`'s view. """Expected interface for :class:`GUISelectableList`'s view.
*Not actually used in the code. For documentation purposes only.* *Not actually used in the code. For documentation purposes only.*
Our view, some kind of list view or combobox, is expected to sync with the list's contents by Our view, some kind of list view or combobox, is expected to sync with the list's contents by
responding appropriately to all callbacks in this interface. responding appropriately to all callbacks in this interface.
""" """
def refresh(self): def refresh(self):
"""Refreshes the contents of the list widget. """Refreshes the contents of the list widget.
Ensures that the contents of the list widget is synced with the model. Ensures that the contents of the list widget is synced with the model.
""" """
def update_selection(self): def update_selection(self):
"""Update selection status. """Update selection status.
Ensures that the list widget's selection is in sync with the model. Ensures that the list widget's selection is in sync with the model.
""" """
class GUISelectableList(SelectableList, GUIObject): class GUISelectableList(SelectableList, GUIObject):
"""Cross-toolkit GUI-enabled list view. """Cross-toolkit GUI-enabled list view.
Represents a UI element presenting the user with a selectable list of items. Represents a UI element presenting the user with a selectable list of items.
Subclasses :class:`SelectableList` and :class:`.GUIObject`. Expected view: Subclasses :class:`SelectableList` and :class:`.GUIObject`. Expected view:
:class:`GUISelectableListView`. :class:`GUISelectableListView`.
:param iterable items: If specified, items to fill the list with initially. :param iterable items: If specified, items to fill the list with initially.
""" """
def __init__(self, items=None): def __init__(self, items=None):
SelectableList.__init__(self, items) SelectableList.__init__(self, items)
GUIObject.__init__(self) GUIObject.__init__(self)
def _view_updated(self): def _view_updated(self):
"""Refreshes the view contents with :meth:`GUISelectableListView.refresh`. """Refreshes the view contents with :meth:`GUISelectableListView.refresh`.
Overrides :meth:`~hscommon.gui.base.GUIObject._view_updated`. Overrides :meth:`~hscommon.gui.base.GUIObject._view_updated`.
""" """
self.view.refresh() self.view.refresh()
def _update_selection(self): def _update_selection(self):
"""Refreshes the view selection with :meth:`GUISelectableListView.update_selection`. """Refreshes the view selection with :meth:`GUISelectableListView.update_selection`.
Overrides :meth:`Selectable._update_selection`. Overrides :meth:`Selectable._update_selection`.
""" """
self.view.update_selection() self.view.update_selection()
def _on_change(self): def _on_change(self):
"""Refreshes the view contents with :meth:`GUISelectableListView.refresh`. """Refreshes the view contents with :meth:`GUISelectableListView.refresh`.
Overrides :meth:`SelectableList._on_change`. Overrides :meth:`SelectableList._on_change`.
""" """
self.view.refresh() self.view.refresh()

View File

@ -11,6 +11,7 @@ from collections import MutableSequence, namedtuple
from .base import GUIObject from .base import GUIObject
from .selectable_list import Selectable from .selectable_list import Selectable
# We used to directly subclass list, but it caused problems at some point with deepcopy # We used to directly subclass list, but it caused problems at some point with deepcopy
class Table(MutableSequence, Selectable): class Table(MutableSequence, Selectable):
"""Sortable and selectable sequence of :class:`Row`. """Sortable and selectable sequence of :class:`Row`.
@ -24,6 +25,7 @@ class Table(MutableSequence, Selectable):
Subclasses :class:`.Selectable`. Subclasses :class:`.Selectable`.
""" """
def __init__(self): def __init__(self):
Selectable.__init__(self) Selectable.__init__(self)
self._rows = [] self._rows = []
@ -101,7 +103,7 @@ class Table(MutableSequence, Selectable):
if self._footer is not None: if self._footer is not None:
self._rows.append(self._footer) self._rows.append(self._footer)
#--- Properties # --- Properties
@property @property
def footer(self): def footer(self):
"""If set, a row that always stay at the bottom of the table. """If set, a row that always stay at the bottom of the table.
@ -216,6 +218,7 @@ class GUITableView:
Whenever the user changes the selection, we expect the view to call :meth:`Table.select`. Whenever the user changes the selection, we expect the view to call :meth:`Table.select`.
""" """
def refresh(self): def refresh(self):
"""Refreshes the contents of the table widget. """Refreshes the contents of the table widget.
@ -238,7 +241,9 @@ class GUITableView:
""" """
SortDescriptor = namedtuple('SortDescriptor', 'column desc') SortDescriptor = namedtuple("SortDescriptor", "column desc")
class GUITable(Table, GUIObject): class GUITable(Table, GUIObject):
"""Cross-toolkit GUI-enabled table view. """Cross-toolkit GUI-enabled table view.
@ -254,6 +259,7 @@ class GUITable(Table, GUIObject):
Subclasses :class:`Table` and :class:`.GUIObject`. Expected view: Subclasses :class:`Table` and :class:`.GUIObject`. Expected view:
:class:`GUITableView`. :class:`GUITableView`.
""" """
def __init__(self): def __init__(self):
GUIObject.__init__(self) GUIObject.__init__(self)
Table.__init__(self) Table.__init__(self)
@ -261,7 +267,7 @@ class GUITable(Table, GUIObject):
self.edited = None self.edited = None
self._sort_descriptor = None self._sort_descriptor = None
#--- Virtual # --- Virtual
def _do_add(self): def _do_add(self):
"""(Virtual) Creates a new row, adds it in the table. """(Virtual) Creates a new row, adds it in the table.
@ -309,7 +315,7 @@ class GUITable(Table, GUIObject):
else: else:
self.select([len(self) - 1]) self.select([len(self) - 1])
#--- Public # --- Public
def add(self): def add(self):
"""Add a new row in edit mode. """Add a new row in edit mode.
@ -444,6 +450,7 @@ class Row:
Of course, this is only default behavior. This can be overridden. Of course, this is only default behavior. This can be overridden.
""" """
def __init__(self, table): def __init__(self, table):
super(Row, self).__init__() super(Row, self).__init__()
self.table = table self.table = table
@ -454,7 +461,7 @@ class Row:
assert self.table.edited is None assert self.table.edited is None
self.table.edited = self self.table.edited = self
#--- Virtual # --- Virtual
def can_edit(self): def can_edit(self):
"""(Virtual) Whether the whole row can be edited. """(Virtual) Whether the whole row can be edited.
@ -489,11 +496,11 @@ class Row:
there's none, raises ``AttributeError``. there's none, raises ``AttributeError``.
""" """
try: try:
return getattr(self, '_' + column_name) return getattr(self, "_" + column_name)
except AttributeError: except AttributeError:
return getattr(self, column_name) return getattr(self, column_name)
#--- Public # --- Public
def can_edit_cell(self, column_name): def can_edit_cell(self, column_name):
"""Returns whether cell for column ``column_name`` can be edited. """Returns whether cell for column ``column_name`` can be edited.
@ -511,18 +518,18 @@ class Row:
return False return False
# '_' is in case column is a python keyword # '_' is in case column is a python keyword
if not hasattr(self, column_name): if not hasattr(self, column_name):
if hasattr(self, column_name + '_'): if hasattr(self, column_name + "_"):
column_name = column_name + '_' column_name = column_name + "_"
else: else:
return False return False
if hasattr(self, 'can_edit_' + column_name): if hasattr(self, "can_edit_" + column_name):
return getattr(self, 'can_edit_' + column_name) return getattr(self, "can_edit_" + column_name)
# If the row has a settable property, we can edit the cell # If the row has a settable property, we can edit the cell
rowclass = self.__class__ rowclass = self.__class__
prop = getattr(rowclass, column_name, None) prop = getattr(rowclass, column_name, None)
if prop is None: if prop is None:
return False return False
return bool(getattr(prop, 'fset', None)) return bool(getattr(prop, "fset", None))
def get_cell_value(self, attrname): def get_cell_value(self, attrname):
"""Get cell value for ``attrname``. """Get cell value for ``attrname``.
@ -530,8 +537,8 @@ class Row:
By default, does a simple ``getattr()``, but it is used to allow subclasses to have By default, does a simple ``getattr()``, but it is used to allow subclasses to have
alternative value storage mechanisms. alternative value storage mechanisms.
""" """
if attrname == 'from': if attrname == "from":
attrname = 'from_' attrname = "from_"
return getattr(self, attrname) return getattr(self, attrname)
def set_cell_value(self, attrname, value): def set_cell_value(self, attrname, value):
@ -540,7 +547,6 @@ class Row:
By default, does a simple ``setattr()``, but it is used to allow subclasses to have By default, does a simple ``setattr()``, but it is used to allow subclasses to have
alternative value storage mechanisms. alternative value storage mechanisms.
""" """
if attrname == 'from': if attrname == "from":
attrname = 'from_' attrname = "from_"
setattr(self, attrname, value) setattr(self, attrname, value)
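Pulling the cell rules together, a hedged sketch of a row (``DemoRow`` and ``table`` are hypothetical): ``from`` is routed to ``from_`` because it is a keyword, and ``amount`` is editable because it exposes a property setter.

class DemoRow(Row):
    def __init__(self, table):
        Row.__init__(self, table)
        self.from_ = "savings"
        self._amount = 0

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

row = DemoRow(table)
row.set_cell_value("from", "checking")   # transparently becomes from_
assert row.get_cell_value("from") == "checking"
# can_edit_cell("amount") is True here: settable property, no can_edit_amount
# override (assuming the default can_edit() allows editing)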

View File

@ -1,102 +1,106 @@
# Created On: 2012/01/23 # Created On: 2012/01/23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from .base import GUIObject from .base import GUIObject
from ..util import nonone from ..util import nonone
class TextFieldView: class TextFieldView:
"""Expected interface for :class:`TextField`'s view. """Expected interface for :class:`TextField`'s view.
*Not actually used in the code. For documentation purposes only.* *Not actually used in the code. For documentation purposes only.*
Our view is expected to sync with :attr:`TextField.text` "both ways", that is, update the Our view is expected to sync with :attr:`TextField.text` "both ways", that is, update the
model's text when the user types something, but also update the text field when :meth:`refresh` model's text when the user types something, but also update the text field when :meth:`refresh`
is called. is called.
""" """
def refresh(self): def refresh(self):
"""Refreshes the contents of the input widget. """Refreshes the contents of the input widget.
Ensures that the contents of the input widget is actually :attr:`TextField.text`. Ensures that the contents of the input widget is actually :attr:`TextField.text`.
""" """
class TextField(GUIObject): class TextField(GUIObject):
"""Cross-toolkit text field. """Cross-toolkit text field.
Represents a UI element allowing the user to input a text value. Its main attribute is Represents a UI element allowing the user to input a text value. Its main attribute is
:attr:`text` which acts as the store of the said value. :attr:`text` which acts as the store of the said value.
When our model value isn't a string, we have a built-in parsing/formatting mechanism allowing When our model value isn't a string, we have a built-in parsing/formatting mechanism allowing
us to directly retrieve/set our non-string value through :attr:`value`. us to directly retrieve/set our non-string value through :attr:`value`.
Subclasses :class:`.GUIObject`. Expected view: :class:`TextFieldView`. Subclasses :class:`.GUIObject`. Expected view: :class:`TextFieldView`.
""" """
def __init__(self): def __init__(self):
GUIObject.__init__(self) GUIObject.__init__(self)
self._text = '' self._text = ""
self._value = None self._value = None
#--- Virtual # --- Virtual
def _parse(self, text): def _parse(self, text):
"""(Virtual) Parses ``text`` to put into :attr:`value`. """(Virtual) Parses ``text`` to put into :attr:`value`.
Returns the parsed version of ``text``. Called whenever :attr:`text` changes. Returns the parsed version of ``text``. Called whenever :attr:`text` changes.
""" """
return text return text
def _format(self, value): def _format(self, value):
"""(Virtual) Formats ``value`` to put into :attr:`text`. """(Virtual) Formats ``value`` to put into :attr:`text`.
Returns the formatted version of ``value``. Called whenever :attr:`value` changes. Returns the formatted version of ``value``. Called whenever :attr:`value` changes.
""" """
return value return value
def _update(self, newvalue): def _update(self, newvalue):
"""(Virtual) Called whenever we have a new value. """(Virtual) Called whenever we have a new value.
Whenever our text/value store changes to a new value (different from the old one), this Whenever our text/value store changes to a new value (different from the old one), this
method is called. By default, it does nothing, but you can override it if you want. method is called. By default, it does nothing, but you can override it if you want.
""" """
#--- Override # --- Override
def _view_updated(self): def _view_updated(self):
self.view.refresh() self.view.refresh()
#--- Public # --- Public
def refresh(self): def refresh(self):
"""Triggers a view :meth:`~TextFieldView.refresh`. """Triggers a view :meth:`~TextFieldView.refresh`.
""" """
self.view.refresh() self.view.refresh()
@property @property
def text(self): def text(self):
"""The text that is currently displayed in the widget. """The text that is currently displayed in the widget.
*str*. *get/set*. *str*. *get/set*.
This property can be set. When it is, :meth:`refresh` is called and the view is synced with This property can be set. When it is, :meth:`refresh` is called and the view is synced with
our value. Always in sync with :attr:`value`. our value. Always in sync with :attr:`value`.
""" """
return self._text return self._text
@text.setter @text.setter
def text(self, newtext): def text(self, newtext):
self.value = self._parse(nonone(newtext, '')) self.value = self._parse(nonone(newtext, ""))
@property @property
def value(self): def value(self):
"""The "parsed" representation of :attr:`text`. """The "parsed" representation of :attr:`text`.
*arbitrary type*. *get/set*. *arbitrary type*. *get/set*.
By default, it's a mirror of :attr:`text`, but a subclass can override :meth:`_parse` and By default, it's a mirror of :attr:`text`, but a subclass can override :meth:`_parse` and
:meth:`_format` to have anything else. Always in sync with :attr:`text`. :meth:`_format` to have anything else. Always in sync with :attr:`text`.
""" """
return self._value return self._value
@value.setter @value.setter
def value(self, newvalue): def value(self, newvalue):
if newvalue == self._value: if newvalue == self._value:
@ -105,4 +109,3 @@ class TextField(GUIObject):
self._text = self._format(newvalue) self._text = self._format(newvalue)
self._update(self._value) self._update(self._value)
self.refresh() self.refresh()
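To make the parse/format mechanism concrete, a minimal sketch of a TextField subclass that holds an integer; IntField is an invented name for illustration:

class IntField(TextField):
    def _parse(self, text):
        # Convert widget text to the stored value; fall back to 0 so
        # `value` always stays an int even on invalid input.
        try:
            return int(text)
        except ValueError:
            return 0

    def _format(self, value):
        # Convert the stored int back to the text shown in the widget.
        return str(value)

With a view attached, setting field.text = "42" leaves field.value == 42, and setting field.value = 7 reformats the text to "7" and refreshes the view.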


@ -1,16 +1,17 @@
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from collections import MutableSequence from collections import MutableSequence
from .base import GUIObject from .base import GUIObject
class Node(MutableSequence): class Node(MutableSequence):
"""Pretty bland node implementation to be used in a :class:`Tree`. """Pretty bland node implementation to be used in a :class:`Tree`.
It has a :attr:`parent`, behaves like a list, its content being its children. Link integrity It has a :attr:`parent`, behaves like a list, its content being its children. Link integrity
is somewhat enforced (adding a child to a node will set the child's :attr:`parent`, but that's is somewhat enforced (adding a child to a node will set the child's :attr:`parent`, but that's
pretty much as far as we go, integrity-wise). Nodes don't tend to move around much in a GUI pretty much as far as we go, integrity-wise). Nodes don't tend to move around much in a GUI
@ -19,57 +20,58 @@ class Node(MutableSequence):
Nodes are designed to be subclassed and given meaningful attributes (those you'll want to Nodes are designed to be subclassed and given meaningful attributes (those you'll want to
display in your tree view), but they all have a :attr:`name`, which is given on initialization. display in your tree view), but they all have a :attr:`name`, which is given on initialization.
""" """
def __init__(self, name): def __init__(self, name):
self._name = name self._name = name
self._parent = None self._parent = None
self._path = None self._path = None
self._children = [] self._children = []
def __repr__(self): def __repr__(self):
return '<Node %r>' % self.name return "<Node %r>" % self.name
#--- MutableSequence overrides # --- MutableSequence overrides
def __delitem__(self, key): def __delitem__(self, key):
self._children.__delitem__(key) self._children.__delitem__(key)
def __getitem__(self, key): def __getitem__(self, key):
return self._children.__getitem__(key) return self._children.__getitem__(key)
def __len__(self): def __len__(self):
return len(self._children) return len(self._children)
def __setitem__(self, key, value): def __setitem__(self, key, value):
self._children.__setitem__(key, value) self._children.__setitem__(key, value)
def append(self, node): def append(self, node):
self._children.append(node) self._children.append(node)
node._parent = self node._parent = self
node._path = None node._path = None
def insert(self, index, node): def insert(self, index, node):
self._children.insert(index, node) self._children.insert(index, node)
node._parent = self node._parent = self
node._path = None node._path = None
#--- Public # --- Public
def clear(self): def clear(self):
"""Clears the node of all its children. """Clears the node of all its children.
""" """
del self[:] del self[:]
def find(self, predicate, include_self=True): def find(self, predicate, include_self=True):
"""Return the first child to match ``predicate``. """Return the first child to match ``predicate``.
See :meth:`findall`. See :meth:`findall`.
""" """
try: try:
return next(self.findall(predicate, include_self=include_self)) return next(self.findall(predicate, include_self=include_self))
except StopIteration: except StopIteration:
return None return None
def findall(self, predicate, include_self=True): def findall(self, predicate, include_self=True):
"""Yield all children matching ``predicate``. """Yield all children matching ``predicate``.
:param predicate: ``f(node) --> bool`` :param predicate: ``f(node) --> bool``
:param include_self: Whether we can return ``self`` or we return only children. :param include_self: Whether we can return ``self`` or we return only children.
""" """
@ -78,10 +80,10 @@ class Node(MutableSequence):
for child in self: for child in self:
for found in child.findall(predicate, include_self=True): for found in child.findall(predicate, include_self=True):
yield found yield found
def get_node(self, index_path): def get_node(self, index_path):
"""Returns the node at ``index_path``. """Returns the node at ``index_path``.
:param index_path: a list of int indexes leading to our node. See :attr:`path`. :param index_path: a list of int indexes leading to our node. See :attr:`path`.
""" """
result = self result = self
@ -89,40 +91,40 @@ class Node(MutableSequence):
for index in index_path: for index in index_path:
result = result[index] result = result[index]
return result return result
def get_path(self, target_node): def get_path(self, target_node):
"""Returns the :attr:`path` of ``target_node``. """Returns the :attr:`path` of ``target_node``.
If ``target_node`` is ``None``, returns ``None``. If ``target_node`` is ``None``, returns ``None``.
""" """
if target_node is None: if target_node is None:
return None return None
return target_node.path return target_node.path
@property @property
def children_count(self): def children_count(self):
"""Same as ``len(self)``. """Same as ``len(self)``.
""" """
return len(self) return len(self)
@property @property
def name(self): def name(self):
"""Name for the node, supplied on init. """Name for the node, supplied on init.
""" """
return self._name return self._name
@property @property
def parent(self): def parent(self):
"""Parent of the node. """Parent of the node.
If ``None``, we have a root node. If ``None``, we have a root node.
""" """
return self._parent return self._parent
@property @property
def path(self): def path(self):
"""A list of node indexes leading from the root node to ``self``. """A list of node indexes leading from the root node to ``self``.
The path of a node is always related to its :attr:`root`. It's the sequence of indexes that The path of a node is always related to its :attr:`root`. It's the sequence of indexes that
we have to take to get to our node, starting from the root. For example, if we have to take to get to our node, starting from the root. For example, if
``node.path == [1, 2, 3, 4]``, it means that ``node.root[1][2][3][4] is node``. ``node.path == [1, 2, 3, 4]``, it means that ``node.root[1][2][3][4] is node``.
@ -133,112 +135,113 @@ class Node(MutableSequence):
else: else:
self._path = self._parent.path + [self._parent.index(self)] self._path = self._parent.path + [self._parent.index(self)]
return self._path return self._path
@property @property
def root(self): def root(self):
"""Root node of current node. """Root node of current node.
To get it, we recursively follow our :attr:`parent` chain until we have ``None``. To get it, we recursively follow our :attr:`parent` chain until we have ``None``.
""" """
if self._parent is None: if self._parent is None:
return self return self
else: else:
return self._parent.root return self._parent.root
class Tree(Node, GUIObject): class Tree(Node, GUIObject):
"""Cross-toolkit GUI-enabled tree view. """Cross-toolkit GUI-enabled tree view.
This class is a bit too thin to be used as a tree view controller out of the box and HS apps This class is a bit too thin to be used as a tree view controller out of the box and HS apps
that subclass it each add quite a bit of logic to make it workable. Making this more that subclass it each add quite a bit of logic to make it workable. Making this more
usable out of the box is a work in progress. usable out of the box is a work in progress.
This class is here (in addition to being a :class:`Node`) mostly to handle selection. This class is here (in addition to being a :class:`Node`) mostly to handle selection.
Subclasses :class:`Node` (it is the root node of all its children) and :class:`.GUIObject`. Subclasses :class:`Node` (it is the root node of all its children) and :class:`.GUIObject`.
""" """
def __init__(self): def __init__(self):
Node.__init__(self, '') Node.__init__(self, "")
GUIObject.__init__(self) GUIObject.__init__(self)
#: Where we store selected nodes (as a list of :class:`Node`) #: Where we store selected nodes (as a list of :class:`Node`)
self._selected_nodes = [] self._selected_nodes = []
#--- Virtual # --- Virtual
def _select_nodes(self, nodes): def _select_nodes(self, nodes):
"""(Virtual) Customize node selection behavior. """(Virtual) Customize node selection behavior.
By default, simply set :attr:`_selected_nodes`. By default, simply set :attr:`_selected_nodes`.
""" """
self._selected_nodes = nodes self._selected_nodes = nodes
#--- Override # --- Override
def _view_updated(self): def _view_updated(self):
self.view.refresh() self.view.refresh()
def clear(self): def clear(self):
self._selected_nodes = [] self._selected_nodes = []
Node.clear(self) Node.clear(self)
#--- Public # --- Public
@property @property
def selected_node(self): def selected_node(self):
"""Currently selected node. """Currently selected node.
*:class:`Node`*. *get/set*. *:class:`Node`*. *get/set*.
First of :attr:`selected_nodes`. ``None`` if empty. First of :attr:`selected_nodes`. ``None`` if empty.
""" """
return self._selected_nodes[0] if self._selected_nodes else None return self._selected_nodes[0] if self._selected_nodes else None
@selected_node.setter @selected_node.setter
def selected_node(self, node): def selected_node(self, node):
if node is not None: if node is not None:
self._select_nodes([node]) self._select_nodes([node])
else: else:
self._select_nodes([]) self._select_nodes([])
@property @property
def selected_nodes(self): def selected_nodes(self):
"""List of selected nodes in the tree. """List of selected nodes in the tree.
*List of :class:`Node`*. *get/set*. *List of :class:`Node`*. *get/set*.
We use nodes instead of indexes to store selection because it's simpler when it's time to We use nodes instead of indexes to store selection because it's simpler when it's time to
manage selection of multiple node levels. manage selection of multiple node levels.
""" """
return self._selected_nodes return self._selected_nodes
@selected_nodes.setter @selected_nodes.setter
def selected_nodes(self, nodes): def selected_nodes(self, nodes):
self._select_nodes(nodes) self._select_nodes(nodes)
@property @property
def selected_path(self): def selected_path(self):
"""Currently selected path. """Currently selected path.
*:attr:`Node.path`*. *get/set*. *:attr:`Node.path`*. *get/set*.
First of :attr:`selected_paths`. ``None`` if empty. First of :attr:`selected_paths`. ``None`` if empty.
""" """
return self.get_path(self.selected_node) return self.get_path(self.selected_node)
@selected_path.setter @selected_path.setter
def selected_path(self, index_path): def selected_path(self, index_path):
if index_path is not None: if index_path is not None:
self.selected_paths = [index_path] self.selected_paths = [index_path]
else: else:
self._select_nodes([]) self._select_nodes([])
@property @property
def selected_paths(self): def selected_paths(self):
"""List of selected paths in the tree. """List of selected paths in the tree.
*List of :attr:`Node.path`*. *get/set*. *List of :attr:`Node.path`*. *get/set*.
Computed from :attr:`selected_nodes`. Computed from :attr:`selected_nodes`.
""" """
return list(map(self.get_path, self._selected_nodes)) return list(map(self.get_path, self._selected_nodes))
@selected_paths.setter @selected_paths.setter
def selected_paths(self, index_paths): def selected_paths(self, index_paths):
nodes = [] nodes = []
@ -248,4 +251,3 @@ class Tree(Node, GUIObject):
except IndexError: except IndexError:
pass pass
self._select_nodes(nodes) self._select_nodes(nodes)
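A brief usage sketch of the Node API above; the node names are illustrative:

root = Node("root")
child = Node("child")
root.append(child)                  # append() also sets child's parent
child.append(Node("leaf"))

assert child.path == [0]                       # first child of the root
assert root.get_node([0, 0]).name == "leaf"
found = root.find(lambda n: n.name == "leaf")
assert found.parent is child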


@ -6,15 +6,19 @@
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
class JobCancelled(Exception): class JobCancelled(Exception):
"The user has cancelled the job" "The user has cancelled the job"
class JobInProgressError(Exception): class JobInProgressError(Exception):
"A job is already being performed, you can't perform more than one at the same time." "A job is already being performed, you can't perform more than one at the same time."
class JobCountError(Exception): class JobCountError(Exception):
"The number of jobs started have exceeded the number of jobs allowed" "The number of jobs started have exceeded the number of jobs allowed"
class Job: class Job:
"""Manages a job's progression and return it's progression through a callback. """Manages a job's progression and return it's progression through a callback.
@ -30,14 +34,15 @@ class Job:
Another one is that nothing stops you from calling add_progress right after Another one is that nothing stops you from calling add_progress right after
SkipJob. SkipJob.
""" """
#---Magic functions
# ---Magic functions
def __init__(self, job_proportions, callback): def __init__(self, job_proportions, callback):
"""Initialize the Job with 'jobcount' jobs. Start every job with """Initialize the Job with 'jobcount' jobs. Start every job with
start_job(). Every time the job progress is updated, 'callback' is called. start_job(). Every time the job progress is updated, 'callback' is called.
'callback' takes a 'progress' int param and an optional 'desc' 'callback' takes a 'progress' int param and an optional 'desc'
parameter. Callback must return False if the job must be cancelled. parameter. Callback must return False if the job must be cancelled.
""" """
if not hasattr(callback, '__call__'): if not hasattr(callback, "__call__"):
raise TypeError("'callback' MUST be set when creating a Job") raise TypeError("'callback' MUST be set when creating a Job")
if isinstance(job_proportions, int): if isinstance(job_proportions, int):
job_proportions = [1] * job_proportions job_proportions = [1] * job_proportions
@ -49,12 +54,12 @@ class Job:
self._progress = 0 self._progress = 0
self._currmax = 1 self._currmax = 1
#---Private # ---Private
def _subjob_callback(self, progress, desc=''): def _subjob_callback(self, progress, desc=""):
"""This is the callback passed to children jobs. """This is the callback passed to children jobs.
""" """
self.set_progress(progress, desc) self.set_progress(progress, desc)
return True #if JobCancelled has to be raised, it will be at the highest level return True # if JobCancelled has to be raised, it will be at the highest level
def _do_update(self, desc): def _do_update(self, desc):
"""Calls the callback function with a % progress as a parameter. """Calls the callback function with a % progress as a parameter.
@ -67,18 +72,18 @@ class Job:
total_progress = self._jobcount * self._currmax total_progress = self._jobcount * self._currmax
progress = ((passed_progress + current_progress) * 100) // total_progress progress = ((passed_progress + current_progress) * 100) // total_progress
else: else:
progress = -1 # indeterminate progress = -1 # indeterminate
# It's possible that callback doesn't support a desc arg # It's possible that callback doesn't support a desc arg
result = self._callback(progress, desc) if desc else self._callback(progress) result = self._callback(progress, desc) if desc else self._callback(progress)
if not result: if not result:
raise JobCancelled() raise JobCancelled()
#---Public # ---Public
def add_progress(self, progress=1, desc=''): def add_progress(self, progress=1, desc=""):
self.set_progress(self._progress + progress, desc) self.set_progress(self._progress + progress, desc)
def check_if_cancelled(self): def check_if_cancelled(self):
self._do_update('') self._do_update("")
def iter_with_progress(self, iterable, desc_format=None, every=1, count=None): def iter_with_progress(self, iterable, desc_format=None, every=1, count=None):
"""Iterate through ``iterable`` while automatically adding progress. """Iterate through ``iterable`` while automatically adding progress.
@ -89,7 +94,7 @@ class Job:
""" """
if count is None: if count is None:
count = len(iterable) count = len(iterable)
desc = '' desc = ""
if desc_format: if desc_format:
desc = desc_format % (0, count) desc = desc_format % (0, count)
self.start_job(count, desc) self.start_job(count, desc)
@ -103,7 +108,7 @@ class Job:
desc = desc_format % (count, count) desc = desc_format % (count, count)
self.set_progress(100, desc) self.set_progress(100, desc)
def start_job(self, max_progress=100, desc=''): def start_job(self, max_progress=100, desc=""):
"""Begin work on the next job. You must not call start_job more than """Begin work on the next job. You must not call start_job more than
'jobcount' (in __init__) times. 'jobcount' (in __init__) times.
'max_progress' is the number of work units in the job. 'max_progress' is the number of work units in the job.
@ -118,7 +123,7 @@ class Job:
self._currmax = max(1, max_progress) self._currmax = max(1, max_progress)
self._do_update(desc) self._do_update(desc)
def start_subjob(self, job_proportions, desc=''): def start_subjob(self, job_proportions, desc=""):
"""Starts a sub job. Use this when you want to split a job into """Starts a sub job. Use this when you want to split a job into
multiple smaller jobs. Pretty handy when starting a process where you multiple smaller jobs. Pretty handy when starting a process where you
know how many subjobs you will have, but don't know the work unit count know how many subjobs you will have, but don't know the work unit count
@ -128,7 +133,7 @@ class Job:
self.start_job(100, desc) self.start_job(100, desc)
return Job(job_proportions, self._subjob_callback) return Job(job_proportions, self._subjob_callback)
def set_progress(self, progress, desc=''): def set_progress(self, progress, desc=""):
"""Sets the progress of the current job to 'progress', and call the """Sets the progress of the current job to 'progress', and call the
callback callback
""" """


@ -1,9 +1,9 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-11-19 # Created On: 2010-11-19
# Copyright 2011 Hardcoded Software (http://www.hardcoded.net) # Copyright 2011 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from threading import Thread from threading import Thread
@ -11,29 +11,31 @@ import sys
from .job import Job, JobInProgressError, JobCancelled from .job import Job, JobInProgressError, JobCancelled
class ThreadedJobPerformer: class ThreadedJobPerformer:
"""Run threaded jobs and track progress. """Run threaded jobs and track progress.
To run a threaded job, first create a job with create_job(), then call run_threaded(), with To run a threaded job, first create a job with create_job(), then call run_threaded(), with
your work function as a parameter. your work function as a parameter.
Example: Example:
j = self._create_job() j = self._create_job()
self._run_threaded(self.some_work_func, (arg1, arg2, j)) self._run_threaded(self.some_work_func, (arg1, arg2, j))
""" """
_job_running = False _job_running = False
last_error = None last_error = None
#--- Protected # --- Protected
def create_job(self): def create_job(self):
if self._job_running: if self._job_running:
raise JobInProgressError() raise JobInProgressError()
self.last_progress = -1 self.last_progress = -1
self.last_desc = '' self.last_desc = ""
self.job_cancelled = False self.job_cancelled = False
return Job(1, self._update_progress) return Job(1, self._update_progress)
def _async_run(self, *args): def _async_run(self, *args):
target = args[0] target = args[0]
args = tuple(args[1:]) args = tuple(args[1:])
@ -49,24 +51,23 @@ class ThreadedJobPerformer:
finally: finally:
self._job_running = False self._job_running = False
self.last_progress = None self.last_progress = None
def reraise_if_error(self): def reraise_if_error(self):
"""Reraises the error that happened in the thread if any. """Reraises the error that happened in the thread if any.
Call this after run_threaded's caller detects that self._job_running has returned to False. Call this after run_threaded's caller detects that self._job_running has returned to False.
""" """
if self.last_error is not None: if self.last_error is not None:
raise self.last_error.with_traceback(self.last_traceback) raise self.last_error.with_traceback(self.last_traceback)
def _update_progress(self, newprogress, newdesc=''): def _update_progress(self, newprogress, newdesc=""):
self.last_progress = newprogress self.last_progress = newprogress
if newdesc: if newdesc:
self.last_desc = newdesc self.last_desc = newdesc
return not self.job_cancelled return not self.job_cancelled
def run_threaded(self, target, args=()): def run_threaded(self, target, args=()):
if self._job_running: if self._job_running:
raise JobInProgressError() raise JobInProgressError()
args = (target, ) + args args = (target,) + args
Thread(target=self._async_run, args=args).start() Thread(target=self._async_run, args=args).start()
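A minimal sketch of using ThreadedJobPerformer, mirroring the class docstring; the Worker class and the polling loop are invented:

import time

class Worker(ThreadedJobPerformer):
    def work(self, j):
        j.start_job(10)
        for _ in range(10):
            time.sleep(0.1)
            j.add_progress()

w = Worker()
j = w.create_job()
w.run_threaded(w.work, args=(j,))
time.sleep(0.05)                # give the thread time to flip _job_running
while w._job_running:           # a UI would poll last_progress/last_desc here
    time.sleep(0.05)
w.reraise_if_error()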


@ -11,17 +11,18 @@ from PyQt5.QtWidgets import QProgressDialog
from . import performer from . import performer
class Progress(QProgressDialog, performer.ThreadedJobPerformer): class Progress(QProgressDialog, performer.ThreadedJobPerformer):
finished = pyqtSignal(['QString']) finished = pyqtSignal(["QString"])
def __init__(self, parent): def __init__(self, parent):
flags = Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowSystemMenuHint flags = Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowSystemMenuHint
QProgressDialog.__init__(self, '', "Cancel", 0, 100, parent, flags) QProgressDialog.__init__(self, "", "Cancel", 0, 100, parent, flags)
self.setModal(True) self.setModal(True)
self.setAutoReset(False) self.setAutoReset(False)
self.setAutoClose(False) self.setAutoClose(False)
self._timer = QTimer() self._timer = QTimer()
self._jobid = '' self._jobid = ""
self._timer.timeout.connect(self.updateProgress) self._timer.timeout.connect(self.updateProgress)
def updateProgress(self): def updateProgress(self):
@ -44,9 +45,8 @@ class Progress(QProgressDialog, performer.ThreadedJobPerformer):
def run(self, jobid, title, target, args=()): def run(self, jobid, title, target, args=()):
self._jobid = jobid self._jobid = jobid
self.reset() self.reset()
self.setLabelText('') self.setLabelText("")
self.run_threaded(target, args) self.run_threaded(target, args)
self.setWindowTitle(title) self.setWindowTitle(title)
self.show() self.show()
self._timer.start(500) self._timer.start(500)
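A heavily hedged sketch of launching this dialog: it assumes a QApplication is already running and follows the ThreadedJobPerformer docstring for the Job wiring, since part of run() is elided in this diff:

progress = Progress(parent=some_window)   # some_window is assumed to exist

def long_task(j):
    j.start_job(100, "working")
    for _ in range(100):
        j.add_progress()

j = progress.create_job()                 # inherited from ThreadedJobPerformer
progress.run("mytask", "Please wait", long_task, args=(j,))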


@ -7,26 +7,29 @@ import tempfile
import polib import polib
from . import pygettext from . import pygettext
from .util import modified_after, dedupe, ensure_folder, ensure_file from .util import modified_after, dedupe, ensure_folder
from .build import print_and_do, ensure_empty_folder, copy from .build import print_and_do, ensure_empty_folder
LC_MESSAGES = 'LC_MESSAGES' LC_MESSAGES = "LC_MESSAGES"
# There isn't a 1-on-1 exact fit between .po language codes and cocoa ones # There isn't a 1-on-1 exact fit between .po language codes and cocoa ones
PO2COCOA = { PO2COCOA = {
'pl_PL': 'pl', "pl_PL": "pl",
'pt_BR': 'pt-BR', "pt_BR": "pt-BR",
'zh_CN': 'zh-Hans', "zh_CN": "zh-Hans",
} }
COCOA2PO = {v: k for k, v in PO2COCOA.items()} COCOA2PO = {v: k for k, v in PO2COCOA.items()}
def get_langs(folder): def get_langs(folder):
return [name for name in os.listdir(folder) if op.isdir(op.join(folder, name))] return [name for name in os.listdir(folder) if op.isdir(op.join(folder, name))]
def files_with_ext(folder, ext): def files_with_ext(folder, ext):
return [op.join(folder, fn) for fn in os.listdir(folder) if fn.endswith(ext)] return [op.join(folder, fn) for fn in os.listdir(folder) if fn.endswith(ext)]
def generate_pot(folders, outpath, keywords, merge=False): def generate_pot(folders, outpath, keywords, merge=False):
if merge and not op.exists(outpath): if merge and not op.exists(outpath):
merge = False merge = False
@ -37,21 +40,23 @@ def generate_pot(folders, outpath, keywords, merge=False):
pyfiles = [] pyfiles = []
for folder in folders: for folder in folders:
for root, dirs, filenames in os.walk(folder): for root, dirs, filenames in os.walk(folder):
keep = [fn for fn in filenames if fn.endswith('.py')] keep = [fn for fn in filenames if fn.endswith(".py")]
pyfiles += [op.join(root, fn) for fn in keep] pyfiles += [op.join(root, fn) for fn in keep]
pygettext.main(pyfiles, outpath=genpath, keywords=keywords) pygettext.main(pyfiles, outpath=genpath, keywords=keywords)
if merge: if merge:
merge_po_and_preserve(genpath, outpath) merge_po_and_preserve(genpath, outpath)
os.remove(genpath) os.remove(genpath)
def compile_all_po(base_folder): def compile_all_po(base_folder):
langs = get_langs(base_folder) langs = get_langs(base_folder)
for lang in langs: for lang in langs:
pofolder = op.join(base_folder, lang, LC_MESSAGES) pofolder = op.join(base_folder, lang, LC_MESSAGES)
pofiles = files_with_ext(pofolder, '.po') pofiles = files_with_ext(pofolder, ".po")
for pofile in pofiles: for pofile in pofiles:
p = polib.pofile(pofile) p = polib.pofile(pofile)
p.save_as_mofile(pofile[:-3] + '.mo') p.save_as_mofile(pofile[:-3] + ".mo")
def merge_locale_dir(target, mergeinto): def merge_locale_dir(target, mergeinto):
langs = get_langs(target) langs = get_langs(target)
@ -59,22 +64,24 @@ def merge_locale_dir(target, mergeinto):
if not op.exists(op.join(mergeinto, lang)): if not op.exists(op.join(mergeinto, lang)):
continue continue
mofolder = op.join(target, lang, LC_MESSAGES) mofolder = op.join(target, lang, LC_MESSAGES)
mofiles = files_with_ext(mofolder, '.mo') mofiles = files_with_ext(mofolder, ".mo")
for mofile in mofiles: for mofile in mofiles:
shutil.copy(mofile, op.join(mergeinto, lang, LC_MESSAGES)) shutil.copy(mofile, op.join(mergeinto, lang, LC_MESSAGES))
def merge_pots_into_pos(folder): def merge_pots_into_pos(folder):
# We're going to take each pot file in `folder` and, for each lang, merge it with the po file # We're going to take each pot file in `folder` and, for each lang, merge it with the po file
# of the same name. # of the same name.
potfiles = files_with_ext(folder, '.pot') potfiles = files_with_ext(folder, ".pot")
for potfile in potfiles: for potfile in potfiles:
refpot = polib.pofile(potfile) refpot = polib.pofile(potfile)
refname = op.splitext(op.basename(potfile))[0] refname = op.splitext(op.basename(potfile))[0]
for lang in get_langs(folder): for lang in get_langs(folder):
po = polib.pofile(op.join(folder, lang, LC_MESSAGES, refname + '.po')) po = polib.pofile(op.join(folder, lang, LC_MESSAGES, refname + ".po"))
po.merge(refpot) po.merge(refpot)
po.save() po.save()
def merge_po_and_preserve(source, dest): def merge_po_and_preserve(source, dest):
# Merges source entries into dest, but keeps old entries intact # Merges source entries into dest, but keeps old entries intact
sourcepo = polib.pofile(source) sourcepo = polib.pofile(source)
@ -86,36 +93,41 @@ def merge_po_and_preserve(source, dest):
destpo.append(entry) destpo.append(entry)
destpo.save() destpo.save()
def normalize_all_pos(base_folder): def normalize_all_pos(base_folder):
"""Normalize the format of .po files in base_folder. """Normalize the format of .po files in base_folder.
When getting POs from external sources, such as Transifex, we end up with spurious diffs because When getting POs from external sources, such as Transifex, we end up with spurious diffs because
of a difference in the way line wrapping is handled. It wouldn't be a big deal if it happened of a difference in the way line wrapping is handled. It wouldn't be a big deal if it happened
once, but these spurious diffs keep overwriting each other, and it's annoying. once, but these spurious diffs keep overwriting each other, and it's annoying.
Our PO files will keep polib's format. Call this function to ensure that freshly pulled POs Our PO files will keep polib's format. Call this function to ensure that freshly pulled POs
are of the right format before committing them. are of the right format before committing them.
""" """
langs = get_langs(base_folder) langs = get_langs(base_folder)
for lang in langs: for lang in langs:
pofolder = op.join(base_folder, lang, LC_MESSAGES) pofolder = op.join(base_folder, lang, LC_MESSAGES)
pofiles = files_with_ext(pofolder, '.po') pofiles = files_with_ext(pofolder, ".po")
for pofile in pofiles: for pofile in pofiles:
p = polib.pofile(pofile) p = polib.pofile(pofile)
p.save() p.save()
#--- Cocoa
# --- Cocoa
def all_lproj_paths(folder): def all_lproj_paths(folder):
return files_with_ext(folder, '.lproj') return files_with_ext(folder, ".lproj")
def escape_cocoa_strings(s): def escape_cocoa_strings(s):
return s.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n') return s.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
def unescape_cocoa_strings(s): def unescape_cocoa_strings(s):
return s.replace('\\\\', '\\').replace('\\"', '"').replace('\\n', '\n') return s.replace("\\\\", "\\").replace('\\"', '"').replace("\\n", "\n")
def strings2pot(target, dest): def strings2pot(target, dest):
with open(target, 'rt', encoding='utf-8') as fp: with open(target, "rt", encoding="utf-8") as fp:
contents = fp.read() contents = fp.read()
# We're reading an en.lproj file. We only care about the righthand part of the translation. # We're reading an en.lproj file. We only care about the righthand part of the translation.
re_trans = re.compile(r'".*" = "(.*)";') re_trans = re.compile(r'".*" = "(.*)";')
@ -131,17 +143,21 @@ def strings2pot(target, dest):
entry = polib.POEntry(msgid=s) entry = polib.POEntry(msgid=s)
po.append(entry) po.append(entry)
# we don't know or care about a line number so we put 0 # we don't know or care about a line number so we put 0
entry.occurrences.append((target, '0')) entry.occurrences.append((target, "0"))
entry.occurrences = dedupe(entry.occurrences) entry.occurrences = dedupe(entry.occurrences)
po.save(dest) po.save(dest)
def allstrings2pot(lprojpath, dest, excludes=None): def allstrings2pot(lprojpath, dest, excludes=None):
allstrings = files_with_ext(lprojpath, '.strings') allstrings = files_with_ext(lprojpath, ".strings")
if excludes: if excludes:
allstrings = [p for p in allstrings if op.splitext(op.basename(p))[0] not in excludes] allstrings = [
p for p in allstrings if op.splitext(op.basename(p))[0] not in excludes
]
for strings_path in allstrings: for strings_path in allstrings:
strings2pot(strings_path, dest) strings2pot(strings_path, dest)
def po2strings(pofile, en_strings, dest): def po2strings(pofile, en_strings, dest):
# Takes en_strings and replaces all righthand parts of "foo" = "bar"; entries with translations # Takes en_strings and replaces all righthand parts of "foo" = "bar"; entries with translations
# in pofile, then puts the result in dest. # in pofile, then puts the result in dest.
@ -150,9 +166,10 @@ def po2strings(pofile, en_strings, dest):
return return
ensure_folder(op.dirname(dest)) ensure_folder(op.dirname(dest))
print("Creating {} from {}".format(dest, pofile)) print("Creating {} from {}".format(dest, pofile))
with open(en_strings, 'rt', encoding='utf-8') as fp: with open(en_strings, "rt", encoding="utf-8") as fp:
contents = fp.read() contents = fp.read()
re_trans = re.compile(r'(?<= = ").*(?=";\n)') re_trans = re.compile(r'(?<= = ").*(?=";\n)')
def repl(match): def repl(match):
s = match.group(0) s = match.group(0)
unescaped = unescape_cocoa_strings(s) unescaped = unescape_cocoa_strings(s)
@ -162,10 +179,12 @@ def po2strings(pofile, en_strings, dest):
return s return s
trans = entry.msgstr trans = entry.msgstr
return escape_cocoa_strings(trans) if trans else s return escape_cocoa_strings(trans) if trans else s
contents = re_trans.sub(repl, contents) contents = re_trans.sub(repl, contents)
with open(dest, 'wt', encoding='utf-8') as fp: with open(dest, "wt", encoding="utf-8") as fp:
fp.write(contents) fp.write(contents)
def generate_cocoa_strings_from_code(code_folder, dest_folder): def generate_cocoa_strings_from_code(code_folder, dest_folder):
# Uses the "genstrings" command to generate strings file from all .m files in "code_folder". # Uses the "genstrings" command to generate strings file from all .m files in "code_folder".
# The strings file (their name depends on the localization table used in the source) will be # The strings file (their name depends on the localization table used in the source) will be
@ -173,36 +192,49 @@ def generate_cocoa_strings_from_code(code_folder, dest_folder):
# genstrings produces utf-16 files with comments. After having generated the files, we convert # genstrings produces utf-16 files with comments. After having generated the files, we convert
# them to utf-8 and remove the comments. # them to utf-8 and remove the comments.
ensure_empty_folder(dest_folder) ensure_empty_folder(dest_folder)
print_and_do('genstrings -o "{}" `find "{}" -name *.m | xargs`'.format(dest_folder, code_folder)) print_and_do(
'genstrings -o "{}" `find "{}" -name *.m | xargs`'.format(
dest_folder, code_folder
)
)
for stringsfile in os.listdir(dest_folder): for stringsfile in os.listdir(dest_folder):
stringspath = op.join(dest_folder, stringsfile) stringspath = op.join(dest_folder, stringsfile)
with open(stringspath, 'rt', encoding='utf-16') as fp: with open(stringspath, "rt", encoding="utf-16") as fp:
content = fp.read() content = fp.read()
content = re.sub('/\*.*?\*/', '', content) content = re.sub(r"/\*.*?\*/", "", content)
content = re.sub('\n{2,}', '\n', content) content = re.sub(r"\n{2,}", "\n", content)
# I have no idea why, but genstrings seems to have problems with the "%" character in strings # I have no idea why, but genstrings seems to have problems with the "%" character in strings
# and inserts (number)$ after it. Find these bogus inserts and remove them. # and inserts (number)$ after it. Find these bogus inserts and remove them.
content = re.sub('%\d\$', '%', content) content = re.sub(r"%\d\$", "%", content)
with open(stringspath, 'wt', encoding='utf-8') as fp: with open(stringspath, "wt", encoding="utf-8") as fp:
fp.write(content) fp.write(content)
def generate_cocoa_strings_from_xib(xib_folder): def generate_cocoa_strings_from_xib(xib_folder):
xibs = [op.join(xib_folder, fn) for fn in os.listdir(xib_folder) if fn.endswith('.xib')] xibs = [
op.join(xib_folder, fn) for fn in os.listdir(xib_folder) if fn.endswith(".xib")
]
for xib in xibs: for xib in xibs:
dest = xib.replace('.xib', '.strings') dest = xib.replace(".xib", ".strings")
print_and_do('ibtool {} --generate-strings-file {}'.format(xib, dest)) print_and_do("ibtool {} --generate-strings-file {}".format(xib, dest))
print_and_do('iconv -f utf-16 -t utf-8 {0} | tee {0}'.format(dest)) print_and_do("iconv -f utf-16 -t utf-8 {0} | tee {0}".format(dest))
def localize_stringsfile(stringsfile, dest_root_folder): def localize_stringsfile(stringsfile, dest_root_folder):
stringsfile_name = op.basename(stringsfile) stringsfile_name = op.basename(stringsfile)
for lang in get_langs('locale'): for lang in get_langs("locale"):
pofile = op.join('locale', lang, 'LC_MESSAGES', 'ui.po') pofile = op.join("locale", lang, "LC_MESSAGES", "ui.po")
cocoa_lang = PO2COCOA.get(lang, lang) cocoa_lang = PO2COCOA.get(lang, lang)
dest_lproj = op.join(dest_root_folder, cocoa_lang + '.lproj') dest_lproj = op.join(dest_root_folder, cocoa_lang + ".lproj")
ensure_folder(dest_lproj) ensure_folder(dest_lproj)
po2strings(pofile, stringsfile, op.join(dest_lproj, stringsfile_name)) po2strings(pofile, stringsfile, op.join(dest_lproj, stringsfile_name))
def localize_all_stringsfiles(src_folder, dest_root_folder): def localize_all_stringsfiles(src_folder, dest_root_folder):
stringsfiles = [op.join(src_folder, fn) for fn in os.listdir(src_folder) if fn.endswith('.strings')] stringsfiles = [
op.join(src_folder, fn)
for fn in os.listdir(src_folder)
if fn.endswith(".strings")
]
for path in stringsfiles: for path in stringsfiles:
localize_stringsfile(path, dest_root_folder) localize_stringsfile(path, dest_root_folder)
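Putting the gettext helpers above together, a hedged sketch of a typical localization round-trip; the folder layout matches the code above, while the source folders and the "tr" keyword are illustrative assumptions:

# 1. Extract translatable strings from source folders into a .pot template.
generate_pot(["core", "qt"], "locale/ui.pot", keywords=["tr"], merge=True)

# 2. Fold the refreshed template into every language's ui.po.
merge_pots_into_pos("locale")

# 3. Normalize formatting to avoid spurious diffs, then compile to .mo.
normalize_all_pos("locale")
compile_all_po("locale")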


@ -1,7 +1,7 @@
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
"""Very simple inter-object notification system. """Very simple inter-object notification system.
@ -14,55 +14,58 @@ the method with the same name as the broadcasted message is called on the listen
from collections import defaultdict from collections import defaultdict
class Broadcaster: class Broadcaster:
"""Broadcasts messages that are received by all listeners. """Broadcasts messages that are received by all listeners.
""" """
def __init__(self): def __init__(self):
self.listeners = set() self.listeners = set()
def add_listener(self, listener): def add_listener(self, listener):
self.listeners.add(listener) self.listeners.add(listener)
def notify(self, msg): def notify(self, msg):
"""Notify all connected listeners of ``msg``. """Notify all connected listeners of ``msg``.
That means that each listener will have its method with the same name as ``msg`` called. That means that each listener will have its method with the same name as ``msg`` called.
""" """
for listener in self.listeners.copy(): # listeners can change during iteration for listener in self.listeners.copy(): # listeners can change during iteration
if listener in self.listeners: # disconnected during notification if listener in self.listeners: # disconnected during notification
listener.dispatch(msg) listener.dispatch(msg)
def remove_listener(self, listener): def remove_listener(self, listener):
self.listeners.discard(listener) self.listeners.discard(listener)
class Listener: class Listener:
"""A listener is initialized with the broadcaster it's going to listen to. Initially, it is not connected. """A listener is initialized with the broadcaster it's going to listen to. Initially, it is not connected.
""" """
def __init__(self, broadcaster): def __init__(self, broadcaster):
self.broadcaster = broadcaster self.broadcaster = broadcaster
self._bound_notifications = defaultdict(list) self._bound_notifications = defaultdict(list)
def bind_messages(self, messages, func): def bind_messages(self, messages, func):
"""Binds multiple message to the same function. """Binds multiple message to the same function.
Often, we perform the same thing on multiple messages. Instead of having the same function Often, we perform the same thing on multiple messages. Instead of having the same function
repeated again and again in our class, we can use this method to bind multiple messages to repeated again and again in our class, we can use this method to bind multiple messages to
the same function. the same function.
""" """
for message in messages: for message in messages:
self._bound_notifications[message].append(func) self._bound_notifications[message].append(func)
def connect(self): def connect(self):
"""Connects the listener to its broadcaster. """Connects the listener to its broadcaster.
""" """
self.broadcaster.add_listener(self) self.broadcaster.add_listener(self)
def disconnect(self): def disconnect(self):
"""Disconnects the listener from its broadcaster. """Disconnects the listener from its broadcaster.
""" """
self.broadcaster.remove_listener(self) self.broadcaster.remove_listener(self)
def dispatch(self, msg): def dispatch(self, msg):
if msg in self._bound_notifications: if msg in self._bound_notifications:
for func in self._bound_notifications[msg]: for func in self._bound_notifications[msg]:
@ -70,20 +73,19 @@ class Listener:
if hasattr(self, msg): if hasattr(self, msg):
method = getattr(self, msg) method = getattr(self, msg)
method() method()
class Repeater(Broadcaster, Listener): class Repeater(Broadcaster, Listener):
REPEATED_NOTIFICATIONS = None REPEATED_NOTIFICATIONS = None
def __init__(self, broadcaster): def __init__(self, broadcaster):
Broadcaster.__init__(self) Broadcaster.__init__(self)
Listener.__init__(self, broadcaster) Listener.__init__(self, broadcaster)
def _repeat_message(self, msg): def _repeat_message(self, msg):
if not self.REPEATED_NOTIFICATIONS or msg in self.REPEATED_NOTIFICATIONS: if not self.REPEATED_NOTIFICATIONS or msg in self.REPEATED_NOTIFICATIONS:
self.notify(msg) self.notify(msg)
def dispatch(self, msg): def dispatch(self, msg):
Listener.dispatch(self, msg) Listener.dispatch(self, msg)
self._repeat_message(msg) self._repeat_message(msg)
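A short hedged sketch of the notification flow; Document, StatusBar and the message name are invented:

class Document(Broadcaster):
    def save(self):
        # ... actual saving would happen here ...
        self.notify("document_saved")

class StatusBar(Listener):
    def document_saved(self):       # method name matches the message
        print("saved!")

doc = Document()
status = StatusBar(doc)
status.connect()                    # start listening
doc.save()                          # calls StatusBar.document_saved()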


@ -2,8 +2,8 @@
# Created On: 2006/02/21 # Created On: 2006/02/21
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import logging import logging
@ -15,19 +15,21 @@ from itertools import takewhile
from functools import wraps from functools import wraps
from inspect import signature from inspect import signature
class Path(tuple): class Path(tuple):
"""A handy class to work with paths. """A handy class to work with paths.
We subclass ``tuple``, each element of the tuple represents an element of the path. We subclass ``tuple``, each element of the tuple represents an element of the path.
* ``Path('/foo/bar/baz')[1]`` --> ``'bar'`` * ``Path('/foo/bar/baz')[1]`` --> ``'bar'``
* ``Path('/foo/bar/baz')[1:2]`` --> ``Path('bar/baz')`` * ``Path('/foo/bar/baz')[1:2]`` --> ``Path('bar/baz')``
* ``Path('/foo/bar')['baz']`` --> ``Path('/foo/bar/baz')`` * ``Path('/foo/bar')['baz']`` --> ``Path('/foo/bar/baz')``
* ``str(Path('/foo/bar/baz'))`` --> ``'/foo/bar/baz'`` * ``str(Path('/foo/bar/baz'))`` --> ``'/foo/bar/baz'``
""" """
# Saves a little bit of memory usage # Saves a little bit of memory usage
__slots__ = () __slots__ = ()
def __new__(cls, value, separator=None): def __new__(cls, value, separator=None):
def unicode_if_needed(s): def unicode_if_needed(s):
if isinstance(s, str): if isinstance(s, str):
@ -38,7 +40,7 @@ class Path(tuple):
except UnicodeDecodeError: except UnicodeDecodeError:
logging.warning("Could not decode %r", s) logging.warning("Could not decode %r", s)
raise raise
if isinstance(value, Path): if isinstance(value, Path):
return value return value
if not separator: if not separator:
@ -47,44 +49,53 @@ class Path(tuple):
value = unicode_if_needed(value) value = unicode_if_needed(value)
if isinstance(value, str): if isinstance(value, str):
if value: if value:
if (separator not in value) and ('/' in value): if (separator not in value) and ("/" in value):
separator = '/' separator = "/"
value = value.split(separator) value = value.split(separator)
else: else:
value = () value = ()
else: else:
if any(isinstance(x, bytes) for x in value): if any(isinstance(x, bytes) for x in value):
value = [unicode_if_needed(x) for x in value] value = [unicode_if_needed(x) for x in value]
#value is a tuple/list # value is a tuple/list
if any(separator in x for x in value): if any(separator in x for x in value):
#We have a component with a separator in it. Let's rejoin it, and generate another path. # We have a component with a separator in it. Let's rejoin it, and generate another path.
return Path(separator.join(value), separator) return Path(separator.join(value), separator)
if (len(value) > 1) and (not value[-1]): if (len(value) > 1) and (not value[-1]):
value = value[:-1] #We never want a path to end with a '' (because Path() can be called with a path ending in a trailing slash) value = value[
:-1
] # We never want a path to end with a '' (because Path() can be called with a path ending in a trailing slash)
return tuple.__new__(cls, value) return tuple.__new__(cls, value)
def __add__(self, other): def __add__(self, other):
other = Path(other) other = Path(other)
if other and (not other[0]): if other and (not other[0]):
other = other[1:] other = other[1:]
return Path(tuple.__add__(self, other)) return Path(tuple.__add__(self, other))
def __contains__(self, item): def __contains__(self, item):
if isinstance(item, Path): if isinstance(item, Path):
return item[:len(self)] == self return item[: len(self)] == self
else: else:
return tuple.__contains__(self, item) return tuple.__contains__(self, item)
def __eq__(self, other): def __eq__(self, other):
return tuple.__eq__(self, Path(other)) return tuple.__eq__(self, Path(other))
def __getitem__(self, key): def __getitem__(self, key):
if isinstance(key, slice): if isinstance(key, slice):
if isinstance(key.start, Path): if isinstance(key.start, Path):
equal_elems = list(takewhile(lambda pair: pair[0] == pair[1], zip(self, key.start))) equal_elems = list(
takewhile(lambda pair: pair[0] == pair[1], zip(self, key.start))
)
key = slice(len(equal_elems), key.stop, key.step) key = slice(len(equal_elems), key.stop, key.step)
if isinstance(key.stop, Path): if isinstance(key.stop, Path):
equal_elems = list(takewhile(lambda pair: pair[0] == pair[1], zip(reversed(self), reversed(key.stop)))) equal_elems = list(
takewhile(
lambda pair: pair[0] == pair[1],
zip(reversed(self), reversed(key.stop)),
)
)
stop = -len(equal_elems) if equal_elems else None stop = -len(equal_elems) if equal_elems else None
key = slice(key.start, stop, key.step) key = slice(key.start, stop, key.step)
return Path(tuple.__getitem__(self, key)) return Path(tuple.__getitem__(self, key))
@ -92,31 +103,31 @@ class Path(tuple):
return self + key return self + key
else: else:
return tuple.__getitem__(self, key) return tuple.__getitem__(self, key)
def __hash__(self): def __hash__(self):
return tuple.__hash__(self) return tuple.__hash__(self)
def __ne__(self, other): def __ne__(self, other):
return not self.__eq__(other) return not self.__eq__(other)
def __radd__(self, other): def __radd__(self, other):
return Path(other) + self return Path(other) + self
def __str__(self): def __str__(self):
if len(self) == 1: if len(self) == 1:
first = self[0] first = self[0]
if (len(first) == 2) and (first[1] == ':'): #Windows drive letter if (len(first) == 2) and (first[1] == ":"): # Windows drive letter
return first + '\\' return first + "\\"
elif not len(first): #root directory elif not len(first): # root directory
return '/' return "/"
return os.sep.join(self) return os.sep.join(self)
def has_drive_letter(self): def has_drive_letter(self):
if not self: if not self:
return False return False
first = self[0] first = self[0]
return (len(first) == 2) and (first[1] == ':') return (len(first) == 2) and (first[1] == ":")
def is_parent_of(self, other): def is_parent_of(self, other):
"""Whether ``other`` is a subpath of ``self``. """Whether ``other`` is a subpath of ``self``.
@ -133,29 +144,29 @@ class Path(tuple):
return self[1:] return self[1:]
else: else:
return self return self
def tobytes(self): def tobytes(self):
return str(self).encode(sys.getfilesystemencoding()) return str(self).encode(sys.getfilesystemencoding())
def parent(self): def parent(self):
"""Returns the parent path. """Returns the parent path.
``Path('/foo/bar/baz').parent()`` --> ``Path('/foo/bar')`` ``Path('/foo/bar/baz').parent()`` --> ``Path('/foo/bar')``
""" """
return self[:-1] return self[:-1]
@property @property
def name(self): def name(self):
"""Last element of the path (filename), with extension. """Last element of the path (filename), with extension.
``Path('/foo/bar/baz').name`` --> ``'baz'`` ``Path('/foo/bar/baz').name`` --> ``'baz'``
""" """
return self[-1] return self[-1]
# OS method wrappers # OS method wrappers
def exists(self): def exists(self):
return op.exists(str(self)) return op.exists(str(self))
def copy(self, dest_path): def copy(self, dest_path):
return shutil.copy(str(self), str(dest_path)) return shutil.copy(str(self), str(dest_path))
@ -200,36 +211,44 @@ class Path(tuple):
def stat(self): def stat(self):
return os.stat(str(self)) return os.stat(str(self))
def pathify(f): def pathify(f):
"""Ensure that every annotated :class:`Path` arguments are actually paths. """Ensure that every annotated :class:`Path` arguments are actually paths.
When a function is decorated with ``@pathify``, every argument with annotated as Path will be When a function is decorated with ``@pathify``, every argument with annotated as Path will be
converted to a Path if it wasn't already. Example:: converted to a Path if it wasn't already. Example::
@pathify @pathify
def foo(path: Path, otherarg): def foo(path: Path, otherarg):
return path.listdir() return path.listdir()
Calling ``foo('/bar', 0)`` will convert ``'/bar'`` to ``Path('/bar')``. Calling ``foo('/bar', 0)`` will convert ``'/bar'`` to ``Path('/bar')``.
""" """
sig = signature(f) sig = signature(f)
pindexes = {i for i, p in enumerate(sig.parameters.values()) if p.annotation is Path} pindexes = {
i for i, p in enumerate(sig.parameters.values()) if p.annotation is Path
}
pkeys = {k: v for k, v in sig.parameters.items() if v.annotation is Path} pkeys = {k: v for k, v in sig.parameters.items() if v.annotation is Path}
def path_or_none(p): def path_or_none(p):
return None if p is None else Path(p) return None if p is None else Path(p)
@wraps(f) @wraps(f)
def wrapped(*args, **kwargs): def wrapped(*args, **kwargs):
args = tuple((path_or_none(a) if i in pindexes else a) for i, a in enumerate(args)) args = tuple(
(path_or_none(a) if i in pindexes else a) for i, a in enumerate(args)
)
kwargs = {k: (path_or_none(v) if k in pkeys else v) for k, v in kwargs.items()} kwargs = {k: (path_or_none(v) if k in pkeys else v) for k, v in kwargs.items()}
return f(*args, **kwargs) return f(*args, **kwargs)
return wrapped return wrapped
def log_io_error(func): def log_io_error(func):
""" Catches OSError, IOError and WindowsError and log them """ Catches OSError, IOError and WindowsError and log them
""" """
@wraps(func) @wraps(func)
def wrapper(path, *args, **kwargs): def wrapper(path, *args, **kwargs):
try: try:
@ -239,5 +258,5 @@ def log_io_error(func):
classname = e.__class__.__name__ classname = e.__class__.__name__
funcname = func.__name__ funcname = func.__name__
logging.warn(msg.format(classname, funcname, str(path), str(e))) logging.warn(msg.format(classname, funcname, str(path), str(e)))
return wrapper return wrapper
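A small sketch tying Path and @pathify together; backup() is an invented function and the string forms assume a POSIX os.sep:

@pathify
def backup(src: Path, dest: Path):
    # Both arguments arrive as Path instances, even when called with strings.
    return src.copy(dest)

p = Path("/tmp/data/report.txt")
assert p.name == "report.txt"
assert str(p.parent()) == "/tmp/data"
assert p in Path("/tmp")    # containment means "p lives under /tmp"
# backup("/tmp/data/report.txt", "/tmp/backup.txt")  # would shutil.copy the file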


@ -1,8 +1,8 @@
# Created On: 2011/09/22 # Created On: 2011/09/22
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
# Yes, I know, there's the 'platform' unit for this kind of stuff, but the thing is that I got a # Yes, I know, there's the 'platform' unit for this kind of stuff, but the thing is that I got a
@ -11,6 +11,6 @@
import sys import sys
ISWINDOWS = sys.platform == 'win32' ISWINDOWS = sys.platform == "win32"
ISOSX = sys.platform == 'darwin' ISOSX = sys.platform == "darwin"
ISLINUX = sys.platform.startswith('linux') ISLINUX = sys.platform.startswith("linux")


@ -18,20 +18,17 @@ import os
import imp import imp
import sys import sys
import glob import glob
import time
import token import token
import tokenize import tokenize
import operator
__version__ = '1.5' __version__ = "1.5"
default_keywords = ['_'] default_keywords = ["_"]
DEFAULTKEYWORDS = ', '.join(default_keywords) DEFAULTKEYWORDS = ", ".join(default_keywords)
EMPTYSTRING = '' EMPTYSTRING = ""
# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's # The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# there. # there.
pot_header = """ pot_header = """
@ -41,17 +38,17 @@ msgstr ""
"Content-Transfer-Encoding: utf-8\\n" "Content-Transfer-Encoding: utf-8\\n"
""" """
def usage(code, msg=''): def usage(code, msg=""):
print(__doc__ % globals(), file=sys.stderr) print(__doc__ % globals(), file=sys.stderr)
if msg: if msg:
print(msg, file=sys.stderr) print(msg, file=sys.stderr)
sys.exit(code) sys.exit(code)
escapes = [] escapes = []
def make_escapes(pass_iso8859): def make_escapes(pass_iso8859):
global escapes global escapes
if pass_iso8859: if pass_iso8859:
@ -66,11 +63,11 @@ def make_escapes(pass_iso8859):
escapes.append(chr(i)) escapes.append(chr(i))
else: else:
escapes.append("\\%03o" % i) escapes.append("\\%03o" % i)
escapes[ord('\\')] = '\\\\' escapes[ord("\\")] = "\\\\"
escapes[ord('\t')] = '\\t' escapes[ord("\t")] = "\\t"
escapes[ord('\r')] = '\\r' escapes[ord("\r")] = "\\r"
escapes[ord('\n')] = '\\n' escapes[ord("\n")] = "\\n"
escapes[ord('\"')] = '\\"' escapes[ord('"')] = '\\"'
def escape(s): def escape(s):
@ -83,26 +80,26 @@ def escape(s):
def safe_eval(s): def safe_eval(s):
# unwrap quotes, safely # unwrap quotes, safely
return eval(s, {'__builtins__':{}}, {}) return eval(s, {"__builtins__": {}}, {})
def normalize(s): def normalize(s):
# This converts the various Python string types into a format that is # This converts the various Python string types into a format that is
# appropriate for .po files, namely much closer to C style. # appropriate for .po files, namely much closer to C style.
lines = s.split('\n') lines = s.split("\n")
if len(lines) == 1: if len(lines) == 1:
s = '"' + escape(s) + '"' s = '"' + escape(s) + '"'
else: else:
if not lines[-1]: if not lines[-1]:
del lines[-1] del lines[-1]
lines[-1] = lines[-1] + '\n' lines[-1] = lines[-1] + "\n"
for i in range(len(lines)): for i in range(len(lines)):
lines[i] = escape(lines[i]) lines[i] = escape(lines[i])
lineterm = '\\n"\n"' lineterm = '\\n"\n"'
s = '""\n"' + lineterm.join(lines) + '"' s = '""\n"' + lineterm.join(lines) + '"'
return s return s
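For example (a sketch, assuming make_escapes(0) has been called first so the escape table is populated):

make_escapes(0)
normalize("single")  # -> '"single"'
normalize("a\nb\n")  # -> '""\n"a\\n"\n"b\\n"' (C-style quoted lines)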
def containsAny(str, set): def containsAny(str, set):
"""Check whether 'str' contains ANY of the chars in 'set'""" """Check whether 'str' contains ANY of the chars in 'set'"""
return 1 in [c in str for c in set] return 1 in [c in str for c in set]
@@ -111,20 +108,24 @@ def containsAny(str, set):
def _visit_pyfiles(list, dirname, names): def _visit_pyfiles(list, dirname, names):
"""Helper for getFilesForName().""" """Helper for getFilesForName()."""
# get extension for python source files # get extension for python source files
if '_py_ext' not in globals(): if "_py_ext" not in globals():
global _py_ext global _py_ext
_py_ext = [
    triple[0] for triple in imp.get_suffixes() if triple[2] == imp.PY_SOURCE
][0]
# don't recurse into CVS directories # don't recurse into CVS directories
if 'CVS' in names: if "CVS" in names:
names.remove('CVS') names.remove("CVS")
# add all *.py files to list # add all *.py files to list
list.extend(
    [
        os.path.join(dirname, file)
        for file in names
        if os.path.splitext(file)[1] == _py_ext
    ]
)
def _get_modpkg_path(dotted_name, pathlist=None): def _get_modpkg_path(dotted_name, pathlist=None):
@@ -135,13 +136,14 @@ def _get_modpkg_path(dotted_name, pathlist=None):
extension module. extension module.
""" """
# split off top-most name # split off top-most name
parts = dotted_name.split('.', 1) parts = dotted_name.split(".", 1)
if len(parts) > 1: if len(parts) > 1:
# we have a dotted path, import top-level package # we have a dotted path, import top-level package
try: try:
file, pathname, description = imp.find_module(parts[0], pathlist) file, pathname, description = imp.find_module(parts[0], pathlist)
if file:
    file.close()
except ImportError: except ImportError:
return None return None
@@ -154,8 +156,7 @@ def _get_modpkg_path(dotted_name, pathlist=None):
else: else:
# plain name # plain name
try: try:
file, pathname, description = imp.find_module(dotted_name, pathlist)
if file: if file:
file.close() file.close()
if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]: if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
@@ -195,7 +196,7 @@ def getFilesForName(name):
return [] return []
class TokenEater: class TokenEater:
def __init__(self, options): def __init__(self, options):
self.__options = options self.__options = options
@@ -208,9 +209,9 @@ class TokenEater:
def __call__(self, ttype, tstring, stup, etup, line): def __call__(self, ttype, tstring, stup, etup, line):
# dispatch # dispatch
# import token
# print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
# 'tstring:', tstring
self.__state(ttype, tstring, stup[0]) self.__state(ttype, tstring, stup[0])
def __waiting(self, ttype, tstring, lineno): def __waiting(self, ttype, tstring, lineno):
@@ -226,7 +227,7 @@ class TokenEater:
self.__freshmodule = 0 self.__freshmodule = 0
return return
# class docstring? # class docstring?
if ttype == tokenize.NAME and tstring in ('class', 'def'): if ttype == tokenize.NAME and tstring in ("class", "def"):
self.__state = self.__suiteseen self.__state = self.__suiteseen
return return
if ttype == tokenize.NAME and tstring in opts.keywords: if ttype == tokenize.NAME and tstring in opts.keywords:
@@ -234,7 +235,7 @@ class TokenEater:
def __suiteseen(self, ttype, tstring, lineno): def __suiteseen(self, ttype, tstring, lineno):
# ignore anything until we see the colon # ignore anything until we see the colon
if ttype == tokenize.OP and tstring == ':': if ttype == tokenize.OP and tstring == ":":
self.__state = self.__suitedocstring self.__state = self.__suitedocstring
def __suitedocstring(self, ttype, tstring, lineno): def __suitedocstring(self, ttype, tstring, lineno):
@@ -242,13 +243,12 @@ class TokenEater:
if ttype == tokenize.STRING: if ttype == tokenize.STRING:
self.__addentry(safe_eval(tstring), lineno, isdocstring=1) self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
self.__state = self.__waiting self.__state = self.__waiting
elif ttype not in (tokenize.NEWLINE, tokenize.INDENT, tokenize.COMMENT):
# there was no class docstring # there was no class docstring
self.__state = self.__waiting self.__state = self.__waiting
def __keywordseen(self, ttype, tstring, lineno): def __keywordseen(self, ttype, tstring, lineno):
if ttype == tokenize.OP and tstring == '(': if ttype == tokenize.OP and tstring == "(":
self.__data = [] self.__data = []
self.__lineno = lineno self.__lineno = lineno
self.__state = self.__openseen self.__state = self.__openseen
@@ -256,7 +256,7 @@ class TokenEater:
self.__state = self.__waiting self.__state = self.__waiting
def __openseen(self, ttype, tstring, lineno): def __openseen(self, ttype, tstring, lineno):
if ttype == tokenize.OP and tstring == ')': if ttype == tokenize.OP and tstring == ")":
# We've seen the last of the translatable strings. Record the # We've seen the last of the translatable strings. Record the
# line number of the first line of the strings and update the list # line number of the first line of the strings and update the list
# of messages seen. Reset state for the next batch. If there # of messages seen. Reset state for the next batch. If there
@@ -266,20 +266,25 @@ class TokenEater:
self.__state = self.__waiting self.__state = self.__waiting
elif ttype == tokenize.STRING: elif ttype == tokenize.STRING:
self.__data.append(safe_eval(tstring)) self.__data.append(safe_eval(tstring))
elif ttype not in [
    tokenize.COMMENT,
    token.INDENT,
    token.DEDENT,
    token.NEWLINE,
    tokenize.NL,
]:
# warn if we see anything else than STRING or whitespace # warn if we see anything else than STRING or whitespace
print(
    '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
    % {"token": tstring, "file": self.__curfile, "lineno": self.__lineno},
    file=sys.stderr,
)
self.__state = self.__waiting self.__state = self.__waiting
def __addentry(self, msg, lineno=None, isdocstring=0): def __addentry(self, msg, lineno=None, isdocstring=0):
if lineno is None: if lineno is None:
lineno = self.__lineno lineno = self.__lineno
if not msg in self.__options.toexclude: if msg not in self.__options.toexclude:
entry = (self.__curfile, lineno) entry = (self.__curfile, lineno)
self.__messages.setdefault(msg, {})[entry] = isdocstring self.__messages.setdefault(msg, {})[entry] = isdocstring
@@ -289,7 +294,6 @@ class TokenEater:
def write(self, fp): def write(self, fp):
options = self.__options options = self.__options
timestamp = time.strftime('%Y-%m-%d %H:%M+%Z')
# The time stamp in the header doesn't have the same format as that # The time stamp in the header doesn't have the same format as that
# generated by xgettext... # generated by xgettext...
print(pot_header, file=fp) print(pot_header, file=fp)
@@ -317,15 +321,15 @@ class TokenEater:
# location comments are different b/w Solaris and GNU: # location comments are different b/w Solaris and GNU:
elif options.locationstyle == options.SOLARIS: elif options.locationstyle == options.SOLARIS:
for filename, lineno in v: for filename, lineno in v:
d = {'filename': filename, 'lineno': lineno} d = {"filename": filename, "lineno": lineno}
print('# File: %(filename)s, line: %(lineno)d' % d, file=fp) print("# File: %(filename)s, line: %(lineno)d" % d, file=fp)
elif options.locationstyle == options.GNU: elif options.locationstyle == options.GNU:
# fit as many locations on one line, as long as the # fit as many locations on one line, as long as the
# resulting line length doesn't exceed 'options.width'
locline = '#:' locline = "#:"
for filename, lineno in v: for filename, lineno in v:
d = {'filename': filename, 'lineno': lineno} d = {"filename": filename, "lineno": lineno}
s = ' %(filename)s:%(lineno)d' % d s = " %(filename)s:%(lineno)d" % d
if len(locline) + len(s) <= options.width: if len(locline) + len(s) <= options.width:
locline = locline + s locline = locline + s
else: else:
@@ -334,37 +338,34 @@ class TokenEater:
if len(locline) > 2: if len(locline) > 2:
print(locline, file=fp) print(locline, file=fp)
if isdocstring: if isdocstring:
print('#, docstring', file=fp) print("#, docstring", file=fp)
print('msgid', normalize(k), file=fp) print("msgid", normalize(k), file=fp)
print('msgstr ""\n', file=fp) print('msgstr ""\n', file=fp)
def main(source_files, outpath, keywords=None): def main(source_files, outpath, keywords=None):
global default_keywords global default_keywords
# for holding option values # for holding option values
class Options: class Options:
# constants # constants
GNU = 1 GNU = 1
SOLARIS = 2 SOLARIS = 2
# defaults # defaults
extractall = 0 # FIXME: currently this option has no effect at all. extractall = 0 # FIXME: currently this option has no effect at all.
escape = 0 escape = 0
keywords = [] keywords = []
outfile = 'messages.pot' outfile = "messages.pot"
writelocations = 1 writelocations = 1
locationstyle = GNU locationstyle = GNU
verbose = 0 verbose = 0
width = 78 width = 78
excludefilename = '' excludefilename = ""
docstrings = 0 docstrings = 0
nodocstrings = {} nodocstrings = {}
options = Options() options = Options()
locations = {'gnu' : options.GNU,
'solaris' : options.SOLARIS,
}
options.outfile = outpath options.outfile = outpath
if keywords: if keywords:
options.keywords = keywords options.keywords = keywords
@@ -378,11 +379,14 @@ def main(source_files, outpath, keywords=None):
# initialize list of strings to exclude # initialize list of strings to exclude
if options.excludefilename: if options.excludefilename:
try: try:
fp = open(options.excludefilename, encoding='utf-8') fp = open(options.excludefilename, encoding="utf-8")
options.toexclude = fp.readlines() options.toexclude = fp.readlines()
fp.close() fp.close()
except IOError: except IOError:
print("Can't read --exclude-file: %s" % options.excludefilename, file=sys.stderr) print(
"Can't read --exclude-file: %s" % options.excludefilename,
file=sys.stderr,
)
sys.exit(1) sys.exit(1)
else: else:
options.toexclude = [] options.toexclude = []
@@ -391,8 +395,8 @@ def main(source_files, outpath, keywords=None):
eater = TokenEater(options) eater = TokenEater(options)
for filename in source_files: for filename in source_files:
if options.verbose: if options.verbose:
print('Working on %s' % filename) print("Working on %s" % filename)
fp = open(filename, encoding='utf-8') fp = open(filename, encoding="utf-8")
closep = 1 closep = 1
try: try:
eater.set_filename(filename) eater.set_filename(filename)
@@ -401,14 +405,16 @@ def main(source_files, outpath, keywords=None):
for _token in tokens: for _token in tokens:
eater(*_token) eater(*_token)
except tokenize.TokenError as e: except tokenize.TokenError as e:
print(
    "%s: %s, line %d, column %d"
    % (e.args[0], filename, e.args[1][0], e.args[1][1]),
    file=sys.stderr,
)
finally: finally:
if closep: if closep:
fp.close() fp.close()
fp = open(options.outfile, 'w', encoding='utf-8') fp = open(options.outfile, "w", encoding="utf-8")
closep = 1 closep = 1
try: try:
eater.write(fp) eater.write(fp)
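A hypothetical driver call (a sketch; the file names and the keyword are made up) showing how this extraction module is meant to be invoked:

main(["core/app.py", "qt/app.py"], "locale/core.pot", keywords=["tr"])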
View File
@@ -19,16 +19,28 @@ CHANGELOG_FORMAT = """
{description} {description}
""" """
def tixgen(tixurl): def tixgen(tixurl):
"""This is a filter *generator*. tixurl is a url pattern for the tix with a {0} placeholder """This is a filter *generator*. tixurl is a url pattern for the tix with a {0} placeholder
for the tix # for the tix #
""" """
urlpattern = tixurl.format(
    "\\1"
)  # will be replaced by the content of the first group in re
R = re.compile(r"#(\d+)")
repl = "`#\\1 <{}>`__".format(urlpattern)
return lambda text: R.sub(repl, text) return lambda text: R.sub(repl, text)
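For instance (a sketch with a made-up URL pattern), the returned filter rewrites ticket references into RST links:

tix = tixgen("https://example.org/issues/{}")
tix("fixes #42")
# -> 'fixes `#42 <https://example.org/issues/42>`__'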
def gen(
    basepath,
    destpath,
    changelogpath,
    tixurl,
    confrepl=None,
    confpath=None,
    changelogtmpl=None,
):
"""Generate sphinx docs with all bells and whistles. """Generate sphinx docs with all bells and whistles.
basepath: The base sphinx source path. basepath: The base sphinx source path.
@@ -40,41 +52,47 @@ def gen(basepath, destpath, changelogpath, tixurl, confrepl=None, confpath=None,
if confrepl is None: if confrepl is None:
confrepl = {} confrepl = {}
if confpath is None: if confpath is None:
confpath = op.join(basepath, 'conf.tmpl') confpath = op.join(basepath, "conf.tmpl")
if changelogtmpl is None: if changelogtmpl is None:
changelogtmpl = op.join(basepath, 'changelog.tmpl') changelogtmpl = op.join(basepath, "changelog.tmpl")
changelog = read_changelog_file(changelogpath) changelog = read_changelog_file(changelogpath)
tix = tixgen(tixurl) tix = tixgen(tixurl)
rendered_logs = [] rendered_logs = []
for log in changelog: for log in changelog:
description = tix(log['description']) description = tix(log["description"])
# The format of the changelog descriptions is in markdown, but since we only use bulleted lists
# and links, it's not worth depending on the markdown package. A simple regexp suffices.
description = re.sub(r"\[(.*?)\]\((.*?)\)", "`\\1 <\\2>`__", description)
rendered = CHANGELOG_FORMAT.format(
    version=log["version"], date=log["date_str"], description=description
)
rendered_logs.append(rendered) rendered_logs.append(rendered)
confrepl['version'] = changelog[0]['version'] confrepl["version"] = changelog[0]["version"]
changelog_out = op.join(basepath, 'changelog.rst') changelog_out = op.join(basepath, "changelog.rst")
filereplace(changelogtmpl, changelog_out, changelog='\n'.join(rendered_logs)) filereplace(changelogtmpl, changelog_out, changelog="\n".join(rendered_logs))
if op.exists(confpath): if op.exists(confpath):
conf_out = op.join(basepath, 'conf.py') conf_out = op.join(basepath, "conf.py")
filereplace(confpath, conf_out, **confrepl) filereplace(confpath, conf_out, **confrepl)
if LooseVersion(get_distribution("sphinx").version) >= LooseVersion("1.7.0"): if LooseVersion(get_distribution("sphinx").version) >= LooseVersion("1.7.0"):
from sphinx.cmd.build import build_main as sphinx_build from sphinx.cmd.build import build_main as sphinx_build
# Call the sphinx_build function, which is the same as doing sphinx-build from cli # Call the sphinx_build function, which is the same as doing sphinx-build from cli
try: try:
sphinx_build([basepath, destpath]) sphinx_build([basepath, destpath])
except SystemExit: except SystemExit:
print("Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit") print(
"Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit"
)
else: else:
# We used to call sphinx-build with print_and_do(), but the problem was that the virtualenv # We used to call sphinx-build with print_and_do(), but the problem was that the virtualenv
# of the calling python wasn't correctly considered and caused problems with documentation # of the calling python wasn't correctly considered and caused problems with documentation
# relying on autodoc (which tries to import the module to auto-document, but fail because of # relying on autodoc (which tries to import the module to auto-document, but fail because of
# missing dependencies which are in the virtualenv). Here, we do exactly what is done when # missing dependencies which are in the virtualenv). Here, we do exactly what is done when
# calling the command from bash. # calling the command from bash.
cmd = load_entry_point('Sphinx', 'console_scripts', 'sphinx-build') cmd = load_entry_point("Sphinx", "console_scripts", "sphinx-build")
try: try:
cmd(['sphinx-build', basepath, destpath]) cmd(["sphinx-build", basepath, destpath])
except SystemExit: except SystemExit:
print("Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit") print(
"Sphinx called sys.exit(), but we're cancelling it because we don't actually want to exit"
)
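A hypothetical invocation of gen() (a sketch; all paths and the URL are placeholders):

gen(
    "help/en",
    "build/help",
    "help/changelog",
    "https://example.org/issues/{}",
    confrepl={"language": "en"},
)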
View File
@@ -2,39 +2,39 @@
# Created On: 2007/05/19 # Created On: 2007/05/19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import sys
import os import os
import os.path as op import os.path as op
import threading import threading
from queue import Queue from queue import Queue
import time
import sqlite3 as sqlite import sqlite3 as sqlite
STOP = object() STOP = object()
COMMIT = object() COMMIT = object()
ROLLBACK = object() ROLLBACK = object()
class FakeCursor(list): class FakeCursor(list):
# It's not possible to use sqlite cursors on another thread than the connection. Thus, # It's not possible to use sqlite cursors on another thread than the connection. Thus,
# we can't directly return the cursor. We have to fetch all results, and support its interface.
def fetchall(self): def fetchall(self):
return self return self
def fetchone(self): def fetchone(self):
try: try:
return self.pop(0) return self.pop(0)
except IndexError: except IndexError:
return None return None
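A small sketch of the idea (values are made up): results are pre-fetched into the list, so the usual cursor calls work on any thread:

cur = FakeCursor([(1, "foo"), (2, "bar")])
cur.fetchone()  # (1, "foo")
cur.fetchall()  # [(2, "bar")] -- whatever hasn't been popped yet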
class _ActualThread(threading.Thread): class _ActualThread(threading.Thread):
""" We can't use this class directly because thread objects are not automatically freed when
nothing refers to it, making it hang the application if not explicitly closed.
"""
def __init__(self, dbname, autocommit): def __init__(self, dbname, autocommit):
threading.Thread.__init__(self) threading.Thread.__init__(self)
self._queries = Queue() self._queries = Queue()
@@ -47,7 +47,7 @@ class _ActualThread(threading.Thread):
self.lastrowid = -1 self.lastrowid = -1
self.setDaemon(True) self.setDaemon(True)
self.start() self.start()
def _query(self, query): def _query(self, query):
with self._lock: with self._lock:
wait_token = object() wait_token = object()
@@ -56,30 +56,30 @@ class _ActualThread(threading.Thread):
self._waiting_list.remove(wait_token) self._waiting_list.remove(wait_token)
result = self._results.get() result = self._results.get()
return result return result
def close(self): def close(self):
if not self._run: if not self._run:
return return
self._query(STOP) self._query(STOP)
def commit(self): def commit(self):
if not self._run: if not self._run:
return None # Connection closed return None # Connection closed
self._query(COMMIT) self._query(COMMIT)
def execute(self, sql, values=()): def execute(self, sql, values=()):
if not self._run: if not self._run:
return None # Connection closed return None # Connection closed
result = self._query((sql, values)) result = self._query((sql, values))
if isinstance(result, Exception): if isinstance(result, Exception):
raise result raise result
return result return result
def rollback(self): def rollback(self):
if not self._run: if not self._run:
return None # Connection closed return None # Connection closed
self._query(ROLLBACK) self._query(ROLLBACK)
def run(self): def run(self):
# The whole chdir thing is because sqlite doesn't handle directory names with non-ascii chars in them AT ALL.
oldpath = os.getcwd() oldpath = os.getcwd()
@@ -111,31 +111,31 @@ class _ActualThread(threading.Thread):
result = e result = e
self._results.put(result) self._results.put(result)
con.close() con.close()
class ThreadedConn: class ThreadedConn:
"""``sqlite`` connections can't be used across threads. ``TheadedConn`` opens a sqlite """``sqlite`` connections can't be used across threads. ``TheadedConn`` opens a sqlite
connection in its own thread and sends it queries through a queue, making it suitable in connection in its own thread and sends it queries through a queue, making it suitable in
multi-threaded environment. multi-threaded environment.
""" """
def __init__(self, dbname, autocommit): def __init__(self, dbname, autocommit):
self._t = _ActualThread(dbname, autocommit) self._t = _ActualThread(dbname, autocommit)
self.lastrowid = -1 self.lastrowid = -1
def __del__(self): def __del__(self):
self.close() self.close()
def close(self): def close(self):
self._t.close() self._t.close()
def commit(self): def commit(self):
self._t.commit() self._t.commit()
def execute(self, sql, values=()): def execute(self, sql, values=()):
result = self._t.execute(sql, values) result = self._t.execute(sql, values)
self.lastrowid = self._t.lastrowid self.lastrowid = self._t.lastrowid
return result return result
def rollback(self): def rollback(self):
self._t.rollback() self._t.rollback()
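A minimal usage sketch (assuming this module is importable; the table and values are made up):

con = ThreadedConn(":memory:", True)  # autocommit on
con.execute("create table foo(bar TEXT)")
con.execute("insert into foo(bar) values(?)", ["baz"])
con.execute("select * from foo").fetchall()  # [('baz',)]
con.close()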
View File
@@ -2,103 +2,105 @@
# Created On: 2008-01-08 # Created On: 2008-01-08
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from ..conflict import * from ..conflict import *
from ..path import Path from ..path import Path
from ..testutil import eq_ from ..testutil import eq_
class TestCase_GetConflictedName: class TestCase_GetConflictedName:
def test_simple(self): def test_simple(self):
name = get_conflicted_name(['bar'], 'bar') name = get_conflicted_name(["bar"], "bar")
eq_('[000] bar', name) eq_("[000] bar", name)
name = get_conflicted_name(['bar', '[000] bar'], 'bar') name = get_conflicted_name(["bar", "[000] bar"], "bar")
eq_('[001] bar', name) eq_("[001] bar", name)
def test_no_conflict(self): def test_no_conflict(self):
name = get_conflicted_name(['bar'], 'foobar') name = get_conflicted_name(["bar"], "foobar")
eq_('foobar', name) eq_("foobar", name)
def test_fourth_digit(self): def test_fourth_digit(self):
# This test is long because every time we have to add a conflicted name, # This test is long because every time we have to add a conflicted name,
# a test must be made for every other conflicted name existing... # a test must be made for every other conflicted name existing...
# Anyway, this has very few chances to happen. # Anyway, this has very few chances to happen.
names = ['bar'] + ['[%03d] bar' % i for i in range(1000)] names = ["bar"] + ["[%03d] bar" % i for i in range(1000)]
name = get_conflicted_name(names, 'bar') name = get_conflicted_name(names, "bar")
eq_('[1000] bar', name) eq_("[1000] bar", name)
def test_auto_unconflict(self): def test_auto_unconflict(self):
# Automatically unconflict the name if it's already conflicted. # Automatically unconflict the name if it's already conflicted.
name = get_conflicted_name([], '[000] foobar') name = get_conflicted_name([], "[000] foobar")
eq_('foobar', name) eq_("foobar", name)
name = get_conflicted_name(['bar'], '[001] bar') name = get_conflicted_name(["bar"], "[001] bar")
eq_('[000] bar', name) eq_("[000] bar", name)
class TestCase_GetUnconflictedName: class TestCase_GetUnconflictedName:
def test_main(self): def test_main(self):
eq_('foobar',get_unconflicted_name('[000] foobar')) eq_("foobar", get_unconflicted_name("[000] foobar"))
eq_('foobar',get_unconflicted_name('[9999] foobar')) eq_("foobar", get_unconflicted_name("[9999] foobar"))
eq_('[000]foobar',get_unconflicted_name('[000]foobar')) eq_("[000]foobar", get_unconflicted_name("[000]foobar"))
eq_('[000a] foobar',get_unconflicted_name('[000a] foobar')) eq_("[000a] foobar", get_unconflicted_name("[000a] foobar"))
eq_('foobar',get_unconflicted_name('foobar')) eq_("foobar", get_unconflicted_name("foobar"))
eq_('foo [000] bar',get_unconflicted_name('foo [000] bar')) eq_("foo [000] bar", get_unconflicted_name("foo [000] bar"))
class TestCase_IsConflicted: class TestCase_IsConflicted:
def test_main(self): def test_main(self):
assert is_conflicted('[000] foobar') assert is_conflicted("[000] foobar")
assert is_conflicted('[9999] foobar') assert is_conflicted("[9999] foobar")
assert not is_conflicted('[000]foobar') assert not is_conflicted("[000]foobar")
assert not is_conflicted('[000a] foobar') assert not is_conflicted("[000a] foobar")
assert not is_conflicted('foobar') assert not is_conflicted("foobar")
assert not is_conflicted('foo [000] bar') assert not is_conflicted("foo [000] bar")
class TestCase_move_copy: class TestCase_move_copy:
def pytest_funcarg__do_setup(self, request): def pytest_funcarg__do_setup(self, request):
tmpdir = request.getfuncargvalue('tmpdir') tmpdir = request.getfuncargvalue("tmpdir")
self.path = Path(str(tmpdir)) self.path = Path(str(tmpdir))
self.path['foo'].open('w').close() self.path["foo"].open("w").close()
self.path['bar'].open('w').close() self.path["bar"].open("w").close()
self.path['dir'].mkdir() self.path["dir"].mkdir()
def test_move_no_conflict(self, do_setup): def test_move_no_conflict(self, do_setup):
smart_move(self.path + 'foo', self.path + 'baz') smart_move(self.path + "foo", self.path + "baz")
assert self.path['baz'].exists() assert self.path["baz"].exists()
assert not self.path['foo'].exists() assert not self.path["foo"].exists()
def test_copy_no_conflict(
    self, do_setup
):  # No need to duplicate the rest of the tests... Let's just test on move
    smart_copy(self.path + "foo", self.path + "baz")
    assert self.path["baz"].exists()
    assert self.path["foo"].exists()
def test_move_no_conflict_dest_is_dir(self, do_setup): def test_move_no_conflict_dest_is_dir(self, do_setup):
smart_move(self.path + 'foo', self.path + 'dir') smart_move(self.path + "foo", self.path + "dir")
assert self.path['dir']['foo'].exists() assert self.path["dir"]["foo"].exists()
assert not self.path['foo'].exists() assert not self.path["foo"].exists()
def test_move_conflict(self, do_setup): def test_move_conflict(self, do_setup):
smart_move(self.path + 'foo', self.path + 'bar') smart_move(self.path + "foo", self.path + "bar")
assert self.path['[000] bar'].exists() assert self.path["[000] bar"].exists()
assert not self.path['foo'].exists() assert not self.path["foo"].exists()
def test_move_conflict_dest_is_dir(self, do_setup): def test_move_conflict_dest_is_dir(self, do_setup):
smart_move(self.path['foo'], self.path['dir']) smart_move(self.path["foo"], self.path["dir"])
smart_move(self.path['bar'], self.path['foo']) smart_move(self.path["bar"], self.path["foo"])
smart_move(self.path['foo'], self.path['dir']) smart_move(self.path["foo"], self.path["dir"])
assert self.path['dir']['foo'].exists() assert self.path["dir"]["foo"].exists()
assert self.path['dir']['[000] foo'].exists() assert self.path["dir"]["[000] foo"].exists()
assert not self.path['foo'].exists() assert not self.path["foo"].exists()
assert not self.path['bar'].exists() assert not self.path["bar"].exists()
def test_copy_folder(self, tmpdir): def test_copy_folder(self, tmpdir):
# smart_copy also works on folders # smart_copy also works on folders
path = Path(str(tmpdir)) path = Path(str(tmpdir))
path['foo'].mkdir() path["foo"].mkdir()
path['bar'].mkdir() path["bar"].mkdir()
smart_copy(path['foo'], path['bar']) # no crash smart_copy(path["foo"], path["bar"]) # no crash
assert path['[000] bar'].exists() assert path["[000] bar"].exists()
View File
@@ -1,12 +1,13 @@
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from ..testutil import eq_ from ..testutil import eq_
from ..notify import Broadcaster, Listener, Repeater from ..notify import Broadcaster, Listener, Repeater
class HelloListener(Listener): class HelloListener(Listener):
def __init__(self, broadcaster): def __init__(self, broadcaster):
Listener.__init__(self, broadcaster) Listener.__init__(self, broadcaster)
@@ -15,6 +16,7 @@ class HelloListener(Listener):
def hello(self): def hello(self):
self.hello_count += 1 self.hello_count += 1
class HelloRepeater(Repeater): class HelloRepeater(Repeater):
def __init__(self, broadcaster): def __init__(self, broadcaster):
Repeater.__init__(self, broadcaster) Repeater.__init__(self, broadcaster)
@@ -23,13 +25,15 @@ class HelloRepeater(Repeater):
def hello(self): def hello(self):
self.hello_count += 1 self.hello_count += 1
def create_pair(): def create_pair():
b = Broadcaster() b = Broadcaster()
l = HelloListener(b) l = HelloListener(b)
return b, l return b, l
def test_disconnect_during_notification(): def test_disconnect_during_notification():
# When a listener disconnects another listener, the other listener will not receive a
# notification. # notification.
# This whole complication scheme below is because the order of the notification is not # This whole complication scheme below is because the order of the notification is not
# guaranteed. We could disconnect everything from self.broadcaster.listeners, but this # guaranteed. We could disconnect everything from self.broadcaster.listeners, but this
@@ -38,103 +42,116 @@ def test_disconnect_during_notification():
def __init__(self, broadcaster): def __init__(self, broadcaster):
Listener.__init__(self, broadcaster) Listener.__init__(self, broadcaster)
self.hello_count = 0 self.hello_count = 0
def hello(self): def hello(self):
self.hello_count += 1 self.hello_count += 1
self.other.disconnect() self.other.disconnect()
broadcaster = Broadcaster() broadcaster = Broadcaster()
first = Disconnecter(broadcaster) first = Disconnecter(broadcaster)
second = Disconnecter(broadcaster) second = Disconnecter(broadcaster)
first.other, second.other = second, first first.other, second.other = second, first
first.connect() first.connect()
second.connect() second.connect()
broadcaster.notify('hello') broadcaster.notify("hello")
# only one of them was notified # only one of them was notified
eq_(first.hello_count + second.hello_count, 1) eq_(first.hello_count + second.hello_count, 1)
def test_disconnect(): def test_disconnect():
# After a disconnect, the listener doesn't hear anything. # After a disconnect, the listener doesn't hear anything.
b, l = create_pair() b, l = create_pair()
l.connect() l.connect()
l.disconnect() l.disconnect()
b.notify('hello') b.notify("hello")
eq_(l.hello_count, 0) eq_(l.hello_count, 0)
def test_disconnect_when_not_connected(): def test_disconnect_when_not_connected():
# When disconnecting an already disconnected listener, nothing happens. # When disconnecting an already disconnected listener, nothing happens.
b, l = create_pair() b, l = create_pair()
l.disconnect() l.disconnect()
def test_not_connected_on_init(): def test_not_connected_on_init():
# A listener is not initialized connected. # A listener is not initialized connected.
b, l = create_pair() b, l = create_pair()
b.notify('hello') b.notify("hello")
eq_(l.hello_count, 0) eq_(l.hello_count, 0)
def test_notify(): def test_notify():
# The listener listens to the broadcaster. # The listener listens to the broadcaster.
b, l = create_pair() b, l = create_pair()
l.connect() l.connect()
b.notify('hello') b.notify("hello")
eq_(l.hello_count, 1) eq_(l.hello_count, 1)
def test_reconnect(): def test_reconnect():
# It's possible to reconnect a listener after disconnection. # It's possible to reconnect a listener after disconnection.
b, l = create_pair() b, l = create_pair()
l.connect() l.connect()
l.disconnect() l.disconnect()
l.connect() l.connect()
b.notify('hello') b.notify("hello")
eq_(l.hello_count, 1) eq_(l.hello_count, 1)
def test_repeater(): def test_repeater():
b = Broadcaster() b = Broadcaster()
r = HelloRepeater(b) r = HelloRepeater(b)
l = HelloListener(r) l = HelloListener(r)
r.connect() r.connect()
l.connect() l.connect()
b.notify('hello') b.notify("hello")
eq_(r.hello_count, 1) eq_(r.hello_count, 1)
eq_(l.hello_count, 1) eq_(l.hello_count, 1)
def test_repeater_with_repeated_notifications(): def test_repeater_with_repeated_notifications():
# If REPEATED_NOTIFICATIONS is not empty, only notifs in this set are repeated (but they're # If REPEATED_NOTIFICATIONS is not empty, only notifs in this set are repeated (but they're
# still dispatched locally). # still dispatched locally).
class MyRepeater(HelloRepeater): class MyRepeater(HelloRepeater):
REPEATED_NOTIFICATIONS = set(['hello']) REPEATED_NOTIFICATIONS = set(["hello"])
def __init__(self, broadcaster): def __init__(self, broadcaster):
HelloRepeater.__init__(self, broadcaster) HelloRepeater.__init__(self, broadcaster)
self.foo_count = 0 self.foo_count = 0
def foo(self): def foo(self):
self.foo_count += 1 self.foo_count += 1
b = Broadcaster() b = Broadcaster()
r = MyRepeater(b) r = MyRepeater(b)
l = HelloListener(r) l = HelloListener(r)
r.connect() r.connect()
l.connect() l.connect()
b.notify('hello') b.notify("hello")
b.notify(
    "foo"
)  # if the repeater repeated this notif, we'd get a crash on HelloListener
eq_(r.hello_count, 1) eq_(r.hello_count, 1)
eq_(l.hello_count, 1) eq_(l.hello_count, 1)
eq_(r.foo_count, 1) eq_(r.foo_count, 1)
def test_repeater_doesnt_try_to_dispatch_to_self_if_it_cant(): def test_repeater_doesnt_try_to_dispatch_to_self_if_it_cant():
# if a repeater doesn't handle a particular message, it doesn't crash and simply repeats it. # if a repeater doesn't handle a particular message, it doesn't crash and simply repeats it.
b = Broadcaster() b = Broadcaster()
r = Repeater(b) # doesnt handle hello r = Repeater(b) # doesnt handle hello
l = HelloListener(r) l = HelloListener(r)
r.connect() r.connect()
l.connect() l.connect()
b.notify('hello') # no crash b.notify("hello") # no crash
eq_(l.hello_count, 1) eq_(l.hello_count, 1)
def test_bind_messages(): def test_bind_messages():
b, l = create_pair() b, l = create_pair()
l.bind_messages({'foo', 'bar'}, l.hello) l.bind_messages({"foo", "bar"}, l.hello)
l.connect() l.connect()
b.notify('foo') b.notify("foo")
b.notify('bar') b.notify("bar")
b.notify('hello') # Normal dispatching still work b.notify("hello") # Normal dispatching still work
eq_(l.hello_count, 3) eq_(l.hello_count, 3)
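In a nutshell, the pattern under test (a sketch; SaveListener is hypothetical): a notification name is dispatched to the listener method of the same name.

class SaveListener(Listener):  # hypothetical listener
    def document_saved(self):
        print("saved!")

b = Broadcaster()
listener = SaveListener(b)
listener.connect()
b.notify("document_saved")  # prints "saved!"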
View File
@@ -2,8 +2,8 @@
# Created On: 2006/02/21 # Created On: 2006/02/21
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import sys import sys
@@ -14,33 +14,39 @@ from pytest import raises, mark
from ..path import Path, pathify from ..path import Path, pathify
from ..testutil import eq_ from ..testutil import eq_
def pytest_funcarg__force_ossep(request): def pytest_funcarg__force_ossep(request):
monkeypatch = request.getfuncargvalue('monkeypatch') monkeypatch = request.getfuncargvalue("monkeypatch")
monkeypatch.setattr(os, 'sep', '/') monkeypatch.setattr(os, "sep", "/")
def test_empty(force_ossep): def test_empty(force_ossep):
path = Path('') path = Path("")
eq_('',str(path)) eq_("", str(path))
eq_(0,len(path)) eq_(0, len(path))
path = Path(()) path = Path(())
eq_('',str(path)) eq_("", str(path))
eq_(0,len(path)) eq_(0, len(path))
def test_single(force_ossep): def test_single(force_ossep):
path = Path('foobar') path = Path("foobar")
eq_('foobar',path) eq_("foobar", path)
eq_(1,len(path)) eq_(1, len(path))
def test_multiple(force_ossep): def test_multiple(force_ossep):
path = Path('foo/bar') path = Path("foo/bar")
eq_('foo/bar',path) eq_("foo/bar", path)
eq_(2,len(path)) eq_(2, len(path))
def test_init_with_tuple_and_list(force_ossep): def test_init_with_tuple_and_list(force_ossep):
path = Path(('foo','bar')) path = Path(("foo", "bar"))
eq_('foo/bar',path) eq_("foo/bar", path)
path = Path(['foo','bar']) path = Path(["foo", "bar"])
eq_('foo/bar',path) eq_("foo/bar", path)
def test_init_with_invalid_value(force_ossep): def test_init_with_invalid_value(force_ossep):
try: try:
@@ -49,208 +55,236 @@ def test_init_with_invalid_value(force_ossep):
except TypeError: except TypeError:
pass pass
def test_access(force_ossep): def test_access(force_ossep):
path = Path('foo/bar/bleh') path = Path("foo/bar/bleh")
eq_('foo',path[0]) eq_("foo", path[0])
eq_('foo',path[-3]) eq_("foo", path[-3])
eq_('bar',path[1]) eq_("bar", path[1])
eq_('bar',path[-2]) eq_("bar", path[-2])
eq_('bleh',path[2]) eq_("bleh", path[2])
eq_('bleh',path[-1]) eq_("bleh", path[-1])
def test_slicing(force_ossep): def test_slicing(force_ossep):
path = Path('foo/bar/bleh') path = Path("foo/bar/bleh")
subpath = path[:2] subpath = path[:2]
eq_('foo/bar',subpath) eq_("foo/bar", subpath)
assert isinstance(subpath, Path)

def test_parent(force_ossep):
    path = Path("foo/bar/bleh")
    subpath = path.parent()
    eq_("foo/bar", subpath)
    assert isinstance(subpath, Path)
def test_filename(force_ossep): def test_filename(force_ossep):
path = Path('foo/bar/bleh.ext') path = Path("foo/bar/bleh.ext")
eq_(path.name, 'bleh.ext') eq_(path.name, "bleh.ext")
def test_deal_with_empty_components(force_ossep): def test_deal_with_empty_components(force_ossep):
"""Keep ONLY a leading space, which means we want a leading slash. """Keep ONLY a leading space, which means we want a leading slash.
""" """
eq_('foo//bar',str(Path(('foo','','bar')))) eq_("foo//bar", str(Path(("foo", "", "bar"))))
eq_('/foo/bar',str(Path(('','foo','bar')))) eq_("/foo/bar", str(Path(("", "foo", "bar"))))
eq_('foo/bar',str(Path('foo/bar/'))) eq_("foo/bar", str(Path("foo/bar/")))
def test_old_compare_paths(force_ossep): def test_old_compare_paths(force_ossep):
eq_(Path('foobar'),Path('foobar')) eq_(Path("foobar"), Path("foobar"))
eq_(Path('foobar/'),Path('foobar\\','\\')) eq_(Path("foobar/"), Path("foobar\\", "\\"))
eq_(Path('/foobar/'),Path('\\foobar\\','\\')) eq_(Path("/foobar/"), Path("\\foobar\\", "\\"))
eq_(Path('/foo/bar'),Path('\\foo\\bar','\\')) eq_(Path("/foo/bar"), Path("\\foo\\bar", "\\"))
eq_(Path('/foo/bar'),Path('\\foo\\bar\\','\\')) eq_(Path("/foo/bar"), Path("\\foo\\bar\\", "\\"))
assert Path('/foo/bar') != Path('\\foo\\foo','\\') assert Path("/foo/bar") != Path("\\foo\\foo", "\\")
#We also have to test __ne__ # We also have to test __ne__
assert not (Path('foobar') != Path('foobar')) assert not (Path("foobar") != Path("foobar"))
assert Path('/a/b/c.x') != Path('/a/b/c.y') assert Path("/a/b/c.x") != Path("/a/b/c.y")
def test_old_split_path(force_ossep): def test_old_split_path(force_ossep):
eq_(Path('foobar'),('foobar',)) eq_(Path("foobar"), ("foobar",))
eq_(Path('foo/bar'),('foo','bar')) eq_(Path("foo/bar"), ("foo", "bar"))
eq_(Path('/foo/bar/'),('','foo','bar')) eq_(Path("/foo/bar/"), ("", "foo", "bar"))
eq_(Path('\\foo\\bar','\\'),('','foo','bar')) eq_(Path("\\foo\\bar", "\\"), ("", "foo", "bar"))
def test_representation(force_ossep): def test_representation(force_ossep):
eq_("('foo', 'bar')",repr(Path(('foo','bar')))) eq_("('foo', 'bar')", repr(Path(("foo", "bar"))))
def test_add(force_ossep): def test_add(force_ossep):
eq_('foo/bar/bar/foo',Path(('foo','bar')) + Path('bar/foo')) eq_("foo/bar/bar/foo", Path(("foo", "bar")) + Path("bar/foo"))
eq_('foo/bar/bar/foo',Path('foo/bar') + 'bar/foo') eq_("foo/bar/bar/foo", Path("foo/bar") + "bar/foo")
eq_('foo/bar/bar/foo',Path('foo/bar') + ('bar','foo')) eq_("foo/bar/bar/foo", Path("foo/bar") + ("bar", "foo"))
eq_('foo/bar/bar/foo',('foo','bar') + Path('bar/foo')) eq_("foo/bar/bar/foo", ("foo", "bar") + Path("bar/foo"))
eq_('foo/bar/bar/foo','foo/bar' + Path('bar/foo')) eq_("foo/bar/bar/foo", "foo/bar" + Path("bar/foo"))
#Invalid concatenation # Invalid concatenation
try: try:
Path(('foo','bar')) + 1 Path(("foo", "bar")) + 1
assert False assert False
except TypeError: except TypeError:
pass pass
def test_path_slice(force_ossep): def test_path_slice(force_ossep):
foo = Path('foo') foo = Path("foo")
bar = Path('bar') bar = Path("bar")
foobar = Path('foo/bar') foobar = Path("foo/bar")
eq_('bar',foobar[foo:]) eq_("bar", foobar[foo:])
eq_('foo',foobar[:bar]) eq_("foo", foobar[:bar])
eq_('foo/bar',foobar[bar:]) eq_("foo/bar", foobar[bar:])
eq_('foo/bar',foobar[:foo]) eq_("foo/bar", foobar[:foo])
eq_((),foobar[foobar:]) eq_((), foobar[foobar:])
eq_((),foobar[:foobar]) eq_((), foobar[:foobar])
abcd = Path('a/b/c/d') abcd = Path("a/b/c/d")
a = Path('a') a = Path("a")
b = Path('b') b = Path("b")
c = Path('c') c = Path("c")
d = Path('d') d = Path("d")
z = Path('z') z = Path("z")
eq_('b/c',abcd[a:d]) eq_("b/c", abcd[a:d])
eq_('b/c/d',abcd[a:d+z]) eq_("b/c/d", abcd[a : d + z])
eq_('b/c',abcd[a:z+d]) eq_("b/c", abcd[a : z + d])
eq_('a/b/c/d',abcd[:z]) eq_("a/b/c/d", abcd[:z])
def test_add_with_root_path(force_ossep): def test_add_with_root_path(force_ossep):
"""if I perform /a/b/c + /d/e/f, I want /a/b/c/d/e/f, not /a/b/c//d/e/f """if I perform /a/b/c + /d/e/f, I want /a/b/c/d/e/f, not /a/b/c//d/e/f
""" """
eq_('/foo/bar',str(Path('/foo') + Path('/bar'))) eq_("/foo/bar", str(Path("/foo") + Path("/bar")))
def test_create_with_tuple_that_have_slash_inside(force_ossep, monkeypatch): def test_create_with_tuple_that_have_slash_inside(force_ossep, monkeypatch):
eq_(('','foo','bar'), Path(('/foo','bar'))) eq_(("", "foo", "bar"), Path(("/foo", "bar")))
monkeypatch.setattr(os, 'sep', '\\') monkeypatch.setattr(os, "sep", "\\")
eq_(('','foo','bar'), Path(('\\foo','bar'))) eq_(("", "foo", "bar"), Path(("\\foo", "bar")))
def test_auto_decode_os_sep(force_ossep, monkeypatch): def test_auto_decode_os_sep(force_ossep, monkeypatch):
"""Path should decode any either / or os.sep, but always encode in os.sep. """Path should decode any either / or os.sep, but always encode in os.sep.
""" """
eq_(('foo\\bar','bleh'),Path('foo\\bar/bleh')) eq_(("foo\\bar", "bleh"), Path("foo\\bar/bleh"))
monkeypatch.setattr(os, 'sep', '\\') monkeypatch.setattr(os, "sep", "\\")
eq_(('foo','bar/bleh'),Path('foo\\bar/bleh')) eq_(("foo", "bar/bleh"), Path("foo\\bar/bleh"))
path = Path('foo/bar') path = Path("foo/bar")
eq_(('foo','bar'),path) eq_(("foo", "bar"), path)
eq_('foo\\bar',str(path)) eq_("foo\\bar", str(path))
def test_contains(force_ossep): def test_contains(force_ossep):
p = Path(('foo','bar')) p = Path(("foo", "bar"))
assert Path(('foo','bar','bleh')) in p assert Path(("foo", "bar", "bleh")) in p
assert Path(('foo','bar')) in p assert Path(("foo", "bar")) in p
assert 'foo' in p assert "foo" in p
assert 'bleh' not in p assert "bleh" not in p
assert Path('foo') not in p assert Path("foo") not in p
def test_is_parent_of(force_ossep): def test_is_parent_of(force_ossep):
assert Path(('foo','bar')).is_parent_of(Path(('foo','bar','bleh'))) assert Path(("foo", "bar")).is_parent_of(Path(("foo", "bar", "bleh")))
assert not Path(('foo','bar')).is_parent_of(Path(('foo','baz'))) assert not Path(("foo", "bar")).is_parent_of(Path(("foo", "baz")))
assert not Path(('foo','bar')).is_parent_of(Path(('foo','bar'))) assert not Path(("foo", "bar")).is_parent_of(Path(("foo", "bar")))
def test_windows_drive_letter(force_ossep): def test_windows_drive_letter(force_ossep):
p = Path(('c:',)) p = Path(("c:",))
eq_('c:\\',str(p)) eq_("c:\\", str(p))
def test_root_path(force_ossep): def test_root_path(force_ossep):
p = Path('/') p = Path("/")
eq_('/',str(p)) eq_("/", str(p))
def test_str_encodes_unicode_to_getfilesystemencoding(force_ossep): def test_str_encodes_unicode_to_getfilesystemencoding(force_ossep):
p = Path(('foo','bar\u00e9')) p = Path(("foo", "bar\u00e9"))
eq_('foo/bar\u00e9'.encode(sys.getfilesystemencoding()), p.tobytes()) eq_("foo/bar\u00e9".encode(sys.getfilesystemencoding()), p.tobytes())
def test_unicode(force_ossep): def test_unicode(force_ossep):
p = Path(('foo','bar\u00e9')) p = Path(("foo", "bar\u00e9"))
eq_('foo/bar\u00e9',str(p)) eq_("foo/bar\u00e9", str(p))
def test_str_repr_of_mix_between_non_ascii_str_and_unicode(force_ossep): def test_str_repr_of_mix_between_non_ascii_str_and_unicode(force_ossep):
u = 'foo\u00e9' u = "foo\u00e9"
encoded = u.encode(sys.getfilesystemencoding()) encoded = u.encode(sys.getfilesystemencoding())
p = Path((encoded,'bar')) p = Path((encoded, "bar"))
print(repr(tuple(p))) print(repr(tuple(p)))
eq_('foo\u00e9/bar'.encode(sys.getfilesystemencoding()), p.tobytes()) eq_("foo\u00e9/bar".encode(sys.getfilesystemencoding()), p.tobytes())
def test_Path_of_a_Path_returns_self(force_ossep): def test_Path_of_a_Path_returns_self(force_ossep):
#if Path() is called with a path as value, just return value. # if Path() is called with a path as value, just return value.
p = Path('foo/bar') p = Path("foo/bar")
assert Path(p) is p assert Path(p) is p
def test_getitem_str(force_ossep): def test_getitem_str(force_ossep):
# path['something'] returns the child path corresponding to the name # path['something'] returns the child path corresponding to the name
p = Path('/foo/bar') p = Path("/foo/bar")
eq_(p['baz'], Path('/foo/bar/baz')) eq_(p["baz"], Path("/foo/bar/baz"))
def test_getitem_path(force_ossep): def test_getitem_path(force_ossep):
# path[Path('something')] returns the child path corresponding to the name (or subpath) # path[Path('something')] returns the child path corresponding to the name (or subpath)
p = Path('/foo/bar') p = Path("/foo/bar")
eq_(p[Path('baz/bleh')], Path('/foo/bar/baz/bleh')) eq_(p[Path("baz/bleh")], Path("/foo/bar/baz/bleh"))
@mark.xfail(reason="pytest's capture mechanism is flaky, I have to investigate") @mark.xfail(reason="pytest's capture mechanism is flaky, I have to investigate")
def test_log_unicode_errors(force_ossep, monkeypatch, capsys): def test_log_unicode_errors(force_ossep, monkeypatch, capsys):
# When there's a UnicodeDecodeError on path creation, log it so it's possible
# to debug the cause of it.
monkeypatch.setattr(sys, 'getfilesystemencoding', lambda: 'ascii') monkeypatch.setattr(sys, "getfilesystemencoding", lambda: "ascii")
with raises(UnicodeDecodeError): with raises(UnicodeDecodeError):
Path(['', b'foo\xe9']) Path(["", b"foo\xe9"])
out, err = capsys.readouterr() out, err = capsys.readouterr()
assert repr(b'foo\xe9') in err assert repr(b"foo\xe9") in err
def test_has_drive_letter(monkeypatch): def test_has_drive_letter(monkeypatch):
monkeypatch.setattr(os, 'sep', '\\') monkeypatch.setattr(os, "sep", "\\")
p = Path('foo\\bar') p = Path("foo\\bar")
assert not p.has_drive_letter() assert not p.has_drive_letter()
p = Path('C:\\') p = Path("C:\\")
assert p.has_drive_letter() assert p.has_drive_letter()
p = Path('z:\\foo') p = Path("z:\\foo")
assert p.has_drive_letter() assert p.has_drive_letter()
def test_remove_drive_letter(monkeypatch): def test_remove_drive_letter(monkeypatch):
monkeypatch.setattr(os, 'sep', '\\') monkeypatch.setattr(os, "sep", "\\")
p = Path('foo\\bar') p = Path("foo\\bar")
eq_(p.remove_drive_letter(), Path('foo\\bar')) eq_(p.remove_drive_letter(), Path("foo\\bar"))
p = Path('C:\\') p = Path("C:\\")
eq_(p.remove_drive_letter(), Path('')) eq_(p.remove_drive_letter(), Path(""))
p = Path('z:\\foo') p = Path("z:\\foo")
eq_(p.remove_drive_letter(), Path('foo')) eq_(p.remove_drive_letter(), Path("foo"))
def test_pathify(): def test_pathify():
@pathify @pathify
def foo(a: Path, b, c:Path): def foo(a: Path, b, c: Path):
return a, b, c return a, b, c
a, b, c = foo('foo', 0, c=Path('bar')) a, b, c = foo("foo", 0, c=Path("bar"))
assert isinstance(a, Path) assert isinstance(a, Path)
assert a == Path('foo') assert a == Path("foo")
assert b == 0 assert b == 0
assert isinstance(c, Path) assert isinstance(c, Path)
assert c == Path('bar') assert c == Path("bar")
def test_pathify_preserve_none(): def test_pathify_preserve_none():
# @pathify preserves None value and doesn't try to return a Path # @pathify preserves None value and doesn't try to return a Path
@pathify @pathify
def foo(a: Path): def foo(a: Path):
return a return a
a = foo(None) a = foo(None)
assert a is None assert a is None
View File
@@ -1,14 +1,15 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2011-09-06 # Created On: 2011-09-06
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from ..testutil import eq_, callcounter, CallLogger from ..testutil import eq_, callcounter, CallLogger
from ..gui.selectable_list import SelectableList, GUISelectableList from ..gui.selectable_list import SelectableList, GUISelectableList
def test_in(): def test_in():
# When a SelectableList is in a list, doing "in list" with another instance returns false, even # When a SelectableList is in a list, doing "in list" with another instance returns false, even
# if they're the same as lists. # if they're the same as lists.
@@ -16,50 +17,56 @@ def test_in():
some_list = [sl] some_list = [sl]
assert SelectableList() not in some_list assert SelectableList() not in some_list
def test_selection_range(): def test_selection_range():
# selection is correctly adjusted on deletion # selection is correctly adjusted on deletion
sl = SelectableList(['foo', 'bar', 'baz']) sl = SelectableList(["foo", "bar", "baz"])
sl.selected_index = 3 sl.selected_index = 3
eq_(sl.selected_index, 2) eq_(sl.selected_index, 2)
del sl[2] del sl[2]
eq_(sl.selected_index, 1) eq_(sl.selected_index, 1)
def test_update_selection_called(): def test_update_selection_called():
# _update_selection() is called after a change in selection. However, we only do so on select()
# calls. I follow the old behavior of the Table class. At the moment, I don't quite remember # calls. I follow the old behavior of the Table class. At the moment, I don't quite remember
# why there was a specific select() method for triggering _update_selection(), but I think I # why there was a specific select() method for triggering _update_selection(), but I think I
# remember there was a reason, so I keep it that way. # remember there was a reason, so I keep it that way.
sl = SelectableList(['foo', 'bar']) sl = SelectableList(["foo", "bar"])
sl._update_selection = callcounter() sl._update_selection = callcounter()
sl.select(1) sl.select(1)
eq_(sl._update_selection.callcount, 1) eq_(sl._update_selection.callcount, 1)
sl.selected_index = 0 sl.selected_index = 0
eq_(sl._update_selection.callcount, 1) # no call eq_(sl._update_selection.callcount, 1) # no call
def test_guicalls(): def test_guicalls():
# A GUISelectableList appropriately calls its view. # A GUISelectableList appropriately calls its view.
sl = GUISelectableList(['foo', 'bar']) sl = GUISelectableList(["foo", "bar"])
sl.view = CallLogger() sl.view = CallLogger()
sl.view.check_gui_calls(
    ["refresh"]
)  # Upon setting the view, we get a call to refresh()
sl[1] = "baz"
sl.view.check_gui_calls(["refresh"])
sl.append("foo")
sl.view.check_gui_calls(["refresh"])
del sl[2] del sl[2]
sl.view.check_gui_calls(['refresh']) sl.view.check_gui_calls(["refresh"])
sl.remove('baz') sl.remove("baz")
sl.view.check_gui_calls(['refresh']) sl.view.check_gui_calls(["refresh"])
sl.insert(0, 'foo') sl.insert(0, "foo")
sl.view.check_gui_calls(['refresh']) sl.view.check_gui_calls(["refresh"])
sl.select(1) sl.select(1)
sl.view.check_gui_calls(['update_selection']) sl.view.check_gui_calls(["update_selection"])
# XXX We have to give up on this for now because of a breakage it causes in the tables. # XXX We have to give up on this for now because of a breakage it causes in the tables.
# sl.select(1) # don't update when selection stays the same # sl.select(1) # don't update when selection stays the same
# gui.check_gui_calls([]) # gui.check_gui_calls([])
def test_search_by_prefix(): def test_search_by_prefix():
sl = SelectableList(['foo', 'bAr', 'baZ']) sl = SelectableList(["foo", "bAr", "baZ"])
eq_(sl.search_by_prefix('b'), 1) eq_(sl.search_by_prefix("b"), 1)
eq_(sl.search_by_prefix('BA'), 1) eq_(sl.search_by_prefix("BA"), 1)
eq_(sl.search_by_prefix('BAZ'), 2) eq_(sl.search_by_prefix("BAZ"), 2)
eq_(sl.search_by_prefix('BAZZ'), -1) eq_(sl.search_by_prefix("BAZZ"), -1)
View File
@@ -2,8 +2,8 @@
# Created On: 2007/05/19 # Created On: 2007/05/19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
import time import time
@@ -19,69 +19,75 @@ from ..sqlite import ThreadedConn
# Threading is hard to test. In a lot of those tests, a failure means that the test run will # Threading is hard to test. In a lot of those tests, a failure means that the test run will
# hang forever. I don't know of a better alternative. # hang forever. I don't know of a better alternative.
def test_can_access_from_multiple_threads(): def test_can_access_from_multiple_threads():
def run(): def run():
con.execute('insert into foo(bar) values(\'baz\')') con.execute("insert into foo(bar) values('baz')")
con = ThreadedConn(':memory:', True) con = ThreadedConn(":memory:", True)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
t = threading.Thread(target=run) t = threading.Thread(target=run)
t.start() t.start()
t.join() t.join()
result = con.execute('select * from foo') result = con.execute("select * from foo")
eq_(1, len(result)) eq_(1, len(result))
eq_('baz', result[0][0]) eq_("baz", result[0][0])
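One standard way to keep such a test from hanging the whole run is a join-with-timeout wrapper; the sketch below is generic and illustrative, not part of hscommon:

import threading

def run_with_timeout(testfunc, timeout=5.0):
    # Run testfunc on a worker thread; a deadlock becomes a fast failure
    # instead of a test run that hangs forever.
    errors = []

    def target():
        try:
            testfunc()
        except Exception as e:
            errors.append(e)

    t = threading.Thread(target=target, daemon=True)
    t.start()
    t.join(timeout)
    if t.is_alive():
        raise AssertionError("test did not finish within %s seconds" % timeout)
    if errors:
        raise errors[0]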
def test_exception_during_query(): def test_exception_during_query():
con = ThreadedConn(':memory:', True) con = ThreadedConn(":memory:", True)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
with raises(sqlite.OperationalError): with raises(sqlite.OperationalError):
con.execute('select * from bleh') con.execute("select * from bleh")
def test_not_autocommit(tmpdir): def test_not_autocommit(tmpdir):
dbpath = str(tmpdir.join('foo.db')) dbpath = str(tmpdir.join("foo.db"))
con = ThreadedConn(dbpath, False) con = ThreadedConn(dbpath, False)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
con.execute('insert into foo(bar) values(\'baz\')') con.execute("insert into foo(bar) values('baz')")
del con del con
#The data shouldn't have been inserted # The data shouldn't have been inserted
con = ThreadedConn(dbpath, False) con = ThreadedConn(dbpath, False)
result = con.execute('select * from foo') result = con.execute("select * from foo")
eq_(0, len(result)) eq_(0, len(result))
con.execute('insert into foo(bar) values(\'baz\')') con.execute("insert into foo(bar) values('baz')")
con.commit() con.commit()
del con del con
# Now the data should be there # Now the data should be there
con = ThreadedConn(dbpath, False) con = ThreadedConn(dbpath, False)
result = con.execute('select * from foo') result = con.execute("select * from foo")
eq_(1, len(result)) eq_(1, len(result))
def test_rollback(): def test_rollback():
con = ThreadedConn(':memory:', False) con = ThreadedConn(":memory:", False)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
con.execute('insert into foo(bar) values(\'baz\')') con.execute("insert into foo(bar) values('baz')")
con.rollback() con.rollback()
result = con.execute('select * from foo') result = con.execute("select * from foo")
eq_(0, len(result)) eq_(0, len(result))
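The commit/rollback semantics these two tests pin down match the standard library's sqlite3 module, which can be checked directly:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("create table foo(bar TEXT)")
con.commit()  # commit the DDL so only the insert is in flight
con.execute("insert into foo(bar) values('baz')")
con.rollback()  # discards the insert: no commit() happened
assert con.execute("select count(*) from foo").fetchone()[0] == 0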
def test_query_placeholders(): def test_query_placeholders():
con = ThreadedConn(':memory:', True) con = ThreadedConn(":memory:", True)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
con.execute('insert into foo(bar) values(?)', ['baz']) con.execute("insert into foo(bar) values(?)", ["baz"])
result = con.execute('select * from foo') result = con.execute("select * from foo")
eq_(1, len(result)) eq_(1, len(result))
eq_('baz', result[0][0]) eq_("baz", result[0][0])
def test_make_sure_theres_no_messup_between_queries(): def test_make_sure_theres_no_messup_between_queries():
def run(expected_rowid): def run(expected_rowid):
time.sleep(0.1) time.sleep(0.1)
result = con.execute('select rowid from foo where rowid = ?', [expected_rowid]) result = con.execute("select rowid from foo where rowid = ?", [expected_rowid])
assert expected_rowid == result[0][0] assert expected_rowid == result[0][0]
con = ThreadedConn(':memory:', True) con = ThreadedConn(":memory:", True)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
for i in range(100): for i in range(100):
con.execute('insert into foo(bar) values(\'baz\')') con.execute("insert into foo(bar) values('baz')")
threads = [] threads = []
for i in range(1, 101): for i in range(1, 101):
t = threading.Thread(target=run, args=(i,)) t = threading.Thread(target=run, args=(i,))
@ -91,36 +97,41 @@ def test_make_sure_theres_no_messup_between_queries():
time.sleep(0.1) time.sleep(0.1)
threads = [t for t in threads if t.isAlive()] threads = [t for t in threads if t.isAlive()]
def test_query_after_close(): def test_query_after_close():
con = ThreadedConn(':memory:', True) con = ThreadedConn(":memory:", True)
con.close() con.close()
con.execute('select 1') con.execute("select 1")
def test_lastrowid(): def test_lastrowid():
# It's not possible to return a cursor because of the threading, but lastrowid should be # It's not possible to return a cursor because of the threading, but lastrowid should be
# fetchable from the connection itself # fetchable from the connection itself
con = ThreadedConn(':memory:', True) con = ThreadedConn(":memory:", True)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
con.execute('insert into foo(bar) values(\'baz\')') con.execute("insert into foo(bar) values('baz')")
eq_(1, con.lastrowid) eq_(1, con.lastrowid)
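The shape of such a wrapper might look like the sketch below: SQL is serialized onto a single worker thread and lastrowid is copied back onto the wrapper after each statement. This is an assumed illustration (error handling omitted), not hscommon's actual ThreadedConn:

import queue
import sqlite3
import threading

class ThreadedConnSketch:
    def __init__(self, dbpath):
        self._requests = queue.Queue()
        self.lastrowid = None
        threading.Thread(target=self._worker, args=(dbpath,), daemon=True).start()

    def _worker(self, dbpath):
        # The sqlite3 connection lives on this thread only; cursors never
        # cross threads, so results are materialized here.
        con = sqlite3.connect(dbpath)
        while True:
            sql, params, done, out = self._requests.get()
            cur = con.execute(sql, params)
            out["rows"] = cur.fetchall()
            out["lastrowid"] = cur.lastrowid
            done.set()

    def execute(self, sql, params=()):
        done, out = threading.Event(), {}
        self._requests.put((sql, params, done, out))
        done.wait()
        self.lastrowid = out["lastrowid"]
        return out["rows"]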
def test_add_fetchone_fetchall_interface_to_results(): def test_add_fetchone_fetchall_interface_to_results():
con = ThreadedConn(':memory:', True) con = ThreadedConn(":memory:", True)
con.execute('create table foo(bar TEXT)') con.execute("create table foo(bar TEXT)")
con.execute('insert into foo(bar) values(\'baz1\')') con.execute("insert into foo(bar) values('baz1')")
con.execute('insert into foo(bar) values(\'baz2\')') con.execute("insert into foo(bar) values('baz2')")
result = con.execute('select * from foo') result = con.execute("select * from foo")
ref = result[:] ref = result[:]
eq_(ref, result.fetchall()) eq_(ref, result.fetchall())
eq_(ref[0], result.fetchone()) eq_(ref[0], result.fetchone())
eq_(ref[1], result.fetchone()) eq_(ref[1], result.fetchone())
assert result.fetchone() is None assert result.fetchone() is None
def test_non_ascii_dbname(tmpdir): def test_non_ascii_dbname(tmpdir):
ThreadedConn(str(tmpdir.join('foo\u00e9.db')), True) ThreadedConn(str(tmpdir.join("foo\u00e9.db")), True)
def test_non_ascii_dbdir(tmpdir): def test_non_ascii_dbdir(tmpdir):
# when this test fails, it doesn't fail gracefully, it brings the whole test suite down with it. # when this test fails, it doesn't fail gracefully, it brings the whole test suite down with it.
dbdir = tmpdir.join('foo\u00e9') dbdir = tmpdir.join("foo\u00e9")
os.mkdir(str(dbdir)) os.mkdir(str(dbdir))
ThreadedConn(str(dbdir.join('foo.db')), True) ThreadedConn(str(dbdir.join("foo.db")), True)
View File
@ -9,6 +9,7 @@
from ..testutil import CallLogger, eq_ from ..testutil import CallLogger, eq_
from ..gui.table import Table, GUITable, Row from ..gui.table import Table, GUITable, Row
class TestRow(Row): class TestRow(Row):
def __init__(self, table, index, is_new=False): def __init__(self, table, index, is_new=False):
Row.__init__(self, table) Row.__init__(self, table)
@ -55,6 +56,7 @@ def table_with_footer():
table.footer = footer table.footer = footer
return table, footer return table, footer
def table_with_header(): def table_with_header():
table = Table() table = Table()
table.append(TestRow(table, 1)) table.append(TestRow(table, 1))
@ -62,24 +64,28 @@ def table_with_header():
table.header = header table.header = header
return table, header return table, header
#--- Tests
# --- Tests
def test_allow_edit_when_attr_is_property_with_fset(): def test_allow_edit_when_attr_is_property_with_fset():
# When a row has a property that has a fset, by default, make that cell editable. # When a row has a property that has a fset, by default, make that cell editable.
class TestRow(Row): class TestRow(Row):
@property @property
def foo(self): def foo(self):
pass pass
@property @property
def bar(self): def bar(self):
pass pass
@bar.setter @bar.setter
def bar(self, value): def bar(self, value):
pass pass
row = TestRow(Table()) row = TestRow(Table())
assert row.can_edit_cell('bar') assert row.can_edit_cell("bar")
assert not row.can_edit_cell('foo') assert not row.can_edit_cell("foo")
assert not row.can_edit_cell('baz') # doesn't exist, can't edit assert not row.can_edit_cell("baz") # doesn't exist, can't edit
def test_can_edit_prop_has_priority_over_fset_checks(): def test_can_edit_prop_has_priority_over_fset_checks():
# When a row has a can_edit_* property, it's the result of that property that is used, not the # When a row has a can_edit_* property, it's the result of that property that is used, not the
@ -88,13 +94,16 @@ def test_can_edit_prop_has_priority_over_fset_checks():
@property @property
def bar(self): def bar(self):
pass pass
@bar.setter @bar.setter
def bar(self, value): def bar(self, value):
pass pass
can_edit_bar = False can_edit_bar = False
row = TestRow(Table()) row = TestRow(Table())
assert not row.can_edit_cell('bar') assert not row.can_edit_cell("bar")
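Taken together, the two tests above suggest edit-permission logic along these lines. This is an assumed reconstruction, not the actual Row code:

def can_edit_cell_sketch(row, attrname):
    # An explicit can_edit_<attr> attribute wins over any fset check.
    override = getattr(row, "can_edit_" + attrname, None)
    if override is not None:
        return bool(override)
    # Otherwise a cell is editable iff the attr is a property with a setter.
    prop = getattr(type(row), attrname, None)
    return isinstance(prop, property) and prop.fset is not None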
def test_in(): def test_in():
# When a table is in a list, doing "in list" with another instance returns false, even if # When a table is in a list, doing "in list" with another instance returns false, even if
@ -103,12 +112,14 @@ def test_in():
some_list = [table] some_list = [table]
assert Table() not in some_list assert Table() not in some_list
def test_footer_del_all(): def test_footer_del_all():
# Removing all rows doesn't crash when doing the footer check. # Removing all rows doesn't crash when doing the footer check.
table, footer = table_with_footer() table, footer = table_with_footer()
del table[:] del table[:]
assert table.footer is None assert table.footer is None
def test_footer_del_row(): def test_footer_del_row():
# Removing the footer row sets it to None # Removing the footer row sets it to None
table, footer = table_with_footer() table, footer = table_with_footer()
@ -116,18 +127,21 @@ def test_footer_del_row():
assert table.footer is None assert table.footer is None
eq_(len(table), 1) eq_(len(table), 1)
def test_footer_is_appended_to_table(): def test_footer_is_appended_to_table():
# A footer is appended at the table's bottom # A footer is appended at the table's bottom
table, footer = table_with_footer() table, footer = table_with_footer()
eq_(len(table), 2) eq_(len(table), 2)
assert table[1] is footer assert table[1] is footer
def test_footer_remove(): def test_footer_remove():
# remove() on footer sets it to None # remove() on footer sets it to None
table, footer = table_with_footer() table, footer = table_with_footer()
table.remove(footer) table.remove(footer)
assert table.footer is None assert table.footer is None
def test_footer_replaces_old_footer(): def test_footer_replaces_old_footer():
table, footer = table_with_footer() table, footer = table_with_footer()
other = Row(table) other = Row(table)
@ -136,18 +150,21 @@ def test_footer_replaces_old_footer():
eq_(len(table), 2) eq_(len(table), 2)
assert table[1] is other assert table[1] is other
def test_footer_rows_and_row_count(): def test_footer_rows_and_row_count():
# rows() and row_count() ignore footer. # rows() and row_count() ignore footer.
table, footer = table_with_footer() table, footer = table_with_footer()
eq_(table.row_count, 1) eq_(table.row_count, 1)
eq_(table.rows, table[:-1]) eq_(table.rows, table[:-1])
def test_footer_setting_to_none_removes_old_one(): def test_footer_setting_to_none_removes_old_one():
table, footer = table_with_footer() table, footer = table_with_footer()
table.footer = None table.footer = None
assert table.footer is None assert table.footer is None
eq_(len(table), 1) eq_(len(table), 1)
def test_footer_stays_there_on_append(): def test_footer_stays_there_on_append():
# Appending another row puts it above the footer # Appending another row puts it above the footer
table, footer = table_with_footer() table, footer = table_with_footer()
@ -155,6 +172,7 @@ def test_footer_stays_there_on_append():
eq_(len(table), 3) eq_(len(table), 3)
assert table[2] is footer assert table[2] is footer
def test_footer_stays_there_on_insert(): def test_footer_stays_there_on_insert():
# Inserting another row puts it above the footer # Inserting another row puts it above the footer
table, footer = table_with_footer() table, footer = table_with_footer()
@ -162,12 +180,14 @@ def test_footer_stays_there_on_insert():
eq_(len(table), 3) eq_(len(table), 3)
assert table[2] is footer assert table[2] is footer
def test_header_del_all(): def test_header_del_all():
# Removing all rows doesn't crash when doing the header check. # Removing all rows doesn't crash when doing the header check.
table, header = table_with_header() table, header = table_with_header()
del table[:] del table[:]
assert table.header is None assert table.header is None
def test_header_del_row(): def test_header_del_row():
# Removing the header row sets it to None # Removing the header row sets it to None
table, header = table_with_header() table, header = table_with_header()
@ -175,18 +195,21 @@ def test_header_del_row():
assert table.header is None assert table.header is None
eq_(len(table), 1) eq_(len(table), 1)
def test_header_is_inserted_in_table(): def test_header_is_inserted_in_table():
# A header is inserted at the table's top # A header is inserted at the table's top
table, header = table_with_header() table, header = table_with_header()
eq_(len(table), 2) eq_(len(table), 2)
assert table[0] is header assert table[0] is header
def test_header_remove(): def test_header_remove():
# remove() on header sets it to None # remove() on header sets it to None
table, header = table_with_header() table, header = table_with_header()
table.remove(header) table.remove(header)
assert table.header is None assert table.header is None
def test_header_replaces_old_header(): def test_header_replaces_old_header():
table, header = table_with_header() table, header = table_with_header()
other = Row(table) other = Row(table)
@ -195,18 +218,21 @@ def test_header_replaces_old_header():
eq_(len(table), 2) eq_(len(table), 2)
assert table[0] is other assert table[0] is other
def test_header_rows_and_row_count(): def test_header_rows_and_row_count():
# rows() and row_count() ignore header. # rows() and row_count() ignore header.
table, header = table_with_header() table, header = table_with_header()
eq_(table.row_count, 1) eq_(table.row_count, 1)
eq_(table.rows, table[1:]) eq_(table.rows, table[1:])
def test_header_setting_to_none_removes_old_one(): def test_header_setting_to_none_removes_old_one():
table, header = table_with_header() table, header = table_with_header()
table.header = None table.header = None
assert table.header is None assert table.header is None
eq_(len(table), 1) eq_(len(table), 1)
def test_header_stays_there_on_insert(): def test_header_stays_there_on_insert():
# Inserting another row at the top puts it below the header # Inserting another row at the top puts it below the header
table, header = table_with_header() table, header = table_with_header()
@ -214,21 +240,24 @@ def test_header_stays_there_on_insert():
eq_(len(table), 3) eq_(len(table), 3)
assert table[0] is header assert table[0] is header
def test_refresh_view_on_refresh(): def test_refresh_view_on_refresh():
# If refresh_view is not False, we refresh the table's view on refresh() # If refresh_view is not False, we refresh the table's view on refresh()
table = TestGUITable(1) table = TestGUITable(1)
table.refresh() table.refresh()
table.view.check_gui_calls(['refresh']) table.view.check_gui_calls(["refresh"])
table.view.clear_calls() table.view.clear_calls()
table.refresh(refresh_view=False) table.refresh(refresh_view=False)
table.view.check_gui_calls([]) table.view.check_gui_calls([])
def test_restore_selection(): def test_restore_selection():
# By default, after a refresh, selection goes on the last row # By default, after a refresh, selection goes on the last row
table = TestGUITable(10) table = TestGUITable(10)
table.refresh() table.refresh()
eq_(table.selected_indexes, [9]) eq_(table.selected_indexes, [9])
def test_restore_selection_after_cancel_edits(): def test_restore_selection_after_cancel_edits():
# _restore_selection() is called after cancel_edits(). Previously, only _update_selection would # _restore_selection() is called after cancel_edits(). Previously, only _update_selection would
# be called. # be called.
@ -242,6 +271,7 @@ def test_restore_selection_after_cancel_edits():
table.cancel_edits() table.cancel_edits()
eq_(table.selected_indexes, [6]) eq_(table.selected_indexes, [6])
def test_restore_selection_with_previous_selection(): def test_restore_selection_with_previous_selection():
# By default, we try to restore the selection that was there before a refresh # By default, we try to restore the selection that was there before a refresh
table = TestGUITable(10) table = TestGUITable(10)
@ -250,6 +280,7 @@ def test_restore_selection_with_previous_selection():
table.refresh() table.refresh()
eq_(table.selected_indexes, [2, 4]) eq_(table.selected_indexes, [2, 4])
def test_restore_selection_custom(): def test_restore_selection_custom():
# After a _fill() call, the virtual _restore_selection() is called so that it's possible for a # After a _fill() call, the virtual _restore_selection() is called so that it's possible for a
# GUITable subclass to customize its post-refresh selection behavior. # GUITable subclass to customize its post-refresh selection behavior.
@ -261,58 +292,64 @@ def test_restore_selection_custom():
table.refresh() table.refresh()
eq_(table.selected_indexes, [6]) eq_(table.selected_indexes, [6])
def test_row_cell_value(): def test_row_cell_value():
# *_cell_value() correctly mangles attrnames that are Python reserved words. # *_cell_value() correctly mangles attrnames that are Python reserved words.
row = Row(Table()) row = Row(Table())
row.from_ = 'foo' row.from_ = "foo"
eq_(row.get_cell_value('from'), 'foo') eq_(row.get_cell_value("from"), "foo")
row.set_cell_value('from', 'bar') row.set_cell_value("from", "bar")
eq_(row.get_cell_value('from'), 'bar') eq_(row.get_cell_value("from"), "bar")
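The mangling the comment at the top of this test refers to — reserved words get a trailing underscore — can be sketched as follows (an assumed reconstruction):

import keyword

def _mangle(attrname):
    # "from" is a Python keyword, so the row stores it as "from_"
    return attrname + "_" if keyword.iskeyword(attrname) else attrname

def get_cell_value_sketch(row, attrname):
    return getattr(row, _mangle(attrname))

def set_cell_value_sketch(row, attrname, value):
    setattr(row, _mangle(attrname), value)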
def test_sort_table_also_tries_attributes_without_underscores(): def test_sort_table_also_tries_attributes_without_underscores():
# When determining a sort key, after having unsuccessfully tried the attribute with the # When determining a sort key, after having unsuccessfully tried the attribute with the
# underscore, try the one without it. # underscore, try the one without it.
table = Table() table = Table()
row1 = Row(table) row1 = Row(table)
row1._foo = 'a' # underscored attr must be checked first row1._foo = "a" # underscored attr must be checked first
row1.foo = 'b' row1.foo = "b"
row1.bar = 'c' row1.bar = "c"
row2 = Row(table) row2 = Row(table)
row2._foo = 'b' row2._foo = "b"
row2.foo = 'a' row2.foo = "a"
row2.bar = 'b' row2.bar = "b"
table.append(row1) table.append(row1)
table.append(row2) table.append(row2)
table.sort_by('foo') table.sort_by("foo")
assert table[0] is row1 assert table[0] is row1
assert table[1] is row2 assert table[1] is row2
table.sort_by('bar') table.sort_by("bar")
assert table[0] is row2 assert table[0] is row2
assert table[1] is row1 assert table[1] is row1
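A plausible reading of that lookup order, written as a sort-key helper (assumed; it matches only what the test checks):

def _sort_key_sketch(row, attrname):
    # Prefer the underscored attribute; fall back to the bare name.
    try:
        return getattr(row, "_" + attrname)
    except AttributeError:
        return getattr(row, attrname)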
def test_sort_table_updates_selection(): def test_sort_table_updates_selection():
table = TestGUITable(10) table = TestGUITable(10)
table.refresh() table.refresh()
table.select([2, 4]) table.select([2, 4])
table.sort_by('index', desc=True) table.sort_by("index", desc=True)
# Now, the updated rows should be 7 and 5 # Now, the updated rows should be 7 and 5
eq_(len(table.updated_rows), 2) eq_(len(table.updated_rows), 2)
r1, r2 = table.updated_rows r1, r2 = table.updated_rows
eq_(r1.index, 7) eq_(r1.index, 7)
eq_(r2.index, 5) eq_(r2.index, 5)
def test_sort_table_with_footer(): def test_sort_table_with_footer():
# Sorting a table with a footer keeps it at the bottom # Sorting a table with a footer keeps it at the bottom
table, footer = table_with_footer() table, footer = table_with_footer()
table.sort_by('index', desc=True) table.sort_by("index", desc=True)
assert table[-1] is footer assert table[-1] is footer
def test_sort_table_with_header(): def test_sort_table_with_header():
# Sorting a table with a header keeps it at the top # Sorting a table with a header keeps it at the top
table, header = table_with_header() table, header = table_with_header()
table.sort_by('index', desc=True) table.sort_by("index", desc=True)
assert table[0] is header assert table[0] is header
def test_add_with_view_that_saves_during_refresh(): def test_add_with_view_that_saves_during_refresh():
# Calling save_edits during refresh() called by add() is ignored. # Calling save_edits during refresh() called by add() is ignored.
class TableView(CallLogger): class TableView(CallLogger):
@ -321,5 +358,4 @@ def test_add_with_view_that_saves_during_refresh():
table = TestGUITable(10, viewclass=TableView) table = TestGUITable(10, viewclass=TableView)
table.add() table.add()
assert table.edited is not None # still in edit mode assert table.edited is not None # still in edit mode
View File
@ -1,23 +1,25 @@
# Created By: Virgil Dupras # Created By: Virgil Dupras
# Created On: 2010-02-12 # Created On: 2010-02-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# #
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at # which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html # http://www.gnu.org/licenses/gpl-3.0.html
from ..testutil import eq_ from ..testutil import eq_
from ..gui.tree import Tree, Node from ..gui.tree import Tree, Node
def tree_with_some_nodes(): def tree_with_some_nodes():
t = Tree() t = Tree()
t.append(Node('foo')) t.append(Node("foo"))
t.append(Node('bar')) t.append(Node("bar"))
t.append(Node('baz')) t.append(Node("baz"))
t[0].append(Node('sub1')) t[0].append(Node("sub1"))
t[0].append(Node('sub2')) t[0].append(Node("sub2"))
return t return t
def test_selection(): def test_selection():
t = tree_with_some_nodes() t = tree_with_some_nodes()
assert t.selected_node is None assert t.selected_node is None
@ -25,6 +27,7 @@ def test_selection():
assert t.selected_path is None assert t.selected_path is None
eq_(t.selected_paths, []) eq_(t.selected_paths, [])
def test_select_one_node(): def test_select_one_node():
t = tree_with_some_nodes() t = tree_with_some_nodes()
t.selected_node = t[0][0] t.selected_node = t[0][0]
@ -33,33 +36,39 @@ def test_select_one_node():
eq_(t.selected_path, [0, 0]) eq_(t.selected_path, [0, 0])
eq_(t.selected_paths, [[0, 0]]) eq_(t.selected_paths, [[0, 0]])
def test_select_one_path(): def test_select_one_path():
t = tree_with_some_nodes() t = tree_with_some_nodes()
t.selected_path = [0, 1] t.selected_path = [0, 1]
assert t.selected_node is t[0][1] assert t.selected_node is t[0][1]
def test_select_multiple_nodes(): def test_select_multiple_nodes():
t = tree_with_some_nodes() t = tree_with_some_nodes()
t.selected_nodes = [t[0], t[1]] t.selected_nodes = [t[0], t[1]]
eq_(t.selected_paths, [[0], [1]]) eq_(t.selected_paths, [[0], [1]])
def test_select_multiple_paths(): def test_select_multiple_paths():
t = tree_with_some_nodes() t = tree_with_some_nodes()
t.selected_paths = [[0], [1]] t.selected_paths = [[0], [1]]
eq_(t.selected_nodes, [t[0], t[1]]) eq_(t.selected_nodes, [t[0], t[1]])
def test_select_none_path(): def test_select_none_path():
# setting selected_path to None clears the selection # setting selected_path to None clears the selection
t = Tree() t = Tree()
t.selected_path = None t.selected_path = None
assert t.selected_path is None assert t.selected_path is None
def test_select_none_node(): def test_select_none_node():
# setting selected_node to None clears the selection # setting selected_node to None clears the selection
t = Tree() t = Tree()
t.selected_node = None t.selected_node = None
eq_(t.selected_nodes, []) eq_(t.selected_nodes, [])
def test_clear_removes_selection(): def test_clear_removes_selection():
# When clearing a tree, we want to clear the selection as well or else we end up with a crash # When clearing a tree, we want to clear the selection as well or else we end up with a crash
# when calling selected_paths. # when calling selected_paths.
@ -68,15 +77,16 @@ def test_clear_removes_selection():
t.clear() t.clear()
assert t.selected_node is None assert t.selected_node is None
def test_selection_override(): def test_selection_override():
# All selection changes pass through the _select_nodes() method so it's easy for subclasses to # All selection changes pass through the _select_nodes() method so it's easy for subclasses to
# customize the tree's behavior. # customize the tree's behavior.
class MyTree(Tree): class MyTree(Tree):
called = False called = False
def _select_nodes(self, nodes): def _select_nodes(self, nodes):
self.called = True self.called = True
t = MyTree() t = MyTree()
t.selected_paths = [] t.selected_paths = []
assert t.called assert t.called
@ -84,26 +94,32 @@ def test_selection_override():
t.selected_node = None t.selected_node = None
assert t.called assert t.called
def test_findall(): def test_findall():
t = tree_with_some_nodes() t = tree_with_some_nodes()
r = t.findall(lambda n: n.name.startswith('sub')) r = t.findall(lambda n: n.name.startswith("sub"))
eq_(set(r), set([t[0][0], t[0][1]])) eq_(set(r), set([t[0][0], t[0][1]]))
def test_findall_dont_include_self(): def test_findall_dont_include_self():
# When calling findall with include_self=False, the node itself is never evaluated. # When calling findall with include_self=False, the node itself is never evaluated.
t = tree_with_some_nodes() t = tree_with_some_nodes()
del t._name # so that if the predicate is called on `t`, we crash del t._name # so that if the predicate is called on `t`, we crash
r = t.findall(lambda n: not n.name.startswith('sub'), include_self=False) # no crash r = t.findall(
lambda n: not n.name.startswith("sub"), include_self=False
) # no crash
eq_(set(r), set([t[0], t[1], t[2]])) eq_(set(r), set([t[0], t[1], t[2]]))
def test_find_dont_include_self(): def test_find_dont_include_self():
# When calling find with include_self=False, the node itself is never evaluated. # When calling find with include_self=False, the node itself is never evaluated.
t = tree_with_some_nodes() t = tree_with_some_nodes()
del t._name # so that if the predicate is called on `t`, we crash del t._name # so that if the predicate is called on `t`, we crash
r = t.find(lambda n: not n.name.startswith('sub'), include_self=False) # no crash r = t.find(lambda n: not n.name.startswith("sub"), include_self=False) # no crash
assert r is t[0] assert r is t[0]
def test_find_none(): def test_find_none():
# when find() yields no result, return None # when find() yields no result, return None
t = Tree() t = Tree()
assert t.find(lambda n: False) is None # no StopIteration exception assert t.find(lambda n: False) is None # no StopIteration exception
View File
@ -14,43 +14,53 @@ from ..testutil import eq_
from ..path import Path from ..path import Path
from ..util import * from ..util import *
def test_nonone(): def test_nonone():
eq_('foo', nonone('foo', 'bar')) eq_("foo", nonone("foo", "bar"))
eq_('bar', nonone(None, 'bar')) eq_("bar", nonone(None, "bar"))
def test_tryint(): def test_tryint():
eq_(42,tryint('42')) eq_(42, tryint("42"))
eq_(0,tryint('abc')) eq_(0, tryint("abc"))
eq_(0,tryint(None)) eq_(0, tryint(None))
eq_(42,tryint(None, 42)) eq_(42, tryint(None, 42))
def test_minmax(): def test_minmax():
eq_(minmax(2, 1, 3), 2) eq_(minmax(2, 1, 3), 2)
eq_(minmax(0, 1, 3), 1) eq_(minmax(0, 1, 3), 1)
eq_(minmax(4, 1, 3), 3) eq_(minmax(4, 1, 3), 3)
#--- Sequence
# --- Sequence
def test_first(): def test_first():
eq_(first([3, 2, 1]), 3) eq_(first([3, 2, 1]), 3)
eq_(first(i for i in [3, 2, 1] if i < 3), 2) eq_(first(i for i in [3, 2, 1] if i < 3), 2)
def test_flatten(): def test_flatten():
eq_([1,2,3,4],flatten([[1,2],[3,4]])) eq_([1, 2, 3, 4], flatten([[1, 2], [3, 4]]))
eq_([],flatten([])) eq_([], flatten([]))
def test_dedupe(): def test_dedupe():
reflist = [0,7,1,2,3,4,4,5,6,7,1,2,3] reflist = [0, 7, 1, 2, 3, 4, 4, 5, 6, 7, 1, 2, 3]
eq_(dedupe(reflist),[0,7,1,2,3,4,5,6]) eq_(dedupe(reflist), [0, 7, 1, 2, 3, 4, 5, 6])
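dedupe keeps the first occurrence of each element and preserves order; a straightforward sketch consistent with the test above:

def dedupe_sketch(iterable):
    seen = set()
    result = []
    for item in iterable:
        if item not in seen:  # first occurrence wins
            seen.add(item)
            result.append(item)
    return result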
def test_stripfalse(): def test_stripfalse():
eq_([1, 2, 3], stripfalse([None, 0, 1, 2, 3, None])) eq_([1, 2, 3], stripfalse([None, 0, 1, 2, 3, None]))
def test_extract(): def test_extract():
wheat, shaft = extract(lambda n: n % 2 == 0, list(range(10))) wheat, shaft = extract(lambda n: n % 2 == 0, list(range(10)))
eq_(wheat, [0, 2, 4, 6, 8]) eq_(wheat, [0, 2, 4, 6, 8])
eq_(shaft, [1, 3, 5, 7, 9]) eq_(shaft, [1, 3, 5, 7, 9])
def test_allsame(): def test_allsame():
assert allsame([42, 42, 42]) assert allsame([42, 42, 42])
assert not allsame([42, 43, 42]) assert not allsame([42, 43, 42])
@ -58,25 +68,32 @@ def test_allsame():
# Works on non-sequence as well # Works on non-sequence as well
assert allsame(iter([42, 42, 42])) assert allsame(iter([42, 42, 42]))
def test_trailiter(): def test_trailiter():
eq_(list(trailiter([])), []) eq_(list(trailiter([])), [])
eq_(list(trailiter(['foo'])), [(None, 'foo')]) eq_(list(trailiter(["foo"])), [(None, "foo")])
eq_(list(trailiter(['foo', 'bar'])), [(None, 'foo'), ('foo', 'bar')]) eq_(list(trailiter(["foo", "bar"])), [(None, "foo"), ("foo", "bar")])
eq_(list(trailiter(['foo', 'bar'], skipfirst=True)), [('foo', 'bar')]) eq_(list(trailiter(["foo", "bar"], skipfirst=True)), [("foo", "bar")])
eq_(list(trailiter([], skipfirst=True)), []) # no crash eq_(list(trailiter([], skipfirst=True)), []) # no crash
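trailiter pairs each element with the one before it; a sketch consistent with the assertions above:

def trailiter_sketch(iterable, skipfirst=False):
    prev = None
    first = True
    for item in iterable:
        if not (first and skipfirst):
            yield prev, item  # prev is None on the very first pair
        prev = item
        first = False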
def test_iterconsume(): def test_iterconsume():
# We just want to make sure that we return *all* items and that we're not mistakenly skipping # We just want to make sure that we return *all* items and that we're not mistakenly skipping
# one. # one.
eq_(list(range(2500)), list(iterconsume(list(range(2500))))) eq_(list(range(2500)), list(iterconsume(list(range(2500)))))
eq_(list(reversed(range(2500))), list(iterconsume(list(range(2500)), reverse=False))) eq_(
list(reversed(range(2500))), list(iterconsume(list(range(2500)), reverse=False))
)
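iterconsume empties the list it is given as it yields; popping from the tail is O(1), which explains the reverse parameter. A sketch consistent with the two assertions:

def iterconsume_sketch(seq, reverse=True):
    # By default the list is reversed first so that pop() from the end
    # still yields items in their original order.
    if reverse:
        seq.reverse()
    while seq:
        yield seq.pop()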
# --- String
#--- String
def test_escape(): def test_escape():
eq_('f\\o\\ob\\ar', escape('foobar', 'oa')) eq_("f\\o\\ob\\ar", escape("foobar", "oa"))
eq_('f*o*ob*ar', escape('foobar', 'oa', '*')) eq_("f*o*ob*ar", escape("foobar", "oa", "*"))
eq_('f*o*ob*ar', escape('foobar', set('oa'), '*')) eq_("f*o*ob*ar", escape("foobar", set("oa"), "*"))
def test_get_file_ext(): def test_get_file_ext():
eq_(get_file_ext("foobar"), "") eq_(get_file_ext("foobar"), "")
@ -84,146 +101,155 @@ def test_get_file_ext():
eq_(get_file_ext("foobar."), "") eq_(get_file_ext("foobar."), "")
eq_(get_file_ext(".foobar"), "foobar") eq_(get_file_ext(".foobar"), "foobar")
def test_rem_file_ext(): def test_rem_file_ext():
eq_(rem_file_ext("foobar"), "foobar") eq_(rem_file_ext("foobar"), "foobar")
eq_(rem_file_ext("foo.bar"), "foo") eq_(rem_file_ext("foo.bar"), "foo")
eq_(rem_file_ext("foobar."), "foobar") eq_(rem_file_ext("foobar."), "foobar")
eq_(rem_file_ext(".foobar"), "") eq_(rem_file_ext(".foobar"), "")
def test_pluralize(): def test_pluralize():
eq_('0 song', pluralize(0,'song')) eq_("0 song", pluralize(0, "song"))
eq_('1 song', pluralize(1,'song')) eq_("1 song", pluralize(1, "song"))
eq_('2 songs', pluralize(2,'song')) eq_("2 songs", pluralize(2, "song"))
eq_('1 song', pluralize(1.1,'song')) eq_("1 song", pluralize(1.1, "song"))
eq_('2 songs', pluralize(1.5,'song')) eq_("2 songs", pluralize(1.5, "song"))
eq_('1.1 songs', pluralize(1.1,'song',1)) eq_("1.1 songs", pluralize(1.1, "song", 1))
eq_('1.5 songs', pluralize(1.5,'song',1)) eq_("1.5 songs", pluralize(1.5, "song", 1))
eq_('2 entries', pluralize(2,'entry', plural_word='entries')) eq_("2 entries", pluralize(2, "entry", plural_word="entries"))
def test_format_time(): def test_format_time():
eq_(format_time(0),'00:00:00') eq_(format_time(0), "00:00:00")
eq_(format_time(1),'00:00:01') eq_(format_time(1), "00:00:01")
eq_(format_time(23),'00:00:23') eq_(format_time(23), "00:00:23")
eq_(format_time(60),'00:01:00') eq_(format_time(60), "00:01:00")
eq_(format_time(101),'00:01:41') eq_(format_time(101), "00:01:41")
eq_(format_time(683),'00:11:23') eq_(format_time(683), "00:11:23")
eq_(format_time(3600),'01:00:00') eq_(format_time(3600), "01:00:00")
eq_(format_time(3754),'01:02:34') eq_(format_time(3754), "01:02:34")
eq_(format_time(36000),'10:00:00') eq_(format_time(36000), "10:00:00")
eq_(format_time(366666),'101:51:06') eq_(format_time(366666), "101:51:06")
eq_(format_time(0, with_hours=False),'00:00') eq_(format_time(0, with_hours=False), "00:00")
eq_(format_time(1, with_hours=False),'00:01') eq_(format_time(1, with_hours=False), "00:01")
eq_(format_time(23, with_hours=False),'00:23') eq_(format_time(23, with_hours=False), "00:23")
eq_(format_time(60, with_hours=False),'01:00') eq_(format_time(60, with_hours=False), "01:00")
eq_(format_time(101, with_hours=False),'01:41') eq_(format_time(101, with_hours=False), "01:41")
eq_(format_time(683, with_hours=False),'11:23') eq_(format_time(683, with_hours=False), "11:23")
eq_(format_time(3600, with_hours=False),'60:00') eq_(format_time(3600, with_hours=False), "60:00")
eq_(format_time(6036, with_hours=False),'100:36') eq_(format_time(6036, with_hours=False), "100:36")
eq_(format_time(60360, with_hours=False),'1006:00') eq_(format_time(60360, with_hours=False), "1006:00")
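The expected strings above follow from simple divmod arithmetic; a sketch of format_time consistent with them:

def format_time_sketch(seconds, with_hours=True):
    minutes, seconds = divmod(seconds, 60)
    if with_hours:
        hours, minutes = divmod(minutes, 60)
        return "%02d:%02d:%02d" % (hours, minutes, seconds)
    # without hours, minutes grow past 59 (e.g. 3600 -> "60:00")
    return "%02d:%02d" % (minutes, seconds)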
def test_format_time_decimal(): def test_format_time_decimal():
eq_(format_time_decimal(0), '0.0 second') eq_(format_time_decimal(0), "0.0 second")
eq_(format_time_decimal(1), '1.0 second') eq_(format_time_decimal(1), "1.0 second")
eq_(format_time_decimal(23), '23.0 seconds') eq_(format_time_decimal(23), "23.0 seconds")
eq_(format_time_decimal(60), '1.0 minute') eq_(format_time_decimal(60), "1.0 minute")
eq_(format_time_decimal(101), '1.7 minutes') eq_(format_time_decimal(101), "1.7 minutes")
eq_(format_time_decimal(683), '11.4 minutes') eq_(format_time_decimal(683), "11.4 minutes")
eq_(format_time_decimal(3600), '1.0 hour') eq_(format_time_decimal(3600), "1.0 hour")
eq_(format_time_decimal(6036), '1.7 hours') eq_(format_time_decimal(6036), "1.7 hours")
eq_(format_time_decimal(86400), '1.0 day') eq_(format_time_decimal(86400), "1.0 day")
eq_(format_time_decimal(160360), '1.9 days') eq_(format_time_decimal(160360), "1.9 days")
def test_format_size(): def test_format_size():
eq_(format_size(1024), '1 KB') eq_(format_size(1024), "1 KB")
eq_(format_size(1024,2), '1.00 KB') eq_(format_size(1024, 2), "1.00 KB")
eq_(format_size(1024,0,2), '1 MB') eq_(format_size(1024, 0, 2), "1 MB")
eq_(format_size(1024,2,2), '0.01 MB') eq_(format_size(1024, 2, 2), "0.01 MB")
eq_(format_size(1024,3,2), '0.001 MB') eq_(format_size(1024, 3, 2), "0.001 MB")
eq_(format_size(1024,3,2,False), '0.001') eq_(format_size(1024, 3, 2, False), "0.001")
eq_(format_size(1023), '1023 B') eq_(format_size(1023), "1023 B")
eq_(format_size(1023,0,1), '1 KB') eq_(format_size(1023, 0, 1), "1 KB")
eq_(format_size(511,0,1), '1 KB') eq_(format_size(511, 0, 1), "1 KB")
eq_(format_size(9), '9 B') eq_(format_size(9), "9 B")
eq_(format_size(99), '99 B') eq_(format_size(99), "99 B")
eq_(format_size(999), '999 B') eq_(format_size(999), "999 B")
eq_(format_size(9999), '10 KB') eq_(format_size(9999), "10 KB")
eq_(format_size(99999), '98 KB') eq_(format_size(99999), "98 KB")
eq_(format_size(999999), '977 KB') eq_(format_size(999999), "977 KB")
eq_(format_size(9999999), '10 MB') eq_(format_size(9999999), "10 MB")
eq_(format_size(99999999), '96 MB') eq_(format_size(99999999), "96 MB")
eq_(format_size(999999999), '954 MB') eq_(format_size(999999999), "954 MB")
eq_(format_size(9999999999), '10 GB') eq_(format_size(9999999999), "10 GB")
eq_(format_size(99999999999), '94 GB') eq_(format_size(99999999999), "94 GB")
eq_(format_size(999999999999), '932 GB') eq_(format_size(999999999999), "932 GB")
eq_(format_size(9999999999999), '10 TB') eq_(format_size(9999999999999), "10 TB")
eq_(format_size(99999999999999), '91 TB') eq_(format_size(99999999999999), "91 TB")
eq_(format_size(999999999999999), '910 TB') eq_(format_size(999999999999999), "910 TB")
eq_(format_size(9999999999999999), '9 PB') eq_(format_size(9999999999999999), "9 PB")
eq_(format_size(99999999999999999), '89 PB') eq_(format_size(99999999999999999), "89 PB")
eq_(format_size(999999999999999999), '889 PB') eq_(format_size(999999999999999999), "889 PB")
eq_(format_size(9999999999999999999), '9 EB') eq_(format_size(9999999999999999999), "9 EB")
eq_(format_size(99999999999999999999), '87 EB') eq_(format_size(99999999999999999999), "87 EB")
eq_(format_size(999999999999999999999), '868 EB') eq_(format_size(999999999999999999999), "868 EB")
eq_(format_size(9999999999999999999999), '9 ZB') eq_(format_size(9999999999999999999999), "9 ZB")
eq_(format_size(99999999999999999999999), '85 ZB') eq_(format_size(99999999999999999999999), "85 ZB")
eq_(format_size(999999999999999999999999), '848 ZB') eq_(format_size(999999999999999999999999), "848 ZB")
def test_remove_invalid_xml(): def test_remove_invalid_xml():
eq_(remove_invalid_xml('foo\0bar\x0bbaz'), 'foo bar baz') eq_(remove_invalid_xml("foo\0bar\x0bbaz"), "foo bar baz")
# surrogate blocks have to be replaced, but not the rest # surrogate blocks have to be replaced, but not the rest
eq_(remove_invalid_xml('foo\ud800bar\udfffbaz\ue000'), 'foo bar baz\ue000') eq_(remove_invalid_xml("foo\ud800bar\udfffbaz\ue000"), "foo bar baz\ue000")
# replace with something else # replace with something else
eq_(remove_invalid_xml('foo\0baz', replace_with='bar'), 'foobarbaz') eq_(remove_invalid_xml("foo\0baz", replace_with="bar"), "foobarbaz")
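XML 1.0 only allows tab, LF, CR, and characters in #x20-#xD7FF, #xE000-#xFFFD and #x10000-#x10FFFF; a regex sketch consistent with the tests (the default replacement is assumed to be a space):

import re

_INVALID_XML = re.compile(
    "[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd\U00010000-\U0010ffff]"
)

def remove_invalid_xml_sketch(s, replace_with=" "):
    # Controls and lone surrogates are replaced; \ue000 and up survive.
    return _INVALID_XML.sub(replace_with, s)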
def test_multi_replace(): def test_multi_replace():
eq_('136',multi_replace('123456',('2','45'))) eq_("136", multi_replace("123456", ("2", "45")))
eq_('1 3 6',multi_replace('123456',('2','45'),' ')) eq_("1 3 6", multi_replace("123456", ("2", "45"), " "))
eq_('1 3 6',multi_replace('123456','245',' ')) eq_("1 3 6", multi_replace("123456", "245", " "))
eq_('173896',multi_replace('123456','245','789')) eq_("173896", multi_replace("123456", "245", "789"))
eq_('173896',multi_replace('123456','245',('7','8','9'))) eq_("173896", multi_replace("123456", "245", ("7", "8", "9")))
eq_('17386',multi_replace('123456',('2','45'),'78')) eq_("17386", multi_replace("123456", ("2", "45"), "78"))
eq_('17386',multi_replace('123456',('2','45'),('7','8'))) eq_("17386", multi_replace("123456", ("2", "45"), ("7", "8")))
with raises(ValueError): with raises(ValueError):
multi_replace('123456',('2','45'),('7','8','9')) multi_replace("123456", ("2", "45"), ("7", "8", "9"))
eq_('17346',multi_replace('12346',('2','45'),'78')) eq_("17346", multi_replace("12346", ("2", "45"), "78"))
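multi_replace takes an iterable of substrings to replace; the replacement can be one string applied to all of them or a sequence matched pairwise. A sketch of that dispatch (assumed; only the ValueError behavior is pinned down by the test):

def multi_replace_sketch(s, replace_from, replace_to=""):
    if isinstance(replace_to, str) and len(replace_from) != len(replace_to):
        # one replacement string applied to every item
        replace_to = [replace_to] * len(replace_from)
    if len(replace_from) != len(replace_to):
        raise ValueError("replace_from and replace_to lengths don't match")
    for src, dst in zip(replace_from, replace_to):
        s = s.replace(src, dst)
    return s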
# --- Files
#--- Files
class TestCase_modified_after: class TestCase_modified_after:
def test_first_is_modified_after(self, monkeyplus): def test_first_is_modified_after(self, monkeyplus):
monkeyplus.patch_osstat('first', st_mtime=42) monkeyplus.patch_osstat("first", st_mtime=42)
monkeyplus.patch_osstat('second', st_mtime=41) monkeyplus.patch_osstat("second", st_mtime=41)
assert modified_after('first', 'second') assert modified_after("first", "second")
def test_second_is_modified_after(self, monkeyplus): def test_second_is_modified_after(self, monkeyplus):
monkeyplus.patch_osstat('first', st_mtime=42) monkeyplus.patch_osstat("first", st_mtime=42)
monkeyplus.patch_osstat('second', st_mtime=43) monkeyplus.patch_osstat("second", st_mtime=43)
assert not modified_after('first', 'second') assert not modified_after("first", "second")
def test_same_mtime(self, monkeyplus): def test_same_mtime(self, monkeyplus):
monkeyplus.patch_osstat('first', st_mtime=42) monkeyplus.patch_osstat("first", st_mtime=42)
monkeyplus.patch_osstat('second', st_mtime=42) monkeyplus.patch_osstat("second", st_mtime=42)
assert not modified_after('first', 'second') assert not modified_after("first", "second")
def test_first_file_does_not_exist(self, monkeyplus): def test_first_file_does_not_exist(self, monkeyplus):
# when the first file doesn't exist, we return False # when the first file doesn't exist, we return False
monkeyplus.patch_osstat('second', st_mtime=42) monkeyplus.patch_osstat("second", st_mtime=42)
assert not modified_after('does_not_exist', 'second') # no crash assert not modified_after("does_not_exist", "second") # no crash
def test_second_file_does_not_exist(self, monkeyplus): def test_second_file_does_not_exist(self, monkeyplus):
# when the second file doesn't exist, we return True # when the second file doesn't exist, we return True
monkeyplus.patch_osstat('first', st_mtime=42) monkeyplus.patch_osstat("first", st_mtime=42)
assert modified_after('first', 'does_not_exist') # no crash assert modified_after("first", "does_not_exist") # no crash
def test_first_file_is_none(self, monkeyplus): def test_first_file_is_none(self, monkeyplus):
# when the first file is None, we return False # when the first file is None, we return False
monkeyplus.patch_osstat('second', st_mtime=42) monkeyplus.patch_osstat("second", st_mtime=42)
assert not modified_after(None, 'second') # no crash assert not modified_after(None, "second") # no crash
def test_second_file_is_none(self, monkeyplus): def test_second_file_is_none(self, monkeyplus):
# when the second file is None, we return True # when the second file is None, we return True
monkeyplus.patch_osstat('first', st_mtime=42) monkeyplus.patch_osstat("first", st_mtime=42)
assert modified_after('first', None) # no crash assert modified_after("first", None) # no crash
class TestCase_delete_if_empty: class TestCase_delete_if_empty:
@ -234,92 +260,91 @@ class TestCase_delete_if_empty:
def test_not_empty(self, tmpdir): def test_not_empty(self, tmpdir):
testpath = Path(str(tmpdir)) testpath = Path(str(tmpdir))
testpath['foo'].mkdir() testpath["foo"].mkdir()
assert not delete_if_empty(testpath) assert not delete_if_empty(testpath)
assert testpath.exists() assert testpath.exists()
def test_with_files_to_delete(self, tmpdir): def test_with_files_to_delete(self, tmpdir):
testpath = Path(str(tmpdir)) testpath = Path(str(tmpdir))
testpath['foo'].open('w') testpath["foo"].open("w")
testpath['bar'].open('w') testpath["bar"].open("w")
assert delete_if_empty(testpath, ['foo', 'bar']) assert delete_if_empty(testpath, ["foo", "bar"])
assert not testpath.exists() assert not testpath.exists()
def test_directory_in_files_to_delete(self, tmpdir): def test_directory_in_files_to_delete(self, tmpdir):
testpath = Path(str(tmpdir)) testpath = Path(str(tmpdir))
testpath['foo'].mkdir() testpath["foo"].mkdir()
assert not delete_if_empty(testpath, ['foo']) assert not delete_if_empty(testpath, ["foo"])
assert testpath.exists() assert testpath.exists()
def test_delete_files_to_delete_only_if_dir_is_empty(self, tmpdir): def test_delete_files_to_delete_only_if_dir_is_empty(self, tmpdir):
testpath = Path(str(tmpdir)) testpath = Path(str(tmpdir))
testpath['foo'].open('w') testpath["foo"].open("w")
testpath['bar'].open('w') testpath["bar"].open("w")
assert not delete_if_empty(testpath, ['foo']) assert not delete_if_empty(testpath, ["foo"])
assert testpath.exists() assert testpath.exists()
assert testpath['foo'].exists() assert testpath["foo"].exists()
def test_doesnt_exist(self): def test_doesnt_exist(self):
# When the 'path' doesn't exist, just do nothing. # When the 'path' doesn't exist, just do nothing.
delete_if_empty(Path('does_not_exist')) # no crash delete_if_empty(Path("does_not_exist")) # no crash
def test_is_file(self, tmpdir): def test_is_file(self, tmpdir):
# When 'path' is a file, do nothing. # When 'path' is a file, do nothing.
p = Path(str(tmpdir)) + 'filename' p = Path(str(tmpdir)) + "filename"
p.open('w').close() p.open("w").close()
delete_if_empty(p) # no crash delete_if_empty(p) # no crash
def test_ioerror(self, tmpdir, monkeypatch): def test_ioerror(self, tmpdir, monkeypatch):
# if an IO error happens during the operation, ignore it. # if an IO error happens during the operation, ignore it.
def do_raise(*args, **kw): def do_raise(*args, **kw):
raise OSError() raise OSError()
monkeypatch.setattr(Path, 'rmdir', do_raise) monkeypatch.setattr(Path, "rmdir", do_raise)
delete_if_empty(Path(str(tmpdir))) # no crash delete_if_empty(Path(str(tmpdir))) # no crash
class TestCase_open_if_filename: class TestCase_open_if_filename:
def test_file_name(self, tmpdir): def test_file_name(self, tmpdir):
filepath = str(tmpdir.join('test.txt')) filepath = str(tmpdir.join("test.txt"))
open(filepath, 'wb').write(b'test_data') open(filepath, "wb").write(b"test_data")
file, close = open_if_filename(filepath) file, close = open_if_filename(filepath)
assert close assert close
eq_(b'test_data', file.read()) eq_(b"test_data", file.read())
file.close() file.close()
def test_opened_file(self): def test_opened_file(self):
sio = StringIO() sio = StringIO()
sio.write('test_data') sio.write("test_data")
sio.seek(0) sio.seek(0)
file, close = open_if_filename(sio) file, close = open_if_filename(sio)
assert not close assert not close
eq_('test_data', file.read()) eq_("test_data", file.read())
def test_mode_is_passed_to_open(self, tmpdir): def test_mode_is_passed_to_open(self, tmpdir):
filepath = str(tmpdir.join('test.txt')) filepath = str(tmpdir.join("test.txt"))
open(filepath, 'w').close() open(filepath, "w").close()
file, close = open_if_filename(filepath, 'a') file, close = open_if_filename(filepath, "a")
eq_('a', file.mode) eq_("a", file.mode)
file.close() file.close()
class TestCase_FileOrPath: class TestCase_FileOrPath:
def test_path(self, tmpdir): def test_path(self, tmpdir):
filepath = str(tmpdir.join('test.txt')) filepath = str(tmpdir.join("test.txt"))
open(filepath, 'wb').write(b'test_data') open(filepath, "wb").write(b"test_data")
with FileOrPath(filepath) as fp: with FileOrPath(filepath) as fp:
eq_(b'test_data', fp.read()) eq_(b"test_data", fp.read())
def test_opened_file(self): def test_opened_file(self):
sio = StringIO() sio = StringIO()
sio.write('test_data') sio.write("test_data")
sio.seek(0) sio.seek(0)
with FileOrPath(sio) as fp: with FileOrPath(sio) as fp:
eq_('test_data', fp.read()) eq_("test_data", fp.read())
def test_mode_is_passed_to_open(self, tmpdir): def test_mode_is_passed_to_open(self, tmpdir):
filepath = str(tmpdir.join('test.txt')) filepath = str(tmpdir.join("test.txt"))
open(filepath, 'w').close() open(filepath, "w").close()
with FileOrPath(filepath, 'a') as fp: with FileOrPath(filepath, "a") as fp:
eq_('a', fp.mode) eq_("a", fp.mode)
View File
@ -9,10 +9,12 @@
import threading import threading
import py.path import py.path
def eq_(a, b, msg=None): def eq_(a, b, msg=None):
__tracebackhide__ = True __tracebackhide__ = True
assert a == b, msg or "%r != %r" % (a, b) assert a == b, msg or "%r != %r" % (a, b)
def eq_sorted(a, b, msg=None): def eq_sorted(a, b, msg=None):
"""If both a and b are iterable sort them and compare using eq_, otherwise just pass them through to eq_ anyway.""" """If both a and b are iterable sort them and compare using eq_, otherwise just pass them through to eq_ anyway."""
try: try:
@ -20,10 +22,12 @@ def eq_sorted(a, b, msg=None):
except TypeError: except TypeError:
eq_(a, b, msg) eq_(a, b, msg)
def assert_almost_equal(a, b, places=7): def assert_almost_equal(a, b, places=7):
__tracebackhide__ = True __tracebackhide__ = True
assert round(a, ndigits=places) == round(b, ndigits=places) assert round(a, ndigits=places) == round(b, ndigits=places)
def callcounter(): def callcounter():
def f(*args, **kwargs): def f(*args, **kwargs):
f.callcount += 1 f.callcount += 1
@ -31,6 +35,7 @@ def callcounter():
f.callcount = 0 f.callcount = 0
return f return f
class TestData: class TestData:
def __init__(self, datadirpath): def __init__(self, datadirpath):
self.datadirpath = py.path.local(datadirpath) self.datadirpath = py.path.local(datadirpath)
@ -53,12 +58,14 @@ class CallLogger:
It is used to simulate the GUI layer. It is used to simulate the GUI layer.
""" """
def __init__(self): def __init__(self):
self.calls = [] self.calls = []
def __getattr__(self, func_name): def __getattr__(self, func_name):
def func(*args, **kw): def func(*args, **kw):
self.calls.append(func_name) self.calls.append(func_name)
return func return func
def clear_calls(self): def clear_calls(self):
@ -77,7 +84,9 @@ class CallLogger:
eq_(set(self.calls), set(expected)) eq_(set(self.calls), set(expected))
self.clear_calls() self.clear_calls()
def check_gui_calls_partial(self, expected=None, not_expected=None, verify_order=False): def check_gui_calls_partial(
self, expected=None, not_expected=None, verify_order=False
):
"""Checks that the expected calls have been made to 'self', then clears the log. """Checks that the expected calls have been made to 'self', then clears the log.
`expected` is an iterable of strings representing method names. Order doesn't matter. `expected` is an iterable of strings representing method names. Order doesn't matter.
@ -88,17 +97,25 @@ class CallLogger:
__tracebackhide__ = True __tracebackhide__ = True
if expected is not None: if expected is not None:
not_called = set(expected) - set(self.calls) not_called = set(expected) - set(self.calls)
assert not not_called, "These calls haven't been made: {0}".format(not_called) assert not not_called, "These calls haven't been made: {0}".format(
not_called
)
if verify_order: if verify_order:
max_index = 0 max_index = 0
for call in expected: for call in expected:
index = self.calls.index(call) index = self.calls.index(call)
if index < max_index: if index < max_index:
raise AssertionError("The call {0} hasn't been made in the correct order".format(call)) raise AssertionError(
"The call {0} hasn't been made in the correct order".format(
call
)
)
max_index = index max_index = index
if not_expected is not None: if not_expected is not None:
called = set(not_expected) & set(self.calls) called = set(not_expected) & set(self.calls)
assert not called, "These calls shouldn't have been made: {0}".format(called) assert not called, "These calls shouldn't have been made: {0}".format(
called
)
self.clear_calls() self.clear_calls()
@ -124,7 +141,7 @@ class TestApp:
parent = self.default_parent parent = self.default_parent
if holder is None: if holder is None:
holder = self holder = self
setattr(holder, '{0}_gui'.format(name), view) setattr(holder, "{0}_gui".format(name), view)
gui = class_(parent) gui = class_(parent)
gui.view = view gui.view = view
setattr(holder, name, gui) setattr(holder, name, gui)
@ -136,38 +153,44 @@ def with_app(setupfunc):
def decorator(func): def decorator(func):
func.setupfunc = setupfunc func.setupfunc = setupfunc
return func return func
return decorator return decorator
def pytest_funcarg__app(request): def pytest_funcarg__app(request):
setupfunc = request.function.setupfunc setupfunc = request.function.setupfunc
if hasattr(setupfunc, '__code__'): if hasattr(setupfunc, "__code__"):
argnames = setupfunc.__code__.co_varnames[:setupfunc.__code__.co_argcount] argnames = setupfunc.__code__.co_varnames[: setupfunc.__code__.co_argcount]
def getarg(name): def getarg(name):
if name == 'self': if name == "self":
return request.function.__self__ return request.function.__self__
else: else:
return request.getfixturevalue(name) return request.getfixturevalue(name)
args = [getarg(argname) for argname in argnames] args = [getarg(argname) for argname in argnames]
else: else:
args = [] args = []
app = setupfunc(*args) app = setupfunc(*args)
return app return app
def jointhreads(): def jointhreads():
"""Join all threads to the main thread""" """Join all threads to the main thread"""
for thread in threading.enumerate(): for thread in threading.enumerate():
if hasattr(thread, 'BUGGY'): if hasattr(thread, "BUGGY"):
continue continue
if thread.getName() != 'MainThread' and thread.isAlive(): if thread.getName() != "MainThread" and thread.isAlive():
if hasattr(thread, 'close'): if hasattr(thread, "close"):
thread.close() thread.close()
thread.join(1) thread.join(1)
if thread.isAlive(): if thread.isAlive():
print("Thread problem. Some thread doesn't want to stop.") print("Thread problem. Some thread doesn't want to stop.")
thread.BUGGY = True thread.BUGGY = True
def _unify_args(func, args, kwargs, args_to_ignore=None): def _unify_args(func, args, kwargs, args_to_ignore=None):
''' Unify args and kwargs in the same dictionary. """ Unify args and kwargs in the same dictionary.
The result is kwargs with args added to it. func.func_code.co_varnames is used to determine The result is kwargs with args added to it. func.func_code.co_varnames is used to determine
under what key each element of args will be mapped in kwargs. under what key each element of args will be mapped in kwargs.
@ -181,36 +204,40 @@ def _unify_args(func, args, kwargs, args_to_ignore=None):
def foo(bar, baz) def foo(bar, baz)
_unify_args(foo, (42,), {'baz': 23}) --> {'bar': 42, 'baz': 23} _unify_args(foo, (42,), {'baz': 23}) --> {'bar': 42, 'baz': 23}
_unify_args(foo, (42,), {'baz': 23}, ['bar']) --> {'baz': 23} _unify_args(foo, (42,), {'baz': 23}, ['bar']) --> {'baz': 23}
''' """
result = kwargs.copy() result = kwargs.copy()
if hasattr(func, '__code__'): # built-in functions don't have func_code if hasattr(func, "__code__"): # built-in functions don't have func_code
args = list(args) args = list(args)
if getattr(func, '__self__', None) is not None: # bound method, we have to add self to args list if (
getattr(func, "__self__", None) is not None
): # bound method, we have to add self to args list
args = [func.__self__] + args args = [func.__self__] + args
defaults = list(func.__defaults__) if func.__defaults__ is not None else [] defaults = list(func.__defaults__) if func.__defaults__ is not None else []
arg_count = func.__code__.co_argcount arg_count = func.__code__.co_argcount
arg_names = list(func.__code__.co_varnames) arg_names = list(func.__code__.co_varnames)
if len(args) < arg_count: # We have default values if len(args) < arg_count: # We have default values
required_arg_count = arg_count - len(args) required_arg_count = arg_count - len(args)
args = args + defaults[-required_arg_count:] args = args + defaults[-required_arg_count:]
for arg_name, arg in zip(arg_names, args): for arg_name, arg in zip(arg_names, args):
# setdefault is used because if the arg is already in kwargs, we don't want to use default values # setdefault is used because if the arg is already in kwargs, we don't want to use default values
result.setdefault(arg_name, arg) result.setdefault(arg_name, arg)
else: else:
#'func' has a *args argument # 'func' has a *args argument
result['args'] = args result["args"] = args
if args_to_ignore: if args_to_ignore:
for kw in args_to_ignore: for kw in args_to_ignore:
del result[kw] del result[kw]
return result return result
def log_calls(func): def log_calls(func):
''' Logs all func calls' arguments under func.calls. """ Logs all func calls' arguments under func.calls.
func.calls is a list of _unify_args() result (dict). func.calls is a list of _unify_args() result (dict).
Mostly used for unit testing. Mostly used for unit testing.
''' """
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
unifiedArgs = _unify_args(func, args, kwargs) unifiedArgs = _unify_args(func, args, kwargs)
wrapper.calls.append(unifiedArgs) wrapper.calls.append(unifiedArgs)
@ -218,4 +245,3 @@ def log_calls(func):
wrapper.calls = [] wrapper.calls = []
return wrapper return wrapper
View File
@@ -19,6 +19,7 @@ _trfunc = None
_trget = None
installed_lang = None


def tr(s, context=None):
    if _trfunc is None:
        return s
@@ -28,6 +29,7 @@ def tr(s, context=None):
    else:
        return _trfunc(s)


def trget(domain):
    # Returns a tr() function for the specified domain.
    if _trget is None:
@@ -35,57 +37,61 @@ def trget(domain):
    else:
        return _trget(domain)


def set_tr(new_tr, new_trget=None):
    global _trfunc, _trget
    _trfunc = new_tr
    if new_trget is not None:
        _trget = new_trget


def get_locale_name(lang):
    if ISWINDOWS:
        # http://msdn.microsoft.com/en-us/library/39cwe7zf(vs.71).aspx
        LANG2LOCALENAME = {
            "cs": "czy",
            "de": "deu",
            "el": "grc",
            "es": "esn",
            "fr": "fra",
            "it": "ita",
            "ko": "korean",
            "nl": "nld",
            "pl_PL": "polish_poland",
            "pt_BR": "ptb",
            "ru": "rus",
            "zh_CN": "chs",
        }
    else:
        LANG2LOCALENAME = {
            "cs": "cs_CZ",
            "de": "de_DE",
            "el": "el_GR",
            "es": "es_ES",
            "fr": "fr_FR",
            "it": "it_IT",
            "nl": "nl_NL",
            "hy": "hy_AM",
            "ko": "ko_KR",
            "pl_PL": "pl_PL",
            "pt_BR": "pt_BR",
            "ru": "ru_RU",
            "uk": "uk_UA",
            "vi": "vi_VN",
            "zh_CN": "zh_CN",
        }
    if lang not in LANG2LOCALENAME:
        return None
    result = LANG2LOCALENAME[lang]
    if ISLINUX:
        result += ".UTF-8"
    return result
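
For context, how get_locale_name behaves (illustrative calls; actual output depends on the ISWINDOWS/ISLINUX platform flags):

get_locale_name("fr")  # "fra" on Windows, "fr_FR" elsewhere, "fr_FR.UTF-8" on Linux
get_locale_name("xx")  # None -- unmapped languages return None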

# --- Qt
def install_qt_trans(lang=None):
    from PyQt5.QtCore import QCoreApplication, QTranslator, QLocale

    if not lang:
        lang = str(QLocale.system().name())[:2]
    localename = get_locale_name(lang)
@@ -95,54 +101,66 @@ def install_qt_trans(lang=None):
    except locale.Error:
        logging.warning("Couldn't set locale %s", localename)
    else:
        lang = "en"
    qtr1 = QTranslator(QCoreApplication.instance())
    qtr1.load(":/qt_%s" % lang)
    QCoreApplication.installTranslator(qtr1)
    qtr2 = QTranslator(QCoreApplication.instance())
    qtr2.load(":/%s" % lang)
    QCoreApplication.installTranslator(qtr2)

    def qt_tr(s, context="core"):
        return str(QCoreApplication.translate(context, s, None))

    set_tr(qt_tr)

# --- gettext
def install_gettext_trans(base_folder, lang):
    import gettext

    def gettext_trget(domain):
        if not lang:
            return lambda s: s
        try:
            return gettext.translation(
                domain, localedir=base_folder, languages=[lang]
            ).gettext
        except IOError:
            return lambda s: s

    default_gettext = gettext_trget("core")

    def gettext_tr(s, context=None):
        if not context:
            return default_gettext(s)
        else:
            trfunc = gettext_trget(context)
            return trfunc(s)

    set_tr(gettext_tr, gettext_trget)
    global installed_lang
    installed_lang = lang
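
For context, a hedged sketch of wiring install_gettext_trans into the tr()/trget() indirection (not part of this commit; the module path and locale folder layout are assumptions):

# Sketch: expects .mo catalogs under locale/<lang>/LC_MESSAGES/<domain>.mo.
from hscommon import trans

trans.install_gettext_trans("locale", "fr")
tr = trans.trget("ui")  # domain-specific translator
print(tr("Quit"))       # returns "Quit" unchanged if no catalog is found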

def install_gettext_trans_under_cocoa():
    from cocoa import proxy

    resFolder = proxy.getResourcePath()
    baseFolder = op.join(resFolder, "locale")
    currentLang = proxy.systemLang()
    install_gettext_trans(baseFolder, currentLang)
    localename = get_locale_name(currentLang)
    if localename is not None:
        locale.setlocale(locale.LC_ALL, localename)


def install_gettext_trans_under_qt(base_folder, lang=None):
    # So, we install the gettext locale, great, but we also should try to install qt_*.qm if
    # available so that strings that are inside Qt itself over which I have no control are in the
    # right language.
    from PyQt5.QtCore import QCoreApplication, QTranslator, QLocale, QLibraryInfo

    if not lang:
        lang = str(QLocale.system().name())[:2]
    localename = get_locale_name(lang)
@@ -151,7 +169,7 @@ def install_gettext_trans_under_qt(base_folder, lang=None):
        locale.setlocale(locale.LC_ALL, localename)
    except locale.Error:
        logging.warning("Couldn't set locale %s", localename)
    qmname = "qt_%s" % lang
    if ISLINUX:
        # Under linux, a full Qt installation is already available in the system, we didn't bundle
        # up the qm files in our package, so we have to load translations from the system.
@@ -17,6 +17,7 @@ from datetime import timedelta
from .path import Path, pathify, log_io_error


def nonone(value, replace_value):
    """Returns ``value`` if ``value`` is not ``None``. Returns ``replace_value`` otherwise.
    """
@@ -25,6 +26,7 @@ def nonone(value, replace_value):
    else:
        return value


def tryint(value, default=0):
    """Tries to convert ``value`` to an ``int`` and returns ``default`` if it fails.
    """
@@ -33,12 +35,15 @@ def tryint(value, default=0):
    except (TypeError, ValueError):
        return default


def minmax(value, min_value, max_value):
    """Returns `value` or one of the min/max bounds if `value` is not between them.
    """
    return min(max(value, min_value), max_value)
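
For context, illustrative calls for the three helpers above (expected values follow from the docstrings):

nonone(None, 0)     # 0
nonone("x", 0)      # "x"
tryint("12")        # 12
tryint("spam", -1)  # -1 -- conversion failed, default returned
minmax(15, 0, 10)   # 10 -- clamped to the upper bound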

# --- Sequence related
def dedupe(iterable):
    """Returns a list of elements in ``iterable`` with all dupes removed.
@@ -54,6 +59,7 @@ def dedupe(iterable):
        result.append(item)
    return result


def flatten(iterables, start_with=None):
    """Takes a list of lists ``iterables`` and returns a list containing elements of every list.
@@ -67,6 +73,7 @@ def flatten(iterables, start_with=None):
        result.extend(iterable)
    return result


def first(iterable):
    """Returns the first item of ``iterable``.
    """
@@ -75,11 +82,13 @@ def first(iterable):
    except StopIteration:
        return None


def stripfalse(seq):
    """Returns a sequence with all false elements stripped out of seq.
    """
    return [x for x in seq if x]


def extract(predicate, iterable):
    """Separates the wheat from the shaft (`predicate` defines what's the wheat), and returns both.
    """
@@ -92,6 +101,7 @@ def extract(predicate, iterable):
            shaft.append(item)
    return wheat, shaft


def allsame(iterable):
    """Returns whether all elements of 'iterable' are the same.
    """
@@ -102,6 +112,7 @@ def allsame(iterable):
        raise ValueError("iterable cannot be empty")
    return all(element == first_item for element in it)


def trailiter(iterable, skipfirst=False):
    """Yields (prev_element, element), starting with (None, first_element).
@@ -120,6 +131,7 @@ def trailiter(iterable, skipfirst=False):
        yield prev, item
        prev = item


def iterconsume(seq, reverse=True):
    """Iterate over ``seq`` and pops yielded objects.
@@ -135,31 +147,36 @@ def iterconsume(seq, reverse=True):
    while seq:
        yield seq.pop()
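
For context, illustrative calls for the sequence helpers above (expected values inferred from the docstrings):

dedupe([1, 2, 1, 3, 2])                 # [1, 2, 3] -- order preserved
flatten([[1, 2], [3]])                  # [1, 2, 3]
first(x for x in [] if x)               # None -- empty iterator, no exception
extract(lambda x: x > 2, [1, 3, 2, 4])  # ([3, 4], [1, 2])
allsame([7, 7, 7])                      # True
list(trailiter("ab"))                   # [(None, 'a'), ('a', 'b')]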

# --- String related
def escape(s, to_escape, escape_with="\\"):
    """Returns ``s`` with characters in ``to_escape`` all prepended with ``escape_with``.
    """
    return "".join((escape_with + c if c in to_escape else c) for c in s)


def get_file_ext(filename):
    """Returns the lowercase extension part of filename, without the dot.
    """
    pos = filename.rfind(".")
    if pos > -1:
        return filename[pos + 1 :].lower()
    else:
        return ""


def rem_file_ext(filename):
    """Returns the filename without extension.
    """
    pos = filename.rfind(".")
    if pos > -1:
        return filename[:pos]
    else:
        return filename
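
For context, illustrative calls for the string helpers above:

escape("a*b", "*")         # "a\\*b" -- escaped characters get a backslash
get_file_ext("photo.JPG")  # "jpg" -- lowercased, without the dot
rem_file_ext("photo.jpg")  # "photo"
get_file_ext("README")     # "" -- no dot means no extension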

def pluralize(number, word, decimals=0, plural_word=None):
    """Returns a pluralized string with ``number`` in front of ``word``.
@@ -173,11 +190,12 @@ def pluralize(number, word, decimals=0, plural_word=None):
    format = "%%1.%df %%s" % decimals
    if number > 1:
        if plural_word is None:
            word += "s"
        else:
            word = plural_word
    return format % (number, word)
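
For context, illustrative calls for pluralize (expected values follow from the format string above):

pluralize(1, "file")                      # "1 file"
pluralize(3, "file")                      # "3 files"
pluralize(1.5, "minute", 1)               # "1.5 minutes"
pluralize(2, "box", plural_word="boxes")  # "2 boxes"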

def format_time(seconds, with_hours=True):
    """Transforms seconds in a hh:mm:ss string.
@@ -189,14 +207,15 @@ def format_time(seconds, with_hours=True):
    m, s = divmod(seconds, 60)
    if with_hours:
        h, m = divmod(m, 60)
        r = "%02d:%02d:%02d" % (h, m, s)
    else:
        r = "%02d:%02d" % (m, s)
    if minus:
        return "-" + r
    else:
        return r


def format_time_decimal(seconds):
    """Transforms seconds in a string like '3.4 minutes'.
    """
@@ -204,20 +223,23 @@ def format_time_decimal(seconds):
    if minus:
        seconds *= -1
    if seconds < 60:
        r = pluralize(seconds, "second", 1)
    elif seconds < 3600:
        r = pluralize(seconds / 60.0, "minute", 1)
    elif seconds < 86400:
        r = pluralize(seconds / 3600.0, "hour", 1)
    else:
        r = pluralize(seconds / 86400.0, "day", 1)
    if minus:
        return "-" + r
    else:
        return r
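
For context, illustrative calls for the two time formatters:

format_time(3725)                   # "01:02:05"
format_time(125, with_hours=False)  # "02:05"
format_time_decimal(45)             # "45.0 seconds"
format_time_decimal(90)             # "1.5 minutes"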

SIZE_DESC = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
SIZE_VALS = tuple(1024 ** i for i in range(1, 9))


def format_size(size, decimal=0, forcepower=-1, showdesc=True):
    """Transform a byte count in a formatted string (KB, MB etc..).
@@ -238,12 +260,12 @@ def format_size(size, decimal=0, forcepower=-1, showdesc=True):
    else:
        i = forcepower
    if i > 0:
        div = SIZE_VALS[i - 1]
    else:
        div = 1
    format = "%%%d.%df" % (decimal, decimal)
    negative = size < 0
    divided_size = (0.0 + abs(size)) / div
    if decimal == 0:
        divided_size = ceil(divided_size)
    else:
@@ -252,18 +274,21 @@ def format_size(size, decimal=0, forcepower=-1, showdesc=True):
        divided_size *= -1
    result = format % divided_size
    if showdesc:
        result += " " + SIZE_DESC[i]
    return result

_valid_xml_range = "\x09\x0A\x0D\x20-\uD7FF\uE000-\uFFFD"
if sys.maxunicode > 0x10000:
    _valid_xml_range += "%s-%s" % (chr(0x10000), chr(min(sys.maxunicode, 0x10FFFF)))
RE_INVALID_XML_SUB = re.compile("[^%s]" % _valid_xml_range, re.U).sub


def remove_invalid_xml(s, replace_with=" "):
    return RE_INVALID_XML_SUB(replace_with, s)


def multi_replace(s, replace_from, replace_to=""):
    """A function like str.replace() with multiple replacements.

    ``replace_from`` is a list of things you want to replace. Ex: ['a','bc','d']
@@ -280,17 +305,20 @@ def multi_replace(s, replace_from, replace_to=''):
    if isinstance(replace_to, str) and (len(replace_from) != len(replace_to)):
        replace_to = [replace_to for r in replace_from]
    if len(replace_from) != len(replace_to):
        raise ValueError("len(replace_from) must be equal to len(replace_to)")
    replace = list(zip(replace_from, replace_to))
    for r_from, r_to in [r for r in replace if r[0] in s]:
        s = s.replace(r_from, r_to)
    return s
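
For context, illustrative calls for multi_replace (a single-character replace_to is repeated to match replace_from):

multi_replace("a-b_c", "-_", " ")             # "a b c"
multi_replace("abc", ["a", "c"], ["1", "3"])  # "1b3"
multi_replace("a.b.c", ".", "")               # "abc"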

# --- Date related
# It might seem like needless namespace pollution, but the speedup gained by this constant is
# significant, so it stays.
ONE_DAY = timedelta(1)


def iterdaterange(start, end):
    """Yields every day between ``start`` and ``end``.
    """
@@ -299,7 +327,9 @@ def iterdaterange(start, end):
        yield date
        date += ONE_DAY


# --- Files related
@pathify
def modified_after(first_path: Path, second_path: Path):
@@ -317,19 +347,21 @@ def modified_after(first_path: Path, second_path: Path):
        return True
    return first_mtime > second_mtime


def find_in_path(name, paths=None):
    """Search for `name` in all directories of `paths` and return the absolute path of the first
    occurrence. If `paths` is None, $PATH is used.
    """
    if paths is None:
        paths = os.environ["PATH"]
    if isinstance(paths, str):  # if it's not a string, it's already a list
        paths = paths.split(os.pathsep)
    for path in paths:
        if op.exists(op.join(path, name)):
            return op.join(path, name)
    return None
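
For context, hedged usage sketches for the file helpers above (paths and results are illustrative):

find_in_path("python3")             # e.g. "/usr/bin/python3", or None if absent
find_in_path("gcc", ["/usr/bin", "/usr/local/bin"])
modified_after(Path("a.txt"), Path("b.txt"))  # True if a.txt has the newer mtime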

@log_io_error
@pathify
def delete_if_empty(path: Path, files_to_delete=[]):
@@ -345,7 +377,8 @@ def delete_if_empty(path: Path, files_to_delete=[]):
        path.rmdir()
    return True


def open_if_filename(infile, mode="rb"):
    """If ``infile`` is a string, it opens and returns it. If it's already a file object, it simply returns it.

    This function returns ``(file, should_close_flag)``. The should_close_flag is True if a file has
@@ -364,15 +397,18 @@ def open_if_filename(infile, mode='rb'):
    else:
        return (infile, False)


def ensure_folder(path):
    "Create `path` as a folder if it doesn't exist."
    if not op.exists(path):
        os.makedirs(path)


def ensure_file(path):
    "Create `path` as an empty file if it doesn't exist."
    if not op.exists(path):
        open(path, "w").close()


def delete_files_with_pattern(folder_path, pattern, recursive=True):
    """Delete all files (or folders) in `folder_path` that match the glob `pattern`.
@@ -389,6 +425,7 @@ def delete_files_with_pattern(folder_path, pattern, recursive=True):
        for p in subfolders:
            delete_files_with_pattern(p, pattern, True)


class FileOrPath:
    """Does the same as :func:`open_if_filename`, but it can be used with a ``with`` statement.

        with FileOrPath(infile):
            dostuff()
    """

    def __init__(self, file_or_path, mode="rb"):
        self.file_or_path = file_or_path
        self.mode = mode
        self.mustclose = False
@@ -410,4 +448,3 @@ class FileOrPath:
    def __exit__(self, exc_type, exc_value, traceback):
        if self.fp and self.mustclose:
            self.fp.close()
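
For context, a hedged usage sketch of open_if_filename and FileOrPath (the file name is illustrative):

# FileOrPath accepts an open file object or a path, and only closes
# handles it opened itself.
with FileOrPath("data.bin") as fp:
    header = fp.read(4)

f, should_close = open_if_filename("data.bin")
try:
    data = f.read()
finally:
    if should_close:  # False when a file object was passed in
        f.close()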
@@ -15,16 +15,23 @@ import platform
import re

from hscommon.build import (
    print_and_do,
    copy_packages,
    build_debian_changelog,
    get_module_version,
    filereplace,
    copy,
    setup_package_argparser,
    copy_all,
)


def parse_args():
    parser = ArgumentParser()
    setup_package_argparser(parser)
    return parser.parse_args()


def copy_files_to_package(destpath, packages, with_so):
    # when with_so is true, we keep .so files in the package, and otherwise, we don't. We need this
    # flag because when building debian src pkg, we *don't* want .so files (they're compiled later)
@@ -32,126 +39,162 @@ def copy_files_to_package(destpath, packages, with_so):
    if op.exists(destpath):
        shutil.rmtree(destpath)
    os.makedirs(destpath)
    shutil.copy("run.py", op.join(destpath, "run.py"))
    extra_ignores = ["*.so"] if not with_so else None
    copy_packages(packages, destpath, extra_ignores=extra_ignores)
    shutil.copytree(op.join("build", "help"), op.join(destpath, "help"))
    shutil.copytree(op.join("build", "locale"), op.join(destpath, "locale"))
    compileall.compile_dir(destpath)


def package_debian_distribution(distribution):
    app_version = get_module_version("core")
    version = "{}~{}".format(app_version, distribution)
    destpath = op.join("build", "dupeguru-{}".format(version))
    srcpath = op.join(destpath, "src")
    packages = ["hscommon", "core", "qtlib", "qt", "send2trash", "hsaudiotag"]
    copy_files_to_package(srcpath, packages, with_so=False)
    os.mkdir(op.join(destpath, "modules"))
    copy_all(op.join("core", "pe", "modules", "*.*"), op.join(destpath, "modules"))
    copy(
        op.join("qt", "pe", "modules", "block.c"),
        op.join(destpath, "modules", "block_qt.c"),
    )
    copy(
        op.join("pkg", "debian", "build_pe_modules.py"),
        op.join(destpath, "build_pe_modules.py"),
    )
    debdest = op.join(destpath, "debian")
    debskel = op.join("pkg", "debian")
    os.makedirs(debdest)
    debopts = json.load(open(op.join(debskel, "dupeguru.json")))
    for fn in ["compat", "copyright", "dirs", "rules", "source"]:
        copy(op.join(debskel, fn), op.join(debdest, fn))
    filereplace(op.join(debskel, "control"), op.join(debdest, "control"), **debopts)
    filereplace(op.join(debskel, "Makefile"), op.join(destpath, "Makefile"), **debopts)
    filereplace(
        op.join(debskel, "dupeguru.desktop"),
        op.join(debdest, "dupeguru.desktop"),
        **debopts
    )
    changelogpath = op.join("help", "changelog")
    changelog_dest = op.join(debdest, "changelog")
    project_name = debopts["pkgname"]
    from_version = "2.9.2"
    build_debian_changelog(
        changelogpath,
        changelog_dest,
        project_name,
        from_version=from_version,
        distribution=distribution,
    )
    shutil.copy(op.join("images", "dgse_logo_128.png"), srcpath)
    os.chdir(destpath)
    cmd = "dpkg-buildpackage -S -us -uc"
    os.system(cmd)
    os.chdir("../..")

def package_debian():
    print("Packaging for Debian/Ubuntu")
    for distribution in ["unstable"]:
        package_debian_distribution(distribution)


def package_arch():
    # For now, package_arch() will only copy the source files into build/. It copies fewer packages
    # than package_debian because there are more python packages available in Arch (so we don't
    # need to include them).
    print("Packaging for Arch")
    srcpath = op.join("build", "dupeguru-arch")
    packages = [
        "hscommon",
        "core",
        "qtlib",
        "qt",
        "send2trash",
        "hsaudiotag",
    ]
    copy_files_to_package(srcpath, packages, with_so=True)
    shutil.copy(op.join("images", "dgse_logo_128.png"), srcpath)
    debopts = json.load(open(op.join("pkg", "arch", "dupeguru.json")))
    filereplace(
        op.join("pkg", "arch", "dupeguru.desktop"),
        op.join(srcpath, "dupeguru.desktop"),
        **debopts
    )


def package_source_txz():
    print("Creating git archive")
    app_version = get_module_version("core")
    name = "dupeguru-src-{}.tar".format(app_version)
    base_path = os.getcwd()
    build_path = op.join(base_path, "build")
    dest = op.join(build_path, name)
    print_and_do("git archive -o {} HEAD".format(dest))
    # Now, we need to include submodules
    SUBMODULES = ["hscommon", "qtlib"]
    for submodule in SUBMODULES:
        print("Adding submodule {} to archive".format(submodule))
        os.chdir(submodule)
        archive_path = op.join(build_path, "{}.tar".format(submodule))
        print_and_do(
            "git archive -o {} --prefix {}/ HEAD".format(archive_path, submodule)
        )
        os.chdir(base_path)
        print_and_do("tar -A {} -f {}".format(archive_path, dest))
    print_and_do("xz {}".format(dest))

def package_windows():
    app_version = get_module_version("core")
    arch = platform.architecture()[0]
    # Information to pass to pyinstaller and NSIS
    match = re.search("[0-9]+.[0-9]+.[0-9]+", app_version)
    version_array = match.group(0).split(".")
    match = re.search("[0-9]+", arch)
    bits = match.group(0)
    # include locale files if they are built; otherwise exit, as it would break
    # the localization
    if not op.exists("build/locale"):
        print("Locale files not built, exiting...")
        return
    # include help files if they are built; otherwise exit, as they should be included
    if not op.exists("build/help"):
        print("Help files not built, exiting...")
        return
    # create version information file from template
    try:
        version_template = open("win_version_info.temp", "r")
        version_info = version_template.read()
        version_template.close()
        version_info_file = open("win_version_info.txt", "w")
        version_info_file.write(
            version_info.format(
                version_array[0], version_array[1], version_array[2], bits
            )
        )
        version_info_file.close()
    except Exception:
        print("Error creating version info file, exiting...")
        return
    # run pyinstaller via command line
    print_and_do(
        "pyinstaller -w --name=dupeguru-win{0} --icon=images/dgse_logo.ico "
        '--add-data "build/locale;locale" --add-data "build/help;help" '
        "--version-file win_version_info.txt run.py".format(bits)
    )
    # remove version info file
    os.remove("win_version_info.txt")
    # Call NSIS (TODO update to not use hardcoded path)
    cmd = (
        '"C:\\Program Files (x86)\\NSIS\\Bin\\makensis.exe" '
        "/DVERSIONMAJOR={0} /DVERSIONMINOR={1} /DVERSIONPATCH={2} /DBITS={3} setup.nsi"
    )
    print_and_do(cmd.format(version_array[0], version_array[1], version_array[2], bits))

def main():
    args = parse_args()
    if args.src_pkg:
@@ -159,17 +202,18 @@ def main():
        package_source_txz()
        return
    print("Packaging dupeGuru with UI qt")
    if sys.platform == "win32":
        package_windows()
    else:
        if not args.arch_pkg:
            distname, _, _ = platform.dist()
        else:
            distname = "arch"
        if distname == "arch":
            package_arch()
        else:
            package_debian()


if __name__ == "__main__":
    main()

qt/app.py
@@ -36,11 +36,12 @@ from .me.preferences_dialog import PreferencesDialog as PreferencesDialogMusic
from .pe.preferences_dialog import PreferencesDialog as PreferencesDialogPicture
from .pe.photo import File as PlatSpecificPhoto

tr = trget("ui")


class DupeGuru(QObject):
    LOGO_NAME = "logo_se"
    NAME = "dupeGuru"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
@@ -49,20 +50,28 @@ class DupeGuru(QObject):
        self.model = DupeGuruModel(view=self)
        self._setup()

    # --- Private
    def _setup(self):
        core.pe.photo.PLAT_SPECIFIC_PHOTO_CLASS = PlatSpecificPhoto
        self._setupActions()
        self._update_options()
        self.recentResults = Recent(self, "recentResults")
        self.recentResults.mustOpenItem.connect(self.model.load_from)
        self.resultWindow = None
        self.details_dialog = None
        self.directories_dialog = DirectoriesDialog(self)
        self.progress_window = ProgressWindow(
            self.directories_dialog, self.model.progress_window
        )
        self.problemDialog = ProblemDialog(
            parent=self.directories_dialog, model=self.model.problem_dialog
        )
        self.ignoreListDialog = IgnoreListDialog(
            parent=self.directories_dialog, model=self.model.ignore_list_dialog
        )
        self.deletionOptions = DeletionOptions(
            parent=self.directories_dialog, model=self.model.deletion_options
        )
        self.about_box = AboutBox(self.directories_dialog, self)
        self.directories_dialog.show()
@@ -80,46 +89,70 @@ class DupeGuru(QObject):
        # Setup actions that are common to both the directory dialog and the results window.
        # (name, shortcut, icon, desc, func)
        ACTIONS = [
            ("actionQuit", "Ctrl+Q", "", tr("Quit"), self.quitTriggered),
            (
                "actionPreferences",
                "Ctrl+P",
                "",
                tr("Options"),
                self.preferencesTriggered,
            ),
            ("actionIgnoreList", "", "", tr("Ignore List"), self.ignoreListTriggered),
            (
                "actionClearPictureCache",
                "Ctrl+Shift+P",
                "",
                tr("Clear Picture Cache"),
                self.clearPictureCacheTriggered,
            ),
            ("actionShowHelp", "F1", "", tr("dupeGuru Help"), self.showHelpTriggered),
            ("actionAbout", "", "", tr("About dupeGuru"), self.showAboutBoxTriggered),
            (
                "actionOpenDebugLog",
                "",
                "",
                tr("Open Debug Log"),
                self.openDebugLogTriggered,
            ),
        ]
        createActions(ACTIONS, self)

    def _update_options(self):
        self.model.options["mix_file_kind"] = self.prefs.mix_file_kind
        self.model.options["escape_filter_regexp"] = not self.prefs.use_regexp
        self.model.options["clean_empty_dirs"] = self.prefs.remove_empty_folders
        self.model.options[
            "ignore_hardlink_matches"
        ] = self.prefs.ignore_hardlink_matches
        self.model.options["copymove_dest_type"] = self.prefs.destination_type
        self.model.options["scan_type"] = self.prefs.get_scan_type(self.model.app_mode)
        self.model.options["min_match_percentage"] = self.prefs.filter_hardness
        self.model.options["word_weighting"] = self.prefs.word_weighting
        self.model.options["match_similar_words"] = self.prefs.match_similar
        threshold = (
            self.prefs.small_file_threshold if self.prefs.ignore_small_files else 0
        )
        self.model.options["size_threshold"] = (
            threshold * 1024
        )  # threshold is in KB. the scanner wants bytes
        scanned_tags = set()
        if self.prefs.scan_tag_track:
            scanned_tags.add("track")
        if self.prefs.scan_tag_artist:
            scanned_tags.add("artist")
        if self.prefs.scan_tag_album:
            scanned_tags.add("album")
        if self.prefs.scan_tag_title:
            scanned_tags.add("title")
        if self.prefs.scan_tag_genre:
            scanned_tags.add("genre")
        if self.prefs.scan_tag_year:
            scanned_tags.add("year")
        self.model.options["scanned_tags"] = scanned_tags
        self.model.options["match_scaled"] = self.prefs.match_scaled
        self.model.options["picture_cache_type"] = self.prefs.picture_cache_type

    # --- Private
    def _get_details_dialog_class(self):
        if self.model.app_mode == AppMode.Picture:
            return DetailsDialogPicture
@@ -136,7 +169,7 @@ class DupeGuru(QObject):
        else:
            return PreferencesDialogStandard

    # --- Public
    def add_selected_to_ignore_list(self):
        self.model.add_selected_to_ignore_list()
@@ -166,17 +199,19 @@ class DupeGuru(QObject):
        self.model.save()
        QApplication.quit()

    # --- Signals
    willSavePrefs = pyqtSignal()
    SIGTERM = pyqtSignal()

    # --- Events
    def finishedLaunching(self):
        if sys.getfilesystemencoding() == "ascii":
            # No need to localize this, it's a debugging message.
            msg = (
                "Something is wrong with the way your system locale is set. If the files you're "
                "scanning have accented letters, you'll probably get a crash. It is advised that "
                "you set your system locale properly."
            )
            QMessageBox.warning(self.directories_dialog, "Wrong Locale", msg)

    def clearPictureCacheTriggered(self):
@@ -191,11 +226,13 @@ class DupeGuru(QObject):
        self.model.ignore_list_dialog.show()

    def openDebugLogTriggered(self):
        debugLogPath = op.join(self.model.appdata, "debug.log")
        desktop.open_path(debugLogPath)

    def preferencesTriggered(self):
        preferences_dialog = self._get_preferences_dialog_class()(
            self.directories_dialog, self
        )
        preferences_dialog.load()
        result = preferences_dialog.exec()
        if result == QDialog.Accepted:
@@ -212,17 +249,17 @@ class DupeGuru(QObject):
    def showHelpTriggered(self):
        base_path = platform.HELP_PATH
        help_path = op.abspath(op.join(base_path, "index.html"))
        if op.exists(help_path):
            url = QUrl.fromLocalFile(help_path)
        else:
            url = QUrl("https://www.hardcoded.net/dupeguru/help/en/")
        QDesktopServices.openUrl(url)

    def handleSIGTERM(self):
        self.shutdown()

    # --- model --> view
    def get_default(self, key):
        return self.prefs.get_value(key)
@@ -231,10 +268,10 @@ class DupeGuru(QObject):
    def show_message(self, msg):
        window = QApplication.activeWindow()
        QMessageBox.information(window, "", msg)

    def ask_yes_no(self, prompt):
        return self.confirm("", prompt)

    def create_results_window(self):
        """Creates resultWindow and details_dialog depending on the selected ``app_mode``.
@@ -256,11 +293,13 @@ class DupeGuru(QObject):
    def select_dest_folder(self, prompt):
        flags = QFileDialog.ShowDirsOnly
        return QFileDialog.getExistingDirectory(self.resultWindow, prompt, "", flags)

    def select_dest_file(self, prompt, extension):
        files = tr("{} file (*.{})").format(extension.upper(), extension)
        destination, chosen_filter = QFileDialog.getSaveFileName(
            self.resultWindow, prompt, "", files
        )
        if not destination.endswith(".{}".format(extension)):
            destination = "{}.{}".format(destination, extension)
        return destination
@@ -12,7 +12,8 @@ from PyQt5.QtWidgets import QDialog, QVBoxLayout, QLabel, QCheckBox, QDialogButtonBox
from hscommon.trans import trget
from qtlib.radio_box import RadioBox

tr = trget("ui")


class DeletionOptions(QDialog):
    def __init__(self, parent, model, **kwargs):
@@ -41,7 +42,9 @@ class DeletionOptions(QDialog):
        self.linkMessageLabel = QLabel(text)
        self.linkMessageLabel.setWordWrap(True)
        self.verticalLayout.addWidget(self.linkMessageLabel)
        self.linkTypeRadio = RadioBox(
            items=[tr("Symlink"), tr("Hardlink")], spread=False
        )
        self.verticalLayout.addWidget(self.linkTypeRadio)
        if not self.model.supports_links():
            self.linkCheckbox.setEnabled(False)
@@ -60,11 +63,11 @@ class DeletionOptions(QDialog):
        self.buttonBox.addButton(tr("Cancel"), QDialogButtonBox.RejectRole)
        self.verticalLayout.addWidget(self.buttonBox)

    # --- Signals
    def linkCheckboxChanged(self, changed: int):
        self.model.link_deleted = bool(changed)

    # --- model --> view
    def update_msg(self, msg: str):
        self.msgLabel.setText(msg)
@@ -80,4 +83,3 @@ class DeletionOptions(QDialog):
    def set_hardlink_option_enabled(self, is_enabled: bool):
        self.linkTypeRadio.setEnabled(is_enabled)
@@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2010-02-05
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from PyQt5.QtCore import Qt
@@ -11,6 +11,7 @@ from PyQt5.QtWidgets import QDialog
from .details_table import DetailsModel


class DetailsDialog(QDialog):
    def __init__(self, parent, app, **kwargs):
        super().__init__(parent, Qt.Tool, **kwargs)
@@ -20,28 +21,27 @@ class DetailsDialog(QDialog):
        # To avoid saving uninitialized geometry on appWillSavePrefs, we track whether our dialog
        # has been shown. If it has, we know that our geometry should be saved.
        self._shown_once = False
        self.app.prefs.restoreGeometry("DetailsWindowRect", self)
        self.tableModel = DetailsModel(self.model)
        # tableView is defined in subclasses
        self.tableView.setModel(self.tableModel)
        self.model.view = self
        self.app.willSavePrefs.connect(self.appWillSavePrefs)

    def _setupUi(self):  # Virtual
        pass

    def show(self):
        self._shown_once = True
        super().show()

    # --- Events
    def appWillSavePrefs(self):
        if self._shown_once:
            self.app.prefs.saveGeometry("DetailsWindowRect", self)

    # --- model --> view
    def refresh(self):
        self.tableModel.beginResetModel()
        self.tableModel.endResetModel()
@@ -1,9 +1,9 @@
# Created By: Virgil Dupras
# Created On: 2009-05-17
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from PyQt5.QtCore import Qt, QAbstractTableModel
@@ -11,18 +11,19 @@ from PyQt5.QtWidgets import QHeaderView, QTableView
from hscommon.trans import trget

tr = trget("ui")

HEADER = [tr("Attribute"), tr("Selected"), tr("Reference")]


class DetailsModel(QAbstractTableModel):
    def __init__(self, model, **kwargs):
        super().__init__(**kwargs)
        self.model = model

    def columnCount(self, parent):
        return len(HEADER)

    def data(self, index, role):
        if not index.isValid():
            return None
@@ -31,15 +32,19 @@ class DetailsModel(QAbstractTableModel):
        column = index.column()
        row = index.row()
        return self.model.row(row)[column]

    def headerData(self, section, orientation, role):
        if (
            orientation == Qt.Horizontal
            and role == Qt.DisplayRole
            and section < len(HEADER)
        ):
            return HEADER[section]
        return None

    def rowCount(self, parent):
        return self.model.row_count()


class DetailsTable(QTableView):
    def __init__(self, *args):
@@ -47,7 +52,7 @@ class DetailsTable(QTableView):
        self.setAlternatingRowColors(True)
        self.setSelectionBehavior(QTableView.SelectRows)
        self.setShowGrid(False)

    def setModel(self, model):
        QTableView.setModel(self, model)
        # The model needs to be set to set header stuff
@@ -61,4 +66,3 @@ class DetailsTable(QTableView):
        vheader = self.verticalHeader()
        vheader.setVisible(False)
        vheader.setDefaultSectionSize(18)

Some files were not shown because too many files have changed in this diff.