Mirror of https://github.com/arsenetar/dupeguru.git
[#32] Internationalized the core and localized it to French.
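The pattern is the same throughout the diff below: every user-facing string in the core is wrapped in tr() from hscommon.trans so it can be picked up for translation and displayed in French (this commit's localization) at runtime. A minimal sketch of that pattern, with a hypothetical identity fallback standing in for the real tr():

    # Sketch only; the real tr() lives in hscommon.trans and looks the
    # string up in the active translation catalog.
    try:
        from hscommon.trans import tr
    except ImportError:
        def tr(s):
            return s  # assumed identity fallback: untranslated English

    def stat_line(result, discarded_file_count):
        # User-visible text goes through tr(); %-style placeholders stay
        # inside the translated template and are filled in afterwards.
        if discarded_file_count:
            result = tr("%s (%d discarded)") % (result, discarded_file_count)
        return result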
@@ -19,6 +19,7 @@ from hscommon.notify import Broadcaster
 from hscommon.path import Path
 from hscommon.conflict import smart_move, smart_copy
 from hscommon.util import delete_if_empty, first, escape
+from hscommon.trans import tr
 
 from . import directories, results, scanner, export, fs
 
@@ -328,7 +329,7 @@ class DupeGuru(RegistrableApplication, Broadcaster):
 
     def start_scanning(self):
        def do(j):
-            j.set_progress(0, 'Collecting files to scan')
+            j.set_progress(0, tr("Collecting files to scan"))
            files = list(self.directories.get_files())
            if self.options['ignore_hardlink_matches']:
                files = self._remove_hardlink_dupes(files)
@@ -354,6 +355,6 @@ class DupeGuru(RegistrableApplication, Broadcaster):
     def stat_line(self):
        result = self.results.stat_line
        if self.scanner.discarded_file_count:
-            result = '%s (%d discarded)' % (result, self.scanner.discarded_file_count)
+            result = tr("%s (%d discarded)") % (result, self.scanner.discarded_file_count)
        return result
 
@@ -15,15 +15,16 @@ from hscommon.cocoa import install_exception_hook
 from hscommon.cocoa.objcmin import (NSNotificationCenter, NSUserDefaults,
     NSSearchPathForDirectoriesInDomains, NSApplicationSupportDirectory, NSUserDomainMask,
     NSWorkspace)
+from hscommon.trans import tr
 
 from . import app
 
 JOBID2TITLE = {
-    app.JOB_SCAN: "Scanning for duplicates",
-    app.JOB_LOAD: "Loading",
-    app.JOB_MOVE: "Moving",
-    app.JOB_COPY: "Copying",
-    app.JOB_DELETE: "Sending to Trash",
+    app.JOB_SCAN: tr("Scanning for duplicates"),
+    app.JOB_LOAD: tr("Loading"),
+    app.JOB_MOVE: tr("Moving"),
+    app.JOB_COPY: tr("Copying"),
+    app.JOB_DELETE: tr("Sending to Trash"),
 }
 
 class DupeGuru(app.DupeGuru):
@@ -6,7 +6,6 @@
 # which should be included with this package. The terms are also available at
 # http://www.hardcoded.net/licenses/bsd_license
 
-
 import difflib
 import itertools
 import logging
@@ -15,6 +14,7 @@ from collections import defaultdict, namedtuple
 from unicodedata import normalize
 
 from hscommon.util import flatten, multi_replace
+from hscommon.trans import tr
 from jobprogress import job
 
 (WEIGHT_WORDS,
@@ -25,6 +25,7 @@ JOB_REFRESH_RATE = 100
 
 def getwords(s):
    if isinstance(s, str):
+        # XXX is this really needed?
        s = normalize('NFD', s)
    s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", ' ').lower()
    s = ''.join(c for c in s if c in string.ascii_letters + string.digits + string.whitespace)
@@ -175,7 +176,7 @@ def getmatches(objects, min_match_percentage=0, match_similar_words=False, weigh
        match_flags.append(MATCH_SIMILAR_WORDS)
    if no_field_order:
        match_flags.append(NO_FIELD_ORDER)
-    j.start_job(len(word_dict), '0 matches found')
+    j.start_job(len(word_dict), tr("0 matches found"))
    compared = defaultdict(set)
    result = []
    try:
@@ -193,7 +194,7 @@ def getmatches(objects, min_match_percentage=0, match_similar_words=False, weigh
                result.append(m)
                if len(result) >= LIMIT:
                    return result
-        j.add_progress(desc='%d matches found' % len(result))
+        j.add_progress(desc=tr("%d matches found") % len(result))
    except MemoryError:
        # This is the place where the memory usage is at its peak during the scan.
        # Just continue the process with an incomplete list of matches.
@@ -205,14 +206,14 @@ def getmatches(objects, min_match_percentage=0, match_similar_words=False, weigh
 def getmatches_by_contents(files, sizeattr='size', partial=False, j=job.nulljob):
    j = j.start_subjob([2, 8])
    size2files = defaultdict(set)
-    for file in j.iter_with_progress(files, 'Read size of %d/%d files'):
+    for file in j.iter_with_progress(files, tr("Read size of %d/%d files")):
        filesize = getattr(file, sizeattr)
        if filesize:
            size2files[filesize].add(file)
    possible_matches = [files for files in size2files.values() if len(files) > 1]
    del size2files
    result = []
-    j.start_job(len(possible_matches), '0 matches found')
+    j.start_job(len(possible_matches), tr("0 matches found"))
    for group in possible_matches:
        for first, second in itertools.combinations(group, 2):
            if first.is_ref and second.is_ref:
@@ -220,7 +221,7 @@ def getmatches_by_contents(files, sizeattr='size', partial=False, j=job.nulljob)
            if first.md5partial == second.md5partial:
                if partial or first.md5 == second.md5:
                    result.append(Match(first, second, 100))
-        j.add_progress(desc='%d matches found' % len(result))
+        j.add_progress(desc=tr("%d matches found") % len(result))
    return result
 
 class Group(object):
@@ -349,7 +350,7 @@ def get_groups(matches, j=job.nulljob):
    dupe2group = {}
    groups = []
    try:
-        for match in j.iter_with_progress(matches, 'Grouped %d/%d matches', JOB_REFRESH_RATE):
+        for match in j.iter_with_progress(matches, tr("Grouped %d/%d matches"), JOB_REFRESH_RATE):
            first, second, _ = match
            first_group = dupe2group.get(first)
            second_group = dupe2group.get(second)
@@ -14,6 +14,7 @@ from . import engine
 from jobprogress.job import nulljob
 from hscommon.markable import Markable
 from hscommon.util import flatten, nonone, FileOrPath, format_size
+from hscommon.trans import tr
 
 class Results(Markable):
    #---Override
@@ -87,14 +88,14 @@ class Results(Markable):
            total_size = sum(dupe.size for dupe in self.__filtered_dupes if self.is_markable(dupe))
        if self.mark_inverted:
            marked_size = self.__total_size - marked_size
-        result = '%d / %d (%s / %s) duplicates marked.' % (
+        result = tr("%d / %d (%s / %s) duplicates marked.") % (
            mark_count,
            total_count,
            format_size(marked_size, 2),
            format_size(total_size, 2),
        )
        if self.__filters:
-            result += ' filter: %s' % ' --> '.join(self.__filters)
+            result += tr(" filter: %s") % ' --> '.join(self.__filters)
        return result
 
    def __recalculate_stats(self):
@@ -12,6 +12,7 @@ import re
 from jobprogress import job
 from hscommon import io
 from hscommon.util import dedupe, rem_file_ext, get_file_ext
+from hscommon.trans import tr
 
 from . import engine
 from .ignore import IgnoreList
@@ -44,7 +45,7 @@ class Scanner:
    def _getmatches(self, files, j):
        if self.size_threshold:
            j = j.start_subjob([2, 8])
-            for f in j.iter_with_progress(files, 'Read size of %d/%d files'):
+            for f in j.iter_with_progress(files, tr("Read size of %d/%d files")):
                f.size # pre-read, makes a smoother progress if read here (especially for bundles)
            files = [f for f in files if f.size >= self.size_threshold]
        if self.scan_type in (ScanType.Contents, ScanType.ContentsAudio):
@@ -64,7 +65,7 @@ class Scanner:
                ScanType.Fields: lambda f: engine.getfields(rem_file_ext(f.name)),
                ScanType.Tag: lambda f: [engine.getwords(str(getattr(f, attrname))) for attrname in SCANNABLE_TAGS if attrname in self.scanned_tags],
            }[self.scan_type]
-            for f in j.iter_with_progress(files, 'Read metadata of %d/%d files'):
+            for f in j.iter_with_progress(files, tr("Read metadata of %d/%d files")):
                f.words = func(f)
            return engine.getmatches(files, j=j, **kw)
 
@@ -93,13 +94,13 @@ class Scanner:
        logging.info('Getting matches')
        matches = self._getmatches(files, j)
        logging.info('Found %d matches' % len(matches))
-        j.set_progress(100, 'Removing false matches')
+        j.set_progress(100, tr("Removing false matches"))
        if not self.mix_file_kind:
            matches = [m for m in matches if get_file_ext(m.first.name) == get_file_ext(m.second.name)]
        matches = [m for m in matches if io.exists(m.first.path) and io.exists(m.second.path)]
        if self.ignore_list:
            j = j.start_subjob(2)
-            iter_matches = j.iter_with_progress(matches, 'Processed %d/%d matches against the ignore list')
+            iter_matches = j.iter_with_progress(matches, tr("Processed %d/%d matches against the ignore list"))
            matches = [m for m in iter_matches
                if not self.ignore_list.AreIgnored(str(m.first.path), str(m.second.path))]
        logging.info('Grouping matches')
@@ -108,7 +109,7 @@ class Scanner:
        self.discarded_file_count = len(matched_files) - sum(len(g) for g in groups)
        groups = [g for g in groups if any(not f.is_ref for f in g)]
        logging.info('Created %d groups' % len(groups))
-        j.set_progress(100, 'Doing group prioritization')
+        j.set_progress(100, tr("Doing group prioritization"))
        for g in groups:
            g.prioritize(self._key_func, self._tie_breaker)
        return groups
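Note how the progress strings above ('0 matches found', 'Read size of %d/%d files', 'Grouped %d/%d matches') are passed to tr() as whole templates before any % substitution, so a French catalog entry can translate the full sentence around the placeholders. A small illustration, again assuming an identity tr() when hscommon is not available:

    def tr(s):
        return s  # assumed identity fallback

    template = tr("Read size of %d/%d files")
    print(template % (3, 10))  # -> Read size of 3/10 files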