# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import logging
import re
import os.path as op
from collections import namedtuple

from hscommon.jobprogress import job
from hscommon.util import dedupe, rem_file_ext, get_file_ext
from hscommon.trans import tr

from . import engine

# It's quite ugly to have scan types from all editions all put in the same class, but otherwise
# there will be some nasty bugs popping up (ScanType is used in core when it should exclusively be
# used in core_*). One day I'll clean this up.


class ScanType:
    FILENAME = 0
    FIELDS = 1
    FIELDSNOORDER = 2
    TAG = 3
    FOLDERS = 4
    CONTENTS = 5

    # PE
    FUZZYBLOCK = 10
    EXIFTIMESTAMP = 11


ScanOption = namedtuple("ScanOption", "scan_type label")

SCANNABLE_TAGS = ["track", "artist", "album", "title", "genre", "year"]

RE_DIGIT_ENDING = re.compile(r"\d+|\(\d+\)|\[\d+\]|{\d+}")


def is_same_with_digit(name, refname):
    # Returns True if name is the same as refname, but with digits (with brackets or not) at the end
    if not name.startswith(refname):
        return False
    end = name[len(refname) :].strip()
    return RE_DIGIT_ENDING.match(end) is not None
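# Rough illustration (hypothetical names): is_same_with_digit("IMG_1234 (2)", "IMG_1234") is True
# because the leftover "(2)" is a bracketed digit suffix, while is_same_with_digit("IMG_1234b",
# "IMG_1234") is False because "b" doesn't match RE_DIGIT_ENDING.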


def remove_dupe_paths(files):
    # Returns files with duplicates-by-path removed. Files with the exact same path are considered
    # duplicates and only the first file to have a path is kept. In certain cases, we have files
    # that have the same path but not with the same case; that's why we normalize. However, we also
    # have case-sensitive filesystems, and on those, we don't want to falsely remove duplicates;
    # that's why we have a `samefile` mechanism.
    result = []
    path2file = {}
    for f in files:
        normalized = str(f.path).lower()
        if normalized in path2file:
            try:
                if op.samefile(normalized, str(path2file[normalized].path)):
                    continue  # same file, it's a dupe
                else:
                    pass  # We don't treat them as dupes
            except OSError:
                continue  # File doesn't exist? Well, treat them as dupes
        else:
            path2file[normalized] = f
        result.append(f)
    return result
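# Sketch of the intent (hypothetical paths): entries for "/photos/IMG.jpg" and "/photos/img.jpg"
# normalize to the same key; op.samefile() then decides whether they really are one file (a
# case-insensitive filesystem) or two distinct files that must both be kept.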


class Scanner:
    def __init__(self):
        self.discarded_file_count = 0

    def _getmatches(self, files, j):
        if (
            self.size_threshold
            or self.large_size_threshold
            or self.scan_type in {ScanType.CONTENTS, ScanType.FOLDERS}
        ):
            j = j.start_subjob([2, 8])
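            # The [2, 8] proportions appear to give this size pre-read pass roughly 20% of the
            # progress and the matching pass that follows the remaining 80% (an assumption about
            # how hscommon's job splits subjobs).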
            for f in j.iter_with_progress(files, tr("Read size of %d/%d files")):
                f.size  # pre-read, makes a smoother progress if read here (especially for bundles)
            if self.size_threshold:
                files = [f for f in files if f.size >= self.size_threshold]
            if self.large_size_threshold:
                files = [f for f in files if f.size <= self.large_size_threshold]
        if self.scan_type in {ScanType.CONTENTS, ScanType.FOLDERS}:
            return engine.getmatches_by_contents(files, bigsize=self.big_file_size_threshold, j=j)
        else:
            j = j.start_subjob([2, 8])
            kw = {}
            kw["match_similar_words"] = self.match_similar_words
            kw["weight_words"] = self.word_weighting
            kw["min_match_percentage"] = self.min_match_percentage
            if self.scan_type == ScanType.FIELDSNOORDER:
                self.scan_type = ScanType.FIELDS
                kw["no_field_order"] = True
            func = {
                ScanType.FILENAME: lambda f: engine.getwords(rem_file_ext(f.name)),
                ScanType.FIELDS: lambda f: engine.getfields(rem_file_ext(f.name)),
                ScanType.TAG: lambda f: [
                    engine.getwords(str(getattr(f, attrname)))
                    for attrname in SCANNABLE_TAGS
                    if attrname in self.scanned_tags
                ],
            }[self.scan_type]
            for f in j.iter_with_progress(files, tr("Read metadata of %d/%d files")):
                logging.debug("Reading metadata of %s", f.path)
                f.words = func(f)
            return engine.getmatches(files, j=j, **kw)

    @staticmethod
    def _key_func(dupe):
        return -dupe.size

    @staticmethod
    def _tie_breaker(ref, dupe):
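        # Decides ties between files with equal sort keys: returning True appears to promote
        # `dupe` over `ref` as the group's reference (an assumption about Group.prioritize).
        # Names containing "copy" or carrying a trailing digit suffix lose; as a last resort,
        # the file with the deeper path wins.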
        refname = rem_file_ext(ref.name).lower()
        dupename = rem_file_ext(dupe.name).lower()
        if "copy" in dupename:
            return False
        if "copy" in refname:
            return True
        if is_same_with_digit(dupename, refname):
            return False
        if is_same_with_digit(refname, dupename):
            return True
        return len(dupe.path.parts) > len(ref.path.parts)

    @staticmethod
    def get_scan_options():
        """Returns a list of scanning options for this scanner.

        Returns a list of ``ScanOption``.
        """
        raise NotImplementedError()

    def get_dupe_groups(self, files, ignore_list=None, j=job.nulljob):
        for f in (f for f in files if not hasattr(f, "is_ref")):
            f.is_ref = False
        files = remove_dupe_paths(files)
        logging.info("Getting matches. Scan type: %d", self.scan_type)
        matches = self._getmatches(files, j)
        logging.info("Found %d matches", len(matches))
        j.set_progress(100, tr("Almost done! Fiddling with results..."))
        # In removing what we call here "false matches", we want to do a few things. If we scan by
        # folders, we want to remove folder matches for which the parent is also in a match (they're
        # "duplicated duplicates", if you will). Then, we don't want mixed file kinds if the option
        # isn't enabled, we want matches for which both files exist and, lastly, we don't want
        # matches with both files as ref.
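        # For example (hypothetical paths): if /photos/2019 and /photos/2019/trip both show up in
        # folder matches, the nested /photos/2019/trip is marked as a "duplicated duplicate"; a
        # match is then dropped only when both of its folders are marked that way.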
        if self.scan_type == ScanType.FOLDERS and matches:
            allpath = {m.first.path for m in matches}
            allpath |= {m.second.path for m in matches}
            sortedpaths = sorted(allpath)
            toremove = set()
            last_parent_path = sortedpaths[0]
            for p in sortedpaths[1:]:
                if last_parent_path in p.parents:
                    toremove.add(p)
                else:
                    last_parent_path = p
            matches = [m for m in matches if m.first.path not in toremove or m.second.path not in toremove]
        if not self.mix_file_kind:
            matches = [m for m in matches if get_file_ext(m.first.name) == get_file_ext(m.second.name)]
        matches = [m for m in matches if m.first.path.exists() and m.second.path.exists()]
        matches = [m for m in matches if not (m.first.is_ref and m.second.is_ref)]
        if ignore_list:
            matches = [m for m in matches if not ignore_list.are_ignored(str(m.first.path), str(m.second.path))]
        logging.info("Grouping matches")
        groups = engine.get_groups(matches)
        if self.scan_type in {
            ScanType.FILENAME,
            ScanType.FIELDS,
            ScanType.FIELDSNOORDER,
            ScanType.TAG,
        }:
            matched_files = dedupe([m.first for m in matches] + [m.second for m in matches])
            self.discarded_file_count = len(matched_files) - sum(len(g) for g in groups)
        else:
            # Ticket #195
            # To speed up the scan, we don't bother comparing contents of files that are both ref
            # files. However, this messes up "discarded" counting because there's a missing match
            # in cases where we end up with a dupe group anyway (with a non-ref file). Because it's
            # impossible to have discarded matches in exact dupe scans, we simply set it to 0, thus
            # bypassing our tricky problem.
            # Also, although ScanType.FUZZYBLOCK is not always doing exact comparisons, we also
            # bypass ref comparison there, thus messing up our "discarded" count. So we're
            # effectively disabling the "discarded" feature in PE, but it's better than falsely
            # reporting discarded matches.
            self.discarded_file_count = 0
        groups = [g for g in groups if any(not f.is_ref for f in g)]
        logging.info("Created %d groups", len(groups))
        for g in groups:
            g.prioritize(self._key_func, self._tie_breaker)
        return groups
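
    # The attributes below are class-level defaults; callers are expected to override them on the
    # instance (for example, from user preferences) before calling get_dupe_groups(). That usage
    # pattern is an assumption about the UI layer, not something enforced here.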
    match_similar_words = False
    min_match_percentage = 80
    mix_file_kind = True
    scan_type = ScanType.FILENAME
    scanned_tags = {"artist", "title"}
    size_threshold = 0
    large_size_threshold = 0
    big_file_size_threshold = 0
    word_weighting = False
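
# A minimal usage sketch (hypothetical subclass and file objects; not part of this module):
#
#     class MyScanner(Scanner):
#         @staticmethod
#         def get_scan_options():
#             return [ScanOption(ScanType.FILENAME, "Filename")]
#
#     scanner = MyScanner()
#     scanner.min_match_percentage = 90  # override a class-level default on the instance
#     groups = scanner.get_dupe_groups(files)  # `files`: file objects exposing .path, .name, .size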