# Created By: Virgil Dupras
# Created On: 2006/03/03
# Copyright 2011 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license

import logging
import re

from jobprogress import job
from hscommon import io
from hscommon.util import dedupe, rem_file_ext, get_file_ext
from hscommon.trans import tr

from . import engine
from .ignore import IgnoreList

class ScanType:
    Filename = 0
    Fields = 1
    FieldsNoOrder = 2
    Tag = 3
    Folders = 4
    Contents = 5
    ContentsAudio = 6

SCANNABLE_TAGS = ['track', 'artist', 'album', 'title', 'genre', 'year']
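
# Matches a trailing digit marker, bare or bracketed: "2", "(2)", "[2]" or "{2}".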
RE_DIGIT_ENDING = re.compile(r'\d+|\(\d+\)|\[\d+\]|{\d+}')

def is_same_with_digit(name, refname):
    # Returns True if name is the same as refname, except for a digit ending
    # (with or without brackets); for example, "foo (2)" vs "foo".
    if not name.startswith(refname):
        return False
    end = name[len(refname):].strip()
    return RE_DIGIT_ENDING.match(end) is not None

class Scanner:
    def __init__(self):
        self.ignore_list = IgnoreList()
        self.discarded_file_count = 0

    def _getmatches(self, files, j):
        if self.size_threshold:
            j = j.start_subjob([2, 8])
            for f in j.iter_with_progress(files, tr("Read size of %d/%d files")):
                f.size  # pre-read; reading it here makes progress smoother (especially for bundles)
            files = [f for f in files if f.size >= self.size_threshold]
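        # Contents-based scans match on file data rather than names. For
        # ContentsAudio, sizes are compared through the 'audiosize' attribute
        # and matching is partial (audio data only).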
        if self.scan_type in {ScanType.Contents, ScanType.ContentsAudio, ScanType.Folders}:
            sizeattr = 'audiosize' if self.scan_type == ScanType.ContentsAudio else 'size'
            return engine.getmatches_by_contents(
                files, sizeattr, partial=self.scan_type == ScanType.ContentsAudio, j=j)
        else:
            j = j.start_subjob([2, 8])
            kw = {}
            kw['match_similar_words'] = self.match_similar_words
            kw['weight_words'] = self.word_weighting
            kw['min_match_percentage'] = self.min_match_percentage
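            # FieldsNoOrder is simply Fields with the no_field_order flag, so
            # collapse it here before looking up the word-extraction function.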
            if self.scan_type == ScanType.FieldsNoOrder:
                self.scan_type = ScanType.Fields
                kw['no_field_order'] = True
            func = {
                ScanType.Filename: lambda f: engine.getwords(rem_file_ext(f.name)),
                ScanType.Fields: lambda f: engine.getfields(rem_file_ext(f.name)),
                ScanType.Tag: lambda f: [engine.getwords(str(getattr(f, attrname)))
                    for attrname in SCANNABLE_TAGS if attrname in self.scanned_tags],
            }[self.scan_type]
            for f in j.iter_with_progress(files, tr("Read metadata of %d/%d files")):
                logging.debug("Reading metadata of %s", f.path)
                f.words = func(f)
            return engine.getmatches(files, j=j, **kw)

    @staticmethod
    def _key_func(dupe):
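        # Sort key used to order each group's files during prioritization:
        # reference files first, then larger files.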
        return (not dupe.is_ref, -dupe.size)

    @staticmethod
    def _tie_breaker(ref, dupe):
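        # Breaks _key_func ties: returns True when dupe should take precedence
        # over ref. Names that look like copies ("copy", trailing digits) lose;
        # as a last resort, the file with the shorter path wins.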
        refname = rem_file_ext(ref.name).lower()
        dupename = rem_file_ext(dupe.name).lower()
        if 'copy' in dupename:
            return False
        if 'copy' in refname:
            return True
        if is_same_with_digit(dupename, refname):
            return False
        if is_same_with_digit(refname, dupename):
            return True
        return len(dupe.path) > len(ref.path)

    def GetDupeGroups(self, files, j=job.nulljob):
        j = j.start_subjob([8, 2])
        for f in [f for f in files if not hasattr(f, 'is_ref')]:
            f.is_ref = False
        logging.info("Getting matches. Scan type: %d", self.scan_type)
        matches = self._getmatches(files, j)
        logging.info('Found %d matches', len(matches))
        j.set_progress(100, tr("Removing false matches"))
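        # In a folders scan, subfolders of matched folders match too. Those
        # child matches are redundant, so drop every match whose two paths are
        # both inside already-matched parents.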
        if self.scan_type == ScanType.Folders and matches:
            allpath = {m.first.path for m in matches}
            allpath |= {m.second.path for m in matches}
            sortedpaths = sorted(allpath)
            toremove = set()
            last_parent_path = sortedpaths[0]
            for p in sortedpaths[1:]:
                if p in last_parent_path:
                    toremove.add(p)
                else:
                    last_parent_path = p
            matches = [m for m in matches
                if m.first.path not in toremove or m.second.path not in toremove]
        if not self.mix_file_kind:
            matches = [m for m in matches if get_file_ext(m.first.name) == get_file_ext(m.second.name)]
        matches = [m for m in matches if io.exists(m.first.path) and io.exists(m.second.path)]
        if self.ignore_list:
            j = j.start_subjob(2)
            iter_matches = j.iter_with_progress(matches, tr("Processed %d/%d matches against the ignore list"))
            matches = [m for m in iter_matches
                if not self.ignore_list.AreIgnored(str(m.first.path), str(m.second.path))]
        logging.info('Grouping matches')
        groups = engine.get_groups(matches, j)
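        # Files that matched but could not be fit into any group (because of
        # conflicting matches) are counted as discarded.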
        matched_files = dedupe([m.first for m in matches] + [m.second for m in matches])
        self.discarded_file_count = len(matched_files) - sum(len(g) for g in groups)
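        # A group consisting only of reference files has nothing to delete; drop it.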
        groups = [g for g in groups if any(not f.is_ref for f in g)]
        logging.info('Created %d groups', len(groups))
        j.set_progress(100, tr("Doing group prioritization"))
        for g in groups:
            g.prioritize(self._key_func, self._tie_breaker)
        return groups
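
    # Scan options. These are class-level defaults; set them on an instance to
    # configure a scan.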
    match_similar_words = False
    min_match_percentage = 80
    mix_file_kind = True
    scan_type = ScanType.Filename
    scanned_tags = {'artist', 'title'}
    size_threshold = 0
    word_weighting = False
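
# A minimal usage sketch (hypothetical; any file objects exposing 'name',
# 'path' and 'size', plus tag attributes for tag scans, should work):
#
#     scanner = Scanner()
#     scanner.scan_type = ScanType.Contents
#     groups = scanner.GetDupeGroups(files)
#     for group in groups:
#         print(group.ref.path, [d.path for d in group.dupes])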