Mirrored from https://github.com/arsenetar/dupeguru.git, synced 2025-07-12 01:33:20 +00:00

Fix stripping of (Japanese) Unicode characters

* Accents are removed from Unicode characters to generate similar "words".
* Non-Latin characters that cannot be processed this way (e.g. Japanese, Greek, Russian, etc.) should not be filtered out at all; otherwise files are erroneously skipped, or detected as dupes when only some of their characters make it past the filter.
* Above an arbitrary Unicode codepoint (expressed in decimal), we know any such processing is pointless, so those characters are left as is; a sketch of the idea follows below.
* Fix #878.
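
A minimal sketch of the idea above (not the committed implementation; the constant and helper names are hypothetical, and the real getwords() also replaces punctuation with spaces via multi_replace before filtering):

# Sketch only. 894 == ord("\u037e") (GREEK QUESTION MARK), the arbitrary cutoff.
import string
from unicodedata import normalize

CODEPOINT_LIMIT = 894  # characters above this are kept untouched

def _keep(c):
    # Past the cutoff (Japanese, Greek, Cyrillic, ...): preserve the character as is.
    if ord(c) > CODEPOINT_LIMIT:
        return True
    # Below the cutoff: only ASCII letters/digits/whitespace survive; the combining
    # accents produced by NFD decomposition are dropped here.
    return c in string.ascii_letters + string.digits + string.whitespace

def sketch_getwords(s):
    s = normalize("NFD", s).lower()  # "é" becomes "e" + combining acute accent
    return "".join(c for c in s if _keep(c)).split()

print(sketch_getwords("Amélie Solo Ver"))      # ['amelie', 'solo', 'ver']
print(sketch_getwords("君のこころ 国木田花丸"))   # ['君のこころ', '国木田花丸']
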
This commit is contained in:
glubsy 2021-04-29 05:08:43 +02:00
Parent: 0840104edf
Current commit: c4dcfd3d4b
2 changed files with 17 additions and 2 deletions

View file

@@ -26,8 +26,19 @@ def getwords(s):
     # We decompose the string so that ascii letters with accents can be part of the word.
     s = normalize("NFD", s)
     s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", " ").lower()
+    # logging.debug(f"DEBUG chars for: {s}\n"
+    #               f"{[c for c in s if ord(c) != 32]}\n"
+    #               f"{[ord(c) for c in s if ord(c) != 32]}")
+    # HACK We shouldn't ignore non-ascii characters altogether. Any Unicode char
+    # above common european characters that cannot be "sanitized" (ie. stripped
+    # of their accents, etc.) are preserved as is. The arbitrary limit is
+    # obtained from this one: ord("\u037e") GREEK QUESTION MARK
     s = "".join(
-        c for c in s if c in string.ascii_letters + string.digits + string.whitespace
+        c for c in s
+        if (ord(c) < 894
+            and c in string.ascii_letters + string.digits + string.whitespace
+        )
+        or ord(c) > 894
     )
     return [_f for _f in s.split(" ") if _f]  # remove empty elements
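
For reference, a hypothetical interactive session with the patched getwords in scope (the expected values are exactly the ones asserted by the tests below):

>>> getwords("foo\xe9bar")           # NFD splits é into e + a combining accent, which is then dropped
['fooebar']
>>> getwords("FOO BAR")
['foo', 'bar']
>>> getwords("é ç 0 à ö û è ¤ ù")    # ¤ (ord 164) is below the cutoff and filtered out
['e', 'c', '0', 'a', 'o', 'u', 'e', 'u']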

View file

@@ -69,6 +69,10 @@ class TestCasegetwords:
         eq_(["a", "b", "c", "d"], getwords("a b c d"))
         eq_(["a", "b", "c", "d"], getwords(" a b c d "))

     def test_unicode(self):
         eq_(["e", "c", "0", "a", "o", "u", "e", "u"], getwords("é ç 0 à ö û è ¤ ù"))
+        eq_(
+            ["02", "君のこころは輝いてるかい?", "国木田花丸", "solo", "ver"],
+            getwords("02 君のこころは輝いてるかい? 国木田花丸 Solo Ver"),
+        )

     def test_splitter_chars(self):
         eq_(
             [chr(i) for i in range(ord("a"), ord("z") + 1)],
@@ -85,7 +89,7 @@ class TestCasegetwords:
         eq_(["foo", "bar"], getwords("FOO BAR"))

     def test_decompose_unicode(self):
-        eq_(getwords("foo\xe9bar"), ["fooebar"])
+        eq_(["fooebar"], getwords("foo\xe9bar"))


 class TestCasegetfields: