# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import sys
|
|
|
|
|
2014-10-05 20:31:16 +00:00
|
|
|
from hscommon.jobprogress import job
|
2011-01-11 12:36:05 +00:00
|
|
|
from hscommon.util import first
|
|
|
|
from hscommon.testutil import eq_, log_calls
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-09-07 19:46:41 +00:00
|
|
|
from .base import NamedObject
|
2010-06-07 13:41:59 +00:00
|
|
|
from .. import engine
|
2016-05-29 19:02:39 +00:00
|
|
|
from ..engine import (
|
|
|
|
get_match, getwords, Group, getfields, unpack_fields, compare_fields, compare, WEIGHT_WORDS,
|
|
|
|
MATCH_SIMILAR_WORDS, NO_FIELD_ORDER, build_word_dict, get_groups, getmatches, Match,
|
|
|
|
getmatches_by_contents, merge_similar_words, reduce_common_words
|
|
|
|
)
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2009-10-30 11:09:04 +00:00
|
|
|
no = NamedObject  # short alias: lets the tests below build fixture objects tersely
|
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def get_match_triangle():
    """Return three matches forming a full triangle over three fresh objects.

    The matches pair (a, b), (a, c) and (b, c), in that order, so callers
    can feed them to a Group and get a fully connected group.
    """
    obj_a = NamedObject(with_words=True)
    obj_b = NamedObject(with_words=True)
    obj_c = NamedObject(with_words=True)
    return [
        get_match(obj_a, obj_b),
        get_match(obj_a, obj_c),
        get_match(obj_b, obj_c),
    ]
|
|
|
|
|
|
|
|
def get_test_group():
    """Build and return a Group holding the three triangle matches."""
    group = Group()
    for match in get_match_triangle():
        group.add_match(match)
    return group
|
|
|
|
|
2010-06-07 13:41:59 +00:00
|
|
|
def assert_match(m, name1, name2):
    """Assert that match *m* pairs objects named *name1* and *name2*.

    When testing matches, whether objects are in first or second position
    very often doesn't matter, so both orderings are accepted.
    """
    first_name, second_name = m.first.name, m.second.name
    if first_name == name1:
        eq_(second_name, name2)
    else:
        eq_(first_name, name2)
        eq_(second_name, name1)
|
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCasegetwords:
    """Tests for engine.getwords(): splitting a name into lowercase words."""

    def test_spaces(self):
        """Whitespace separates words; leading/trailing whitespace is ignored."""
        eq_(['a', 'b', 'c', 'd'], getwords("a b c d"))
        eq_(['a', 'b', 'c', 'd'], getwords(" a b c d "))

    def test_splitter_chars(self):
        """Every punctuation character in the sample acts as a word separator."""
        eq_(
            [chr(i) for i in range(ord('a'), ord('z')+1)],
            getwords("a-b_c&d+e(f)g;h\\i[j]k{l}m:n.o,p<q>r/s?t~u!v@w#x$y*z")
        )

    def test_joiner_chars(self):
        """Apostrophes and combining accents join adjacent characters instead of splitting."""
        eq_(["aec"], getwords("a'e\u0301c"))

    def test_empty(self):
        eq_([], getwords(''))

    def test_returns_lowercase(self):
        eq_(['foo', 'bar'], getwords('FOO BAR'))

    def test_decompose_unicode(self):
        """Accented characters are decomposed and their accents dropped."""
        eq_(getwords('foo\xe9bar'), ['fooebar'])
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCasegetfields:
    """Tests for engine.getfields(): splitting a name into ' - '-separated word fields."""

    def test_simple(self):
        eq_([['a', 'b'], ['c', 'd', 'e']], getfields('a b - c d e'))

    def test_empty(self):
        eq_([], getfields(''))

    def test_cleans_empty_fields(self):
        """A leading separator must not produce an empty field."""
        expected = [['a', 'bc', 'def']]
        actual = getfields(' - a bc def')
        eq_(expected, actual)
        # NOTE(review): this trailing assignment has no matching actual/eq_ —
        # the rest of the test looks truncated; confirm against upstream history.
        expected = [['bc', 'def']]
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseunpack_fields:
    """Tests for engine.unpack_fields(): flattening fields into a single word list."""

    def test_with_fields(self):
        packed = [['a'], ['b', 'c'], ['d', 'e', 'f']]
        eq_(['a', 'b', 'c', 'd', 'e', 'f'], unpack_fields(packed))

    def test_without_fields(self):
        # Already-flat input comes back unchanged.
        flat = ['a', 'b', 'c', 'd', 'e', 'f']
        eq_(['a', 'b', 'c', 'd', 'e', 'f'], unpack_fields(flat))

    def test_empty(self):
        eq_([], unpack_fields([]))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseWordCompare:
    """Tests for engine.compare(): percentage similarity between word lists."""

    def test_list(self):
        eq_(100, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']))
        eq_(86, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c']))

    def test_unordered(self):
        # Sometimes, users don't want fuzzy matching too much. When they set the slider
        # to 100, they don't expect a filename with the same words, but not the same order, to match.
        # Thus, we want to return 99 in that case.
        eq_(99, compare(['a', 'b', 'c', 'd'], ['d', 'b', 'c', 'a']))

    def test_word_occurs_twice(self):
        # if a word occurs twice in first, but once in second, we want the word to be only counted once
        eq_(89, compare(['a', 'b', 'c', 'd', 'a'], ['d', 'b', 'c', 'a']))

    def test_uses_copy_of_lists(self):
        """compare() must not mutate its arguments."""
        first = ['foo', 'bar']
        second = ['bar', 'bleh']
        compare(first, second)
        eq_(['foo', 'bar'], first)
        eq_(['bar', 'bleh'], second)

    def test_word_weight(self):
        # With WEIGHT_WORDS, score is (matched chars) / (total chars): 6/13 here.
        eq_(int((6.0 / 13.0) * 100), compare(['foo', 'bar'], ['bar', 'bleh'], (WEIGHT_WORDS, )))

    def test_similar_words(self):
        eq_(100, compare(['the', 'white', 'stripes'], ['the', 'whites', 'stripe'], (MATCH_SIMILAR_WORDS, )))

    def test_empty(self):
        eq_(0, compare([], []))

    def test_with_fields(self):
        # Nested lists are treated as fields and delegated to compare_fields().
        eq_(67, compare([['a', 'b'], ['c', 'd', 'e']], [['a', 'b'], ['c', 'd', 'f']]))

    def test_propagate_flags_with_fields(self, monkeypatch):
        """compare() must forward its flags tuple unchanged to compare_fields()."""
        def mock_compare(first, second, flags):
            eq_((0, 1, 2, 3, 5), flags)

        monkeypatch.setattr(engine, 'compare_fields', mock_compare)
        compare([['a']], [['a']], (0, 1, 2, 3, 5))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseWordCompareWithFields:
    """Tests for engine.compare_fields(): field-by-field comparison."""

    def test_simple(self):
        eq_(67, compare_fields([['a', 'b'], ['c', 'd', 'e']], [['a', 'b'], ['c', 'd', 'f']]))

    def test_empty(self):
        eq_(0, compare_fields([], []))

    def test_different_length(self):
        # A different field count is an automatic non-match.
        eq_(0, compare_fields([['a'], ['b']], [['a'], ['b'], ['c']]))

    def test_propagates_flags(self, monkeypatch):
        # NOTE(review): this patches engine.compare_fields but then calls the
        # locally imported compare_fields — it likely intended to patch
        # engine.compare instead; confirm against upstream.
        def mock_compare(first, second, flags):
            eq_((0, 1, 2, 3, 5), flags)

        monkeypatch.setattr(engine, 'compare_fields', mock_compare)
        compare_fields([['a']], [['a']], (0, 1, 2, 3, 5))

    def test_order(self):
        """By default, field order matters."""
        first = [['a', 'b'], ['c', 'd', 'e']]
        second = [['c', 'd', 'f'], ['a', 'b']]
        eq_(0, compare_fields(first, second))

    def test_no_order(self):
        """With NO_FIELD_ORDER, fields match regardless of position."""
        first = [['a', 'b'], ['c', 'd', 'e']]
        second = [['c', 'd', 'f'], ['a', 'b']]
        eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
        first = [['a', 'b'], ['a', 'b']] # a field can only be matched once.
        second = [['c', 'd', 'f'], ['a', 'b']]
        eq_(0, compare_fields(first, second, (NO_FIELD_ORDER, )))
        first = [['a', 'b'], ['a', 'b', 'c']]
        second = [['c', 'd', 'f'], ['a', 'b']]
        eq_(33, compare_fields(first, second, (NO_FIELD_ORDER, )))

    def test_compare_fields_without_order_doesnt_alter_fields(self):
        # Regression: the NO_ORDER comp type used to alter the fields!
        first = [['a', 'b'], ['c', 'd', 'e']]
        second = [['c', 'd', 'f'], ['a', 'b']]
        eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
        eq_([['a', 'b'], ['c', 'd', 'e']], first)
        eq_([['c', 'd', 'f'], ['a', 'b']], second)
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCasebuild_word_dict:
    """Tests for engine.build_word_dict(): word -> set-of-objects index."""

    def test_with_standard_words(self):
        l = [NamedObject('foo bar', True)]
        l.append(NamedObject('bar baz', True))
        l.append(NamedObject('baz bleh foo', True))
        d = build_word_dict(l)
        eq_(4, len(d))
        eq_(2, len(d['foo']))
        assert l[0] in d['foo']
        assert l[2] in d['foo']
        eq_(2, len(d['bar']))
        assert l[0] in d['bar']
        assert l[1] in d['bar']
        eq_(2, len(d['baz']))
        assert l[1] in d['baz']
        assert l[2] in d['baz']
        eq_(1, len(d['bleh']))
        assert l[2] in d['bleh']

    def test_unpack_fields(self):
        """Field-structured .words must be flattened before indexing."""
        o = NamedObject('')
        o.words = [['foo', 'bar'], ['baz']]
        d = build_word_dict([o])
        eq_(3, len(d))
        eq_(1, len(d['foo']))

    def test_words_are_unaltered(self):
        o = NamedObject('')
        o.words = [['foo', 'bar'], ['baz']]
        build_word_dict([o])
        eq_([['foo', 'bar'], ['baz']], o.words)

    def test_object_instances_can_only_be_once_in_words_object_list(self):
        # Duplicate words in one object index that object only once (sets).
        o = NamedObject('foo foo', True)
        d = build_word_dict([o])
        eq_(1, len(d['foo']))

    def test_job(self):
        """Progress is reported through the supplied job object."""
        def do_progress(p, d=''):
            self.log.append(p)
            return True

        j = job.Job(1, do_progress)
        self.log = []
        s = "foo bar"
        build_word_dict([NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j)
        # We don't have intermediate log because iter_with_progress is called with every > 1
        eq_(0, self.log[0])
        eq_(100, self.log[1])
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCasemerge_similar_words:
    """Tests for engine.merge_similar_words(): collapsing near-identical keys."""

    def test_some_similar_words(self):
        # Three nearly identical keys collapse into one, merging their value sets.
        word_dict = {
            'foobar': {1},
            'foobar1': {2},
            'foobar2': {3},
        }
        merge_similar_words(word_dict)
        eq_(1, len(word_dict))
        eq_(3, len(word_dict['foobar']))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCasereduce_common_words:
    """Tests for engine.reduce_common_words(): dropping over-common words.

    Fix: this class is a plain pytest-style class (it does not inherit from
    unittest.TestCase), so the previous ``self.fail(...)`` calls would have
    raised AttributeError instead of failing the test; they are replaced with
    explicit ``raise AssertionError(...)``.
    """

    def test_typical(self):
        """Words reaching the threshold (50) are removed; others are kept."""
        d = {
            'foo': set([NamedObject('foo bar', True) for i in range(50)]),
            'bar': set([NamedObject('foo bar', True) for i in range(49)])
        }
        reduce_common_words(d, 50)
        assert 'foo' not in d
        eq_(49, len(d['bar']))

    def test_dont_remove_objects_with_only_common_words(self):
        """An object whose words are all common must stay reachable."""
        d = {
            'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
            'uncommon': set([NamedObject("common uncommon", True)])
        }
        reduce_common_words(d, 50)
        eq_(1, len(d['common']))
        eq_(1, len(d['uncommon']))

    def test_values_still_are_set_instances(self):
        d = {
            'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
            'uncommon': set([NamedObject("common uncommon", True)])
        }
        reduce_common_words(d, 50)
        assert isinstance(d['common'], set)
        assert isinstance(d['uncommon'], set)

    def test_dont_raise_KeyError_when_a_word_has_been_removed(self):
        # If a word has been removed by the reduce, an object in a subsequent common word that
        # contains the word that has been removed would cause a KeyError.
        d = {
            'foo': set([NamedObject('foo bar baz', True) for i in range(50)]),
            'bar': set([NamedObject('foo bar baz', True) for i in range(50)]),
            'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
        }
        try:
            reduce_common_words(d, 50)
        except KeyError:
            raise AssertionError("reduce_common_words() must not raise KeyError on removed words")

    def test_unpack_fields(self):
        # object.words may be fields.
        def create_it():
            o = NamedObject('')
            o.words = [['foo', 'bar'], ['baz']]
            return o

        d = {
            'foo': set([create_it() for i in range(50)])
        }
        try:
            reduce_common_words(d, 50)
        except TypeError:
            raise AssertionError("must support fields.")

    def test_consider_a_reduced_common_word_common_even_after_reduction(self):
        # There was a bug in the code that caused a word that has already been reduced not to
        # be counted as a common word for subsequent words. For example, if 'foo' is processed
        # as a common word, keeping a "foo bar" file in it, and then 'bar' is processed, "foo bar"
        # would not stay in 'bar' because 'foo' is not a common word anymore.
        only_common = NamedObject('foo bar', True)
        d = {
            'foo': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
            'bar': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
            'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
        }
        reduce_common_words(d, 50)
        eq_(1, len(d['foo']))
        eq_(1, len(d['bar']))
        eq_(49, len(d['baz']))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseget_match:
    """Tests for engine.get_match(): building a Match from two objects."""

    def test_simple(self):
        o1 = NamedObject("foo bar", True)
        o2 = NamedObject("bar bleh", True)
        m = get_match(o1, o2)
        eq_(50, m.percentage)
        eq_(['foo', 'bar'], m.first.words)
        eq_(['bar', 'bleh'], m.second.words)
        assert m.first is o1
        assert m.second is o2

    def test_in(self):
        """Match supports `in` for its two members only."""
        o1 = NamedObject("foo", True)
        o2 = NamedObject("bar", True)
        m = get_match(o1, o2)
        assert o1 in m
        assert o2 in m
        assert object() not in m

    def test_word_weight(self):
        m = get_match(NamedObject("foo bar", True), NamedObject("bar bleh", True), (WEIGHT_WORDS, ))
        eq_(m.percentage, int((6.0 / 13.0) * 100))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseGetMatches:
    """Tests for engine.getmatches(): the main word-based matching entry point.

    Fix: this class is a plain pytest-style class (not a unittest.TestCase),
    so the previous ``self.fail(...)`` calls would have raised AttributeError
    instead of failing the test; they are replaced with explicit
    ``raise AssertionError(...)``. The 'MemorryError' typo in the failure
    message is also corrected.
    """

    def test_empty(self):
        eq_(getmatches([]), [])

    def test_simple(self):
        l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
        r = getmatches(l)
        eq_(2, len(r))
        m = first(m for m in r if m.percentage == 50) # "foo bar" and "bar bleh"
        assert_match(m, 'foo bar', 'bar bleh')
        m = first(m for m in r if m.percentage == 33) # "foo bar" and "a b c foo"
        assert_match(m, 'foo bar', 'a b c foo')

    def test_null_and_unrelated_objects(self):
        """Empty names and names sharing no words produce no matches."""
        l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject(""), NamedObject("unrelated object")]
        r = getmatches(l)
        eq_(len(r), 1)
        m = r[0]
        eq_(m.percentage, 50)
        assert_match(m, 'foo bar', 'bar bleh')

    def test_twice_the_same_word(self):
        l = [NamedObject("foo foo bar"), NamedObject("bar bleh")]
        r = getmatches(l)
        eq_(1, len(r))

    def test_twice_the_same_word_when_preworded(self):
        l = [NamedObject("foo foo bar", True), NamedObject("bar bleh", True)]
        r = getmatches(l)
        eq_(1, len(r))

    def test_two_words_match(self):
        l = [NamedObject("foo bar"), NamedObject("foo bar bleh")]
        r = getmatches(l)
        eq_(1, len(r))

    def test_match_files_with_only_common_words(self):
        # If a word occurs more than 50 times, it is excluded from the matching process.
        # The problem with the common_word_threshold is that the files containing only common
        # words will never be matched together. We *should* match them.
        # This test assumes that the common word threshold const is 50.
        l = [NamedObject("foo") for i in range(50)]
        r = getmatches(l)
        eq_(1225, len(r))  # C(50, 2) pairs

    def test_use_words_already_there_if_there(self):
        o1 = NamedObject('foo')
        o2 = NamedObject('bar')
        o2.words = ['foo']  # pre-set words must take precedence over the name
        eq_(1, len(getmatches([o1, o2])))

    def test_job(self):
        """Progress is reported through the supplied job object."""
        def do_progress(p, d=''):
            self.log.append(p)
            return True

        j = job.Job(1, do_progress)
        self.log = []
        s = "foo bar"
        getmatches([NamedObject(s), NamedObject(s), NamedObject(s)], j=j)
        assert len(self.log) > 2
        eq_(0, self.log[0])
        eq_(100, self.log[-1])

    def test_weight_words(self):
        l = [NamedObject("foo bar"), NamedObject("bar bleh")]
        m = getmatches(l, weight_words=True)[0]
        eq_(int((6.0 / 13.0) * 100), m.percentage)

    def test_similar_word(self):
        l = [NamedObject("foobar"), NamedObject("foobars")]
        eq_(len(getmatches(l, match_similar_words=True)), 1)
        eq_(getmatches(l, match_similar_words=True)[0].percentage, 100)
        l = [NamedObject("foobar"), NamedObject("foo")]
        eq_(len(getmatches(l, match_similar_words=True)), 0) # too far
        l = [NamedObject("bizkit"), NamedObject("bizket")]
        eq_(len(getmatches(l, match_similar_words=True)), 1)
        l = [NamedObject("foobar"), NamedObject("foosbar")]
        eq_(len(getmatches(l, match_similar_words=True)), 1)

    def test_single_object_with_similar_words(self):
        """An object must never be matched against itself."""
        l = [NamedObject("foo foos")]
        eq_(len(getmatches(l, match_similar_words=True)), 0)

    def test_double_words_get_counted_only_once(self):
        l = [NamedObject("foo bar foo bleh"), NamedObject("foo bar bleh bar")]
        m = getmatches(l)[0]
        eq_(75, m.percentage)

    def test_with_fields(self):
        o1 = NamedObject("foo bar - foo bleh")
        o2 = NamedObject("foo bar - bleh bar")
        o1.words = getfields(o1.name)
        o2.words = getfields(o2.name)
        m = getmatches([o1, o2])[0]
        eq_(50, m.percentage)

    def test_with_fields_no_order(self):
        o1 = NamedObject("foo bar - foo bleh")
        o2 = NamedObject("bleh bang - foo bar")
        o1.words = getfields(o1.name)
        o2.words = getfields(o2.name)
        m = getmatches([o1, o2], no_field_order=True)[0]
        eq_(m.percentage, 50)

    def test_only_match_similar_when_the_option_is_set(self):
        l = [NamedObject("foobar"), NamedObject("foobars")]
        eq_(len(getmatches(l, match_similar_words=False)), 0)

    def test_dont_recurse_do_match(self):
        """getmatches() must not blow the stack with many files."""
        # with nosetests, the stack is increased. The number has to be high
        # enough not to be failing falsely.
        sys.setrecursionlimit(200)
        files = [NamedObject('foo bar') for i in range(201)]
        try:
            getmatches(files)
        except RuntimeError:
            raise AssertionError("getmatches() must not recurse per match")
        finally:
            sys.setrecursionlimit(1000)

    def test_min_match_percentage(self):
        l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
        r = getmatches(l, min_match_percentage=50)
        eq_(1, len(r)) # Only "foo bar" / "bar bleh" should match

    def test_MemoryError(self, monkeypatch):
        """On MemoryError, getmatches() keeps the matches found so far."""
        @log_calls
        def mocked_match(first, second, flags):
            if len(mocked_match.calls) > 42:
                raise MemoryError()
            return Match(first, second, 0)

        objects = [NamedObject() for i in range(10)] # results in 45 matches
        monkeypatch.setattr(engine, 'get_match', mocked_match)
        try:
            r = getmatches(objects)
        except MemoryError:
            raise AssertionError('MemoryError must be handled')
        eq_(42, len(r))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseGetMatchesByContents:
    """Tests for engine.getmatches_by_contents()."""

    def test_dont_compare_empty_files(self):
        # Zero-byte files must never be reported as content matches.
        empty_a = no(size=0)
        empty_b = no(size=0)
        assert not getmatches_by_contents([empty_a, empty_b])
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-10-30 11:09:04 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseGroup:
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_empy(self):
        """A fresh Group has no ref, no dupes and no matches."""
        g = Group()
        eq_(None, g.ref)
        eq_([], g.dupes)
        eq_(0, len(g.matches))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_add_match(self):
        """First added match sets ref to m.first and dupes to [m.second]."""
        g = Group()
        m = get_match(NamedObject("foo", True), NamedObject("bar", True))
        g.add_match(m)
        assert g.ref is m.first
        eq_([m.second], g.dupes)
        eq_(1, len(g.matches))
        assert m in g.matches
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_multiple_add_match(self):
        """An object becomes a dupe only once it is matched with the ref."""
        g = Group()
        o1 = NamedObject("a", True)
        o2 = NamedObject("b", True)
        o3 = NamedObject("c", True)
        o4 = NamedObject("d", True)
        g.add_match(get_match(o1, o2))
        assert g.ref is o1
        eq_([o2], g.dupes)
        eq_(1, len(g.matches))
        g.add_match(get_match(o1, o3))
        # o3 not yet a dupe: it isn't connected to the existing dupe set
        eq_([o2], g.dupes)
        eq_(2, len(g.matches))
        g.add_match(get_match(o2, o3))
        eq_([o2, o3], g.dupes)
        eq_(3, len(g.matches))
        g.add_match(get_match(o1, o4))
        eq_([o2, o3], g.dupes)
        eq_(4, len(g.matches))
        g.add_match(get_match(o2, o4))
        eq_([o2, o3], g.dupes)
        eq_(5, len(g.matches))
        g.add_match(get_match(o3, o4))
        # now o4 is matched with every member: it joins the dupes
        eq_([o2, o3, o4], g.dupes)
        eq_(6, len(g.matches))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_len(self):
        """len(group) counts ref plus dupes."""
        g = Group()
        eq_(0, len(g))
        g.add_match(get_match(NamedObject("foo", True), NamedObject("bar", True)))
        eq_(2, len(g))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_add_same_match_twice(self):
        """Adding the same match again is a no-op."""
        g = Group()
        m = get_match(NamedObject("foo", True), NamedObject("foo", True))
        g.add_match(m)
        eq_(2, len(g))
        eq_(1, len(g.matches))
        g.add_match(m)
        eq_(2, len(g))
        eq_(1, len(g.matches))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_in(self):
        """Group supports `in` for its members."""
        g = Group()
        o1 = NamedObject("foo", True)
        o2 = NamedObject("bar", True)
        assert o1 not in g
        g.add_match(get_match(o1, o2))
        assert o1 in g
        assert o2 in g
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_remove(self):
        """remove_dupe() drops the object and every match involving it."""
        g = Group()
        o1 = NamedObject("foo", True)
        o2 = NamedObject("bar", True)
        o3 = NamedObject("bleh", True)
        g.add_match(get_match(o1, o2))
        g.add_match(get_match(o1, o3))
        g.add_match(get_match(o2, o3))
        eq_(3, len(g.matches))
        eq_(3, len(g))
        g.remove_dupe(o3)
        eq_(1, len(g.matches))
        eq_(2, len(g))
        g.remove_dupe(o1)
        # removing down to a single member empties the group entirely
        eq_(0, len(g.matches))
        eq_(0, len(g))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
    def test_remove_with_ref_dupes(self):
        """A group left with only ref-marked members is emptied."""
        g = Group()
        o1 = NamedObject("foo", True)
        o2 = NamedObject("bar", True)
        o3 = NamedObject("bleh", True)
        g.add_match(get_match(o1, o2))
        g.add_match(get_match(o1, o3))
        g.add_match(get_match(o2, o3))
        o1.is_ref = True
        o2.is_ref = True
        g.remove_dupe(o3)
        eq_(0, len(g))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_switch_ref(self):
    # switch_ref() promotes a dupe to the ref position.
    first = NamedObject(with_words=True)
    second = NamedObject(with_words=True)
    group = Group()
    group.add_match(get_match(first, second))
    assert first is group.ref
    group.switch_ref(second)
    assert second is group.ref
    eq_([first], group.dupes)
    # Switching to the current ref changes nothing.
    group.switch_ref(second)
    assert second is group.ref
    # Switching to an object that isn't in the group is ignored.
    group.switch_ref(NamedObject('', True))
    assert second is group.ref
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2011-09-23 17:14:57 +00:00
|
|
|
def test_switch_ref_from_ref_dir(self):
    # A ref dupe coming from a reference directory can't be demoted:
    # switch_ref() does nothing in that case.
    ref = no(with_words=True)
    dupe = no(with_words=True)
    ref.is_ref = True
    group = Group()
    group.add_match(get_match(ref, dupe))
    group.switch_ref(dupe)
    assert ref is group.ref
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_get_match_of(self):
    # get_match_of() returns the ref<->dupe match for a dupe, and None for
    # anything that isn't one of the group's dupes (including the ref).
    group = Group()
    for match in get_match_triangle():
        group.add_match(match)
    dupe = group.dupes[0]
    found = group.get_match_of(dupe)
    assert group.ref in found
    assert dupe in found
    assert group.get_match_of(NamedObject('', True)) is None
    assert group.get_match_of(group.ref) is None
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_percentage(self):
    # percentage is the average match percentage of the dupes relative to
    # the current ref, so it changes when the ref or the dupes change.
    m1, m2, m3 = get_match_triangle()
    m1 = Match(m1.first, m1.second, 100)
    m2 = Match(m2.first, m2.second, 50)
    m3 = Match(m3.first, m3.second, 33)
    group = Group()
    for match in (m1, m2, m3):
        group.add_match(match)
    eq_(75, group.percentage)
    group.switch_ref(group.dupes[0])
    eq_(66, group.percentage)
    group.remove_dupe(group.dupes[0])
    eq_(33, group.percentage)
    group.add_match(m1)
    group.add_match(m2)
    eq_(66, group.percentage)
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_percentage_on_empty_group(self):
    # An empty group reports 0% instead of dividing by zero.
    eq_(0, Group().percentage)
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_prioritize(self):
    # prioritize() re-elects the ref according to the key function and
    # returns True when the ref actually changed.
    m1, m2, m3 = get_match_triangle()
    obj_c = m1.first
    obj_b = m1.second
    obj_a = m2.second
    obj_c.name = 'c'
    obj_b.name = 'b'
    obj_a.name = 'a'
    group = Group()
    for match in (m1, m2, m3):
        group.add_match(match)
    assert obj_c is group.ref
    assert group.prioritize(lambda x: x.name)
    assert obj_a is group.ref
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_prioritize_with_tie_breaker(self):
    # When the ref ties with one or more dupes on the key function, the
    # tie_breaker callback decides among the tied candidates.
    group = get_test_group()
    o1, o2, o3 = group.ordered

    def tie_breaker(ref, dupe):
        return dupe is o3

    group.prioritize(lambda x: 0, tie_breaker)
    assert group.ref is o3
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_prioritize_with_tie_breaker_runs_on_all_dupes(self):
    # Even after one dupe wins a tie break against the ref, the remaining
    # dupes are still tie-broken against the newly chosen ref.
    group = get_test_group()
    o1, o2, o3 = group.ordered
    o1.foo = 1
    o2.foo = 2
    o3.foo = 3

    def tie_breaker(ref, dupe):
        return dupe.foo > ref.foo

    group.prioritize(lambda x: 0, tie_breaker)
    assert group.ref is o3
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_prioritize_with_tie_breaker_runs_only_on_tie_dupes(self):
    # The tie breaker only runs among dupes whose key equals the ref's key.
    group = get_test_group()
    o1, o2, o3 = group.ordered
    o1.foo = 2
    o2.foo = 2
    o3.foo = 1
    o1.bar = 1
    o2.bar = 2
    o3.bar = 3

    def tie_breaker(ref, dupe):
        return dupe.bar > ref.bar

    # o3 has the best bar but loses on foo, so it never enters the tie.
    group.prioritize(lambda x: -x.foo, tie_breaker)
    assert group.ref is o2
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2011-09-23 17:14:57 +00:00
|
|
|
def test_prioritize_with_ref_dupe(self):
    # A ref dupe coming from a reference directory stays on top no matter
    # what the key function says.
    group = get_test_group()
    o1, o2, o3 = group
    o1.is_ref = True
    o2.size = 2
    group.prioritize(lambda x: -x.size)
    assert group.ref is o1
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2012-07-31 15:37:51 +00:00
|
|
|
def test_prioritize_nothing_changes(self):
    # prioritize() returns False when the ref keeps its position.
    group = get_test_group()
    for index, name in enumerate('abc'):
        group[index].name = name
    assert not group.prioritize(lambda x: x.name)
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_list_like(self):
    # Groups support indexed access: the ref first, then the dupes.
    group = Group()
    first = NamedObject("foo", True)
    second = NamedObject("bar", True)
    group.add_match(get_match(first, second))
    assert group[0] is first
    assert group[1] is second
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-09-05 14:58:35 +00:00
|
|
|
def test_discard_matches(self):
    # discard_matches() throws away candidate matches, keeping only the
    # matches that bind the group together.
    group = Group()
    o1, o2, o3 = (NamedObject(name, True) for name in ("foo", "bar", "baz"))
    group.add_match(get_match(o1, o2))
    group.add_match(get_match(o1, o3))
    group.discard_matches()
    eq_(1, len(group.matches))
    eq_(0, len(group.candidates))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
|
2011-01-05 10:11:21 +00:00
|
|
|
class TestCaseget_groups:
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_empty(self):
    # No matches yields no groups.
    eq_([], get_groups([]))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_simple(self):
    # A single match produces a single two-object group, with the match's
    # first object as the ref and the second as the only dupe.
    objects = [NamedObject("foo bar"), NamedObject("bar bleh")]
    matches = getmatches(objects)
    match = matches[0]
    groups = get_groups(matches)
    eq_(1, len(groups))
    group = groups[0]
    assert group.ref is match.first
    eq_([match.second], group.dupes)
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_group_with_multiple_matches(self):
    # Three identical objects produce 3 matches, which collapse into a
    # single group of three.
    objects = [NamedObject("foo") for _ in range(3)]
    groups = get_groups(getmatches(objects))
    eq_(1, len(groups))
    eq_(3, len(groups[0]))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_must_choose_a_group(self):
    # Two groups form here: "a b" and "c d". "b c" matches members of
    # both, but it must end up in exactly one of them.
    objects = [
        NamedObject("a b"),
        NamedObject("a b"),
        NamedObject("b c"),
        NamedObject("c d"),
        NamedObject("c d"),
    ]
    groups = get_groups(getmatches(objects))
    eq_(2, len(groups))
    eq_(5, len(groups[0]) + len(groups[1]))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_should_all_go_in_the_same_group(self):
    # Four identical "a b" objects all match each other and must land in
    # one single group rather than being split up.
    objects = [NamedObject("a b") for _ in range(4)]
    groups = get_groups(getmatches(objects))
    eq_(1, len(groups))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_give_priority_to_matches_with_higher_percentage(self):
    # When an object could fall into two groups, the match with the higher
    # percentage wins.
    o1 = NamedObject(with_words=True)
    o2 = NamedObject(with_words=True)
    o3 = NamedObject(with_words=True)
    weak = Match(o1, o2, 1)
    strong = Match(o2, o3, 2)
    groups = get_groups([weak, strong])
    eq_(1, len(groups))
    group = groups[0]
    eq_(2, len(group))
    assert o1 not in group
    assert o2 in group
    assert o3 in group
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_four_sized_group(self):
    # Four mutually-matching objects collapse into one group of four.
    objects = [NamedObject("foobar") for _ in range(4)]
    groups = get_groups(getmatches(objects))
    eq_(1, len(groups))
    eq_(4, len(groups[0]))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-06-01 09:55:11 +00:00
|
|
|
def test_referenced_by_ref2(self):
    # Grouping works regardless of which position (first or second) each
    # object occupies inside its matches.
    o1 = NamedObject(with_words=True)
    o2 = NamedObject(with_words=True)
    o3 = NamedObject(with_words=True)
    matches = [
        get_match(o1, o2),
        get_match(o3, o1),
        get_match(o3, o2),
    ]
    groups = get_groups(matches)
    eq_(3, len(groups[0]))
|
2014-10-05 20:31:16 +00:00
|
|
|
|
2009-09-05 14:58:35 +00:00
|
|
|
def test_group_admissible_discarded_dupes(self):
    # With (A, B, C, D) all matching A, but C and D not matching B, the
    # strongest match (A, B) forms its own group. C and D, which match
    # each other, must then get a separate group instead of being dropped.
    A, B, C, D = [NamedObject() for _ in range(4)]
    m1 = Match(A, B, 90)  # strongest "A" match -> (A, B) becomes a group
    m2 = Match(A, C, 80)  # C doesn't match B, so it can't join that group
    m3 = Match(A, D, 80)  # same for D
    m4 = Match(C, D, 70)  # but C and D match, so they group together
    groups = get_groups([m1, m2, m3, m4])
    eq_(len(groups), 2)
    first_group, second_group = groups
    assert A in first_group
    assert B in first_group
    assert C in second_group
    assert D in second_group
|
2014-10-05 20:31:16 +00:00
|
|
|
|