Mirror of https://github.com/arsenetar/dupeguru.git, synced 2025-05-08 09:49:51 +00:00
Apply flake8 checks to tests
commit 130581db53 (parent 9ed4b7abf0)
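All of the changes below are mechanical style fixes reported by flake8 on the test suite: missing whitespace after commas (E231), star imports replaced by explicit import lists (F403/F405), imports kept only for re-export marked with # noqa (F401), a few unused local variables removed (F841), and some overlong single-line literals split across lines. As an illustration of the dominant pattern, taken from the first hunk below rather than added to the commit:

    # flake8 E231: missing whitespace after ','
    self.objects,self.matches,self.groups = GetTestGroups()
    # fixed
    self.objects, self.matches, self.groups = GetTestGroups()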
@@ -160,7 +160,7 @@ class TestCaseDupeGuruWithResults:
     def pytest_funcarg__do_setup(self, request):
         app = TestApp()
         self.app = app.app
-        self.objects,self.matches,self.groups = GetTestGroups()
+        self.objects, self.matches, self.groups = GetTestGroups()
         self.app.results.groups = self.groups
         self.dpanel = app.dpanel
         self.dtree = app.dtree
@@ -273,7 +273,6 @@ class TestCaseDupeGuruWithResults:
         # When marking selected dupes with a heterogenous selection, mark all selected dupes. When
         # it's homogenous, simply toggle.
         app = self.app
-        objects = self.objects
         self.rtable.select([1])
         app.toggle_selected_mark_state()
         # index 0 is unmarkable, but we throw it in the bunch to be sure that it doesn't make the
@@ -358,19 +357,19 @@ class TestCaseDupeGuruWithResults:
         open(p1, 'w').close()
         open(p2, 'w').close()
         dne = '/does_not_exist'
-        app.ignore_list.Ignore(dne,p1)
-        app.ignore_list.Ignore(p2,dne)
-        app.ignore_list.Ignore(p1,p2)
+        app.ignore_list.Ignore(dne, p1)
+        app.ignore_list.Ignore(p2, dne)
+        app.ignore_list.Ignore(p1, p2)
         app.purge_ignore_list()
-        eq_(1,len(app.ignore_list))
-        assert app.ignore_list.AreIgnored(p1,p2)
-        assert not app.ignore_list.AreIgnored(dne,p1)
+        eq_(1, len(app.ignore_list))
+        assert app.ignore_list.AreIgnored(p1, p2)
+        assert not app.ignore_list.AreIgnored(dne, p1)

     def test_only_unicode_is_added_to_ignore_list(self, do_setup):
-        def FakeIgnore(first,second):
-            if not isinstance(first,str):
+        def FakeIgnore(first, second):
+            if not isinstance(first, str):
                 self.fail()
-            if not isinstance(second,str):
+            if not isinstance(second, str):
                 self.fail()

         app = self.app
@@ -400,8 +399,6 @@ class TestCaseDupeGuruWithResults:
     def test_dont_crash_on_delta_powermarker_dupecount_sort(self, do_setup):
         # Don't crash when sorting by dupe count or percentage while delta+powermarker are enabled.
         # Ref #238
-        app = self.app
-        objects = self.objects
         self.rtable.delta_values = True
         self.rtable.power_marker = True
         self.rtable.sort('dupe_count', False)
@@ -414,11 +411,11 @@ class TestCaseDupeGuru_renameSelected:
     def pytest_funcarg__do_setup(self, request):
         tmpdir = request.getfuncargvalue('tmpdir')
         p = Path(str(tmpdir))
-        fp = open(str(p['foo bar 1']),mode='w')
+        fp = open(str(p['foo bar 1']), mode='w')
         fp.close()
-        fp = open(str(p['foo bar 2']),mode='w')
+        fp = open(str(p['foo bar 2']), mode='w')
         fp.close()
-        fp = open(str(p['foo bar 3']),mode='w')
+        fp = open(str(p['foo bar 3']), mode='w')
         fp.close()
         files = fs.get_files(p)
         for f in files:
@@ -426,7 +423,7 @@ class TestCaseDupeGuru_renameSelected:
         matches = engine.getmatches(files)
         groups = engine.get_groups(matches)
         g = groups[0]
-        g.prioritize(lambda x:x.name)
+        g.prioritize(lambda x: x.name)
         app = TestApp()
         app.app.results.groups = groups
         self.app = app.app
@@ -1,12 +1,10 @@
-# Created By: Virgil Dupras
-# Created On: 2011/09/07
-# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
+# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
 #
 # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
 # which should be included with this package. The terms are also available at
 # http://www.gnu.org/licenses/gpl-3.0.html

-from hscommon.testutil import TestApp as TestAppBase, eq_, with_app
+from hscommon.testutil import TestApp as TestAppBase, eq_, with_app # noqa
 from hscommon.path import Path
 from hscommon.util import get_file_ext, format_size
 from hscommon.gui.column import Column
@@ -15,9 +13,7 @@ from hscommon.jobprogress.job import nulljob, JobCancelled
 from .. import engine
 from .. import prioritize
 from ..engine import getwords
-from ..app import DupeGuru as DupeGuruBase, cmp_value
-from ..gui.details_panel import DetailsPanel
-from ..gui.directory_tree import DirectoryTree
+from ..app import DupeGuru as DupeGuruBase
 from ..gui.result_table import ResultTable as ResultTableBase
 from ..gui.prioritize_dialog import PrioritizeDialog

@@ -119,14 +115,20 @@ class NamedObject:
 # "ibabtu" (1)
 # "ibabtu" (1)
 def GetTestGroups():
-    objects = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject("foo bleh"),NamedObject("ibabtu"),NamedObject("ibabtu")]
+    objects = [
+        NamedObject("foo bar"),
+        NamedObject("bar bleh"),
+        NamedObject("foo bleh"),
+        NamedObject("ibabtu"),
+        NamedObject("ibabtu")
+    ]
     objects[1].size = 1024
     matches = engine.getmatches(objects) #we should have 5 matches
     groups = engine.get_groups(matches) #We should have 2 groups
     for g in groups:
-        g.prioritize(lambda x:objects.index(x)) #We want the dupes to be in the same order as the list is
+        g.prioritize(lambda x: objects.index(x)) #We want the dupes to be in the same order as the list is
     groups.sort(key=len, reverse=True) # We want the group with 3 members to be first.
-    return (objects,matches,groups)
+    return (objects, matches, groups)

 class TestApp(TestAppBase):
     def __init__(self):
@@ -137,7 +139,6 @@ class TestApp(TestAppBase):
             return gui

         TestAppBase.__init__(self)
-        make_gui = self.make_gui
         self.app = DupeGuru()
         self.default_parent = self.app
         self.rtable = link_gui(self.app.result_table)
@@ -1 +1 @@
-from hscommon.testutil import pytest_funcarg__app
+from hscommon.testutil import pytest_funcarg__app # noqa
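The # noqa marker tells flake8 to skip this line: the import looks unused (F401), but it is deliberate, since the conftest module re-exports the pytest_funcarg__app fixture so that pytest can pick it up.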
@@ -1,6 +1,4 @@
-# Created By: Virgil Dupras
-# Created On: 2006/02/27
-# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
+# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
 #
 # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
 # which should be included with this package. The terms are also available at
@@ -15,7 +13,7 @@ from pytest import raises
 from hscommon.path import Path
 from hscommon.testutil import eq_

-from ..directories import *
+from ..directories import Directories, DirectoryState, AlreadyThereError, InvalidPathError

 def create_fake_fs(rootpath):
     # We have it as a separate function because other units are using it.
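Spelling out the imported names instead of using a star import addresses flake8's F403/F405 warnings and makes it explicit which names from the directories module the tests rely on; the same treatment is applied to the engine and ignore test modules further down.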
@@ -44,6 +42,8 @@ def create_fake_fs(rootpath):
     fp.close()
     return rootpath

+testpath = None
+
 def setup_module(module):
     # In this unit, we have tests depending on two directory structure. One with only one file in it
     # and another with a more complex structure.
@@ -68,13 +68,13 @@ def test_add_path():
     d = Directories()
     p = testpath['onefile']
     d.add_path(p)
-    eq_(1,len(d))
+    eq_(1, len(d))
     assert p in d
     assert (p['foobar']) in d
     assert p.parent() not in d
     p = testpath['fs']
     d.add_path(p)
-    eq_(2,len(d))
+    eq_(2, len(d))
     assert p in d

 def test_AddPath_when_path_is_already_there():
@@ -96,14 +96,14 @@ def test_add_path_containing_paths_already_there():
     eq_(d[0], testpath)

 def test_AddPath_non_latin(tmpdir):
     p = Path(str(tmpdir))
     to_add = p['unicode\u201a']
     os.mkdir(str(to_add))
     d = Directories()
     try:
         d.add_path(to_add)
     except UnicodeDecodeError:
         assert False

 def test_del():
     d = Directories()
@@ -121,13 +121,13 @@ def test_states():
     d = Directories()
     p = testpath['onefile']
     d.add_path(p)
-    eq_(DirectoryState.Normal ,d.get_state(p))
+    eq_(DirectoryState.Normal, d.get_state(p))
     d.set_state(p, DirectoryState.Reference)
-    eq_(DirectoryState.Reference ,d.get_state(p))
-    eq_(DirectoryState.Reference ,d.get_state(p['dir1']))
-    eq_(1,len(d.states))
-    eq_(p,list(d.states.keys())[0])
-    eq_(DirectoryState.Reference ,d.states[p])
+    eq_(DirectoryState.Reference, d.get_state(p))
+    eq_(DirectoryState.Reference, d.get_state(p['dir1']))
+    eq_(1, len(d.states))
+    eq_(p, list(d.states.keys())[0])
+    eq_(DirectoryState.Reference, d.states[p])

 def test_get_state_with_path_not_there():
     # When the path's not there, just return DirectoryState.Normal
@@ -199,8 +199,8 @@ def test_save_and_load(tmpdir):
     d1.save_to_file(tmpxml)
     d2.load_from_file(tmpxml)
     eq_(2, len(d2))
-    eq_(DirectoryState.Reference ,d2.get_state(p1))
-    eq_(DirectoryState.Excluded ,d2.get_state(p1['dir1']))
+    eq_(DirectoryState.Reference, d2.get_state(p1))
+    eq_(DirectoryState.Excluded, d2.get_state(p1['dir1']))

 def test_invalid_path():
     d = Directories()
@@ -1,6 +1,4 @@
-# Created By: Virgil Dupras
-# Created On: 2006/01/29
-# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
+# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
 #
 # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
 # which should be included with this package. The terms are also available at
@@ -14,7 +12,11 @@ from hscommon.testutil import eq_, log_calls

 from .base import NamedObject
 from .. import engine
-from ..engine import *
+from ..engine import (
+    get_match, getwords, Group, getfields, unpack_fields, compare_fields, compare, WEIGHT_WORDS,
+    MATCH_SIMILAR_WORDS, NO_FIELD_ORDER, build_word_dict, get_groups, getmatches, Match,
+    getmatches_by_contents, merge_similar_words, reduce_common_words
+)

 no = NamedObject

@@ -22,9 +24,9 @@ def get_match_triangle():
     o1 = NamedObject(with_words=True)
     o2 = NamedObject(with_words=True)
     o3 = NamedObject(with_words=True)
-    m1 = get_match(o1,o2)
-    m2 = get_match(o1,o3)
-    m3 = get_match(o2,o3)
+    m1 = get_match(o1, o2)
+    m2 = get_match(o1, o3)
+    m3 = get_match(o2, o3)
     return [m1, m2, m3]

 def get_test_group():
@@ -51,7 +53,7 @@ class TestCasegetwords:

     def test_splitter_chars(self):
         eq_(
-            [chr(i) for i in range(ord('a'),ord('z')+1)],
+            [chr(i) for i in range(ord('a'), ord('z')+1)],
             getwords("a-b_c&d+e(f)g;h\\i[j]k{l}m:n.o,p<q>r/s?t~u!v@w#x$y*z")
         )

@@ -99,8 +101,8 @@ class TestCaseunpack_fields:

 class TestCaseWordCompare:
     def test_list(self):
-        eq_(100, compare(['a', 'b', 'c', 'd'],['a', 'b', 'c', 'd']))
-        eq_(86, compare(['a', 'b', 'c', 'd'],['a', 'b', 'c']))
+        eq_(100, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']))
+        eq_(86, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c']))

     def test_unordered(self):
         #Sometimes, users don't want fuzzy matching too much When they set the slider
@@ -123,7 +125,7 @@ class TestCaseWordCompare:
         eq_(int((6.0 / 13.0) * 100), compare(['foo', 'bar'], ['bar', 'bleh'], (WEIGHT_WORDS, )))

     def test_similar_words(self):
-        eq_(100, compare(['the', 'white', 'stripes'],['the', 'whites', 'stripe'], (MATCH_SIMILAR_WORDS, )))
+        eq_(100, compare(['the', 'white', 'stripes'], ['the', 'whites', 'stripe'], (MATCH_SIMILAR_WORDS, )))

     def test_empty(self):
         eq_(0, compare([], []))
@@ -154,7 +156,7 @@ class TestCaseWordCompareWithFields:
             eq_((0, 1, 2, 3, 5), flags)

         monkeypatch.setattr(engine, 'compare_fields', mock_compare)
-        compare_fields([['a']], [['a']],(0, 1, 2, 3, 5))
+        compare_fields([['a']], [['a']], (0, 1, 2, 3, 5))

     def test_order(self):
         first = [['a', 'b'], ['c', 'd', 'e']]
@ -162,124 +164,124 @@ class TestCaseWordCompareWithFields:
|
|||||||
eq_(0, compare_fields(first, second))
|
eq_(0, compare_fields(first, second))
|
||||||
|
|
||||||
def test_no_order(self):
|
def test_no_order(self):
|
||||||
first = [['a','b'],['c','d','e']]
|
first = [['a', 'b'], ['c', 'd', 'e']]
|
||||||
second = [['c','d','f'],['a','b']]
|
second = [['c', 'd', 'f'], ['a', 'b']]
|
||||||
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
||||||
first = [['a','b'],['a','b']] #a field can only be matched once.
|
first = [['a', 'b'], ['a', 'b']] #a field can only be matched once.
|
||||||
second = [['c','d','f'],['a','b']]
|
second = [['c', 'd', 'f'], ['a', 'b']]
|
||||||
eq_(0, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
eq_(0, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
||||||
first = [['a','b'],['a','b','c']]
|
first = [['a', 'b'], ['a', 'b', 'c']]
|
||||||
second = [['c','d','f'],['a','b']]
|
second = [['c', 'd', 'f'], ['a', 'b']]
|
||||||
eq_(33, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
eq_(33, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
||||||
|
|
||||||
def test_compare_fields_without_order_doesnt_alter_fields(self):
|
def test_compare_fields_without_order_doesnt_alter_fields(self):
|
||||||
#The NO_ORDER comp type altered the fields!
|
#The NO_ORDER comp type altered the fields!
|
||||||
first = [['a','b'],['c','d','e']]
|
first = [['a', 'b'], ['c', 'd', 'e']]
|
||||||
second = [['c','d','f'],['a','b']]
|
second = [['c', 'd', 'f'], ['a', 'b']]
|
||||||
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
|
||||||
eq_([['a','b'],['c','d','e']],first)
|
eq_([['a', 'b'], ['c', 'd', 'e']], first)
|
||||||
eq_([['c','d','f'],['a','b']],second)
|
eq_([['c', 'd', 'f'], ['a', 'b']], second)
|
||||||
|
|
||||||
|
|
||||||
class TestCasebuild_word_dict:
|
class TestCasebuild_word_dict:
|
||||||
def test_with_standard_words(self):
|
def test_with_standard_words(self):
|
||||||
l = [NamedObject('foo bar',True)]
|
l = [NamedObject('foo bar', True)]
|
||||||
l.append(NamedObject('bar baz',True))
|
l.append(NamedObject('bar baz', True))
|
||||||
l.append(NamedObject('baz bleh foo',True))
|
l.append(NamedObject('baz bleh foo', True))
|
||||||
d = build_word_dict(l)
|
d = build_word_dict(l)
|
||||||
eq_(4,len(d))
|
eq_(4, len(d))
|
||||||
eq_(2,len(d['foo']))
|
eq_(2, len(d['foo']))
|
||||||
assert l[0] in d['foo']
|
assert l[0] in d['foo']
|
||||||
assert l[2] in d['foo']
|
assert l[2] in d['foo']
|
||||||
eq_(2,len(d['bar']))
|
eq_(2, len(d['bar']))
|
||||||
assert l[0] in d['bar']
|
assert l[0] in d['bar']
|
||||||
assert l[1] in d['bar']
|
assert l[1] in d['bar']
|
||||||
eq_(2,len(d['baz']))
|
eq_(2, len(d['baz']))
|
||||||
assert l[1] in d['baz']
|
assert l[1] in d['baz']
|
||||||
assert l[2] in d['baz']
|
assert l[2] in d['baz']
|
||||||
eq_(1,len(d['bleh']))
|
eq_(1, len(d['bleh']))
|
||||||
assert l[2] in d['bleh']
|
assert l[2] in d['bleh']
|
||||||
|
|
||||||
def test_unpack_fields(self):
|
def test_unpack_fields(self):
|
||||||
o = NamedObject('')
|
o = NamedObject('')
|
||||||
o.words = [['foo','bar'],['baz']]
|
o.words = [['foo', 'bar'], ['baz']]
|
||||||
d = build_word_dict([o])
|
d = build_word_dict([o])
|
||||||
eq_(3,len(d))
|
eq_(3, len(d))
|
||||||
eq_(1,len(d['foo']))
|
eq_(1, len(d['foo']))
|
||||||
|
|
||||||
def test_words_are_unaltered(self):
|
def test_words_are_unaltered(self):
|
||||||
o = NamedObject('')
|
o = NamedObject('')
|
||||||
o.words = [['foo','bar'],['baz']]
|
o.words = [['foo', 'bar'], ['baz']]
|
||||||
build_word_dict([o])
|
build_word_dict([o])
|
||||||
eq_([['foo','bar'],['baz']],o.words)
|
eq_([['foo', 'bar'], ['baz']], o.words)
|
||||||
|
|
||||||
def test_object_instances_can_only_be_once_in_words_object_list(self):
|
def test_object_instances_can_only_be_once_in_words_object_list(self):
|
||||||
o = NamedObject('foo foo',True)
|
o = NamedObject('foo foo', True)
|
||||||
d = build_word_dict([o])
|
d = build_word_dict([o])
|
||||||
eq_(1,len(d['foo']))
|
eq_(1, len(d['foo']))
|
||||||
|
|
||||||
def test_job(self):
|
def test_job(self):
|
||||||
def do_progress(p,d=''):
|
def do_progress(p, d=''):
|
||||||
self.log.append(p)
|
self.log.append(p)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
j = job.Job(1,do_progress)
|
j = job.Job(1, do_progress)
|
||||||
self.log = []
|
self.log = []
|
||||||
s = "foo bar"
|
s = "foo bar"
|
||||||
build_word_dict([NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j)
|
build_word_dict([NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j)
|
||||||
# We don't have intermediate log because iter_with_progress is called with every > 1
|
# We don't have intermediate log because iter_with_progress is called with every > 1
|
||||||
eq_(0,self.log[0])
|
eq_(0, self.log[0])
|
||||||
eq_(100,self.log[1])
|
eq_(100, self.log[1])
|
||||||
|
|
||||||
|
|
||||||
class TestCasemerge_similar_words:
|
class TestCasemerge_similar_words:
|
||||||
def test_some_similar_words(self):
|
def test_some_similar_words(self):
|
||||||
d = {
|
d = {
|
||||||
'foobar':set([1]),
|
'foobar': set([1]),
|
||||||
'foobar1':set([2]),
|
'foobar1': set([2]),
|
||||||
'foobar2':set([3]),
|
'foobar2': set([3]),
|
||||||
}
|
}
|
||||||
merge_similar_words(d)
|
merge_similar_words(d)
|
||||||
eq_(1,len(d))
|
eq_(1, len(d))
|
||||||
eq_(3,len(d['foobar']))
|
eq_(3, len(d['foobar']))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class TestCasereduce_common_words:
|
class TestCasereduce_common_words:
|
||||||
def test_typical(self):
|
def test_typical(self):
|
||||||
d = {
|
d = {
|
||||||
'foo': set([NamedObject('foo bar',True) for i in range(50)]),
|
'foo': set([NamedObject('foo bar', True) for i in range(50)]),
|
||||||
'bar': set([NamedObject('foo bar',True) for i in range(49)])
|
'bar': set([NamedObject('foo bar', True) for i in range(49)])
|
||||||
}
|
}
|
||||||
reduce_common_words(d, 50)
|
reduce_common_words(d, 50)
|
||||||
assert 'foo' not in d
|
assert 'foo' not in d
|
||||||
eq_(49,len(d['bar']))
|
eq_(49, len(d['bar']))
|
||||||
|
|
||||||
def test_dont_remove_objects_with_only_common_words(self):
|
def test_dont_remove_objects_with_only_common_words(self):
|
||||||
d = {
|
d = {
|
||||||
'common': set([NamedObject("common uncommon",True) for i in range(50)] + [NamedObject("common",True)]),
|
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
|
||||||
'uncommon': set([NamedObject("common uncommon",True)])
|
'uncommon': set([NamedObject("common uncommon", True)])
|
||||||
}
|
}
|
||||||
reduce_common_words(d, 50)
|
reduce_common_words(d, 50)
|
||||||
eq_(1,len(d['common']))
|
eq_(1, len(d['common']))
|
||||||
eq_(1,len(d['uncommon']))
|
eq_(1, len(d['uncommon']))
|
||||||
|
|
||||||
def test_values_still_are_set_instances(self):
|
def test_values_still_are_set_instances(self):
|
||||||
d = {
|
d = {
|
||||||
'common': set([NamedObject("common uncommon",True) for i in range(50)] + [NamedObject("common",True)]),
|
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
|
||||||
'uncommon': set([NamedObject("common uncommon",True)])
|
'uncommon': set([NamedObject("common uncommon", True)])
|
||||||
}
|
}
|
||||||
reduce_common_words(d, 50)
|
reduce_common_words(d, 50)
|
||||||
assert isinstance(d['common'],set)
|
assert isinstance(d['common'], set)
|
||||||
assert isinstance(d['uncommon'],set)
|
assert isinstance(d['uncommon'], set)
|
||||||
|
|
||||||
def test_dont_raise_KeyError_when_a_word_has_been_removed(self):
|
def test_dont_raise_KeyError_when_a_word_has_been_removed(self):
|
||||||
#If a word has been removed by the reduce, an object in a subsequent common word that
|
#If a word has been removed by the reduce, an object in a subsequent common word that
|
||||||
#contains the word that has been removed would cause a KeyError.
|
#contains the word that has been removed would cause a KeyError.
|
||||||
d = {
|
d = {
|
||||||
'foo': set([NamedObject('foo bar baz',True) for i in range(50)]),
|
'foo': set([NamedObject('foo bar baz', True) for i in range(50)]),
|
||||||
'bar': set([NamedObject('foo bar baz',True) for i in range(50)]),
|
'bar': set([NamedObject('foo bar baz', True) for i in range(50)]),
|
||||||
'baz': set([NamedObject('foo bar baz',True) for i in range(49)])
|
'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
reduce_common_words(d, 50)
|
reduce_common_words(d, 50)
|
||||||
@@ -290,7 +292,7 @@ class TestCasereduce_common_words:
         #object.words may be fields.
         def create_it():
             o = NamedObject('')
-            o.words = [['foo','bar'],['baz']]
+            o.words = [['foo', 'bar'], ['baz']]
             return o

         d = {
@ -306,39 +308,40 @@ class TestCasereduce_common_words:
|
|||||||
#be counted as a common word for subsequent words. For example, if 'foo' is processed
|
#be counted as a common word for subsequent words. For example, if 'foo' is processed
|
||||||
#as a common word, keeping a "foo bar" file in it, and the 'bar' is processed, "foo bar"
|
#as a common word, keeping a "foo bar" file in it, and the 'bar' is processed, "foo bar"
|
||||||
#would not stay in 'bar' because 'foo' is not a common word anymore.
|
#would not stay in 'bar' because 'foo' is not a common word anymore.
|
||||||
only_common = NamedObject('foo bar',True)
|
only_common = NamedObject('foo bar', True)
|
||||||
d = {
|
d = {
|
||||||
'foo': set([NamedObject('foo bar baz',True) for i in range(49)] + [only_common]),
|
'foo': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
|
||||||
'bar': set([NamedObject('foo bar baz',True) for i in range(49)] + [only_common]),
|
'bar': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
|
||||||
'baz': set([NamedObject('foo bar baz',True) for i in range(49)])
|
'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
|
||||||
}
|
}
|
||||||
reduce_common_words(d, 50)
|
reduce_common_words(d, 50)
|
||||||
eq_(1,len(d['foo']))
|
eq_(1, len(d['foo']))
|
||||||
eq_(1,len(d['bar']))
|
eq_(1, len(d['bar']))
|
||||||
eq_(49,len(d['baz']))
|
eq_(49, len(d['baz']))
|
||||||
|
|
||||||
|
|
||||||
class TestCaseget_match:
|
class TestCaseget_match:
|
||||||
def test_simple(self):
|
def test_simple(self):
|
||||||
o1 = NamedObject("foo bar",True)
|
o1 = NamedObject("foo bar", True)
|
||||||
o2 = NamedObject("bar bleh",True)
|
o2 = NamedObject("bar bleh", True)
|
||||||
m = get_match(o1,o2)
|
m = get_match(o1, o2)
|
||||||
eq_(50,m.percentage)
|
eq_(50, m.percentage)
|
||||||
eq_(['foo','bar'],m.first.words)
|
eq_(['foo', 'bar'], m.first.words)
|
||||||
eq_(['bar','bleh'],m.second.words)
|
eq_(['bar', 'bleh'], m.second.words)
|
||||||
assert m.first is o1
|
assert m.first is o1
|
||||||
assert m.second is o2
|
assert m.second is o2
|
||||||
|
|
||||||
def test_in(self):
|
def test_in(self):
|
||||||
o1 = NamedObject("foo",True)
|
o1 = NamedObject("foo", True)
|
||||||
o2 = NamedObject("bar",True)
|
o2 = NamedObject("bar", True)
|
||||||
m = get_match(o1,o2)
|
m = get_match(o1, o2)
|
||||||
assert o1 in m
|
assert o1 in m
|
||||||
assert o2 in m
|
assert o2 in m
|
||||||
assert object() not in m
|
assert object() not in m
|
||||||
|
|
||||||
def test_word_weight(self):
|
def test_word_weight(self):
|
||||||
eq_(int((6.0 / 13.0) * 100),get_match(NamedObject("foo bar",True),NamedObject("bar bleh",True),(WEIGHT_WORDS,)).percentage)
|
m = get_match(NamedObject("foo bar", True), NamedObject("bar bleh", True), (WEIGHT_WORDS, ))
|
||||||
|
eq_(m.percentage, int((6.0 / 13.0) * 100))
|
||||||
|
|
||||||
|
|
||||||
class TestCaseGetMatches:
|
class TestCaseGetMatches:
|
||||||
@@ -346,16 +349,16 @@ class TestCaseGetMatches:
         eq_(getmatches([]), [])

     def test_simple(self):
-        l = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject("a b c foo")]
+        l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
         r = getmatches(l)
-        eq_(2,len(r))
+        eq_(2, len(r))
         m = first(m for m in r if m.percentage == 50) #"foo bar" and "bar bleh"
         assert_match(m, 'foo bar', 'bar bleh')
         m = first(m for m in r if m.percentage == 33) #"foo bar" and "a b c foo"
         assert_match(m, 'foo bar', 'a b c foo')

     def test_null_and_unrelated_objects(self):
-        l = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject(""),NamedObject("unrelated object")]
+        l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject(""), NamedObject("unrelated object")]
         r = getmatches(l)
         eq_(len(r), 1)
         m = r[0]
@@ -363,19 +366,19 @@ class TestCaseGetMatches:
         assert_match(m, 'foo bar', 'bar bleh')

     def test_twice_the_same_word(self):
-        l = [NamedObject("foo foo bar"),NamedObject("bar bleh")]
+        l = [NamedObject("foo foo bar"), NamedObject("bar bleh")]
         r = getmatches(l)
-        eq_(1,len(r))
+        eq_(1, len(r))

     def test_twice_the_same_word_when_preworded(self):
-        l = [NamedObject("foo foo bar",True),NamedObject("bar bleh",True)]
+        l = [NamedObject("foo foo bar", True), NamedObject("bar bleh", True)]
         r = getmatches(l)
-        eq_(1,len(r))
+        eq_(1, len(r))

     def test_two_words_match(self):
-        l = [NamedObject("foo bar"),NamedObject("foo bar bleh")]
+        l = [NamedObject("foo bar"), NamedObject("foo bar bleh")]
         r = getmatches(l)
-        eq_(1,len(r))
+        eq_(1, len(r))

     def test_match_files_with_only_common_words(self):
         #If a word occurs more than 50 times, it is excluded from the matching process
@ -384,41 +387,41 @@ class TestCaseGetMatches:
|
|||||||
# This test assumes that the common word threashold const is 50
|
# This test assumes that the common word threashold const is 50
|
||||||
l = [NamedObject("foo") for i in range(50)]
|
l = [NamedObject("foo") for i in range(50)]
|
||||||
r = getmatches(l)
|
r = getmatches(l)
|
||||||
eq_(1225,len(r))
|
eq_(1225, len(r))
|
||||||
|
|
||||||
def test_use_words_already_there_if_there(self):
|
def test_use_words_already_there_if_there(self):
|
||||||
o1 = NamedObject('foo')
|
o1 = NamedObject('foo')
|
||||||
o2 = NamedObject('bar')
|
o2 = NamedObject('bar')
|
||||||
o2.words = ['foo']
|
o2.words = ['foo']
|
||||||
eq_(1, len(getmatches([o1,o2])))
|
eq_(1, len(getmatches([o1, o2])))
|
||||||
|
|
||||||
def test_job(self):
|
def test_job(self):
|
||||||
def do_progress(p,d=''):
|
def do_progress(p, d=''):
|
||||||
self.log.append(p)
|
self.log.append(p)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
j = job.Job(1,do_progress)
|
j = job.Job(1, do_progress)
|
||||||
self.log = []
|
self.log = []
|
||||||
s = "foo bar"
|
s = "foo bar"
|
||||||
getmatches([NamedObject(s), NamedObject(s), NamedObject(s)], j=j)
|
getmatches([NamedObject(s), NamedObject(s), NamedObject(s)], j=j)
|
||||||
assert len(self.log) > 2
|
assert len(self.log) > 2
|
||||||
eq_(0,self.log[0])
|
eq_(0, self.log[0])
|
||||||
eq_(100,self.log[-1])
|
eq_(100, self.log[-1])
|
||||||
|
|
||||||
def test_weight_words(self):
|
def test_weight_words(self):
|
||||||
l = [NamedObject("foo bar"),NamedObject("bar bleh")]
|
l = [NamedObject("foo bar"), NamedObject("bar bleh")]
|
||||||
m = getmatches(l, weight_words=True)[0]
|
m = getmatches(l, weight_words=True)[0]
|
||||||
eq_(int((6.0 / 13.0) * 100),m.percentage)
|
eq_(int((6.0 / 13.0) * 100), m.percentage)
|
||||||
|
|
||||||
def test_similar_word(self):
|
def test_similar_word(self):
|
||||||
l = [NamedObject("foobar"),NamedObject("foobars")]
|
l = [NamedObject("foobar"), NamedObject("foobars")]
|
||||||
eq_(len(getmatches(l, match_similar_words=True)), 1)
|
eq_(len(getmatches(l, match_similar_words=True)), 1)
|
||||||
eq_(getmatches(l, match_similar_words=True)[0].percentage, 100)
|
eq_(getmatches(l, match_similar_words=True)[0].percentage, 100)
|
||||||
l = [NamedObject("foobar"),NamedObject("foo")]
|
l = [NamedObject("foobar"), NamedObject("foo")]
|
||||||
eq_(len(getmatches(l, match_similar_words=True)), 0) #too far
|
eq_(len(getmatches(l, match_similar_words=True)), 0) #too far
|
||||||
l = [NamedObject("bizkit"),NamedObject("bizket")]
|
l = [NamedObject("bizkit"), NamedObject("bizket")]
|
||||||
eq_(len(getmatches(l, match_similar_words=True)), 1)
|
eq_(len(getmatches(l, match_similar_words=True)), 1)
|
||||||
l = [NamedObject("foobar"),NamedObject("foosbar")]
|
l = [NamedObject("foobar"), NamedObject("foosbar")]
|
||||||
eq_(len(getmatches(l, match_similar_words=True)), 1)
|
eq_(len(getmatches(l, match_similar_words=True)), 1)
|
||||||
|
|
||||||
def test_single_object_with_similar_words(self):
|
def test_single_object_with_similar_words(self):
|
||||||
@@ -426,9 +429,9 @@ class TestCaseGetMatches:
         eq_(len(getmatches(l, match_similar_words=True)), 0)

     def test_double_words_get_counted_only_once(self):
-        l = [NamedObject("foo bar foo bleh"),NamedObject("foo bar bleh bar")]
+        l = [NamedObject("foo bar foo bleh"), NamedObject("foo bar bleh bar")]
         m = getmatches(l)[0]
-        eq_(75,m.percentage)
+        eq_(75, m.percentage)

     def test_with_fields(self):
         o1 = NamedObject("foo bar - foo bleh")
@@ -447,7 +450,7 @@ class TestCaseGetMatches:
         eq_(m.percentage, 50)

     def test_only_match_similar_when_the_option_is_set(self):
-        l = [NamedObject("foobar"),NamedObject("foobars")]
+        l = [NamedObject("foobar"), NamedObject("foobars")]
         eq_(len(getmatches(l, match_similar_words=False)), 0)

     def test_dont_recurse_do_match(self):
@@ -462,9 +465,9 @@ class TestCaseGetMatches:
         sys.setrecursionlimit(1000)

     def test_min_match_percentage(self):
-        l = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject("a b c foo")]
+        l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
         r = getmatches(l, min_match_percentage=50)
-        eq_(1,len(r)) #Only "foo bar" / "bar bleh" should match
+        eq_(1, len(r)) #Only "foo bar" / "bar bleh" should match

     def test_MemoryError(self, monkeypatch):
         @log_calls
@ -491,112 +494,112 @@ class TestCaseGetMatchesByContents:
|
|||||||
class TestCaseGroup:
|
class TestCaseGroup:
|
||||||
def test_empy(self):
|
def test_empy(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
eq_(None,g.ref)
|
eq_(None, g.ref)
|
||||||
eq_([],g.dupes)
|
eq_([], g.dupes)
|
||||||
eq_(0,len(g.matches))
|
eq_(0, len(g.matches))
|
||||||
|
|
||||||
def test_add_match(self):
|
def test_add_match(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
m = get_match(NamedObject("foo",True),NamedObject("bar",True))
|
m = get_match(NamedObject("foo", True), NamedObject("bar", True))
|
||||||
g.add_match(m)
|
g.add_match(m)
|
||||||
assert g.ref is m.first
|
assert g.ref is m.first
|
||||||
eq_([m.second],g.dupes)
|
eq_([m.second], g.dupes)
|
||||||
eq_(1,len(g.matches))
|
eq_(1, len(g.matches))
|
||||||
assert m in g.matches
|
assert m in g.matches
|
||||||
|
|
||||||
def test_multiple_add_match(self):
|
def test_multiple_add_match(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
o1 = NamedObject("a",True)
|
o1 = NamedObject("a", True)
|
||||||
o2 = NamedObject("b",True)
|
o2 = NamedObject("b", True)
|
||||||
o3 = NamedObject("c",True)
|
o3 = NamedObject("c", True)
|
||||||
o4 = NamedObject("d",True)
|
o4 = NamedObject("d", True)
|
||||||
g.add_match(get_match(o1,o2))
|
g.add_match(get_match(o1, o2))
|
||||||
assert g.ref is o1
|
assert g.ref is o1
|
||||||
eq_([o2],g.dupes)
|
eq_([o2], g.dupes)
|
||||||
eq_(1,len(g.matches))
|
eq_(1, len(g.matches))
|
||||||
g.add_match(get_match(o1,o3))
|
g.add_match(get_match(o1, o3))
|
||||||
eq_([o2],g.dupes)
|
eq_([o2], g.dupes)
|
||||||
eq_(2,len(g.matches))
|
eq_(2, len(g.matches))
|
||||||
g.add_match(get_match(o2,o3))
|
g.add_match(get_match(o2, o3))
|
||||||
eq_([o2,o3],g.dupes)
|
eq_([o2, o3], g.dupes)
|
||||||
eq_(3,len(g.matches))
|
eq_(3, len(g.matches))
|
||||||
g.add_match(get_match(o1,o4))
|
g.add_match(get_match(o1, o4))
|
||||||
eq_([o2,o3],g.dupes)
|
eq_([o2, o3], g.dupes)
|
||||||
eq_(4,len(g.matches))
|
eq_(4, len(g.matches))
|
||||||
g.add_match(get_match(o2,o4))
|
g.add_match(get_match(o2, o4))
|
||||||
eq_([o2,o3],g.dupes)
|
eq_([o2, o3], g.dupes)
|
||||||
eq_(5,len(g.matches))
|
eq_(5, len(g.matches))
|
||||||
g.add_match(get_match(o3,o4))
|
g.add_match(get_match(o3, o4))
|
||||||
eq_([o2,o3,o4],g.dupes)
|
eq_([o2, o3, o4], g.dupes)
|
||||||
eq_(6,len(g.matches))
|
eq_(6, len(g.matches))
|
||||||
|
|
||||||
def test_len(self):
|
def test_len(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
eq_(0,len(g))
|
eq_(0, len(g))
|
||||||
g.add_match(get_match(NamedObject("foo",True),NamedObject("bar",True)))
|
g.add_match(get_match(NamedObject("foo", True), NamedObject("bar", True)))
|
||||||
eq_(2,len(g))
|
eq_(2, len(g))
|
||||||
|
|
||||||
def test_add_same_match_twice(self):
|
def test_add_same_match_twice(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
m = get_match(NamedObject("foo",True),NamedObject("foo",True))
|
m = get_match(NamedObject("foo", True), NamedObject("foo", True))
|
||||||
g.add_match(m)
|
g.add_match(m)
|
||||||
eq_(2,len(g))
|
eq_(2, len(g))
|
||||||
eq_(1,len(g.matches))
|
eq_(1, len(g.matches))
|
||||||
g.add_match(m)
|
g.add_match(m)
|
||||||
eq_(2,len(g))
|
eq_(2, len(g))
|
||||||
eq_(1,len(g.matches))
|
eq_(1, len(g.matches))
|
||||||
|
|
||||||
def test_in(self):
|
def test_in(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
o1 = NamedObject("foo",True)
|
o1 = NamedObject("foo", True)
|
||||||
o2 = NamedObject("bar",True)
|
o2 = NamedObject("bar", True)
|
||||||
assert o1 not in g
|
assert o1 not in g
|
||||||
g.add_match(get_match(o1,o2))
|
g.add_match(get_match(o1, o2))
|
||||||
assert o1 in g
|
assert o1 in g
|
||||||
assert o2 in g
|
assert o2 in g
|
||||||
|
|
||||||
def test_remove(self):
|
def test_remove(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
o1 = NamedObject("foo",True)
|
o1 = NamedObject("foo", True)
|
||||||
o2 = NamedObject("bar",True)
|
o2 = NamedObject("bar", True)
|
||||||
o3 = NamedObject("bleh",True)
|
o3 = NamedObject("bleh", True)
|
||||||
g.add_match(get_match(o1,o2))
|
g.add_match(get_match(o1, o2))
|
||||||
g.add_match(get_match(o1,o3))
|
g.add_match(get_match(o1, o3))
|
||||||
g.add_match(get_match(o2,o3))
|
g.add_match(get_match(o2, o3))
|
||||||
eq_(3,len(g.matches))
|
eq_(3, len(g.matches))
|
||||||
eq_(3,len(g))
|
eq_(3, len(g))
|
||||||
g.remove_dupe(o3)
|
g.remove_dupe(o3)
|
||||||
eq_(1,len(g.matches))
|
eq_(1, len(g.matches))
|
||||||
eq_(2,len(g))
|
eq_(2, len(g))
|
||||||
g.remove_dupe(o1)
|
g.remove_dupe(o1)
|
||||||
eq_(0,len(g.matches))
|
eq_(0, len(g.matches))
|
||||||
eq_(0,len(g))
|
eq_(0, len(g))
|
||||||
|
|
||||||
def test_remove_with_ref_dupes(self):
|
def test_remove_with_ref_dupes(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
o1 = NamedObject("foo",True)
|
o1 = NamedObject("foo", True)
|
||||||
o2 = NamedObject("bar",True)
|
o2 = NamedObject("bar", True)
|
||||||
o3 = NamedObject("bleh",True)
|
o3 = NamedObject("bleh", True)
|
||||||
g.add_match(get_match(o1,o2))
|
g.add_match(get_match(o1, o2))
|
||||||
g.add_match(get_match(o1,o3))
|
g.add_match(get_match(o1, o3))
|
||||||
g.add_match(get_match(o2,o3))
|
g.add_match(get_match(o2, o3))
|
||||||
o1.is_ref = True
|
o1.is_ref = True
|
||||||
o2.is_ref = True
|
o2.is_ref = True
|
||||||
g.remove_dupe(o3)
|
g.remove_dupe(o3)
|
||||||
eq_(0,len(g))
|
eq_(0, len(g))
|
||||||
|
|
||||||
def test_switch_ref(self):
|
def test_switch_ref(self):
|
||||||
o1 = NamedObject(with_words=True)
|
o1 = NamedObject(with_words=True)
|
||||||
o2 = NamedObject(with_words=True)
|
o2 = NamedObject(with_words=True)
|
||||||
g = Group()
|
g = Group()
|
||||||
g.add_match(get_match(o1,o2))
|
g.add_match(get_match(o1, o2))
|
||||||
assert o1 is g.ref
|
assert o1 is g.ref
|
||||||
g.switch_ref(o2)
|
g.switch_ref(o2)
|
||||||
assert o2 is g.ref
|
assert o2 is g.ref
|
||||||
eq_([o1],g.dupes)
|
eq_([o1], g.dupes)
|
||||||
g.switch_ref(o2)
|
g.switch_ref(o2)
|
||||||
assert o2 is g.ref
|
assert o2 is g.ref
|
||||||
g.switch_ref(NamedObject('',True))
|
g.switch_ref(NamedObject('', True))
|
||||||
assert o2 is g.ref
|
assert o2 is g.ref
|
||||||
|
|
||||||
def test_switch_ref_from_ref_dir(self):
|
def test_switch_ref_from_ref_dir(self):
|
||||||
@@ -617,12 +620,12 @@ class TestCaseGroup:
         m = g.get_match_of(o)
         assert g.ref in m
         assert o in m
-        assert g.get_match_of(NamedObject('',True)) is None
+        assert g.get_match_of(NamedObject('', True)) is None
         assert g.get_match_of(g.ref) is None

     def test_percentage(self):
         #percentage should return the avg percentage in relation to the ref
-        m1,m2,m3 = get_match_triangle()
+        m1, m2, m3 = get_match_triangle()
         m1 = Match(m1[0], m1[1], 100)
         m2 = Match(m2[0], m2[1], 50)
         m3 = Match(m3[0], m3[1], 33)
@@ -630,21 +633,21 @@ class TestCaseGroup:
         g.add_match(m1)
         g.add_match(m2)
         g.add_match(m3)
-        eq_(75,g.percentage)
+        eq_(75, g.percentage)
         g.switch_ref(g.dupes[0])
-        eq_(66,g.percentage)
+        eq_(66, g.percentage)
         g.remove_dupe(g.dupes[0])
-        eq_(33,g.percentage)
+        eq_(33, g.percentage)
         g.add_match(m1)
         g.add_match(m2)
-        eq_(66,g.percentage)
+        eq_(66, g.percentage)

     def test_percentage_on_empty_group(self):
         g = Group()
-        eq_(0,g.percentage)
+        eq_(0, g.percentage)

     def test_prioritize(self):
-        m1,m2,m3 = get_match_triangle()
+        m1, m2, m3 = get_match_triangle()
         o1 = m1.first
         o2 = m1.second
         o3 = m2.second
@@ -656,7 +659,7 @@ class TestCaseGroup:
         g.add_match(m2)
         g.add_match(m3)
         assert o1 is g.ref
-        assert g.prioritize(lambda x:x.name)
+        assert g.prioritize(lambda x: x.name)
         assert o3 is g.ref

     def test_prioritize_with_tie_breaker(self):
@@ -664,7 +667,7 @@ class TestCaseGroup:
         g = get_test_group()
         o1, o2, o3 = g.ordered
         tie_breaker = lambda ref, dupe: dupe is o3
-        g.prioritize(lambda x:0, tie_breaker)
+        g.prioritize(lambda x: 0, tie_breaker)
         assert g.ref is o3

     def test_prioritize_with_tie_breaker_runs_on_all_dupes(self):
@@ -676,7 +679,7 @@ class TestCaseGroup:
         o2.foo = 2
         o3.foo = 3
         tie_breaker = lambda ref, dupe: dupe.foo > ref.foo
-        g.prioritize(lambda x:0, tie_breaker)
+        g.prioritize(lambda x: 0, tie_breaker)
         assert g.ref is o3

     def test_prioritize_with_tie_breaker_runs_only_on_tie_dupes(self):
@ -709,65 +712,65 @@ class TestCaseGroup:
|
|||||||
g[0].name = 'a'
|
g[0].name = 'a'
|
||||||
g[1].name = 'b'
|
g[1].name = 'b'
|
||||||
g[2].name = 'c'
|
g[2].name = 'c'
|
||||||
assert not g.prioritize(lambda x:x.name)
|
assert not g.prioritize(lambda x: x.name)
|
||||||
|
|
||||||
def test_list_like(self):
|
def test_list_like(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
o1,o2 = (NamedObject("foo",True),NamedObject("bar",True))
|
o1, o2 = (NamedObject("foo", True), NamedObject("bar", True))
|
||||||
g.add_match(get_match(o1,o2))
|
g.add_match(get_match(o1, o2))
|
||||||
assert g[0] is o1
|
assert g[0] is o1
|
||||||
assert g[1] is o2
|
assert g[1] is o2
|
||||||
|
|
||||||
def test_discard_matches(self):
|
def test_discard_matches(self):
|
||||||
g = Group()
|
g = Group()
|
||||||
o1,o2,o3 = (NamedObject("foo",True),NamedObject("bar",True),NamedObject("baz",True))
|
o1, o2, o3 = (NamedObject("foo", True), NamedObject("bar", True), NamedObject("baz", True))
|
||||||
g.add_match(get_match(o1,o2))
|
g.add_match(get_match(o1, o2))
|
||||||
g.add_match(get_match(o1,o3))
|
g.add_match(get_match(o1, o3))
|
||||||
g.discard_matches()
|
g.discard_matches()
|
||||||
eq_(1,len(g.matches))
|
eq_(1, len(g.matches))
|
||||||
eq_(0,len(g.candidates))
|
eq_(0, len(g.candidates))
|
||||||
|
|
||||||
|
|
||||||
class TestCaseget_groups:
|
class TestCaseget_groups:
|
||||||
def test_empty(self):
|
def test_empty(self):
|
||||||
r = get_groups([])
|
r = get_groups([])
|
||||||
eq_([],r)
|
eq_([], r)
|
||||||
|
|
||||||
def test_simple(self):
|
def test_simple(self):
|
||||||
l = [NamedObject("foo bar"),NamedObject("bar bleh")]
|
l = [NamedObject("foo bar"), NamedObject("bar bleh")]
|
||||||
matches = getmatches(l)
|
matches = getmatches(l)
|
||||||
m = matches[0]
|
m = matches[0]
|
||||||
r = get_groups(matches)
|
r = get_groups(matches)
|
||||||
eq_(1,len(r))
|
eq_(1, len(r))
|
||||||
g = r[0]
|
g = r[0]
|
||||||
assert g.ref is m.first
|
assert g.ref is m.first
|
||||||
eq_([m.second],g.dupes)
|
eq_([m.second], g.dupes)
|
||||||
|
|
||||||
def test_group_with_multiple_matches(self):
|
def test_group_with_multiple_matches(self):
|
||||||
#This results in 3 matches
|
#This results in 3 matches
|
||||||
l = [NamedObject("foo"),NamedObject("foo"),NamedObject("foo")]
|
l = [NamedObject("foo"), NamedObject("foo"), NamedObject("foo")]
|
||||||
matches = getmatches(l)
|
matches = getmatches(l)
|
||||||
r = get_groups(matches)
|
r = get_groups(matches)
|
||||||
eq_(1,len(r))
|
eq_(1, len(r))
|
||||||
g = r[0]
|
g = r[0]
|
||||||
eq_(3,len(g))
|
eq_(3, len(g))
|
||||||
|
|
||||||
def test_must_choose_a_group(self):
|
def test_must_choose_a_group(self):
|
||||||
l = [NamedObject("a b"),NamedObject("a b"),NamedObject("b c"),NamedObject("c d"),NamedObject("c d")]
|
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("b c"), NamedObject("c d"), NamedObject("c d")]
|
||||||
#There will be 2 groups here: group "a b" and group "c d"
|
#There will be 2 groups here: group "a b" and group "c d"
|
||||||
#"b c" can go either of them, but not both.
|
#"b c" can go either of them, but not both.
|
||||||
matches = getmatches(l)
|
matches = getmatches(l)
|
||||||
r = get_groups(matches)
|
r = get_groups(matches)
|
||||||
eq_(2,len(r))
|
eq_(2, len(r))
|
||||||
eq_(5,len(r[0])+len(r[1]))
|
eq_(5, len(r[0])+len(r[1]))
|
||||||
|
|
||||||
def test_should_all_go_in_the_same_group(self):
|
def test_should_all_go_in_the_same_group(self):
|
||||||
l = [NamedObject("a b"),NamedObject("a b"),NamedObject("a b"),NamedObject("a b")]
|
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("a b"), NamedObject("a b")]
|
||||||
#There will be 2 groups here: group "a b" and group "c d"
|
#There will be 2 groups here: group "a b" and group "c d"
|
||||||
#"b c" can fit in both, but it must be in only one of them
|
#"b c" can fit in both, but it must be in only one of them
|
||||||
matches = getmatches(l)
|
matches = getmatches(l)
|
||||||
r = get_groups(matches)
|
r = get_groups(matches)
|
||||||
eq_(1,len(r))
|
eq_(1, len(r))
|
||||||
|
|
||||||
def test_give_priority_to_matches_with_higher_percentage(self):
|
def test_give_priority_to_matches_with_higher_percentage(self):
|
||||||
o1 = NamedObject(with_words=True)
|
o1 = NamedObject(with_words=True)
|
||||||
@@ -775,10 +778,10 @@ class TestCaseget_groups:
         o3 = NamedObject(with_words=True)
         m1 = Match(o1, o2, 1)
         m2 = Match(o2, o3, 2)
-        r = get_groups([m1,m2])
-        eq_(1,len(r))
+        r = get_groups([m1, m2])
+        eq_(1, len(r))
         g = r[0]
-        eq_(2,len(g))
+        eq_(2, len(g))
         assert o1 not in g
         assert o2 in g
         assert o3 in g
@ -787,32 +790,32 @@ class TestCaseget_groups:
|
|||||||
l = [NamedObject("foobar") for i in range(4)]
|
l = [NamedObject("foobar") for i in range(4)]
|
||||||
m = getmatches(l)
|
m = getmatches(l)
|
||||||
r = get_groups(m)
|
r = get_groups(m)
|
||||||
eq_(1,len(r))
|
eq_(1, len(r))
|
||||||
eq_(4,len(r[0]))
|
eq_(4, len(r[0]))
|
||||||
|
|
||||||
def test_referenced_by_ref2(self):
|
def test_referenced_by_ref2(self):
|
||||||
o1 = NamedObject(with_words=True)
|
o1 = NamedObject(with_words=True)
|
||||||
o2 = NamedObject(with_words=True)
|
o2 = NamedObject(with_words=True)
|
||||||
o3 = NamedObject(with_words=True)
|
o3 = NamedObject(with_words=True)
|
||||||
m1 = get_match(o1,o2)
|
m1 = get_match(o1, o2)
|
||||||
m2 = get_match(o3,o1)
|
m2 = get_match(o3, o1)
|
||||||
m3 = get_match(o3,o2)
|
m3 = get_match(o3, o2)
|
||||||
r = get_groups([m1,m2,m3])
|
r = get_groups([m1, m2, m3])
|
||||||
eq_(3,len(r[0]))
|
eq_(3, len(r[0]))
|
||||||
|
|
||||||
def test_job(self):
|
def test_job(self):
|
||||||
def do_progress(p,d=''):
|
def do_progress(p, d=''):
|
||||||
self.log.append(p)
|
self.log.append(p)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
self.log = []
|
self.log = []
|
||||||
j = job.Job(1,do_progress)
|
j = job.Job(1, do_progress)
|
||||||
m1,m2,m3 = get_match_triangle()
|
m1, m2, m3 = get_match_triangle()
|
||||||
#101%: To make sure it is processed first so the job test works correctly
|
#101%: To make sure it is processed first so the job test works correctly
|
||||||
m4 = Match(NamedObject('a',True), NamedObject('a',True), 101)
|
m4 = Match(NamedObject('a', True), NamedObject('a', True), 101)
|
||||||
get_groups([m1,m2,m3,m4],j)
|
get_groups([m1, m2, m3, m4], j)
|
||||||
eq_(0,self.log[0])
|
eq_(0, self.log[0])
|
||||||
eq_(100,self.log[-1])
|
eq_(100, self.log[-1])
|
||||||
|
|
||||||
def test_group_admissible_discarded_dupes(self):
|
def test_group_admissible_discarded_dupes(self):
|
||||||
# If, with a (A, B, C, D) set, all match with A, but C and D don't match with B and that the
|
# If, with a (A, B, C, D) set, all match with A, but C and D don't match with B and that the
|
||||||
|
@@ -1,6 +1,4 @@
-# Created By: Virgil Dupras
-# Created On: 2006/05/02
-# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
+# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
 #
 # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
 # which should be included with this package. The terms are also available at
@ -12,54 +10,54 @@ from xml.etree import ElementTree as ET
|
|||||||
from pytest import raises
|
from pytest import raises
|
||||||
from hscommon.testutil import eq_
|
from hscommon.testutil import eq_
|
||||||
|
|
||||||
from ..ignore import *
|
from ..ignore import IgnoreList
|
||||||
|
|
||||||
def test_empty():
|
def test_empty():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
eq_(0,len(il))
|
eq_(0, len(il))
|
||||||
assert not il.AreIgnored('foo','bar')
|
assert not il.AreIgnored('foo', 'bar')
|
||||||
|
|
||||||
def test_simple():
|
def test_simple():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
assert il.AreIgnored('foo','bar')
|
assert il.AreIgnored('foo', 'bar')
|
||||||
assert il.AreIgnored('bar','foo')
|
assert il.AreIgnored('bar', 'foo')
|
||||||
assert not il.AreIgnored('foo','bleh')
|
assert not il.AreIgnored('foo', 'bleh')
|
||||||
assert not il.AreIgnored('bleh','bar')
|
assert not il.AreIgnored('bleh', 'bar')
|
||||||
eq_(1,len(il))
|
eq_(1, len(il))
|
||||||
|
|
||||||
def test_multiple():
|
def test_multiple():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
il.Ignore('foo','bleh')
|
il.Ignore('foo', 'bleh')
|
||||||
il.Ignore('bleh','bar')
|
il.Ignore('bleh', 'bar')
|
||||||
il.Ignore('aybabtu','bleh')
|
il.Ignore('aybabtu', 'bleh')
|
||||||
assert il.AreIgnored('foo','bar')
|
assert il.AreIgnored('foo', 'bar')
|
||||||
assert il.AreIgnored('bar','foo')
|
assert il.AreIgnored('bar', 'foo')
|
||||||
assert il.AreIgnored('foo','bleh')
|
assert il.AreIgnored('foo', 'bleh')
|
||||||
assert il.AreIgnored('bleh','bar')
|
assert il.AreIgnored('bleh', 'bar')
|
||||||
assert not il.AreIgnored('aybabtu','bar')
|
assert not il.AreIgnored('aybabtu', 'bar')
|
||||||
eq_(4,len(il))
|
eq_(4, len(il))
|
||||||
|
|
||||||
def test_clear():
|
def test_clear():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
il.Clear()
|
il.Clear()
|
||||||
assert not il.AreIgnored('foo','bar')
|
assert not il.AreIgnored('foo', 'bar')
|
||||||
assert not il.AreIgnored('bar','foo')
|
assert not il.AreIgnored('bar', 'foo')
|
||||||
eq_(0,len(il))
|
eq_(0, len(il))
|
||||||
|
|
||||||
def test_add_same_twice():
|
def test_add_same_twice():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
il.Ignore('bar','foo')
|
il.Ignore('bar', 'foo')
|
||||||
eq_(1,len(il))
|
eq_(1, len(il))
|
||||||
|
|
||||||
def test_save_to_xml():
|
def test_save_to_xml():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
il.Ignore('foo','bleh')
|
il.Ignore('foo', 'bleh')
|
||||||
il.Ignore('bleh','bar')
|
il.Ignore('bleh', 'bar')
|
||||||
f = io.BytesIO()
|
f = io.BytesIO()
|
||||||
il.save_to_xml(f)
|
il.save_to_xml(f)
|
||||||
f.seek(0)
|
f.seek(0)
|
||||||
@ -83,8 +81,8 @@ def test_SaveThenLoad():
|
|||||||
f.seek(0)
|
f.seek(0)
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.load_from_xml(f)
|
il.load_from_xml(f)
|
||||||
eq_(4,len(il))
|
eq_(4, len(il))
|
||||||
assert il.AreIgnored('\u00e9','bar')
|
assert il.AreIgnored('\u00e9', 'bar')
|
||||||
|
|
||||||
def test_LoadXML_with_empty_file_tags():
|
def test_LoadXML_with_empty_file_tags():
|
||||||
f = io.BytesIO()
|
f = io.BytesIO()
|
||||||
@ -92,41 +90,41 @@ def test_LoadXML_with_empty_file_tags():
|
|||||||
f.seek(0)
|
f.seek(0)
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.load_from_xml(f)
|
il.load_from_xml(f)
|
||||||
eq_(0,len(il))
|
eq_(0, len(il))
|
||||||
|
|
||||||
def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else():
|
def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
il.Ignore('bar','baz')
|
il.Ignore('bar', 'baz')
|
||||||
assert il.AreIgnored('bar','foo')
|
assert il.AreIgnored('bar', 'foo')
|
||||||
|
|
||||||
|
|
||||||
def test_no_dupes_when_a_child_is_a_key_somewhere_else():
|
def test_no_dupes_when_a_child_is_a_key_somewhere_else():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
il.Ignore('bar','baz')
|
il.Ignore('bar', 'baz')
|
||||||
il.Ignore('bar','foo')
|
il.Ignore('bar', 'foo')
|
||||||
eq_(2,len(il))
|
eq_(2, len(il))
|
||||||
|
|
||||||
def test_iterate():
|
def test_iterate():
|
||||||
#It must be possible to iterate through the ignore list
|
#It must be possible to iterate through the ignore list
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
expected = [('foo','bar'),('bar','baz'),('foo','baz')]
|
expected = [('foo', 'bar'), ('bar', 'baz'), ('foo', 'baz')]
|
||||||
for i in expected:
|
for i in expected:
|
||||||
il.Ignore(i[0],i[1])
|
il.Ignore(i[0], i[1])
|
||||||
for i in il:
|
for i in il:
|
||||||
expected.remove(i) #No exception should be raised
|
expected.remove(i) #No exception should be raised
|
||||||
assert not expected #expected should be empty
|
assert not expected #expected should be empty
|
||||||
|
|
||||||
def test_filter():
|
def test_filter():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
il.Ignore('bar','baz')
|
il.Ignore('bar', 'baz')
|
||||||
il.Ignore('foo','baz')
|
il.Ignore('foo', 'baz')
|
||||||
il.Filter(lambda f,s: f == 'bar')
|
il.Filter(lambda f, s: f == 'bar')
|
||||||
eq_(1,len(il))
|
eq_(1, len(il))
|
||||||
assert not il.AreIgnored('foo','bar')
|
assert not il.AreIgnored('foo', 'bar')
|
||||||
assert il.AreIgnored('bar','baz')
|
assert il.AreIgnored('bar', 'baz')
|
||||||
|
|
||||||
def test_save_with_non_ascii_items():
|
def test_save_with_non_ascii_items():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
@ -139,14 +137,14 @@ def test_save_with_non_ascii_items():
|
|||||||
|
|
||||||
def test_len():
|
def test_len():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
eq_(0,len(il))
|
eq_(0, len(il))
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
eq_(1,len(il))
|
eq_(1, len(il))
|
||||||
|
|
||||||
def test_nonzero():
|
def test_nonzero():
|
||||||
il = IgnoreList()
|
il = IgnoreList()
|
||||||
assert not il
|
assert not il
|
||||||
il.Ignore('foo','bar')
|
il.Ignore('foo', 'bar')
|
||||||
assert il
|
assert il
|
||||||
|
|
||||||
def test_remove():
|
def test_remove():
|
||||||
|
@ -1,6 +1,4 @@
|
|||||||
# Created By: Virgil Dupras
|
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
|
||||||
# Created On: 2006/02/23
|
|
||||||
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
|
|
||||||
|
|
||||||
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
||||||
# which should be included with this package. The terms are also available at
|
# which should be included with this package. The terms are also available at
|
||||||
@ -8,7 +6,7 @@
|
|||||||
|
|
||||||
from hscommon.testutil import eq_
|
from hscommon.testutil import eq_
|
||||||
|
|
||||||
from ..markable import *
|
from ..markable import MarkableList, Markable
|
||||||
|
|
||||||
def gen():
|
def gen():
|
||||||
ml = MarkableList()
|
ml = MarkableList()
|
||||||
@ -73,6 +71,7 @@ def test_change_notifications():
|
|||||||
class Foobar(Markable):
|
class Foobar(Markable):
|
||||||
def _did_mark(self, o):
|
def _did_mark(self, o):
|
||||||
self.log.append((True, o))
|
self.log.append((True, o))
|
||||||
|
|
||||||
def _did_unmark(self, o):
|
def _did_unmark(self, o):
|
||||||
self.log.append((False, o))
|
self.log.append((False, o))
|
||||||
|
|
||||||
@ -84,15 +83,15 @@ def test_change_notifications():
|
|||||||
f.unmark('foo')
|
f.unmark('foo')
|
||||||
f.unmark('foo')
|
f.unmark('foo')
|
||||||
f.mark_toggle('bar')
|
f.mark_toggle('bar')
|
||||||
eq_([(True,'foo'),(True,'bar'),(False,'foo'),(False,'bar')],f.log)
|
eq_([(True, 'foo'), (True, 'bar'), (False, 'foo'), (False, 'bar')], f.log)
|
||||||
|
|
||||||
def test_mark_count():
|
def test_mark_count():
|
||||||
ml = gen()
|
ml = gen()
|
||||||
eq_(0,ml.mark_count)
|
eq_(0, ml.mark_count)
|
||||||
ml.mark(7)
|
ml.mark(7)
|
||||||
eq_(1,ml.mark_count)
|
eq_(1, ml.mark_count)
|
||||||
ml.mark(11)
|
ml.mark(11)
|
||||||
eq_(1,ml.mark_count)
|
eq_(1, ml.mark_count)
|
||||||
|
|
||||||
def test_mark_none():
|
def test_mark_none():
|
||||||
log = []
|
log = []
|
||||||
@ -100,16 +99,16 @@ def test_mark_none():
|
|||||||
ml._did_unmark = lambda o: log.append(o)
|
ml._did_unmark = lambda o: log.append(o)
|
||||||
ml.mark(1)
|
ml.mark(1)
|
||||||
ml.mark(2)
|
ml.mark(2)
|
||||||
eq_(2,ml.mark_count)
|
eq_(2, ml.mark_count)
|
||||||
ml.mark_none()
|
ml.mark_none()
|
||||||
eq_(0,ml.mark_count)
|
eq_(0, ml.mark_count)
|
||||||
eq_([1,2],log)
|
eq_([1, 2], log)
|
||||||
|
|
||||||
def test_mark_all():
|
def test_mark_all():
|
||||||
ml = gen()
|
ml = gen()
|
||||||
eq_(0,ml.mark_count)
|
eq_(0, ml.mark_count)
|
||||||
ml.mark_all()
|
ml.mark_all()
|
||||||
eq_(10,ml.mark_count)
|
eq_(10, ml.mark_count)
|
||||||
assert ml.is_marked(1)
|
assert ml.is_marked(1)
|
||||||
|
|
||||||
def test_mark_invert():
|
def test_mark_invert():
|
||||||
@ -122,8 +121,8 @@ def test_mark_invert():
|
|||||||
def test_mark_while_inverted():
|
def test_mark_while_inverted():
|
||||||
log = []
|
log = []
|
||||||
ml = gen()
|
ml = gen()
|
||||||
ml._did_unmark = lambda o:log.append((False,o))
|
ml._did_unmark = lambda o: log.append((False, o))
|
||||||
ml._did_mark = lambda o:log.append((True,o))
|
ml._did_mark = lambda o: log.append((True, o))
|
||||||
ml.mark(1)
|
ml.mark(1)
|
||||||
ml.mark_invert()
|
ml.mark_invert()
|
||||||
assert ml.mark_inverted
|
assert ml.mark_inverted
|
||||||
@ -132,8 +131,8 @@ def test_mark_while_inverted():
|
|||||||
assert ml.unmark(1)
|
assert ml.unmark(1)
|
||||||
ml.mark_toggle(3)
|
ml.mark_toggle(3)
|
||||||
assert not ml.is_marked(3)
|
assert not ml.is_marked(3)
|
||||||
eq_(7,ml.mark_count)
|
eq_(7, ml.mark_count)
|
||||||
eq_([(True,1),(False,1),(True,2),(True,1),(True,3)],log)
|
eq_([(True, 1), (False, 1), (True, 2), (True, 1), (True, 3)], log)
|
||||||
|
|
||||||
def test_remove_mark_flag():
|
def test_remove_mark_flag():
|
||||||
ml = gen()
|
ml = gen()
|
||||||
|
@ -1,6 +1,4 @@
|
|||||||
# Created By: Virgil Dupras
|
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
|
||||||
# Created On: 2006/02/23
|
|
||||||
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
|
|
||||||
#
|
#
|
||||||
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
||||||
# which should be included with this package. The terms are also available at
|
# which should be included with this package. The terms are also available at
|
||||||
@ -29,10 +27,10 @@ class TestCaseResultsEmpty:
|
|||||||
self.test_stat_line() # make sure that the stats line isn't saying we applied a '[' filter
|
self.test_stat_line() # make sure that the stats line isn't saying we applied a '[' filter
|
||||||
|
|
||||||
def test_stat_line(self):
|
def test_stat_line(self):
|
||||||
eq_("0 / 0 (0.00 B / 0.00 B) duplicates marked.",self.results.stat_line)
|
eq_("0 / 0 (0.00 B / 0.00 B) duplicates marked.", self.results.stat_line)
|
||||||
|
|
||||||
def test_groups(self):
|
def test_groups(self):
|
||||||
eq_(0,len(self.results.groups))
|
eq_(0, len(self.results.groups))
|
||||||
|
|
||||||
def test_get_group_of_duplicate(self):
|
def test_get_group_of_duplicate(self):
|
||||||
assert self.results.get_group_of_duplicate('foo') is None
|
assert self.results.get_group_of_duplicate('foo') is None
|
||||||
@ -70,14 +68,14 @@ class TestCaseResultsWithSomeGroups:
|
|||||||
def setup_method(self, method):
|
def setup_method(self, method):
|
||||||
self.app = DupeGuru()
|
self.app = DupeGuru()
|
||||||
self.results = self.app.results
|
self.results = self.app.results
|
||||||
self.objects,self.matches,self.groups = GetTestGroups()
|
self.objects, self.matches, self.groups = GetTestGroups()
|
||||||
self.results.groups = self.groups
|
self.results.groups = self.groups
|
||||||
|
|
||||||
def test_stat_line(self):
|
def test_stat_line(self):
|
||||||
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
|
|
||||||
def test_groups(self):
|
def test_groups(self):
|
||||||
eq_(2,len(self.results.groups))
|
eq_(2, len(self.results.groups))
|
||||||
|
|
||||||
def test_get_group_of_duplicate(self):
|
def test_get_group_of_duplicate(self):
|
||||||
for o in self.objects:
|
for o in self.objects:
|
||||||
@ -87,27 +85,27 @@ class TestCaseResultsWithSomeGroups:
|
|||||||
assert self.results.get_group_of_duplicate(self.groups[0]) is None
|
assert self.results.get_group_of_duplicate(self.groups[0]) is None
|
||||||
|
|
||||||
def test_remove_duplicates(self):
|
def test_remove_duplicates(self):
|
||||||
g1,g2 = self.results.groups
|
g1, g2 = self.results.groups
|
||||||
self.results.remove_duplicates([g1.dupes[0]])
|
self.results.remove_duplicates([g1.dupes[0]])
|
||||||
eq_(2,len(g1))
|
eq_(2, len(g1))
|
||||||
assert g1 in self.results.groups
|
assert g1 in self.results.groups
|
||||||
self.results.remove_duplicates([g1.ref])
|
self.results.remove_duplicates([g1.ref])
|
||||||
eq_(2,len(g1))
|
eq_(2, len(g1))
|
||||||
assert g1 in self.results.groups
|
assert g1 in self.results.groups
|
||||||
self.results.remove_duplicates([g1.dupes[0]])
|
self.results.remove_duplicates([g1.dupes[0]])
|
||||||
eq_(0,len(g1))
|
eq_(0, len(g1))
|
||||||
assert g1 not in self.results.groups
|
assert g1 not in self.results.groups
|
||||||
self.results.remove_duplicates([g2.dupes[0]])
|
self.results.remove_duplicates([g2.dupes[0]])
|
||||||
eq_(0,len(g2))
|
eq_(0, len(g2))
|
||||||
assert g2 not in self.results.groups
|
assert g2 not in self.results.groups
|
||||||
eq_(0,len(self.results.groups))
|
eq_(0, len(self.results.groups))
|
||||||
|
|
||||||
def test_remove_duplicates_with_ref_files(self):
|
def test_remove_duplicates_with_ref_files(self):
|
||||||
g1,g2 = self.results.groups
|
g1, g2 = self.results.groups
|
||||||
self.objects[0].is_ref = True
|
self.objects[0].is_ref = True
|
||||||
self.objects[1].is_ref = True
|
self.objects[1].is_ref = True
|
||||||
self.results.remove_duplicates([self.objects[2]])
|
self.results.remove_duplicates([self.objects[2]])
|
||||||
eq_(0,len(g1))
|
eq_(0, len(g1))
|
||||||
assert g1 not in self.results.groups
|
assert g1 not in self.results.groups
|
||||||
|
|
||||||
def test_make_ref(self):
|
def test_make_ref(self):
|
||||||
@ -118,7 +116,7 @@ class TestCaseResultsWithSomeGroups:
|
|||||||
|
|
||||||
def test_sort_groups(self):
|
def test_sort_groups(self):
|
||||||
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object go ref.
|
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object go ref.
|
||||||
g1,g2 = self.groups
|
g1, g2 = self.groups
|
||||||
self.results.sort_groups('size')
|
self.results.sort_groups('size')
|
||||||
assert self.results.groups[0] is g2
|
assert self.results.groups[0] is g2
|
||||||
assert self.results.groups[1] is g1
|
assert self.results.groups[1] is g1
|
||||||
@ -129,43 +127,43 @@ class TestCaseResultsWithSomeGroups:
|
|||||||
def test_set_groups_when_sorted(self):
|
def test_set_groups_when_sorted(self):
|
||||||
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object go ref.
|
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object go ref.
|
||||||
self.results.sort_groups('size')
|
self.results.sort_groups('size')
|
||||||
objects,matches,groups = GetTestGroups()
|
objects, matches, groups = GetTestGroups()
|
||||||
g1,g2 = groups
|
g1, g2 = groups
|
||||||
g1.switch_ref(objects[1])
|
g1.switch_ref(objects[1])
|
||||||
self.results.groups = groups
|
self.results.groups = groups
|
||||||
assert self.results.groups[0] is g2
|
assert self.results.groups[0] is g2
|
||||||
assert self.results.groups[1] is g1
|
assert self.results.groups[1] is g1
|
||||||
|
|
||||||
def test_get_dupe_list(self):
|
def test_get_dupe_list(self):
|
||||||
eq_([self.objects[1],self.objects[2],self.objects[4]],self.results.dupes)
|
eq_([self.objects[1], self.objects[2], self.objects[4]], self.results.dupes)
|
||||||
|
|
||||||
def test_dupe_list_is_cached(self):
|
def test_dupe_list_is_cached(self):
|
||||||
assert self.results.dupes is self.results.dupes
|
assert self.results.dupes is self.results.dupes
|
||||||
|
|
||||||
def test_dupe_list_cache_is_invalidated_when_needed(self):
|
def test_dupe_list_cache_is_invalidated_when_needed(self):
|
||||||
o1,o2,o3,o4,o5 = self.objects
|
o1, o2, o3, o4, o5 = self.objects
|
||||||
eq_([o2,o3,o5],self.results.dupes)
|
eq_([o2, o3, o5], self.results.dupes)
|
||||||
self.results.make_ref(o2)
|
self.results.make_ref(o2)
|
||||||
eq_([o1,o3,o5],self.results.dupes)
|
eq_([o1, o3, o5], self.results.dupes)
|
||||||
objects,matches,groups = GetTestGroups()
|
objects, matches, groups = GetTestGroups()
|
||||||
o1,o2,o3,o4,o5 = objects
|
o1, o2, o3, o4, o5 = objects
|
||||||
self.results.groups = groups
|
self.results.groups = groups
|
||||||
eq_([o2,o3,o5],self.results.dupes)
|
eq_([o2, o3, o5], self.results.dupes)
|
||||||
|
|
||||||
def test_dupe_list_sort(self):
|
def test_dupe_list_sort(self):
|
||||||
o1,o2,o3,o4,o5 = self.objects
|
o1, o2, o3, o4, o5 = self.objects
|
||||||
o1.size = 5
|
o1.size = 5
|
||||||
o2.size = 4
|
o2.size = 4
|
||||||
o3.size = 3
|
o3.size = 3
|
||||||
o4.size = 2
|
o4.size = 2
|
||||||
o5.size = 1
|
o5.size = 1
|
||||||
self.results.sort_dupes('size')
|
self.results.sort_dupes('size')
|
||||||
eq_([o5,o3,o2],self.results.dupes)
|
eq_([o5, o3, o2], self.results.dupes)
|
||||||
self.results.sort_dupes('size', False)
|
self.results.sort_dupes('size', False)
|
||||||
eq_([o2,o3,o5],self.results.dupes)
|
eq_([o2, o3, o5], self.results.dupes)
|
||||||
|
|
||||||
def test_dupe_list_remember_sort(self):
|
def test_dupe_list_remember_sort(self):
|
||||||
o1,o2,o3,o4,o5 = self.objects
|
o1, o2, o3, o4, o5 = self.objects
|
||||||
o1.size = 5
|
o1.size = 5
|
||||||
o2.size = 4
|
o2.size = 4
|
||||||
o3.size = 3
|
o3.size = 3
|
||||||
@ -173,30 +171,30 @@ class TestCaseResultsWithSomeGroups:
|
|||||||
o5.size = 1
|
o5.size = 1
|
||||||
self.results.sort_dupes('size')
|
self.results.sort_dupes('size')
|
||||||
self.results.make_ref(o2)
|
self.results.make_ref(o2)
|
||||||
eq_([o5,o3,o1],self.results.dupes)
|
eq_([o5, o3, o1], self.results.dupes)
|
||||||
|
|
||||||
def test_dupe_list_sort_delta_values(self):
|
def test_dupe_list_sort_delta_values(self):
|
||||||
o1,o2,o3,o4,o5 = self.objects
|
o1, o2, o3, o4, o5 = self.objects
|
||||||
o1.size = 10
|
o1.size = 10
|
||||||
o2.size = 2 #-8
|
o2.size = 2 #-8
|
||||||
o3.size = 3 #-7
|
o3.size = 3 #-7
|
||||||
o4.size = 20
|
o4.size = 20
|
||||||
o5.size = 1 #-19
|
o5.size = 1 #-19
|
||||||
self.results.sort_dupes('size', delta=True)
|
self.results.sort_dupes('size', delta=True)
|
||||||
eq_([o5,o2,o3],self.results.dupes)
|
eq_([o5, o2, o3], self.results.dupes)
|
||||||
|
|
||||||
def test_sort_empty_list(self):
|
def test_sort_empty_list(self):
|
||||||
#There was an infinite loop when sorting an empty list.
|
#There was an infinite loop when sorting an empty list.
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = app.results
|
r = app.results
|
||||||
r.sort_dupes('name')
|
r.sort_dupes('name')
|
||||||
eq_([],r.dupes)
|
eq_([], r.dupes)
|
||||||
|
|
||||||
def test_dupe_list_update_on_remove_duplicates(self):
|
def test_dupe_list_update_on_remove_duplicates(self):
|
||||||
o1,o2,o3,o4,o5 = self.objects
|
o1, o2, o3, o4, o5 = self.objects
|
||||||
eq_(3,len(self.results.dupes))
|
eq_(3, len(self.results.dupes))
|
||||||
self.results.remove_duplicates([o2])
|
self.results.remove_duplicates([o2])
|
||||||
eq_(2,len(self.results.dupes))
|
eq_(2, len(self.results.dupes))
|
||||||
|
|
||||||
def test_is_modified(self):
|
def test_is_modified(self):
|
||||||
# Changing the groups sets the modified flag
|
# Changing the groups sets the modified flag
|
||||||
@ -218,7 +216,7 @@ class TestCaseResultsWithSomeGroups:
|
|||||||
def test_is_modified_after_removing_all_results(self):
|
def test_is_modified_after_removing_all_results(self):
|
||||||
# Removing all results sets the is_modified flag to false.
|
# Removing all results sets the is_modified flag to false.
|
||||||
self.results.mark_all()
|
self.results.mark_all()
|
||||||
self.results.perform_on_marked(lambda x:None, True)
|
self.results.perform_on_marked(lambda x: None, True)
|
||||||
assert not self.results.is_modified
|
assert not self.results.is_modified
|
||||||
|
|
||||||
def test_group_of_duplicate_after_removal(self):
|
def test_group_of_duplicate_after_removal(self):
|
||||||
@ -252,7 +250,7 @@ class TestCaseResultsWithSavedResults:
|
|||||||
def setup_method(self, method):
|
def setup_method(self, method):
|
||||||
self.app = DupeGuru()
|
self.app = DupeGuru()
|
||||||
self.results = self.app.results
|
self.results = self.app.results
|
||||||
self.objects,self.matches,self.groups = GetTestGroups()
|
self.objects, self.matches, self.groups = GetTestGroups()
|
||||||
self.results.groups = self.groups
|
self.results.groups = self.groups
|
||||||
self.f = io.BytesIO()
|
self.f = io.BytesIO()
|
||||||
self.results.save_to_xml(self.f)
|
self.results.save_to_xml(self.f)
|
||||||
@ -286,31 +284,31 @@ class TestCaseResultsMarkings:
|
|||||||
def setup_method(self, method):
|
def setup_method(self, method):
|
||||||
self.app = DupeGuru()
|
self.app = DupeGuru()
|
||||||
self.results = self.app.results
|
self.results = self.app.results
|
||||||
self.objects,self.matches,self.groups = GetTestGroups()
|
self.objects, self.matches, self.groups = GetTestGroups()
|
||||||
self.results.groups = self.groups
|
self.results.groups = self.groups
|
||||||
|
|
||||||
def test_stat_line(self):
|
def test_stat_line(self):
|
||||||
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.mark(self.objects[1])
|
self.results.mark(self.objects[1])
|
||||||
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.mark_invert()
|
self.results.mark_invert()
|
||||||
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.mark_invert()
|
self.results.mark_invert()
|
||||||
self.results.unmark(self.objects[1])
|
self.results.unmark(self.objects[1])
|
||||||
self.results.mark(self.objects[2])
|
self.results.mark(self.objects[2])
|
||||||
self.results.mark(self.objects[4])
|
self.results.mark(self.objects[4])
|
||||||
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.mark(self.objects[0]) #this is a ref, it can't be counted
|
self.results.mark(self.objects[0]) #this is a ref, it can't be counted
|
||||||
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.groups = self.groups
|
self.results.groups = self.groups
|
||||||
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
|
|
||||||
def test_with_ref_duplicate(self):
|
def test_with_ref_duplicate(self):
|
||||||
self.objects[1].is_ref = True
|
self.objects[1].is_ref = True
|
||||||
self.results.groups = self.groups
|
self.results.groups = self.groups
|
||||||
assert not self.results.mark(self.objects[1])
|
assert not self.results.mark(self.objects[1])
|
||||||
self.results.mark(self.objects[2])
|
self.results.mark(self.objects[2])
|
||||||
eq_("1 / 2 (1.00 B / 2.00 B) duplicates marked.",self.results.stat_line)
|
eq_("1 / 2 (1.00 B / 2.00 B) duplicates marked.", self.results.stat_line)
|
||||||
|
|
||||||
def test_perform_on_marked(self):
|
def test_perform_on_marked(self):
|
||||||
def log_object(o):
|
def log_object(o):
|
||||||
@ -319,18 +317,18 @@ class TestCaseResultsMarkings:
|
|||||||
|
|
||||||
log = []
|
log = []
|
||||||
self.results.mark_all()
|
self.results.mark_all()
|
||||||
self.results.perform_on_marked(log_object,False)
|
self.results.perform_on_marked(log_object, False)
|
||||||
assert self.objects[1] in log
|
assert self.objects[1] in log
|
||||||
assert self.objects[2] in log
|
assert self.objects[2] in log
|
||||||
assert self.objects[4] in log
|
assert self.objects[4] in log
|
||||||
eq_(3,len(log))
|
eq_(3, len(log))
|
||||||
log = []
|
log = []
|
||||||
self.results.mark_none()
|
self.results.mark_none()
|
||||||
self.results.mark(self.objects[4])
|
self.results.mark(self.objects[4])
|
||||||
self.results.perform_on_marked(log_object,True)
|
self.results.perform_on_marked(log_object, True)
|
||||||
eq_(1,len(log))
|
eq_(1, len(log))
|
||||||
assert self.objects[4] in log
|
assert self.objects[4] in log
|
||||||
eq_(1,len(self.results.groups))
|
eq_(1, len(self.results.groups))
|
||||||
|
|
||||||
def test_perform_on_marked_with_problems(self):
|
def test_perform_on_marked_with_problems(self):
|
||||||
def log_object(o):
|
def log_object(o):
|
||||||
@ -362,44 +360,44 @@ class TestCaseResultsMarkings:
|
|||||||
self.objects[0].is_ref = True
|
self.objects[0].is_ref = True
|
||||||
self.objects[1].is_ref = True
|
self.objects[1].is_ref = True
|
||||||
self.results.mark_all()
|
self.results.mark_all()
|
||||||
self.results.perform_on_marked(log_object,True)
|
self.results.perform_on_marked(log_object, True)
|
||||||
assert self.objects[1] not in log
|
assert self.objects[1] not in log
|
||||||
assert self.objects[2] in log
|
assert self.objects[2] in log
|
||||||
assert self.objects[4] in log
|
assert self.objects[4] in log
|
||||||
eq_(2,len(log))
|
eq_(2, len(log))
|
||||||
eq_(0,len(self.results.groups))
|
eq_(0, len(self.results.groups))
|
||||||
|
|
||||||
def test_perform_on_marked_remove_objects_only_at_the_end(self):
|
def test_perform_on_marked_remove_objects_only_at_the_end(self):
|
||||||
def check_groups(o):
|
def check_groups(o):
|
||||||
eq_(3,len(g1))
|
eq_(3, len(g1))
|
||||||
eq_(2,len(g2))
|
eq_(2, len(g2))
|
||||||
return True
|
return True
|
||||||
|
|
||||||
g1,g2 = self.results.groups
|
g1, g2 = self.results.groups
|
||||||
self.results.mark_all()
|
self.results.mark_all()
|
||||||
self.results.perform_on_marked(check_groups,True)
|
self.results.perform_on_marked(check_groups, True)
|
||||||
eq_(0,len(g1))
|
eq_(0, len(g1))
|
||||||
eq_(0,len(g2))
|
eq_(0, len(g2))
|
||||||
eq_(0,len(self.results.groups))
|
eq_(0, len(self.results.groups))
|
||||||
|
|
||||||
def test_remove_duplicates(self):
|
def test_remove_duplicates(self):
|
||||||
g1 = self.results.groups[0]
|
g1 = self.results.groups[0]
|
||||||
self.results.mark(g1.dupes[0])
|
self.results.mark(g1.dupes[0])
|
||||||
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.remove_duplicates([g1.dupes[1]])
|
self.results.remove_duplicates([g1.dupes[1]])
|
||||||
eq_("1 / 2 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("1 / 2 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.remove_duplicates([g1.dupes[0]])
|
self.results.remove_duplicates([g1.dupes[0]])
|
||||||
eq_("0 / 1 (0.00 B / 1.00 B) duplicates marked.",self.results.stat_line)
|
eq_("0 / 1 (0.00 B / 1.00 B) duplicates marked.", self.results.stat_line)
|
||||||
|
|
||||||
def test_make_ref(self):
|
def test_make_ref(self):
|
||||||
g = self.results.groups[0]
|
g = self.results.groups[0]
|
||||||
d = g.dupes[0]
|
d = g.dupes[0]
|
||||||
self.results.mark(d)
|
self.results.mark(d)
|
||||||
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
|
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
|
||||||
self.results.make_ref(d)
|
self.results.make_ref(d)
|
||||||
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.",self.results.stat_line)
|
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.", self.results.stat_line)
|
||||||
self.results.make_ref(d)
|
self.results.make_ref(d)
|
||||||
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.",self.results.stat_line)
|
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.", self.results.stat_line)
|
||||||
|
|
||||||
def test_SaveXML(self):
|
def test_SaveXML(self):
|
||||||
self.results.mark(self.objects[1])
|
self.results.mark(self.objects[1])
|
||||||
@ -430,7 +428,7 @@ class TestCaseResultsMarkings:
|
|||||||
f.seek(0)
|
f.seek(0)
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.load_from_xml(f,get_file)
|
r.load_from_xml(f, get_file)
|
||||||
assert not r.is_marked(self.objects[0])
|
assert not r.is_marked(self.objects[0])
|
||||||
assert not r.is_marked(self.objects[1])
|
assert not r.is_marked(self.objects[1])
|
||||||
assert r.is_marked(self.objects[2])
|
assert r.is_marked(self.objects[2])
|
||||||
@ -450,7 +448,7 @@ class TestCaseResultsXML:
|
|||||||
|
|
||||||
def test_save_to_xml(self):
|
def test_save_to_xml(self):
|
||||||
self.objects[0].is_ref = True
|
self.objects[0].is_ref = True
|
||||||
self.objects[0].words = [['foo','bar']]
|
self.objects[0].words = [['foo', 'bar']]
|
||||||
f = io.BytesIO()
|
f = io.BytesIO()
|
||||||
self.results.save_to_xml(f)
|
self.results.save_to_xml(f)
|
||||||
f.seek(0)
|
f.seek(0)
|
||||||
@ -460,29 +458,29 @@ class TestCaseResultsXML:
|
|||||||
eq_(2, len(root))
|
eq_(2, len(root))
|
||||||
eq_(2, len([c for c in root if c.tag == 'group']))
|
eq_(2, len([c for c in root if c.tag == 'group']))
|
||||||
g1, g2 = root
|
g1, g2 = root
|
||||||
eq_(6,len(g1))
|
eq_(6, len(g1))
|
||||||
eq_(3,len([c for c in g1 if c.tag == 'file']))
|
eq_(3, len([c for c in g1 if c.tag == 'file']))
|
||||||
eq_(3,len([c for c in g1 if c.tag == 'match']))
|
eq_(3, len([c for c in g1 if c.tag == 'match']))
|
||||||
d1, d2, d3 = [c for c in g1 if c.tag == 'file']
|
d1, d2, d3 = [c for c in g1 if c.tag == 'file']
|
||||||
eq_(op.join('basepath','foo bar'),d1.get('path'))
|
eq_(op.join('basepath', 'foo bar'), d1.get('path'))
|
||||||
eq_(op.join('basepath','bar bleh'),d2.get('path'))
|
eq_(op.join('basepath', 'bar bleh'), d2.get('path'))
|
||||||
eq_(op.join('basepath','foo bleh'),d3.get('path'))
|
eq_(op.join('basepath', 'foo bleh'), d3.get('path'))
|
||||||
eq_('y',d1.get('is_ref'))
|
eq_('y', d1.get('is_ref'))
|
||||||
eq_('n',d2.get('is_ref'))
|
eq_('n', d2.get('is_ref'))
|
||||||
eq_('n',d3.get('is_ref'))
|
eq_('n', d3.get('is_ref'))
|
||||||
eq_('foo,bar',d1.get('words'))
|
eq_('foo,bar', d1.get('words'))
|
||||||
eq_('bar,bleh',d2.get('words'))
|
eq_('bar,bleh', d2.get('words'))
|
||||||
eq_('foo,bleh',d3.get('words'))
|
eq_('foo,bleh', d3.get('words'))
|
||||||
eq_(3,len(g2))
|
eq_(3, len(g2))
|
||||||
eq_(2,len([c for c in g2 if c.tag == 'file']))
|
eq_(2, len([c for c in g2 if c.tag == 'file']))
|
||||||
eq_(1,len([c for c in g2 if c.tag == 'match']))
|
eq_(1, len([c for c in g2 if c.tag == 'match']))
|
||||||
d1, d2 = [c for c in g2 if c.tag == 'file']
|
d1, d2 = [c for c in g2 if c.tag == 'file']
|
||||||
eq_(op.join('basepath','ibabtu'),d1.get('path'))
|
eq_(op.join('basepath', 'ibabtu'), d1.get('path'))
|
||||||
eq_(op.join('basepath','ibabtu'),d2.get('path'))
|
eq_(op.join('basepath', 'ibabtu'), d2.get('path'))
|
||||||
eq_('n',d1.get('is_ref'))
|
eq_('n', d1.get('is_ref'))
|
||||||
eq_('n',d2.get('is_ref'))
|
eq_('n', d2.get('is_ref'))
|
||||||
eq_('ibabtu',d1.get('words'))
|
eq_('ibabtu', d1.get('words'))
|
||||||
eq_('ibabtu',d2.get('words'))
|
eq_('ibabtu', d2.get('words'))
|
||||||
|
|
||||||
def test_LoadXML(self):
|
def test_LoadXML(self):
|
||||||
def get_file(path):
|
def get_file(path):
|
||||||
@ -495,26 +493,26 @@ class TestCaseResultsXML:
|
|||||||
f.seek(0)
|
f.seek(0)
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.load_from_xml(f,get_file)
|
r.load_from_xml(f, get_file)
|
||||||
eq_(2,len(r.groups))
|
eq_(2, len(r.groups))
|
||||||
g1,g2 = r.groups
|
g1, g2 = r.groups
|
||||||
eq_(3,len(g1))
|
eq_(3, len(g1))
|
||||||
assert g1[0].is_ref
|
assert g1[0].is_ref
|
||||||
assert not g1[1].is_ref
|
assert not g1[1].is_ref
|
||||||
assert not g1[2].is_ref
|
assert not g1[2].is_ref
|
||||||
assert g1[0] is self.objects[0]
|
assert g1[0] is self.objects[0]
|
||||||
assert g1[1] is self.objects[1]
|
assert g1[1] is self.objects[1]
|
||||||
assert g1[2] is self.objects[2]
|
assert g1[2] is self.objects[2]
|
||||||
eq_(['foo','bar'],g1[0].words)
|
eq_(['foo', 'bar'], g1[0].words)
|
||||||
eq_(['bar','bleh'],g1[1].words)
|
eq_(['bar', 'bleh'], g1[1].words)
|
||||||
eq_(['foo','bleh'],g1[2].words)
|
eq_(['foo', 'bleh'], g1[2].words)
|
||||||
eq_(2,len(g2))
|
eq_(2, len(g2))
|
||||||
assert not g2[0].is_ref
|
assert not g2[0].is_ref
|
||||||
assert not g2[1].is_ref
|
assert not g2[1].is_ref
|
||||||
assert g2[0] is self.objects[3]
|
assert g2[0] is self.objects[3]
|
||||||
assert g2[1] is self.objects[4]
|
assert g2[1] is self.objects[4]
|
||||||
eq_(['ibabtu'],g2[0].words)
|
eq_(['ibabtu'], g2[0].words)
|
||||||
eq_(['ibabtu'],g2[1].words)
|
eq_(['ibabtu'], g2[1].words)
|
||||||
|
|
||||||
def test_LoadXML_with_filename(self, tmpdir):
|
def test_LoadXML_with_filename(self, tmpdir):
|
||||||
def get_file(path):
|
def get_file(path):
|
||||||
@ -525,8 +523,8 @@ class TestCaseResultsXML:
|
|||||||
self.results.save_to_xml(filename)
|
self.results.save_to_xml(filename)
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.load_from_xml(filename,get_file)
|
r.load_from_xml(filename, get_file)
|
||||||
eq_(2,len(r.groups))
|
eq_(2, len(r.groups))
|
||||||
|
|
||||||
def test_LoadXML_with_some_files_that_dont_exist_anymore(self):
|
def test_LoadXML_with_some_files_that_dont_exist_anymore(self):
|
||||||
def get_file(path):
|
def get_file(path):
|
||||||
@ -540,9 +538,9 @@ class TestCaseResultsXML:
|
|||||||
f.seek(0)
|
f.seek(0)
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.load_from_xml(f,get_file)
|
r.load_from_xml(f, get_file)
|
||||||
eq_(1,len(r.groups))
|
eq_(1, len(r.groups))
|
||||||
eq_(3,len(r.groups[0]))
|
eq_(3, len(r.groups[0]))
|
||||||
|
|
||||||
def test_LoadXML_missing_attributes_and_bogus_elements(self):
|
def test_LoadXML_missing_attributes_and_bogus_elements(self):
|
||||||
def get_file(path):
|
def get_file(path):
|
||||||
@ -551,20 +549,20 @@ class TestCaseResultsXML:
|
|||||||
root = ET.Element('foobar') #The root element shouldn't matter, really.
|
root = ET.Element('foobar') #The root element shouldn't matter, really.
|
||||||
group_node = ET.SubElement(root, 'group')
|
group_node = ET.SubElement(root, 'group')
|
||||||
dupe_node = ET.SubElement(group_node, 'file') #Perfectly correct file
|
dupe_node = ET.SubElement(group_node, 'file') #Perfectly correct file
|
||||||
dupe_node.set('path', op.join('basepath','foo bar'))
|
dupe_node.set('path', op.join('basepath', 'foo bar'))
|
||||||
dupe_node.set('is_ref', 'y')
|
dupe_node.set('is_ref', 'y')
|
||||||
dupe_node.set('words', 'foo,bar')
|
dupe_node.set('words', 'foo, bar')
|
||||||
dupe_node = ET.SubElement(group_node, 'file') #is_ref missing, default to 'n'
|
dupe_node = ET.SubElement(group_node, 'file') #is_ref missing, default to 'n'
|
||||||
dupe_node.set('path',op.join('basepath','foo bleh'))
|
dupe_node.set('path', op.join('basepath', 'foo bleh'))
|
||||||
dupe_node.set('words','foo,bleh')
|
dupe_node.set('words', 'foo, bleh')
|
||||||
dupe_node = ET.SubElement(group_node, 'file') #words are missing, valid.
|
dupe_node = ET.SubElement(group_node, 'file') #words are missing, valid.
|
||||||
dupe_node.set('path',op.join('basepath','bar bleh'))
|
dupe_node.set('path', op.join('basepath', 'bar bleh'))
|
||||||
dupe_node = ET.SubElement(group_node, 'file') #path is missing, invalid.
|
dupe_node = ET.SubElement(group_node, 'file') #path is missing, invalid.
|
||||||
dupe_node.set('words','foo,bleh')
|
dupe_node.set('words', 'foo, bleh')
|
||||||
dupe_node = ET.SubElement(group_node, 'foobar') #Invalid element name
|
dupe_node = ET.SubElement(group_node, 'foobar') #Invalid element name
|
||||||
dupe_node.set('path',op.join('basepath','bar bleh'))
|
dupe_node.set('path', op.join('basepath', 'bar bleh'))
|
||||||
dupe_node.set('is_ref','y')
|
dupe_node.set('is_ref', 'y')
|
||||||
dupe_node.set('words','bar,bleh')
|
dupe_node.set('words', 'bar, bleh')
|
||||||
match_node = ET.SubElement(group_node, 'match') # match pointing to a bad index
|
match_node = ET.SubElement(group_node, 'match') # match pointing to a bad index
|
||||||
match_node.set('first', '42')
|
match_node.set('first', '42')
|
||||||
match_node.set('second', '45')
|
match_node.set('second', '45')
|
||||||
@ -582,21 +580,21 @@ class TestCaseResultsXML:
|
|||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.load_from_xml(f, get_file)
|
r.load_from_xml(f, get_file)
|
||||||
eq_(1,len(r.groups))
|
eq_(1, len(r.groups))
|
||||||
eq_(3,len(r.groups[0]))
|
eq_(3, len(r.groups[0]))
|
||||||
|
|
||||||
def test_xml_non_ascii(self):
|
def test_xml_non_ascii(self):
|
||||||
def get_file(path):
|
def get_file(path):
|
||||||
if path == op.join('basepath','\xe9foo bar'):
|
if path == op.join('basepath', '\xe9foo bar'):
|
||||||
return objects[0]
|
return objects[0]
|
||||||
if path == op.join('basepath','bar bleh'):
|
if path == op.join('basepath', 'bar bleh'):
|
||||||
return objects[1]
|
return objects[1]
|
||||||
|
|
||||||
objects = [NamedObject("\xe9foo bar",True),NamedObject("bar bleh",True)]
|
objects = [NamedObject("\xe9foo bar", True), NamedObject("bar bleh", True)]
|
||||||
matches = engine.getmatches(objects) #we should have 5 matches
|
matches = engine.getmatches(objects) #we should have 5 matches
|
||||||
groups = engine.get_groups(matches) #We should have 2 groups
|
groups = engine.get_groups(matches) #We should have 2 groups
|
||||||
for g in groups:
|
for g in groups:
|
||||||
g.prioritize(lambda x:objects.index(x)) #We want the dupes to be in the same order as the list is
|
g.prioritize(lambda x: objects.index(x)) #We want the dupes to be in the same order as the list is
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
results = Results(app)
|
results = Results(app)
|
||||||
results.groups = groups
|
results.groups = groups
|
||||||
@ -605,10 +603,10 @@ class TestCaseResultsXML:
|
|||||||
f.seek(0)
|
f.seek(0)
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.load_from_xml(f,get_file)
|
r.load_from_xml(f, get_file)
|
||||||
g = r.groups[0]
|
g = r.groups[0]
|
||||||
eq_("\xe9foo bar",g[0].name)
|
eq_("\xe9foo bar", g[0].name)
|
||||||
eq_(['efoo','bar'],g[0].words)
|
eq_(['efoo', 'bar'], g[0].words)
|
||||||
|
|
||||||
def test_load_invalid_xml(self):
|
def test_load_invalid_xml(self):
|
||||||
f = io.BytesIO()
|
f = io.BytesIO()
|
||||||
@ -616,8 +614,8 @@ class TestCaseResultsXML:
|
|||||||
f.seek(0)
|
f.seek(0)
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.load_from_xml(f,None)
|
r.load_from_xml(f, None)
|
||||||
eq_(0,len(r.groups))
|
eq_(0, len(r.groups))
|
||||||
|
|
||||||
def test_load_non_existant_xml(self):
|
def test_load_non_existant_xml(self):
|
||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
@ -626,7 +624,7 @@ class TestCaseResultsXML:
|
|||||||
r.load_from_xml('does_not_exist.xml', None)
|
r.load_from_xml('does_not_exist.xml', None)
|
||||||
except IOError:
|
except IOError:
|
||||||
self.fail()
|
self.fail()
|
||||||
eq_(0,len(r.groups))
|
eq_(0, len(r.groups))
|
||||||
|
|
||||||
def test_remember_match_percentage(self):
|
def test_remember_match_percentage(self):
|
||||||
group = self.groups[0]
|
group = self.groups[0]
|
||||||
@ -742,7 +740,7 @@ class TestCaseResultsFilter:
|
|||||||
def test_sort_groups(self):
|
def test_sort_groups(self):
|
||||||
self.results.apply_filter(None)
|
self.results.apply_filter(None)
|
||||||
self.results.make_ref(self.objects[1]) # to have the 1024 b object as ref
|
self.results.make_ref(self.objects[1]) # to have the 1024 b object as ref
|
||||||
g1,g2 = self.groups
|
g1, g2 = self.groups
|
||||||
self.results.apply_filter('a') # Matches both groups
|
self.results.apply_filter('a') # Matches both groups
|
||||||
self.results.sort_groups('size')
|
self.results.sort_groups('size')
|
||||||
assert self.results.groups[0] is g2
|
assert self.results.groups[0] is g2
|
||||||
@ -772,19 +770,19 @@ class TestCaseResultsFilter:
|
|||||||
app = DupeGuru()
|
app = DupeGuru()
|
||||||
r = Results(app)
|
r = Results(app)
|
||||||
r.apply_filter('foo')
|
r.apply_filter('foo')
|
||||||
r.load_from_xml(filename,get_file)
|
r.load_from_xml(filename, get_file)
|
||||||
eq_(2,len(r.groups))
|
eq_(2, len(r.groups))
|
||||||
|
|
||||||
def test_remove_dupe(self):
|
def test_remove_dupe(self):
|
||||||
self.results.remove_duplicates([self.results.dupes[0]])
|
self.results.remove_duplicates([self.results.dupes[0]])
|
||||||
self.results.apply_filter(None)
|
self.results.apply_filter(None)
|
||||||
eq_(2,len(self.results.groups))
|
eq_(2, len(self.results.groups))
|
||||||
eq_(2,len(self.results.dupes))
|
eq_(2, len(self.results.dupes))
|
||||||
self.results.apply_filter('ibabtu')
|
self.results.apply_filter('ibabtu')
|
||||||
self.results.remove_duplicates([self.results.dupes[0]])
|
self.results.remove_duplicates([self.results.dupes[0]])
|
||||||
self.results.apply_filter(None)
|
self.results.apply_filter(None)
|
||||||
eq_(1,len(self.results.groups))
|
eq_(1, len(self.results.groups))
|
||||||
eq_(1,len(self.results.dupes))
|
eq_(1, len(self.results.dupes))
|
||||||
|
|
||||||
def test_filter_is_case_insensitive(self):
|
def test_filter_is_case_insensitive(self):
|
||||||
self.results.apply_filter(None)
|
self.results.apply_filter(None)
|
||||||
|
@ -88,10 +88,10 @@ def test_priorize(fake_fileexists):
|
|||||||
f[3].is_ref = True
|
f[3].is_ref = True
|
||||||
r = s.get_dupe_groups(f)
|
r = s.get_dupe_groups(f)
|
||||||
g1, g2 = r
|
g1, g2 = r
|
||||||
assert f[1] in (g1.ref,g2.ref)
|
assert f[1] in (g1.ref, g2.ref)
|
||||||
assert f[0] in (g1.dupes[0],g2.dupes[0])
|
assert f[0] in (g1.dupes[0], g2.dupes[0])
|
||||||
assert f[3] in (g1.ref,g2.ref)
|
assert f[3] in (g1.ref, g2.ref)
|
||||||
assert f[2] in (g1.dupes[0],g2.dupes[0])
|
assert f[2] in (g1.dupes[0], g2.dupes[0])
|
||||||
|
|
||||||
def test_content_scan(fake_fileexists):
|
def test_content_scan(fake_fileexists):
|
||||||
s = Scanner()
|
s = Scanner()
|
||||||
@ -135,7 +135,7 @@ def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
|
|||||||
def test_content_scan_doesnt_put_md5_in_words_at_the_end(fake_fileexists):
|
def test_content_scan_doesnt_put_md5_in_words_at_the_end(fake_fileexists):
|
||||||
s = Scanner()
|
s = Scanner()
|
||||||
s.scan_type = ScanType.Contents
|
s.scan_type = ScanType.Contents
|
||||||
f = [no('foo'),no('bar')]
|
f = [no('foo'), no('bar')]
|
||||||
f[0].md5 = f[0].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
|
f[0].md5 = f[0].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
|
||||||
f[1].md5 = f[1].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
|
f[1].md5 = f[1].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
|
||||||
r = s.get_dupe_groups(f)
|
r = s.get_dupe_groups(f)
|
||||||
@ -209,7 +209,7 @@ def test_tag_scan(fake_fileexists):
|
|||||||
o1.title = 'The Air Near My Fingers'
|
o1.title = 'The Air Near My Fingers'
|
||||||
o2.artist = 'The White Stripes'
|
o2.artist = 'The White Stripes'
|
||||||
o2.title = 'The Air Near My Fingers'
|
o2.title = 'The Air Near My Fingers'
|
||||||
r = s.get_dupe_groups([o1,o2])
|
r = s.get_dupe_groups([o1, o2])
|
||||||
eq_(len(r), 1)
|
eq_(len(r), 1)
|
||||||
|
|
||||||
def test_tag_with_album_scan(fake_fileexists):
|
def test_tag_with_album_scan(fake_fileexists):
|
||||||
@ -228,7 +228,7 @@ def test_tag_with_album_scan(fake_fileexists):
|
|||||||
o3.artist = 'The White Stripes'
|
o3.artist = 'The White Stripes'
|
||||||
o3.title = 'The Air Near My Fingers'
|
o3.title = 'The Air Near My Fingers'
|
||||||
o3.album = 'foobar'
|
o3.album = 'foobar'
|
||||||
r = s.get_dupe_groups([o1,o2,o3])
|
r = s.get_dupe_groups([o1, o2, o3])
|
||||||
eq_(len(r), 1)
|
eq_(len(r), 1)
|
||||||
|
|
||||||
def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
|
def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
|
||||||
@ -244,7 +244,7 @@ def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
|
|||||||
o2.artist = 'The White Stripes - b'
|
o2.artist = 'The White Stripes - b'
|
||||||
o2.title = 'The Air Near My Fingers - b'
|
o2.title = 'The Air Near My Fingers - b'
|
||||||
o2.album = 'Elephant - b'
|
o2.album = 'Elephant - b'
|
||||||
r = s.get_dupe_groups([o1,o2])
|
r = s.get_dupe_groups([o1, o2])
|
||||||
eq_(len(r), 1)
|
eq_(len(r), 1)
|
||||||
|
|
||||||
def test_tag_scan_with_different_scanned(fake_fileexists):
|
def test_tag_scan_with_different_scanned(fake_fileexists):
|
||||||
@ -344,9 +344,9 @@ def test_ignore_list(fake_fileexists):
|
|||||||
f2.path = Path('dir2/foobar')
|
f2.path = Path('dir2/foobar')
|
||||||
f3.path = Path('dir3/foobar')
|
f3.path = Path('dir3/foobar')
|
||||||
ignore_list = IgnoreList()
|
ignore_list = IgnoreList()
|
||||||
ignore_list.Ignore(str(f1.path),str(f2.path))
|
ignore_list.Ignore(str(f1.path), str(f2.path))
|
||||||
ignore_list.Ignore(str(f1.path),str(f3.path))
|
ignore_list.Ignore(str(f1.path), str(f3.path))
|
||||||
r = s.get_dupe_groups([f1,f2,f3], ignore_list=ignore_list)
|
r = s.get_dupe_groups([f1, f2, f3], ignore_list=ignore_list)
|
||||||
eq_(len(r), 1)
|
eq_(len(r), 1)
|
||||||
g = r[0]
|
g = r[0]
|
||||||
eq_(len(g.dupes), 1)
|
eq_(len(g.dupes), 1)
|
||||||
@ -367,9 +367,9 @@ def test_ignore_list_checks_for_unicode(fake_fileexists):
|
|||||||
f2.path = Path('foo2\u00e9')
|
f2.path = Path('foo2\u00e9')
|
||||||
f3.path = Path('foo3\u00e9')
|
f3.path = Path('foo3\u00e9')
|
||||||
ignore_list = IgnoreList()
|
ignore_list = IgnoreList()
|
||||||
ignore_list.Ignore(str(f1.path),str(f2.path))
|
ignore_list.Ignore(str(f1.path), str(f2.path))
|
||||||
ignore_list.Ignore(str(f1.path),str(f3.path))
|
ignore_list.Ignore(str(f1.path), str(f3.path))
|
||||||
r = s.get_dupe_groups([f1,f2,f3], ignore_list=ignore_list)
|
r = s.get_dupe_groups([f1, f2, f3], ignore_list=ignore_list)
|
||||||
eq_(len(r), 1)
|
eq_(len(r), 1)
|
||||||
g = r[0]
|
g = r[0]
|
||||||
eq_(len(g.dupes), 1)
|
eq_(len(g.dupes), 1)
|
||||||
@ -398,7 +398,7 @@ def test_size_threshold(fake_fileexists):
|
|||||||
f2 = no('foo', 2, path='p2')
|
f2 = no('foo', 2, path='p2')
|
||||||
f3 = no('foo', 3, path='p3')
|
f3 = no('foo', 3, path='p3')
|
||||||
s.size_threshold = 2
|
s.size_threshold = 2
|
||||||
groups = s.get_dupe_groups([f1,f2,f3])
|
groups = s.get_dupe_groups([f1, f2, f3])
|
||||||
eq_(len(groups), 1)
|
eq_(len(groups), 1)
|
||||||
[group] = groups
|
[group] = groups
|
||||||
eq_(len(group), 2)
|
eq_(len(group), 2)
|
||||||
@ -471,9 +471,11 @@ def test_dont_group_files_that_dont_exist(tmpdir):
|
|||||||
p['file1'].open('w').write('foo')
|
p['file1'].open('w').write('foo')
|
||||||
p['file2'].open('w').write('foo')
|
p['file2'].open('w').write('foo')
|
||||||
file1, file2 = fs.get_files(p)
|
file1, file2 = fs.get_files(p)
|
||||||
|
|
||||||
def getmatches(*args, **kw):
|
def getmatches(*args, **kw):
|
||||||
file2.path.remove()
|
file2.path.remove()
|
||||||
return [Match(file1, file2, 100)]
|
return [Match(file1, file2, 100)]
|
||||||
|
|
||||||
s._getmatches = getmatches
|
s._getmatches = getmatches
|
||||||
|
|
||||||
assert not s.get_dupe_groups([file1, file2])
|
assert not s.get_dupe_groups([file1, file2])
|
||||||
|
@ -1,7 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
|
||||||
# Created By: Virgil Dupras
|
|
||||||
# Created On: 2009-10-23
|
|
||||||
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
|
|
||||||
#
|
#
|
||||||
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
||||||
# which should be included with this package. The terms are also available at
|
# which should be included with this package. The terms are also available at
|
||||||
@ -9,9 +6,8 @@
|
|||||||
|
|
||||||
from hscommon.path import Path
|
from hscommon.path import Path
|
||||||
|
|
||||||
from core.engine import getwords
|
from core.tests.scanner_test import no
|
||||||
from core.tests.scanner_test import NamedObject, no
|
from ..scanner import ScannerME
|
||||||
from ..scanner import *
|
|
||||||
|
|
||||||
def pytest_funcarg__fake_fileexists(request):
|
def pytest_funcarg__fake_fileexists(request):
|
||||||
# This is a hack to avoid invalidating all previous tests since the scanner started to test
|
# This is a hack to avoid invalidating all previous tests since the scanner started to test
|
||||||
|
@ -1,6 +1,4 @@
|
|||||||
# Created By: Virgil Dupras
|
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
|
||||||
# Created On: 2006/09/01
|
|
||||||
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
|
|
||||||
#
|
#
|
||||||
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
|
||||||
# which should be included with this package. The terms are also available at
|
# which should be included with this package. The terms are also available at
|
||||||
@ -11,17 +9,17 @@ from pytest import raises, skip
|
|||||||
from hscommon.testutil import eq_
|
from hscommon.testutil import eq_
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from ..block import *
|
from ..block import avgdiff, getblocks2, NoBlocksError, DifferentBlockCountError
|
||||||
except ImportError:
|
except ImportError:
|
||||||
skip("Can't import the block module, probably hasn't been compiled.")
|
skip("Can't import the block module, probably hasn't been compiled.")
|
||||||
|
|
||||||
def my_avgdiff(first, second, limit=768, min_iter=3): # this is so I don't have to re-write every call
|
def my_avgdiff(first, second, limit=768, min_iter=3): # this is so I don't have to re-write every call
|
||||||
return avgdiff(first, second, limit, min_iter)
|
return avgdiff(first, second, limit, min_iter)
|
||||||
|
|
||||||
BLACK = (0,0,0)
|
BLACK = (0, 0, 0)
|
||||||
RED = (0xff,0,0)
|
RED = (0xff, 0, 0)
|
||||||
GREEN = (0,0xff,0)
|
GREEN = (0, 0xff, 0)
|
||||||
BLUE = (0,0,0xff)
|
BLUE = (0, 0, 0xff)
|
||||||
|
|
||||||
class FakeImage:
|
class FakeImage:
|
||||||
def __init__(self, size, data):
|
def __init__(self, size, data):
|
||||||
@ -40,20 +38,20 @@ class FakeImage:
|
|||||||
return FakeImage((box[2] - box[0], box[3] - box[1]), pixels)
|
return FakeImage((box[2] - box[0], box[3] - box[1]), pixels)
|
||||||
|
|
||||||
def empty():
|
def empty():
|
||||||
return FakeImage((0,0), [])
|
return FakeImage((0, 0), [])
|
||||||
|
|
||||||
def single_pixel(): #one red pixel
|
def single_pixel(): #one red pixel
|
||||||
return FakeImage((1, 1), [(0xff,0,0)])
|
return FakeImage((1, 1), [(0xff, 0, 0)])
|
||||||
|
|
||||||
def four_pixels():
|
def four_pixels():
|
||||||
pixels = [RED,(0,0x80,0xff),(0x80,0,0),(0,0x40,0x80)]
|
pixels = [RED, (0, 0x80, 0xff), (0x80, 0, 0), (0, 0x40, 0x80)]
|
||||||
return FakeImage((2, 2), pixels)
|
return FakeImage((2, 2), pixels)
|
||||||
|
|
||||||
class TestCasegetblock:
|
class TestCasegetblock:
|
||||||
def test_single_pixel(self):
|
def test_single_pixel(self):
|
||||||
im = single_pixel()
|
im = single_pixel()
|
||||||
[b] = getblocks2(im, 1)
|
[b] = getblocks2(im, 1)
|
||||||
eq_(RED,b)
|
eq_(RED, b)
|
||||||
|
|
||||||
def test_no_pixel(self):
|
def test_no_pixel(self):
|
||||||
im = empty()
|
im = empty()
|
||||||
@ -65,143 +63,143 @@ class TestCasegetblock:
|
|||||||
meanred = (0xff + 0x80) // 4
|
meanred = (0xff + 0x80) // 4
|
||||||
meangreen = (0x80 + 0x40) // 4
|
meangreen = (0x80 + 0x40) // 4
|
||||||
meanblue = (0xff + 0x80) // 4
|
meanblue = (0xff + 0x80) // 4
|
||||||
eq_((meanred,meangreen,meanblue),b)
|
eq_((meanred, meangreen, meanblue), b)
|
||||||
|
|
||||||
|
|
||||||
# class TCdiff(unittest.TestCase):
|
# class TCdiff(unittest.TestCase):
|
||||||
# def test_diff(self):
|
# def test_diff(self):
|
||||||
# b1 = (10, 20, 30)
|
# b1 = (10, 20, 30)
|
||||||
# b2 = (1, 2, 3)
|
# b2 = (1, 2, 3)
|
||||||
# eq_(9 + 18 + 27,diff(b1,b2))
|
# eq_(9 + 18 + 27, diff(b1, b2))
|
||||||
#
|
#
|
||||||
# def test_diff_negative(self):
|
# def test_diff_negative(self):
|
||||||
# b1 = (10, 20, 30)
|
# b1 = (10, 20, 30)
|
||||||
# b2 = (1, 2, 3)
|
# b2 = (1, 2, 3)
|
||||||
# eq_(9 + 18 + 27,diff(b2,b1))
|
# eq_(9 + 18 + 27, diff(b2, b1))
|
||||||
#
|
#
|
||||||
# def test_diff_mixed_positive_and_negative(self):
|
# def test_diff_mixed_positive_and_negative(self):
|
||||||
# b1 = (1, 5, 10)
|
# b1 = (1, 5, 10)
|
||||||
# b2 = (10, 1, 15)
|
# b2 = (10, 1, 15)
|
||||||
# eq_(9 + 4 + 5,diff(b1,b2))
|
# eq_(9 + 4 + 5, diff(b1, b2))
|
||||||
#
|
#
|
||||||
|
|
||||||
# class TCgetblocks(unittest.TestCase):
|
# class TCgetblocks(unittest.TestCase):
|
||||||
# def test_empty_image(self):
|
# def test_empty_image(self):
|
||||||
# im = empty()
|
# im = empty()
|
||||||
# blocks = getblocks(im,1)
|
# blocks = getblocks(im, 1)
|
||||||
# eq_(0,len(blocks))
|
# eq_(0, len(blocks))
|
||||||
#
|
#
|
||||||
# def test_one_block_image(self):
|
# def test_one_block_image(self):
|
||||||
# im = four_pixels()
|
# im = four_pixels()
|
||||||
# blocks = getblocks2(im, 1)
|
# blocks = getblocks2(im, 1)
|
||||||
# eq_(1,len(blocks))
|
# eq_(1, len(blocks))
|
||||||
# block = blocks[0]
|
# block = blocks[0]
|
||||||
# meanred = (0xff + 0x80) // 4
|
# meanred = (0xff + 0x80) // 4
|
||||||
# meangreen = (0x80 + 0x40) // 4
|
# meangreen = (0x80 + 0x40) // 4
|
||||||
# meanblue = (0xff + 0x80) // 4
|
# meanblue = (0xff + 0x80) // 4
|
||||||
# eq_((meanred,meangreen,meanblue),block)
|
# eq_((meanred, meangreen, meanblue), block)
|
||||||
#
|
#
|
||||||
# def test_not_enough_height_to_fit_a_block(self):
|
# def test_not_enough_height_to_fit_a_block(self):
|
||||||
# im = FakeImage((2,1), [BLACK, BLACK])
|
# im = FakeImage((2, 1), [BLACK, BLACK])
|
||||||
# blocks = getblocks(im,2)
|
# blocks = getblocks(im, 2)
|
||||||
# eq_(0,len(blocks))
|
# eq_(0, len(blocks))
|
||||||
#
|
#
|
||||||
# def xtest_dont_include_leftovers(self):
|
# def xtest_dont_include_leftovers(self):
|
||||||
# # this test is disabled because getblocks is not used and getblock is cdeffed
|
# # this test is disabled because getblocks is not used and getblock is cdeffed
|
||||||
# pixels = [
|
# pixels = [
|
||||||
# RED,(0,0x80,0xff),BLACK,
|
# RED, (0, 0x80, 0xff), BLACK,
|
||||||
# (0x80,0,0),(0,0x40,0x80),BLACK,
|
# (0x80, 0, 0), (0, 0x40, 0x80), BLACK,
|
||||||
# BLACK,BLACK,BLACK
|
# BLACK, BLACK, BLACK
|
||||||
# ]
|
# ]
|
||||||
# im = FakeImage((3,3), pixels)
|
# im = FakeImage((3, 3), pixels)
|
||||||
# blocks = getblocks(im,2)
|
# blocks = getblocks(im, 2)
|
||||||
# block = blocks[0]
|
# block = blocks[0]
|
||||||
# #Because the block is smaller than the image, only blocksize must be considered.
|
# #Because the block is smaller than the image, only blocksize must be considered.
|
||||||
# meanred = (0xff + 0x80) // 4
|
# meanred = (0xff + 0x80) // 4
|
||||||
# meangreen = (0x80 + 0x40) // 4
|
# meangreen = (0x80 + 0x40) // 4
|
||||||
# meanblue = (0xff + 0x80) // 4
|
# meanblue = (0xff + 0x80) // 4
|
||||||
# eq_((meanred,meangreen,meanblue),block)
|
# eq_((meanred, meangreen, meanblue), block)
|
||||||
#
|
#
|
||||||
# def xtest_two_blocks(self):
|
# def xtest_two_blocks(self):
|
||||||
# # this test is disabled because getblocks is not used and getblock is cdeffed
|
# # this test is disabled because getblocks is not used and getblock is cdeffed
|
||||||
# pixels = [BLACK for i in xrange(4 * 2)]
|
# pixels = [BLACK for i in xrange(4 * 2)]
|
||||||
# pixels[0] = RED
|
# pixels[0] = RED
|
||||||
# pixels[1] = (0,0x80,0xff)
|
# pixels[1] = (0, 0x80, 0xff)
|
||||||
# pixels[4] = (0x80,0,0)
|
# pixels[4] = (0x80, 0, 0)
|
||||||
# pixels[5] = (0,0x40,0x80)
|
# pixels[5] = (0, 0x40, 0x80)
|
||||||
# im = FakeImage((4, 2), pixels)
|
# im = FakeImage((4, 2), pixels)
|
||||||
# blocks = getblocks(im,2)
|
# blocks = getblocks(im, 2)
|
||||||
# eq_(2,len(blocks))
|
# eq_(2, len(blocks))
|
||||||
# block = blocks[0]
|
# block = blocks[0]
|
||||||
# #Because the block is smaller than the image, only blocksize must be considered.
|
# #Because the block is smaller than the image, only blocksize must be considered.
|
||||||
# meanred = (0xff + 0x80) // 4
|
# meanred = (0xff + 0x80) // 4
|
||||||
# meangreen = (0x80 + 0x40) // 4
|
# meangreen = (0x80 + 0x40) // 4
|
||||||
# meanblue = (0xff + 0x80) // 4
|
# meanblue = (0xff + 0x80) // 4
|
||||||
# eq_((meanred,meangreen,meanblue),block)
|
# eq_((meanred, meangreen, meanblue), block)
|
||||||
# eq_(BLACK,blocks[1])
|
# eq_(BLACK, blocks[1])
|
||||||
#
|
#
|
||||||
# def test_four_blocks(self):
|
# def test_four_blocks(self):
|
||||||
# pixels = [BLACK for i in xrange(4 * 4)]
|
# pixels = [BLACK for i in xrange(4 * 4)]
|
||||||
# pixels[0] = RED
|
# pixels[0] = RED
|
||||||
# pixels[1] = (0,0x80,0xff)
|
# pixels[1] = (0, 0x80, 0xff)
|
||||||
# pixels[4] = (0x80,0,0)
|
# pixels[4] = (0x80, 0, 0)
|
||||||
# pixels[5] = (0,0x40,0x80)
|
# pixels[5] = (0, 0x40, 0x80)
|
||||||
# im = FakeImage((4, 4), pixels)
|
# im = FakeImage((4, 4), pixels)
|
||||||
# blocks = getblocks2(im, 2)
|
# blocks = getblocks2(im, 2)
|
||||||
# eq_(4,len(blocks))
|
# eq_(4, len(blocks))
|
||||||
# block = blocks[0]
|
# block = blocks[0]
|
||||||
# #Because the block is smaller than the image, only blocksize must be considered.
|
# #Because the block is smaller than the image, only blocksize must be considered.
|
||||||
# meanred = (0xff + 0x80) // 4
|
# meanred = (0xff + 0x80) // 4
|
||||||
# meangreen = (0x80 + 0x40) // 4
|
# meangreen = (0x80 + 0x40) // 4
|
||||||
# meanblue = (0xff + 0x80) // 4
|
# meanblue = (0xff + 0x80) // 4
|
||||||
# eq_((meanred,meangreen,meanblue),block)
|
# eq_((meanred, meangreen, meanblue), block)
|
||||||
# eq_(BLACK,blocks[1])
|
# eq_(BLACK, blocks[1])
|
||||||
# eq_(BLACK,blocks[2])
|
# eq_(BLACK, blocks[2])
|
||||||
# eq_(BLACK,blocks[3])
|
# eq_(BLACK, blocks[3])
|
||||||
#
|
#
|
||||||
|
|
||||||
class TestCasegetblocks2:
|
class TestCasegetblocks2:
|
||||||
def test_empty_image(self):
|
def test_empty_image(self):
|
||||||
im = empty()
|
im = empty()
|
||||||
blocks = getblocks2(im,1)
|
blocks = getblocks2(im, 1)
|
||||||
eq_(0,len(blocks))
|
eq_(0, len(blocks))
|
||||||
|
|
||||||
def test_one_block_image(self):
|
def test_one_block_image(self):
|
||||||
im = four_pixels()
|
im = four_pixels()
|
||||||
blocks = getblocks2(im,1)
|
blocks = getblocks2(im, 1)
|
||||||
eq_(1,len(blocks))
|
eq_(1, len(blocks))
|
||||||
block = blocks[0]
|
block = blocks[0]
|
||||||
meanred = (0xff + 0x80) // 4
|
meanred = (0xff + 0x80) // 4
|
||||||
meangreen = (0x80 + 0x40) // 4
|
meangreen = (0x80 + 0x40) // 4
|
||||||
meanblue = (0xff + 0x80) // 4
|
meanblue = (0xff + 0x80) // 4
|
||||||
eq_((meanred,meangreen,meanblue),block)
|
eq_((meanred, meangreen, meanblue), block)
|
||||||
|
|
||||||
def test_four_blocks_all_black(self):
|
def test_four_blocks_all_black(self):
|
||||||
im = FakeImage((2, 2), [BLACK, BLACK, BLACK, BLACK])
|
im = FakeImage((2, 2), [BLACK, BLACK, BLACK, BLACK])
|
||||||
blocks = getblocks2(im,2)
|
blocks = getblocks2(im, 2)
|
||||||
eq_(4,len(blocks))
|
eq_(4, len(blocks))
|
||||||
for block in blocks:
|
for block in blocks:
|
||||||
eq_(BLACK,block)
|
eq_(BLACK, block)
|
||||||
|
|
||||||
def test_two_pixels_image_horizontal(self):
|
def test_two_pixels_image_horizontal(self):
|
||||||
pixels = [RED,BLUE]
|
pixels = [RED, BLUE]
|
||||||
im = FakeImage((2, 1), pixels)
|
im = FakeImage((2, 1), pixels)
|
||||||
blocks = getblocks2(im,2)
|
blocks = getblocks2(im, 2)
|
||||||
eq_(4,len(blocks))
|
eq_(4, len(blocks))
|
||||||
eq_(RED,blocks[0])
|
eq_(RED, blocks[0])
|
||||||
eq_(BLUE,blocks[1])
|
eq_(BLUE, blocks[1])
|
||||||
eq_(RED,blocks[2])
|
eq_(RED, blocks[2])
|
||||||
eq_(BLUE,blocks[3])
|
eq_(BLUE, blocks[3])
|
||||||
|
|
||||||
def test_two_pixels_image_vertical(self):
|
def test_two_pixels_image_vertical(self):
|
||||||
pixels = [RED,BLUE]
|
pixels = [RED, BLUE]
|
||||||
im = FakeImage((1, 2), pixels)
|
im = FakeImage((1, 2), pixels)
|
||||||
blocks = getblocks2(im,2)
|
blocks = getblocks2(im, 2)
|
||||||
eq_(4,len(blocks))
|
eq_(4, len(blocks))
|
||||||
eq_(RED,blocks[0])
|
eq_(RED, blocks[0])
|
||||||
eq_(RED,blocks[1])
|
eq_(RED, blocks[1])
|
||||||
eq_(BLUE,blocks[2])
|
eq_(BLUE, blocks[2])
|
||||||
eq_(BLUE,blocks[3])
|
eq_(BLUE, blocks[3])
|
||||||
|
|
||||||
|
|
||||||
class TestCaseavgdiff:
|
class TestCaseavgdiff:
|
||||||
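The TestCasegetblocks2 expectations above pin down what getblocks2(image, part_dim) is supposed to return: the image is cut into a part_dim x part_dim grid, each cell is reduced to its average color, and cells are clamped to at least one source pixel, which is why a 2x1 image queried with part_dim=2 still yields four blocks. The real implementation lives in a C extension (the "cdeffed" comments above), so what follows is only an editorial sketch that satisfies those tests; MiniImage and average_color are stand-ins invented here, not the suite's FakeImage helpers.

class MiniImage:
    # Stand-in for the test suite's FakeImage: size is (width, height) and
    # pixels is a row-major list of (r, g, b) tuples.
    def __init__(self, size, pixels):
        self.size = size
        self.pixels = pixels

    def getpixel(self, x, y):
        width, _ = self.size
        return self.pixels[y * width + x]


def average_color(image, box):
    # Floor-divided mean color of the pixels inside box = (left, top, right, bottom).
    left, top, right, bottom = box
    pixels = [image.getpixel(x, y) for y in range(top, bottom) for x in range(left, right)]
    return tuple(sum(channel) // len(pixels) for channel in zip(*pixels))


def getblocks2_sketch(image, part_dim):
    # Cut the image into a part_dim x part_dim grid and average each cell.
    # Cells are clamped to at least one pixel, so an image smaller than the
    # grid reuses pixels instead of producing empty blocks.
    width, height = image.size
    if width == 0 or height == 0:
        return []
    blocks = []
    for row in range(part_dim):
        for col in range(part_dim):
            left = min(col * width // part_dim, width - 1)
            top = min(row * height // part_dim, height - 1)
            right = max((col + 1) * width // part_dim, left + 1)
            bottom = max((row + 1) * height // part_dim, top + 1)
            blocks.append(average_color(image, (left, top, right, bottom)))
    return blocks

Against the horizontal 2x1 case above this sketch returns RED, BLUE, RED, BLUE, and part_dim=1 on a 2x2 image collapses to the single floor-divided mean that the one-block test expects.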
@@ -210,106 +208,105 @@ class TestCaseavgdiff:
         my_avgdiff([], [])

     def test_two_blocks(self):
-        im = empty()
-        b1 = (5,10,15)
-        b2 = (255,250,245)
-        b3 = (0,0,0)
-        b4 = (255,0,255)
-        blocks1 = [b1,b2]
-        blocks2 = [b3,b4]
+        b1 = (5, 10, 15)
+        b2 = (255, 250, 245)
+        b3 = (0, 0, 0)
+        b4 = (255, 0, 255)
+        blocks1 = [b1, b2]
+        blocks2 = [b3, b4]
         expected1 = 5 + 10 + 15
         expected2 = 0 + 250 + 10
         expected = (expected1 + expected2) // 2
         eq_(expected, my_avgdiff(blocks1, blocks2))

     def test_blocks_not_the_same_size(self):
-        b = (0,0,0)
+        b = (0, 0, 0)
         with raises(DifferentBlockCountError):
-            my_avgdiff([b,b],[b])
+            my_avgdiff([b, b], [b])

     def test_first_arg_is_empty_but_not_second(self):
         #Don't return 0 (as when the 2 lists are empty), raise!
-        b = (0,0,0)
+        b = (0, 0, 0)
         with raises(DifferentBlockCountError):
-            my_avgdiff([],[b])
+            my_avgdiff([], [b])

     def test_limit(self):
-        ref = (0,0,0)
+        ref = (0, 0, 0)
-        b1 = (10,10,10) #avg 30
+        b1 = (10, 10, 10) #avg 30
-        b2 = (20,20,20) #avg 45
+        b2 = (20, 20, 20) #avg 45
-        b3 = (30,30,30) #avg 60
+        b3 = (30, 30, 30) #avg 60
-        blocks1 = [ref,ref,ref]
+        blocks1 = [ref, ref, ref]
-        blocks2 = [b1,b2,b3]
+        blocks2 = [b1, b2, b3]
-        eq_(45,my_avgdiff(blocks1,blocks2,44))
+        eq_(45, my_avgdiff(blocks1, blocks2, 44))

     def test_min_iterations(self):
-        ref = (0,0,0)
+        ref = (0, 0, 0)
-        b1 = (10,10,10) #avg 30
+        b1 = (10, 10, 10) #avg 30
-        b2 = (20,20,20) #avg 45
+        b2 = (20, 20, 20) #avg 45
-        b3 = (10,10,10) #avg 40
+        b3 = (10, 10, 10) #avg 40
-        blocks1 = [ref,ref,ref]
+        blocks1 = [ref, ref, ref]
-        blocks2 = [b1,b2,b3]
+        blocks2 = [b1, b2, b3]
-        eq_(40,my_avgdiff(blocks1,blocks2,45 - 1,3))
+        eq_(40, my_avgdiff(blocks1, blocks2, 45 - 1, 3))

 # Bah, I don't know why this test fails, but I don't think it matters very much
 # def test_just_over_the_limit(self):
 # #A score just over the limit might return exactly the limit due to truncating. We should
 # #ceil() the result in this case.
-# ref = (0,0,0)
+# ref = (0, 0, 0)
-# b1 = (10,0,0)
+# b1 = (10, 0, 0)
-# b2 = (11,0,0)
+# b2 = (11, 0, 0)
-# blocks1 = [ref,ref]
+# blocks1 = [ref, ref]
-# blocks2 = [b1,b2]
+# blocks2 = [b1, b2]
-# eq_(11,my_avgdiff(blocks1,blocks2,10))
+# eq_(11, my_avgdiff(blocks1, blocks2, 10))
 #
     def test_return_at_least_1_at_the_slightest_difference(self):
-        ref = (0,0,0)
+        ref = (0, 0, 0)
-        b1 = (1,0,0)
+        b1 = (1, 0, 0)
         blocks1 = [ref for i in range(250)]
         blocks2 = [ref for i in range(250)]
         blocks2[0] = b1
-        eq_(1,my_avgdiff(blocks1,blocks2))
+        eq_(1, my_avgdiff(blocks1, blocks2))

     def test_return_0_if_there_is_no_difference(self):
-        ref = (0,0,0)
+        ref = (0, 0, 0)
-        blocks1 = [ref,ref]
+        blocks1 = [ref, ref]
-        blocks2 = [ref,ref]
+        blocks2 = [ref, ref]
-        eq_(0,my_avgdiff(blocks1,blocks2))
+        eq_(0, my_avgdiff(blocks1, blocks2))

 # class TCmaxdiff(unittest.TestCase):
 # def test_empty(self):
-# self.assertRaises(NoBlocksError,maxdiff,[],[])
+# self.assertRaises(NoBlocksError, maxdiff,[],[])
 #
 # def test_two_blocks(self):
-# b1 = (5,10,15)
+# b1 = (5, 10, 15)
-# b2 = (255,250,245)
+# b2 = (255, 250, 245)
-# b3 = (0,0,0)
+# b3 = (0, 0, 0)
-# b4 = (255,0,255)
+# b4 = (255, 0, 255)
-# blocks1 = [b1,b2]
+# blocks1 = [b1, b2]
-# blocks2 = [b3,b4]
+# blocks2 = [b3, b4]
 # expected1 = 5 + 10 + 15
 # expected2 = 0 + 250 + 10
-# expected = max(expected1,expected2)
+# expected = max(expected1, expected2)
-# eq_(expected,maxdiff(blocks1,blocks2))
+# eq_(expected, maxdiff(blocks1, blocks2))
 #
 # def test_blocks_not_the_same_size(self):
-# b = (0,0,0)
+# b = (0, 0, 0)
-# self.assertRaises(DifferentBlockCountError,maxdiff,[b,b],[b])
+# self.assertRaises(DifferentBlockCountError, maxdiff,[b, b],[b])
 #
 # def test_first_arg_is_empty_but_not_second(self):
 # #Don't return 0 (as when the 2 lists are empty), raise!
-# b = (0,0,0)
+# b = (0, 0, 0)
-# self.assertRaises(DifferentBlockCountError,maxdiff,[],[b])
+# self.assertRaises(DifferentBlockCountError, maxdiff,[],[b])
 #
 # def test_limit(self):
-# b1 = (5,10,15)
+# b1 = (5, 10, 15)
-# b2 = (255,250,245)
+# b2 = (255, 250, 245)
-# b3 = (0,0,0)
+# b3 = (0, 0, 0)
-# b4 = (255,0,255)
+# b4 = (255, 0, 255)
-# blocks1 = [b1,b2]
+# blocks1 = [b1, b2]
-# blocks2 = [b3,b4]
+# blocks2 = [b3, b4]
 # expected1 = 5 + 10 + 15
 # expected2 = 0 + 250 + 10
-# eq_(expected1,maxdiff(blocks1,blocks2,expected1 - 1))
+# eq_(expected1, maxdiff(blocks1, blocks2, expected1 - 1))
 #
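The TestCaseavgdiff expectations above fully determine the score my_avgdiff computes: for each pair of blocks it sums the absolute per-channel differences, keeps a running average, returns that running average early once it exceeds the limit argument (but only after min_iterations pairs), reports at least 1 whenever any block differs, and raises DifferentBlockCountError for mismatched lengths. Below is a sketch consistent with those tests; the default limit and min_iterations values, the locally defined exception classes, and the assumption that my_avgdiff([], []) raises NoBlocksError (suggested by the truncated first test and the commented-out TCmaxdiff.test_empty) are guesses, not dupeGuru's actual signature.

class DifferentBlockCountError(Exception):
    # Stand-in for the exception the tests import.
    pass


class NoBlocksError(Exception):
    # Stand-in; assumed to be raised when both block lists are empty.
    pass


def my_avgdiff_sketch(first, second, limit=768, min_iterations=1):
    # Average, over paired blocks, of the sum of absolute channel differences.
    if len(first) != len(second):
        raise DifferentBlockCountError()
    if not first:
        raise NoBlocksError()
    total = 0
    for i, (b1, b2) in enumerate(zip(first, second), start=1):
        total += sum(abs(c1 - c2) for c1, c2 in zip(b1, b2))
        running_avg = total // i
        # Early exit: once the running average is past `limit` the images
        # already differ too much, but only bail after `min_iterations` pairs.
        if i >= min_iterations and running_avg > limit:
            return running_avg
    result = total // len(first)
    if result == 0 and total > 0:
        result = 1  # never report "identical" when any block differed
    return result

test_limit and test_min_iterations then read directly off the running averages noted in their #avg comments: 45 is the average after two pairs, 40 after three.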

@@ -1,6 +1,4 @@
-# Created By: Virgil Dupras
-# Created On: 2006/09/14
-# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
+# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
 #
 # This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
 # which should be included with this package. The terms are also available at
@@ -18,46 +16,46 @@ except ImportError:

 class TestCasecolors_to_string:
     def test_no_color(self):
-        eq_('',colors_to_string([]))
+        eq_('', colors_to_string([]))

     def test_single_color(self):
-        eq_('000000',colors_to_string([(0,0,0)]))
+        eq_('000000', colors_to_string([(0, 0, 0)]))
-        eq_('010101',colors_to_string([(1,1,1)]))
+        eq_('010101', colors_to_string([(1, 1, 1)]))
-        eq_('0a141e',colors_to_string([(10,20,30)]))
+        eq_('0a141e', colors_to_string([(10, 20, 30)]))

     def test_two_colors(self):
-        eq_('000102030405',colors_to_string([(0,1,2),(3,4,5)]))
+        eq_('000102030405', colors_to_string([(0, 1, 2), (3, 4, 5)]))


 class TestCasestring_to_colors:
     def test_empty(self):
-        eq_([],string_to_colors(''))
+        eq_([], string_to_colors(''))

     def test_single_color(self):
-        eq_([(0,0,0)],string_to_colors('000000'))
+        eq_([(0, 0, 0)], string_to_colors('000000'))
-        eq_([(2,3,4)],string_to_colors('020304'))
+        eq_([(2, 3, 4)], string_to_colors('020304'))
-        eq_([(10,20,30)],string_to_colors('0a141e'))
+        eq_([(10, 20, 30)], string_to_colors('0a141e'))

     def test_two_colors(self):
-        eq_([(10,20,30),(40,50,60)],string_to_colors('0a141e28323c'))
+        eq_([(10, 20, 30), (40, 50, 60)], string_to_colors('0a141e28323c'))

     def test_incomplete_color(self):
         # don't return anything if it's not a complete color
-        eq_([],string_to_colors('102'))
+        eq_([], string_to_colors('102'))


 class TestCaseCache:
     def test_empty(self):
         c = Cache()
-        eq_(0,len(c))
+        eq_(0, len(c))
         with raises(KeyError):
             c['foo']

     def test_set_then_retrieve_blocks(self):
         c = Cache()
-        b = [(0,0,0),(1,2,3)]
+        b = [(0, 0, 0), (1, 2, 3)]
         c['foo'] = b
-        eq_(b,c['foo'])
+        eq_(b, c['foo'])

     def test_delitem(self):
         c = Cache()
@@ -70,18 +68,18 @@ class TestCaseCache:
     def test_persistance(self, tmpdir):
         DBNAME = tmpdir.join('hstest.db')
         c = Cache(str(DBNAME))
-        c['foo'] = [(1,2,3)]
+        c['foo'] = [(1, 2, 3)]
         del c
         c = Cache(str(DBNAME))
-        eq_([(1,2,3)],c['foo'])
+        eq_([(1, 2, 3)], c['foo'])

     def test_filter(self):
         c = Cache()
         c['foo'] = ''
         c['bar'] = ''
         c['baz'] = ''
-        c.filter(lambda p:p != 'bar') #only 'bar' is removed
+        c.filter(lambda p: p != 'bar') #only 'bar' is removed
-        eq_(2,len(c))
+        eq_(2, len(c))
         assert 'foo' in c
         assert 'baz' in c
         assert 'bar' not in c
@@ -92,7 +90,7 @@ class TestCaseCache:
         c['bar'] = ''
         c['baz'] = ''
         c.clear()
-        eq_(0,len(c))
+        eq_(0, len(c))
         assert 'foo' not in c
         assert 'baz' not in c
         assert 'bar' not in c
@@ -115,7 +113,7 @@ class TestCaseCache:
     def test_by_id(self):
         # it's possible to use the cache by referring to the files by their row_id
         c = Cache()
-        b = [(0,0,0),(1,2,3)]
+        b = [(0, 0, 0), (1, 2, 3)]
         c['foo'] = b
         foo_id = c.get_id('foo')
         eq_(c[foo_id], b)
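The cache tests above treat each color as two hex digits per channel: colors_to_string concatenates (r, g, b) tuples into six-digit groups and string_to_colors reverses it, dropping any trailing fragment shorter than a full color, which is presumably the serialized form the Cache keeps for each file path. A minimal round-trip sketch consistent with those expectations, not dupeGuru's actual module, looks like this:

def colors_to_string_sketch(colors):
    # [(10, 20, 30), (40, 50, 60)] -> '0a141e28323c'
    return ''.join('%02x%02x%02x' % (r, g, b) for r, g, b in colors)


def string_to_colors_sketch(s):
    # '0a141e28323c' -> [(10, 20, 30), (40, 50, 60)]; an incomplete trailing
    # color such as '102' is dropped, matching test_incomplete_color.
    colors = []
    for i in range(0, len(s) - 5, 6):
        colors.append((int(s[i:i + 2], 16), int(s[i + 2:i + 4], 16), int(s[i + 4:i + 6], 16)))
    return colors

Both directions reproduce the '000102030405' and '0a141e28323c' fixtures used in TestCasecolors_to_string and TestCasestring_to_colors.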

tox.ini (2 changes)
@@ -11,7 +11,7 @@ deps =
     -r{toxinidir}/requirements-extra.txt

 [flake8]
-exclude = .tox,env,build,hscommon,qtlib,cocoalib,cocoa,help,./get-pip.py,./qt/dg_rc.py,./core*/tests,qt/run_template.py,cocoa/run_template.py,./run.py,./pkg
+exclude = .tox,env,build,hscommon,qtlib,cocoalib,cocoa,help,./qt/dg_rc.py,qt/run_template.py,cocoa/run_template.py,./run.py,./pkg
 max-line-length = 120
 ignore = W391,W293,E302,E261,E226,E227,W291,E262,E303,E265,E731
