Apply flake8 checks to tests

Virgil Dupras 2016-05-29 15:02:39 -04:00
parent 9ed4b7abf0
commit 130581db53
13 changed files with 798 additions and 809 deletions
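
Most hunks below are the mechanical fixes flake8 reports as E231 (missing whitespace after a comma), F401 (unused import), F403 (star import), F841 (unused local) and E501 (line too long). As a sketch, the checks can be reproduced with flake8's standard CLI; the package path and line-length value here are assumptions, not taken from this commit:

    import subprocess

    # Run flake8 over the test package the way a CI step might
    # (hypothetical path and limit; adjust to the real repo layout).
    subprocess.run(["flake8", "--max-line-length=100", "core/tests"], check=True)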


@@ -160,7 +160,7 @@ class TestCaseDupeGuruWithResults:
def pytest_funcarg__do_setup(self, request):
app = TestApp()
self.app = app.app
self.objects,self.matches,self.groups = GetTestGroups()
self.objects, self.matches, self.groups = GetTestGroups()
self.app.results.groups = self.groups
self.dpanel = app.dpanel
self.dtree = app.dtree
@@ -273,7 +273,6 @@ class TestCaseDupeGuruWithResults:
# When marking selected dupes with a heterogeneous selection, mark all selected dupes. When
# it's homogeneous, simply toggle.
app = self.app
objects = self.objects
self.rtable.select([1])
app.toggle_selected_mark_state()
# index 0 is unmarkable, but we throw it in the bunch to be sure that it doesn't make the
@@ -358,19 +357,19 @@ class TestCaseDupeGuruWithResults:
open(p1, 'w').close()
open(p2, 'w').close()
dne = '/does_not_exist'
app.ignore_list.Ignore(dne,p1)
app.ignore_list.Ignore(p2,dne)
app.ignore_list.Ignore(p1,p2)
app.ignore_list.Ignore(dne, p1)
app.ignore_list.Ignore(p2, dne)
app.ignore_list.Ignore(p1, p2)
app.purge_ignore_list()
eq_(1,len(app.ignore_list))
assert app.ignore_list.AreIgnored(p1,p2)
assert not app.ignore_list.AreIgnored(dne,p1)
eq_(1, len(app.ignore_list))
assert app.ignore_list.AreIgnored(p1, p2)
assert not app.ignore_list.AreIgnored(dne, p1)
def test_only_unicode_is_added_to_ignore_list(self, do_setup):
def FakeIgnore(first,second):
if not isinstance(first,str):
def FakeIgnore(first, second):
if not isinstance(first, str):
self.fail()
if not isinstance(second,str):
if not isinstance(second, str):
self.fail()
app = self.app
@@ -400,8 +399,6 @@ class TestCaseDupeGuruWithResults:
def test_dont_crash_on_delta_powermarker_dupecount_sort(self, do_setup):
# Don't crash when sorting by dupe count or percentage while delta+powermarker are enabled.
# Ref #238
app = self.app
objects = self.objects
self.rtable.delta_values = True
self.rtable.power_marker = True
self.rtable.sort('dupe_count', False)
@@ -414,11 +411,11 @@ class TestCaseDupeGuru_renameSelected:
def pytest_funcarg__do_setup(self, request):
tmpdir = request.getfuncargvalue('tmpdir')
p = Path(str(tmpdir))
fp = open(str(p['foo bar 1']),mode='w')
fp = open(str(p['foo bar 1']), mode='w')
fp.close()
fp = open(str(p['foo bar 2']),mode='w')
fp = open(str(p['foo bar 2']), mode='w')
fp.close()
fp = open(str(p['foo bar 3']),mode='w')
fp = open(str(p['foo bar 3']), mode='w')
fp.close()
files = fs.get_files(p)
for f in files:
@@ -426,7 +423,7 @@ class TestCaseDupeGuru_renameSelected:
matches = engine.getmatches(files)
groups = engine.get_groups(matches)
g = groups[0]
g.prioritize(lambda x:x.name)
g.prioritize(lambda x: x.name)
app = TestApp()
app.app.results.groups = groups
self.app = app.app
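
The hunks above that delete `objects = self.objects` are F841 fixes: a local bound but never read. A self-contained sketch of the pattern, with hypothetical stand-in names:

    class FakeApp:
        objects = ["a", "b"]

        def toggle_selected_mark_state(self):
            pass

    def run_test(app):
        # flake8 F841: "local variable 'objects' is assigned to but never used".
        # The commit simply deletes such bindings.
        objects = app.objects  # noqa: F841  (kept only to illustrate the warning)
        app.toggle_selected_mark_state()

    run_test(FakeApp())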


@@ -1,12 +1,10 @@
# Created By: Virgil Dupras
# Created On: 2011/09/07
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.testutil import TestApp as TestAppBase, eq_, with_app
from hscommon.testutil import TestApp as TestAppBase, eq_, with_app # noqa
from hscommon.path import Path
from hscommon.util import get_file_ext, format_size
from hscommon.gui.column import Column
@@ -15,9 +13,7 @@ from hscommon.jobprogress.job import nulljob, JobCancelled
from .. import engine
from .. import prioritize
from ..engine import getwords
from ..app import DupeGuru as DupeGuruBase, cmp_value
from ..gui.details_panel import DetailsPanel
from ..gui.directory_tree import DirectoryTree
from ..app import DupeGuru as DupeGuruBase
from ..gui.result_table import ResultTable as ResultTableBase
from ..gui.prioritize_dialog import PrioritizeDialog
@@ -119,14 +115,20 @@ class NamedObject:
# "ibabtu" (1)
# "ibabtu" (1)
def GetTestGroups():
objects = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject("foo bleh"),NamedObject("ibabtu"),NamedObject("ibabtu")]
objects = [
NamedObject("foo bar"),
NamedObject("bar bleh"),
NamedObject("foo bleh"),
NamedObject("ibabtu"),
NamedObject("ibabtu")
]
objects[1].size = 1024
matches = engine.getmatches(objects) #we should have 5 matches
groups = engine.get_groups(matches) #We should have 2 groups
for g in groups:
g.prioritize(lambda x:objects.index(x)) #We want the dupes to be in the same order as the list is
g.prioritize(lambda x: objects.index(x)) #We want the dupes to be in the same order as the list is
groups.sort(key=len, reverse=True) # We want the group with 3 members to be first.
return (objects,matches,groups)
return (objects, matches, groups)
class TestApp(TestAppBase):
def __init__(self):
@@ -137,7 +139,6 @@ class TestApp(TestAppBase):
return gui
TestAppBase.__init__(self)
make_gui = self.make_gui
self.app = DupeGuru()
self.default_parent = self.app
self.rtable = link_gui(self.app.result_table)
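
The `# noqa` marker added to the testutil import above (and to the conftest import below) silences F401 for names that are imported only so other test modules can re-import them from here. The same pattern with a stdlib name, as a sketch:

    # Re-export for downstream modules; without the marker flake8 reports
    # F401 ("imported but unused").
    from os.path import join as path_join  # noqa: F401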


@@ -1 +1 @@
from hscommon.testutil import pytest_funcarg__app
from hscommon.testutil import pytest_funcarg__app # noqa
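
Aside: the `pytest_funcarg__` prefix used throughout these tests is pytest's legacy funcarg protocol, which this commit keeps as-is. Under the modern fixture API the same setup would look roughly like this (a sketch, not part of the commit):

    import pytest

    @pytest.fixture
    def do_setup(tmpdir):
        # modern equivalent of a pytest_funcarg__do_setup(self, request) method
        return str(tmpdir)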


@@ -1,9 +1,7 @@
# Created By: Virgil Dupras
# Created On: 2006/02/27
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import os
@@ -15,7 +13,7 @@ from pytest import raises
from hscommon.path import Path
from hscommon.testutil import eq_
from ..directories import *
from ..directories import Directories, DirectoryState, AlreadyThereError, InvalidPathError
def create_fake_fs(rootpath):
# We have it as a separate function because other units are using it.
@@ -44,6 +42,8 @@ def create_fake_fs(rootpath):
fp.close()
return rootpath
testpath = None
def setup_module(module):
# In this unit, we have tests depending on two directory structures. One with only one file in it
# and another with a more complex structure.
@@ -68,13 +68,13 @@ def test_add_path():
d = Directories()
p = testpath['onefile']
d.add_path(p)
eq_(1,len(d))
eq_(1, len(d))
assert p in d
assert (p['foobar']) in d
assert p.parent() not in d
p = testpath['fs']
d.add_path(p)
eq_(2,len(d))
eq_(2, len(d))
assert p in d
def test_AddPath_when_path_is_already_there():
@@ -96,14 +96,14 @@ def test_add_path_containing_paths_already_there():
eq_(d[0], testpath)
def test_AddPath_non_latin(tmpdir):
p = Path(str(tmpdir))
to_add = p['unicode\u201a']
os.mkdir(str(to_add))
d = Directories()
try:
d.add_path(to_add)
except UnicodeDecodeError:
assert False
p = Path(str(tmpdir))
to_add = p['unicode\u201a']
os.mkdir(str(to_add))
d = Directories()
try:
d.add_path(to_add)
except UnicodeDecodeError:
assert False
def test_del():
d = Directories()
@@ -121,13 +121,13 @@ def test_states():
d = Directories()
p = testpath['onefile']
d.add_path(p)
eq_(DirectoryState.Normal ,d.get_state(p))
eq_(DirectoryState.Normal, d.get_state(p))
d.set_state(p, DirectoryState.Reference)
eq_(DirectoryState.Reference ,d.get_state(p))
eq_(DirectoryState.Reference ,d.get_state(p['dir1']))
eq_(1,len(d.states))
eq_(p,list(d.states.keys())[0])
eq_(DirectoryState.Reference ,d.states[p])
eq_(DirectoryState.Reference, d.get_state(p))
eq_(DirectoryState.Reference, d.get_state(p['dir1']))
eq_(1, len(d.states))
eq_(p, list(d.states.keys())[0])
eq_(DirectoryState.Reference, d.states[p])
def test_get_state_with_path_not_there():
# When the path's not there, just return DirectoryState.Normal
@@ -199,8 +199,8 @@ def test_save_and_load(tmpdir):
d1.save_to_file(tmpxml)
d2.load_from_file(tmpxml)
eq_(2, len(d2))
eq_(DirectoryState.Reference ,d2.get_state(p1))
eq_(DirectoryState.Excluded ,d2.get_state(p1['dir1']))
eq_(DirectoryState.Reference, d2.get_state(p1))
eq_(DirectoryState.Excluded, d2.get_state(p1['dir1']))
def test_invalid_path():
d = Directories()
@@ -280,7 +280,7 @@ def test_default_path_state_override(tmpdir):
def _default_state_for_path(self, path):
if 'foobar' in path:
return DirectoryState.Excluded
d = MyDirectories()
p1 = Path(str(tmpdir))
p1['foobar'].mkdir()
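
Swapping `from ..directories import *` for an explicit name list fixes F403 (star import) and lets flake8 verify every name used in the module (F405). A stdlib sketch of the same change:

    # Instead of: from os.path import *   (F403: undefined names can't be detected)
    from os.path import exists, join

    print(join("a", "b"), exists("."))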


@@ -1,6 +1,4 @@
# Created By: Virgil Dupras
# Created On: 2006/01/29
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
@@ -14,7 +12,11 @@ from hscommon.testutil import eq_, log_calls
from .base import NamedObject
from .. import engine
from ..engine import *
from ..engine import (
get_match, getwords, Group, getfields, unpack_fields, compare_fields, compare, WEIGHT_WORDS,
MATCH_SIMILAR_WORDS, NO_FIELD_ORDER, build_word_dict, get_groups, getmatches, Match,
getmatches_by_contents, merge_similar_words, reduce_common_words
)
no = NamedObject
@@ -22,9 +24,9 @@ def get_match_triangle():
o1 = NamedObject(with_words=True)
o2 = NamedObject(with_words=True)
o3 = NamedObject(with_words=True)
m1 = get_match(o1,o2)
m2 = get_match(o1,o3)
m3 = get_match(o2,o3)
m1 = get_match(o1, o2)
m2 = get_match(o1, o3)
m3 = get_match(o2, o3)
return [m1, m2, m3]
def get_test_group():
@@ -51,7 +53,7 @@ class TestCasegetwords:
def test_splitter_chars(self):
eq_(
[chr(i) for i in range(ord('a'),ord('z')+1)],
[chr(i) for i in range(ord('a'), ord('z')+1)],
getwords("a-b_c&d+e(f)g;h\\i[j]k{l}m:n.o,p<q>r/s?t~u!v@w#x$y*z")
)
@@ -99,8 +101,8 @@ class TestCaseunpack_fields:
class TestCaseWordCompare:
def test_list(self):
eq_(100, compare(['a', 'b', 'c', 'd'],['a', 'b', 'c', 'd']))
eq_(86, compare(['a', 'b', 'c', 'd'],['a', 'b', 'c']))
eq_(100, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']))
eq_(86, compare(['a', 'b', 'c', 'd'], ['a', 'b', 'c']))
def test_unordered(self):
#Sometimes, users don't want fuzzy matching too much. When they set the slider
@@ -123,7 +125,7 @@ class TestCaseWordCompare:
eq_(int((6.0 / 13.0) * 100), compare(['foo', 'bar'], ['bar', 'bleh'], (WEIGHT_WORDS, )))
def test_similar_words(self):
eq_(100, compare(['the', 'white', 'stripes'],['the', 'whites', 'stripe'], (MATCH_SIMILAR_WORDS, )))
eq_(100, compare(['the', 'white', 'stripes'], ['the', 'whites', 'stripe'], (MATCH_SIMILAR_WORDS, )))
def test_empty(self):
eq_(0, compare([], []))
@@ -154,7 +156,7 @@ class TestCaseWordCompareWithFields:
eq_((0, 1, 2, 3, 5), flags)
monkeypatch.setattr(engine, 'compare_fields', mock_compare)
compare_fields([['a']], [['a']],(0, 1, 2, 3, 5))
compare_fields([['a']], [['a']], (0, 1, 2, 3, 5))
def test_order(self):
first = [['a', 'b'], ['c', 'd', 'e']]
@@ -162,124 +164,124 @@ class TestCaseWordCompareWithFields:
eq_(0, compare_fields(first, second))
def test_no_order(self):
first = [['a','b'],['c','d','e']]
second = [['c','d','f'],['a','b']]
first = [['a', 'b'], ['c', 'd', 'e']]
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
first = [['a','b'],['a','b']] #a field can only be matched once.
second = [['c','d','f'],['a','b']]
first = [['a', 'b'], ['a', 'b']] #a field can only be matched once.
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(0, compare_fields(first, second, (NO_FIELD_ORDER, )))
first = [['a','b'],['a','b','c']]
second = [['c','d','f'],['a','b']]
first = [['a', 'b'], ['a', 'b', 'c']]
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(33, compare_fields(first, second, (NO_FIELD_ORDER, )))
def test_compare_fields_without_order_doesnt_alter_fields(self):
#The NO_ORDER comp type altered the fields!
first = [['a','b'],['c','d','e']]
second = [['c','d','f'],['a','b']]
first = [['a', 'b'], ['c', 'd', 'e']]
second = [['c', 'd', 'f'], ['a', 'b']]
eq_(67, compare_fields(first, second, (NO_FIELD_ORDER, )))
eq_([['a','b'],['c','d','e']],first)
eq_([['c','d','f'],['a','b']],second)
eq_([['a', 'b'], ['c', 'd', 'e']], first)
eq_([['c', 'd', 'f'], ['a', 'b']], second)
class TestCasebuild_word_dict:
def test_with_standard_words(self):
l = [NamedObject('foo bar',True)]
l.append(NamedObject('bar baz',True))
l.append(NamedObject('baz bleh foo',True))
l = [NamedObject('foo bar', True)]
l.append(NamedObject('bar baz', True))
l.append(NamedObject('baz bleh foo', True))
d = build_word_dict(l)
eq_(4,len(d))
eq_(2,len(d['foo']))
eq_(4, len(d))
eq_(2, len(d['foo']))
assert l[0] in d['foo']
assert l[2] in d['foo']
eq_(2,len(d['bar']))
eq_(2, len(d['bar']))
assert l[0] in d['bar']
assert l[1] in d['bar']
eq_(2,len(d['baz']))
eq_(2, len(d['baz']))
assert l[1] in d['baz']
assert l[2] in d['baz']
eq_(1,len(d['bleh']))
eq_(1, len(d['bleh']))
assert l[2] in d['bleh']
def test_unpack_fields(self):
o = NamedObject('')
o.words = [['foo','bar'],['baz']]
o.words = [['foo', 'bar'], ['baz']]
d = build_word_dict([o])
eq_(3,len(d))
eq_(1,len(d['foo']))
eq_(3, len(d))
eq_(1, len(d['foo']))
def test_words_are_unaltered(self):
o = NamedObject('')
o.words = [['foo','bar'],['baz']]
o.words = [['foo', 'bar'], ['baz']]
build_word_dict([o])
eq_([['foo','bar'],['baz']],o.words)
eq_([['foo', 'bar'], ['baz']], o.words)
def test_object_instances_can_only_be_once_in_words_object_list(self):
o = NamedObject('foo foo',True)
o = NamedObject('foo foo', True)
d = build_word_dict([o])
eq_(1,len(d['foo']))
eq_(1, len(d['foo']))
def test_job(self):
def do_progress(p,d=''):
def do_progress(p, d=''):
self.log.append(p)
return True
j = job.Job(1,do_progress)
j = job.Job(1, do_progress)
self.log = []
s = "foo bar"
build_word_dict([NamedObject(s, True), NamedObject(s, True), NamedObject(s, True)], j)
# We don't have intermediate log because iter_with_progress is called with every > 1
eq_(0,self.log[0])
eq_(100,self.log[1])
eq_(0, self.log[0])
eq_(100, self.log[1])
class TestCasemerge_similar_words:
def test_some_similar_words(self):
d = {
'foobar':set([1]),
'foobar1':set([2]),
'foobar2':set([3]),
'foobar': set([1]),
'foobar1': set([2]),
'foobar2': set([3]),
}
merge_similar_words(d)
eq_(1,len(d))
eq_(3,len(d['foobar']))
eq_(1, len(d))
eq_(3, len(d['foobar']))
class TestCasereduce_common_words:
def test_typical(self):
d = {
'foo': set([NamedObject('foo bar',True) for i in range(50)]),
'bar': set([NamedObject('foo bar',True) for i in range(49)])
'foo': set([NamedObject('foo bar', True) for i in range(50)]),
'bar': set([NamedObject('foo bar', True) for i in range(49)])
}
reduce_common_words(d, 50)
assert 'foo' not in d
eq_(49,len(d['bar']))
eq_(49, len(d['bar']))
def test_dont_remove_objects_with_only_common_words(self):
d = {
'common': set([NamedObject("common uncommon",True) for i in range(50)] + [NamedObject("common",True)]),
'uncommon': set([NamedObject("common uncommon",True)])
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
'uncommon': set([NamedObject("common uncommon", True)])
}
reduce_common_words(d, 50)
eq_(1,len(d['common']))
eq_(1,len(d['uncommon']))
eq_(1, len(d['common']))
eq_(1, len(d['uncommon']))
def test_values_still_are_set_instances(self):
d = {
'common': set([NamedObject("common uncommon",True) for i in range(50)] + [NamedObject("common",True)]),
'uncommon': set([NamedObject("common uncommon",True)])
'common': set([NamedObject("common uncommon", True) for i in range(50)] + [NamedObject("common", True)]),
'uncommon': set([NamedObject("common uncommon", True)])
}
reduce_common_words(d, 50)
assert isinstance(d['common'],set)
assert isinstance(d['uncommon'],set)
assert isinstance(d['common'], set)
assert isinstance(d['uncommon'], set)
def test_dont_raise_KeyError_when_a_word_has_been_removed(self):
#If a word has been removed by the reduce, an object in a subsequent common word that
#contains the word that has been removed would cause a KeyError.
d = {
'foo': set([NamedObject('foo bar baz',True) for i in range(50)]),
'bar': set([NamedObject('foo bar baz',True) for i in range(50)]),
'baz': set([NamedObject('foo bar baz',True) for i in range(49)])
'foo': set([NamedObject('foo bar baz', True) for i in range(50)]),
'bar': set([NamedObject('foo bar baz', True) for i in range(50)]),
'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
}
try:
reduce_common_words(d, 50)
@@ -290,7 +292,7 @@ class TestCasereduce_common_words:
#object.words may be fields.
def create_it():
o = NamedObject('')
o.words = [['foo','bar'],['baz']]
o.words = [['foo', 'bar'], ['baz']]
return o
d = {
@@ -306,39 +308,40 @@ class TestCasereduce_common_words:
#be counted as a common word for subsequent words. For example, if 'foo' is processed
#as a common word, keeping a "foo bar" file in it, and the 'bar' is processed, "foo bar"
#would not stay in 'bar' because 'foo' is not a common word anymore.
only_common = NamedObject('foo bar',True)
only_common = NamedObject('foo bar', True)
d = {
'foo': set([NamedObject('foo bar baz',True) for i in range(49)] + [only_common]),
'bar': set([NamedObject('foo bar baz',True) for i in range(49)] + [only_common]),
'baz': set([NamedObject('foo bar baz',True) for i in range(49)])
'foo': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
'bar': set([NamedObject('foo bar baz', True) for i in range(49)] + [only_common]),
'baz': set([NamedObject('foo bar baz', True) for i in range(49)])
}
reduce_common_words(d, 50)
eq_(1,len(d['foo']))
eq_(1,len(d['bar']))
eq_(49,len(d['baz']))
eq_(1, len(d['foo']))
eq_(1, len(d['bar']))
eq_(49, len(d['baz']))
class TestCaseget_match:
def test_simple(self):
o1 = NamedObject("foo bar",True)
o2 = NamedObject("bar bleh",True)
m = get_match(o1,o2)
eq_(50,m.percentage)
eq_(['foo','bar'],m.first.words)
eq_(['bar','bleh'],m.second.words)
o1 = NamedObject("foo bar", True)
o2 = NamedObject("bar bleh", True)
m = get_match(o1, o2)
eq_(50, m.percentage)
eq_(['foo', 'bar'], m.first.words)
eq_(['bar', 'bleh'], m.second.words)
assert m.first is o1
assert m.second is o2
def test_in(self):
o1 = NamedObject("foo",True)
o2 = NamedObject("bar",True)
m = get_match(o1,o2)
o1 = NamedObject("foo", True)
o2 = NamedObject("bar", True)
m = get_match(o1, o2)
assert o1 in m
assert o2 in m
assert object() not in m
def test_word_weight(self):
eq_(int((6.0 / 13.0) * 100),get_match(NamedObject("foo bar",True),NamedObject("bar bleh",True),(WEIGHT_WORDS,)).percentage)
m = get_match(NamedObject("foo bar", True), NamedObject("bar bleh", True), (WEIGHT_WORDS, ))
eq_(m.percentage, int((6.0 / 13.0) * 100))
class TestCaseGetMatches:
@@ -346,16 +349,16 @@ class TestCaseGetMatches:
eq_(getmatches([]), [])
def test_simple(self):
l = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject("a b c foo")]
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(l)
eq_(2,len(r))
eq_(2, len(r))
m = first(m for m in r if m.percentage == 50) #"foo bar" and "bar bleh"
assert_match(m, 'foo bar', 'bar bleh')
m = first(m for m in r if m.percentage == 33) #"foo bar" and "a b c foo"
assert_match(m, 'foo bar', 'a b c foo')
def test_null_and_unrelated_objects(self):
l = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject(""),NamedObject("unrelated object")]
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject(""), NamedObject("unrelated object")]
r = getmatches(l)
eq_(len(r), 1)
m = r[0]
@@ -363,19 +366,19 @@ class TestCaseGetMatches:
assert_match(m, 'foo bar', 'bar bleh')
def test_twice_the_same_word(self):
l = [NamedObject("foo foo bar"),NamedObject("bar bleh")]
l = [NamedObject("foo foo bar"), NamedObject("bar bleh")]
r = getmatches(l)
eq_(1,len(r))
eq_(1, len(r))
def test_twice_the_same_word_when_preworded(self):
l = [NamedObject("foo foo bar",True),NamedObject("bar bleh",True)]
l = [NamedObject("foo foo bar", True), NamedObject("bar bleh", True)]
r = getmatches(l)
eq_(1,len(r))
eq_(1, len(r))
def test_two_words_match(self):
l = [NamedObject("foo bar"),NamedObject("foo bar bleh")]
l = [NamedObject("foo bar"), NamedObject("foo bar bleh")]
r = getmatches(l)
eq_(1,len(r))
eq_(1, len(r))
def test_match_files_with_only_common_words(self):
#If a word occurs more than 50 times, it is excluded from the matching process
@@ -384,41 +387,41 @@ class TestCaseGetMatches:
# This test assumes that the common word threshold const is 50
l = [NamedObject("foo") for i in range(50)]
r = getmatches(l)
eq_(1225,len(r))
eq_(1225, len(r))
def test_use_words_already_there_if_there(self):
o1 = NamedObject('foo')
o2 = NamedObject('bar')
o2.words = ['foo']
eq_(1, len(getmatches([o1,o2])))
eq_(1, len(getmatches([o1, o2])))
def test_job(self):
def do_progress(p,d=''):
def do_progress(p, d=''):
self.log.append(p)
return True
j = job.Job(1,do_progress)
j = job.Job(1, do_progress)
self.log = []
s = "foo bar"
getmatches([NamedObject(s), NamedObject(s), NamedObject(s)], j=j)
assert len(self.log) > 2
eq_(0,self.log[0])
eq_(100,self.log[-1])
eq_(0, self.log[0])
eq_(100, self.log[-1])
def test_weight_words(self):
l = [NamedObject("foo bar"),NamedObject("bar bleh")]
l = [NamedObject("foo bar"), NamedObject("bar bleh")]
m = getmatches(l, weight_words=True)[0]
eq_(int((6.0 / 13.0) * 100),m.percentage)
eq_(int((6.0 / 13.0) * 100), m.percentage)
def test_similar_word(self):
l = [NamedObject("foobar"),NamedObject("foobars")]
l = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(l, match_similar_words=True)), 1)
eq_(getmatches(l, match_similar_words=True)[0].percentage, 100)
l = [NamedObject("foobar"),NamedObject("foo")]
l = [NamedObject("foobar"), NamedObject("foo")]
eq_(len(getmatches(l, match_similar_words=True)), 0) #too far
l = [NamedObject("bizkit"),NamedObject("bizket")]
l = [NamedObject("bizkit"), NamedObject("bizket")]
eq_(len(getmatches(l, match_similar_words=True)), 1)
l = [NamedObject("foobar"),NamedObject("foosbar")]
l = [NamedObject("foobar"), NamedObject("foosbar")]
eq_(len(getmatches(l, match_similar_words=True)), 1)
def test_single_object_with_similar_words(self):
@@ -426,9 +429,9 @@ class TestCaseGetMatches:
eq_(len(getmatches(l, match_similar_words=True)), 0)
def test_double_words_get_counted_only_once(self):
l = [NamedObject("foo bar foo bleh"),NamedObject("foo bar bleh bar")]
l = [NamedObject("foo bar foo bleh"), NamedObject("foo bar bleh bar")]
m = getmatches(l)[0]
eq_(75,m.percentage)
eq_(75, m.percentage)
def test_with_fields(self):
o1 = NamedObject("foo bar - foo bleh")
@@ -447,7 +450,7 @@ class TestCaseGetMatches:
eq_(m.percentage, 50)
def test_only_match_similar_when_the_option_is_set(self):
l = [NamedObject("foobar"),NamedObject("foobars")]
l = [NamedObject("foobar"), NamedObject("foobars")]
eq_(len(getmatches(l, match_similar_words=False)), 0)
def test_dont_recurse_do_match(self):
@@ -462,9 +465,9 @@ class TestCaseGetMatches:
sys.setrecursionlimit(1000)
def test_min_match_percentage(self):
l = [NamedObject("foo bar"),NamedObject("bar bleh"),NamedObject("a b c foo")]
l = [NamedObject("foo bar"), NamedObject("bar bleh"), NamedObject("a b c foo")]
r = getmatches(l, min_match_percentage=50)
eq_(1,len(r)) #Only "foo bar" / "bar bleh" should match
eq_(1, len(r)) #Only "foo bar" / "bar bleh" should match
def test_MemoryError(self, monkeypatch):
@log_calls
@@ -491,112 +494,112 @@ class TestCaseGetMatchesByContents:
class TestCaseGroup:
def test_empy(self):
g = Group()
eq_(None,g.ref)
eq_([],g.dupes)
eq_(0,len(g.matches))
eq_(None, g.ref)
eq_([], g.dupes)
eq_(0, len(g.matches))
def test_add_match(self):
g = Group()
m = get_match(NamedObject("foo",True),NamedObject("bar",True))
m = get_match(NamedObject("foo", True), NamedObject("bar", True))
g.add_match(m)
assert g.ref is m.first
eq_([m.second],g.dupes)
eq_(1,len(g.matches))
eq_([m.second], g.dupes)
eq_(1, len(g.matches))
assert m in g.matches
def test_multiple_add_match(self):
g = Group()
o1 = NamedObject("a",True)
o2 = NamedObject("b",True)
o3 = NamedObject("c",True)
o4 = NamedObject("d",True)
g.add_match(get_match(o1,o2))
o1 = NamedObject("a", True)
o2 = NamedObject("b", True)
o3 = NamedObject("c", True)
o4 = NamedObject("d", True)
g.add_match(get_match(o1, o2))
assert g.ref is o1
eq_([o2],g.dupes)
eq_(1,len(g.matches))
g.add_match(get_match(o1,o3))
eq_([o2],g.dupes)
eq_(2,len(g.matches))
g.add_match(get_match(o2,o3))
eq_([o2,o3],g.dupes)
eq_(3,len(g.matches))
g.add_match(get_match(o1,o4))
eq_([o2,o3],g.dupes)
eq_(4,len(g.matches))
g.add_match(get_match(o2,o4))
eq_([o2,o3],g.dupes)
eq_(5,len(g.matches))
g.add_match(get_match(o3,o4))
eq_([o2,o3,o4],g.dupes)
eq_(6,len(g.matches))
eq_([o2], g.dupes)
eq_(1, len(g.matches))
g.add_match(get_match(o1, o3))
eq_([o2], g.dupes)
eq_(2, len(g.matches))
g.add_match(get_match(o2, o3))
eq_([o2, o3], g.dupes)
eq_(3, len(g.matches))
g.add_match(get_match(o1, o4))
eq_([o2, o3], g.dupes)
eq_(4, len(g.matches))
g.add_match(get_match(o2, o4))
eq_([o2, o3], g.dupes)
eq_(5, len(g.matches))
g.add_match(get_match(o3, o4))
eq_([o2, o3, o4], g.dupes)
eq_(6, len(g.matches))
def test_len(self):
g = Group()
eq_(0,len(g))
g.add_match(get_match(NamedObject("foo",True),NamedObject("bar",True)))
eq_(2,len(g))
eq_(0, len(g))
g.add_match(get_match(NamedObject("foo", True), NamedObject("bar", True)))
eq_(2, len(g))
def test_add_same_match_twice(self):
g = Group()
m = get_match(NamedObject("foo",True),NamedObject("foo",True))
m = get_match(NamedObject("foo", True), NamedObject("foo", True))
g.add_match(m)
eq_(2,len(g))
eq_(1,len(g.matches))
eq_(2, len(g))
eq_(1, len(g.matches))
g.add_match(m)
eq_(2,len(g))
eq_(1,len(g.matches))
eq_(2, len(g))
eq_(1, len(g.matches))
def test_in(self):
g = Group()
o1 = NamedObject("foo",True)
o2 = NamedObject("bar",True)
o1 = NamedObject("foo", True)
o2 = NamedObject("bar", True)
assert o1 not in g
g.add_match(get_match(o1,o2))
g.add_match(get_match(o1, o2))
assert o1 in g
assert o2 in g
def test_remove(self):
g = Group()
o1 = NamedObject("foo",True)
o2 = NamedObject("bar",True)
o3 = NamedObject("bleh",True)
g.add_match(get_match(o1,o2))
g.add_match(get_match(o1,o3))
g.add_match(get_match(o2,o3))
eq_(3,len(g.matches))
eq_(3,len(g))
o1 = NamedObject("foo", True)
o2 = NamedObject("bar", True)
o3 = NamedObject("bleh", True)
g.add_match(get_match(o1, o2))
g.add_match(get_match(o1, o3))
g.add_match(get_match(o2, o3))
eq_(3, len(g.matches))
eq_(3, len(g))
g.remove_dupe(o3)
eq_(1,len(g.matches))
eq_(2,len(g))
eq_(1, len(g.matches))
eq_(2, len(g))
g.remove_dupe(o1)
eq_(0,len(g.matches))
eq_(0,len(g))
eq_(0, len(g.matches))
eq_(0, len(g))
def test_remove_with_ref_dupes(self):
g = Group()
o1 = NamedObject("foo",True)
o2 = NamedObject("bar",True)
o3 = NamedObject("bleh",True)
g.add_match(get_match(o1,o2))
g.add_match(get_match(o1,o3))
g.add_match(get_match(o2,o3))
o1 = NamedObject("foo", True)
o2 = NamedObject("bar", True)
o3 = NamedObject("bleh", True)
g.add_match(get_match(o1, o2))
g.add_match(get_match(o1, o3))
g.add_match(get_match(o2, o3))
o1.is_ref = True
o2.is_ref = True
g.remove_dupe(o3)
eq_(0,len(g))
eq_(0, len(g))
def test_switch_ref(self):
o1 = NamedObject(with_words=True)
o2 = NamedObject(with_words=True)
g = Group()
g.add_match(get_match(o1,o2))
g.add_match(get_match(o1, o2))
assert o1 is g.ref
g.switch_ref(o2)
assert o2 is g.ref
eq_([o1],g.dupes)
eq_([o1], g.dupes)
g.switch_ref(o2)
assert o2 is g.ref
g.switch_ref(NamedObject('',True))
g.switch_ref(NamedObject('', True))
assert o2 is g.ref
def test_switch_ref_from_ref_dir(self):
@@ -617,12 +620,12 @@ class TestCaseGroup:
m = g.get_match_of(o)
assert g.ref in m
assert o in m
assert g.get_match_of(NamedObject('',True)) is None
assert g.get_match_of(NamedObject('', True)) is None
assert g.get_match_of(g.ref) is None
def test_percentage(self):
#percentage should return the avg percentage in relation to the ref
m1,m2,m3 = get_match_triangle()
m1, m2, m3 = get_match_triangle()
m1 = Match(m1[0], m1[1], 100)
m2 = Match(m2[0], m2[1], 50)
m3 = Match(m3[0], m3[1], 33)
@@ -630,21 +633,21 @@ class TestCaseGroup:
g.add_match(m1)
g.add_match(m2)
g.add_match(m3)
eq_(75,g.percentage)
eq_(75, g.percentage)
g.switch_ref(g.dupes[0])
eq_(66,g.percentage)
eq_(66, g.percentage)
g.remove_dupe(g.dupes[0])
eq_(33,g.percentage)
eq_(33, g.percentage)
g.add_match(m1)
g.add_match(m2)
eq_(66,g.percentage)
eq_(66, g.percentage)
def test_percentage_on_empty_group(self):
g = Group()
eq_(0,g.percentage)
eq_(0, g.percentage)
def test_prioritize(self):
m1,m2,m3 = get_match_triangle()
m1, m2, m3 = get_match_triangle()
o1 = m1.first
o2 = m1.second
o3 = m2.second
@@ -656,7 +659,7 @@ class TestCaseGroup:
g.add_match(m2)
g.add_match(m3)
assert o1 is g.ref
assert g.prioritize(lambda x:x.name)
assert g.prioritize(lambda x: x.name)
assert o3 is g.ref
def test_prioritize_with_tie_breaker(self):
@@ -664,7 +667,7 @@ class TestCaseGroup:
g = get_test_group()
o1, o2, o3 = g.ordered
tie_breaker = lambda ref, dupe: dupe is o3
g.prioritize(lambda x:0, tie_breaker)
g.prioritize(lambda x: 0, tie_breaker)
assert g.ref is o3
def test_prioritize_with_tie_breaker_runs_on_all_dupes(self):
@@ -676,7 +679,7 @@ class TestCaseGroup:
o2.foo = 2
o3.foo = 3
tie_breaker = lambda ref, dupe: dupe.foo > ref.foo
g.prioritize(lambda x:0, tie_breaker)
g.prioritize(lambda x: 0, tie_breaker)
assert g.ref is o3
def test_prioritize_with_tie_breaker_runs_only_on_tie_dupes(self):
@@ -709,65 +712,65 @@ class TestCaseGroup:
g[0].name = 'a'
g[1].name = 'b'
g[2].name = 'c'
assert not g.prioritize(lambda x:x.name)
assert not g.prioritize(lambda x: x.name)
def test_list_like(self):
g = Group()
o1,o2 = (NamedObject("foo",True),NamedObject("bar",True))
g.add_match(get_match(o1,o2))
o1, o2 = (NamedObject("foo", True), NamedObject("bar", True))
g.add_match(get_match(o1, o2))
assert g[0] is o1
assert g[1] is o2
def test_discard_matches(self):
g = Group()
o1,o2,o3 = (NamedObject("foo",True),NamedObject("bar",True),NamedObject("baz",True))
g.add_match(get_match(o1,o2))
g.add_match(get_match(o1,o3))
o1, o2, o3 = (NamedObject("foo", True), NamedObject("bar", True), NamedObject("baz", True))
g.add_match(get_match(o1, o2))
g.add_match(get_match(o1, o3))
g.discard_matches()
eq_(1,len(g.matches))
eq_(0,len(g.candidates))
eq_(1, len(g.matches))
eq_(0, len(g.candidates))
class TestCaseget_groups:
def test_empty(self):
r = get_groups([])
eq_([],r)
eq_([], r)
def test_simple(self):
l = [NamedObject("foo bar"),NamedObject("bar bleh")]
l = [NamedObject("foo bar"), NamedObject("bar bleh")]
matches = getmatches(l)
m = matches[0]
r = get_groups(matches)
eq_(1,len(r))
eq_(1, len(r))
g = r[0]
assert g.ref is m.first
eq_([m.second],g.dupes)
eq_([m.second], g.dupes)
def test_group_with_multiple_matches(self):
#This results in 3 matches
l = [NamedObject("foo"),NamedObject("foo"),NamedObject("foo")]
l = [NamedObject("foo"), NamedObject("foo"), NamedObject("foo")]
matches = getmatches(l)
r = get_groups(matches)
eq_(1,len(r))
eq_(1, len(r))
g = r[0]
eq_(3,len(g))
eq_(3, len(g))
def test_must_choose_a_group(self):
l = [NamedObject("a b"),NamedObject("a b"),NamedObject("b c"),NamedObject("c d"),NamedObject("c d")]
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("b c"), NamedObject("c d"), NamedObject("c d")]
#There will be 2 groups here: group "a b" and group "c d"
#"b c" can go either of them, but not both.
matches = getmatches(l)
r = get_groups(matches)
eq_(2,len(r))
eq_(5,len(r[0])+len(r[1]))
eq_(2, len(r))
eq_(5, len(r[0])+len(r[1]))
def test_should_all_go_in_the_same_group(self):
l = [NamedObject("a b"),NamedObject("a b"),NamedObject("a b"),NamedObject("a b")]
l = [NamedObject("a b"), NamedObject("a b"), NamedObject("a b"), NamedObject("a b")]
#There will be 2 groups here: group "a b" and group "c d"
#"b c" can fit in both, but it must be in only one of them
matches = getmatches(l)
r = get_groups(matches)
eq_(1,len(r))
eq_(1, len(r))
def test_give_priority_to_matches_with_higher_percentage(self):
o1 = NamedObject(with_words=True)
@@ -775,10 +778,10 @@ class TestCaseget_groups:
o3 = NamedObject(with_words=True)
m1 = Match(o1, o2, 1)
m2 = Match(o2, o3, 2)
r = get_groups([m1,m2])
eq_(1,len(r))
r = get_groups([m1, m2])
eq_(1, len(r))
g = r[0]
eq_(2,len(g))
eq_(2, len(g))
assert o1 not in g
assert o2 in g
assert o3 in g
@@ -787,32 +790,32 @@ class TestCaseget_groups:
l = [NamedObject("foobar") for i in range(4)]
m = getmatches(l)
r = get_groups(m)
eq_(1,len(r))
eq_(4,len(r[0]))
eq_(1, len(r))
eq_(4, len(r[0]))
def test_referenced_by_ref2(self):
o1 = NamedObject(with_words=True)
o2 = NamedObject(with_words=True)
o3 = NamedObject(with_words=True)
m1 = get_match(o1,o2)
m2 = get_match(o3,o1)
m3 = get_match(o3,o2)
r = get_groups([m1,m2,m3])
eq_(3,len(r[0]))
m1 = get_match(o1, o2)
m2 = get_match(o3, o1)
m3 = get_match(o3, o2)
r = get_groups([m1, m2, m3])
eq_(3, len(r[0]))
def test_job(self):
def do_progress(p,d=''):
def do_progress(p, d=''):
self.log.append(p)
return True
self.log = []
j = job.Job(1,do_progress)
m1,m2,m3 = get_match_triangle()
j = job.Job(1, do_progress)
m1, m2, m3 = get_match_triangle()
#101%: To make sure it is processed first so the job test works correctly
m4 = Match(NamedObject('a',True), NamedObject('a',True), 101)
get_groups([m1,m2,m3,m4],j)
eq_(0,self.log[0])
eq_(100,self.log[-1])
m4 = Match(NamedObject('a', True), NamedObject('a', True), 101)
get_groups([m1, m2, m3, m4], j)
eq_(0, self.log[0])
eq_(100, self.log[-1])
def test_group_admissible_discarded_dupes(self):
# If, with a (A, B, C, D) set, all match with A, but C and D don't match with B and that the
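
Two recurring line-length (E501) fixes in this file: the `objects` list near the top is reflowed one element per line, and the big engine import becomes a parenthesized multi-line import. A sketch of both shapes, with placeholder names:

    from os.path import (
        basename, exists, join,  # parenthesized imports wrap across lines cleanly
    )

    names = [
        "foo bar",
        "bar bleh",
        "foo bleh",
    ]  # one element per line keeps every line under the configured limit

    print(basename(join("tmp", "file.txt")), exists("."), names)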


@@ -1,9 +1,7 @@
# Created By: Virgil Dupras
# Created On: 2006/05/02
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import io
@@ -12,54 +10,54 @@ from xml.etree import ElementTree as ET
from pytest import raises
from hscommon.testutil import eq_
from ..ignore import *
from ..ignore import IgnoreList
def test_empty():
il = IgnoreList()
eq_(0,len(il))
assert not il.AreIgnored('foo','bar')
eq_(0, len(il))
assert not il.AreIgnored('foo', 'bar')
def test_simple():
il = IgnoreList()
il.Ignore('foo','bar')
assert il.AreIgnored('foo','bar')
assert il.AreIgnored('bar','foo')
assert not il.AreIgnored('foo','bleh')
assert not il.AreIgnored('bleh','bar')
eq_(1,len(il))
il.Ignore('foo', 'bar')
assert il.AreIgnored('foo', 'bar')
assert il.AreIgnored('bar', 'foo')
assert not il.AreIgnored('foo', 'bleh')
assert not il.AreIgnored('bleh', 'bar')
eq_(1, len(il))
def test_multiple():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('foo','bleh')
il.Ignore('bleh','bar')
il.Ignore('aybabtu','bleh')
assert il.AreIgnored('foo','bar')
assert il.AreIgnored('bar','foo')
assert il.AreIgnored('foo','bleh')
assert il.AreIgnored('bleh','bar')
assert not il.AreIgnored('aybabtu','bar')
eq_(4,len(il))
il.Ignore('foo', 'bar')
il.Ignore('foo', 'bleh')
il.Ignore('bleh', 'bar')
il.Ignore('aybabtu', 'bleh')
assert il.AreIgnored('foo', 'bar')
assert il.AreIgnored('bar', 'foo')
assert il.AreIgnored('foo', 'bleh')
assert il.AreIgnored('bleh', 'bar')
assert not il.AreIgnored('aybabtu', 'bar')
eq_(4, len(il))
def test_clear():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('foo', 'bar')
il.Clear()
assert not il.AreIgnored('foo','bar')
assert not il.AreIgnored('bar','foo')
eq_(0,len(il))
assert not il.AreIgnored('foo', 'bar')
assert not il.AreIgnored('bar', 'foo')
eq_(0, len(il))
def test_add_same_twice():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','foo')
eq_(1,len(il))
il.Ignore('foo', 'bar')
il.Ignore('bar', 'foo')
eq_(1, len(il))
def test_save_to_xml():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('foo','bleh')
il.Ignore('bleh','bar')
il.Ignore('foo', 'bar')
il.Ignore('foo', 'bleh')
il.Ignore('bleh', 'bar')
f = io.BytesIO()
il.save_to_xml(f)
f.seek(0)
@@ -83,50 +81,50 @@ def test_SaveThenLoad():
f.seek(0)
il = IgnoreList()
il.load_from_xml(f)
eq_(4,len(il))
assert il.AreIgnored('\u00e9','bar')
eq_(4, len(il))
assert il.AreIgnored('\u00e9', 'bar')
def test_LoadXML_with_empty_file_tags():
f = io.BytesIO()
f.write(b'<?xml version="1.0" encoding="utf-8"?><ignore_list><file><file/></file></ignore_list>')
f.seek(0)
il = IgnoreList()
il.load_from_xml(f)
eq_(0,len(il))
eq_(0, len(il))
def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','baz')
assert il.AreIgnored('bar','foo')
il.Ignore('foo', 'bar')
il.Ignore('bar', 'baz')
assert il.AreIgnored('bar', 'foo')
def test_no_dupes_when_a_child_is_a_key_somewhere_else():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','baz')
il.Ignore('bar','foo')
eq_(2,len(il))
il.Ignore('foo', 'bar')
il.Ignore('bar', 'baz')
il.Ignore('bar', 'foo')
eq_(2, len(il))
def test_iterate():
#It must be possible to iterate through ignore list
il = IgnoreList()
expected = [('foo','bar'),('bar','baz'),('foo','baz')]
expected = [('foo', 'bar'), ('bar', 'baz'), ('foo', 'baz')]
for i in expected:
il.Ignore(i[0],i[1])
il.Ignore(i[0], i[1])
for i in il:
expected.remove(i) #No exception should be raised
assert not expected #expected should be empty
def test_filter():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','baz')
il.Ignore('foo','baz')
il.Filter(lambda f,s: f == 'bar')
eq_(1,len(il))
assert not il.AreIgnored('foo','bar')
assert il.AreIgnored('bar','baz')
il.Ignore('foo', 'bar')
il.Ignore('bar', 'baz')
il.Ignore('foo', 'baz')
il.Filter(lambda f, s: f == 'bar')
eq_(1, len(il))
assert not il.AreIgnored('foo', 'bar')
assert il.AreIgnored('bar', 'baz')
def test_save_with_non_ascii_items():
il = IgnoreList()
@@ -139,14 +137,14 @@ def test_save_with_non_ascii_items():
def test_len():
il = IgnoreList()
eq_(0,len(il))
il.Ignore('foo','bar')
eq_(1,len(il))
eq_(0, len(il))
il.Ignore('foo', 'bar')
eq_(1, len(il))
def test_nonzero():
il = IgnoreList()
assert not il
il.Ignore('foo','bar')
il.Ignore('foo', 'bar')
assert il
def test_remove():
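
Nearly every hunk in this file is the same E231 fix, adding a space after each comma in call arguments. A before/after sketch (this `eq_` is a local stand-in assertion, not the hscommon helper):

    def eq_(a, b):
        assert a == b, (a, b)

    # before (E231, missing whitespace after ','):  eq_(1,len([0]))
    eq_(1, len([0]))  # after: flake8-clean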


@@ -1,14 +1,12 @@
# Created By: Virgil Dupras
# Created On: 2006/02/23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.testutil import eq_
from ..markable import *
from ..markable import MarkableList, Markable
def gen():
ml = MarkableList()
@@ -36,7 +34,7 @@ def test_unmark_unmarked():
ml = gen()
assert not ml.unmark(4)
assert not ml.is_marked(4)
def test_mark_twice_and_unmark():
ml = gen()
assert ml.mark(5)
@@ -73,6 +71,7 @@ def test_change_notifications():
class Foobar(Markable):
def _did_mark(self, o):
self.log.append((True, o))
def _did_unmark(self, o):
self.log.append((False, o))
@@ -84,15 +83,15 @@ def test_change_notifications():
f.unmark('foo')
f.unmark('foo')
f.mark_toggle('bar')
eq_([(True,'foo'),(True,'bar'),(False,'foo'),(False,'bar')],f.log)
eq_([(True, 'foo'), (True, 'bar'), (False, 'foo'), (False, 'bar')], f.log)
def test_mark_count():
ml = gen()
eq_(0,ml.mark_count)
eq_(0, ml.mark_count)
ml.mark(7)
eq_(1,ml.mark_count)
eq_(1, ml.mark_count)
ml.mark(11)
eq_(1,ml.mark_count)
eq_(1, ml.mark_count)
def test_mark_none():
log = []
@@ -100,16 +99,16 @@ def test_mark_none():
ml._did_unmark = lambda o: log.append(o)
ml.mark(1)
ml.mark(2)
eq_(2,ml.mark_count)
eq_(2, ml.mark_count)
ml.mark_none()
eq_(0,ml.mark_count)
eq_([1,2],log)
eq_(0, ml.mark_count)
eq_([1, 2], log)
def test_mark_all():
ml = gen()
eq_(0,ml.mark_count)
eq_(0, ml.mark_count)
ml.mark_all()
eq_(10,ml.mark_count)
eq_(10, ml.mark_count)
assert ml.is_marked(1)
def test_mark_invert():
@@ -122,8 +121,8 @@ def test_mark_invert():
def test_mark_while_inverted():
log = []
ml = gen()
ml._did_unmark = lambda o:log.append((False,o))
ml._did_mark = lambda o:log.append((True,o))
ml._did_unmark = lambda o: log.append((False, o))
ml._did_mark = lambda o: log.append((True, o))
ml.mark(1)
ml.mark_invert()
assert ml.mark_inverted
@@ -132,9 +131,9 @@ def test_mark_while_inverted():
assert ml.unmark(1)
ml.mark_toggle(3)
assert not ml.is_marked(3)
eq_(7,ml.mark_count)
eq_([(True,1),(False,1),(True,2),(True,1),(True,3)],log)
eq_(7, ml.mark_count)
eq_([(True, 1), (False, 1), (True, 2), (True, 1), (True, 3)], log)
def test_remove_mark_flag():
ml = gen()
ml.mark(1)
@@ -145,7 +144,7 @@ def test_remove_mark_flag():
assert not ml.is_marked(1)
ml._remove_mark_flag(1)
assert ml.is_marked(1)
def test_is_marked_returns_false_if_object_not_markable():
class MyMarkableList(MarkableList):
def _is_markable(self, o):
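
The hunk above that inserts a blank line between `_did_mark` and `_did_unmark` is flake8's E301 (expected 1 blank line between methods). Minimal sketch:

    class Foobar:
        def __init__(self):
            self.log = []

        def _did_mark(self, o):
            self.log.append((True, o))

        def _did_unmark(self, o):  # E301: one blank line required above this method
            self.log.append((False, o))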


@@ -1,9 +1,7 @@
# Created By: Virgil Dupras
# Created On: 2006/02/23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import io
@@ -22,21 +20,21 @@ class TestCaseResultsEmpty:
def setup_method(self, method):
self.app = DupeGuru()
self.results = self.app.results
def test_apply_invalid_filter(self):
# If the applied filter is an invalid regexp, just ignore the filter.
self.results.apply_filter('[') # invalid
self.test_stat_line() # make sure that the stats line isn't saying we applied a '[' filter
def test_stat_line(self):
eq_("0 / 0 (0.00 B / 0.00 B) duplicates marked.",self.results.stat_line)
eq_("0 / 0 (0.00 B / 0.00 B) duplicates marked.", self.results.stat_line)
def test_groups(self):
eq_(0,len(self.results.groups))
eq_(0, len(self.results.groups))
def test_get_group_of_duplicate(self):
assert self.results.get_group_of_duplicate('foo') is None
def test_save_to_xml(self):
f = io.BytesIO()
self.results.save_to_xml(f)
@@ -44,15 +42,15 @@ class TestCaseResultsEmpty:
doc = ET.parse(f)
root = doc.getroot()
eq_('results', root.tag)
def test_is_modified(self):
assert not self.results.is_modified
def test_is_modified_after_setting_empty_group(self):
# Don't consider results as modified if they're empty
self.results.groups = []
assert not self.results.is_modified
def test_save_to_same_name_as_folder(self, tmpdir):
# Issue #149
# When saving to a filename that already exists, the file is overwritten. However, when
@@ -64,108 +62,108 @@ class TestCaseResultsEmpty:
folderpath.mkdir()
self.results.save_to_xml(str(folderpath)) # no crash
assert tmpdir.join('[000] foo').check()
class TestCaseResultsWithSomeGroups:
def setup_method(self, method):
self.app = DupeGuru()
self.results = self.app.results
self.objects,self.matches,self.groups = GetTestGroups()
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
def test_stat_line(self):
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
def test_groups(self):
eq_(2,len(self.results.groups))
eq_(2, len(self.results.groups))
def test_get_group_of_duplicate(self):
for o in self.objects:
g = self.results.get_group_of_duplicate(o)
assert isinstance(g, engine.Group)
assert o in g
assert self.results.get_group_of_duplicate(self.groups[0]) is None
def test_remove_duplicates(self):
g1,g2 = self.results.groups
g1, g2 = self.results.groups
self.results.remove_duplicates([g1.dupes[0]])
eq_(2,len(g1))
eq_(2, len(g1))
assert g1 in self.results.groups
self.results.remove_duplicates([g1.ref])
eq_(2,len(g1))
eq_(2, len(g1))
assert g1 in self.results.groups
self.results.remove_duplicates([g1.dupes[0]])
eq_(0,len(g1))
eq_(0, len(g1))
assert g1 not in self.results.groups
self.results.remove_duplicates([g2.dupes[0]])
eq_(0,len(g2))
eq_(0, len(g2))
assert g2 not in self.results.groups
eq_(0,len(self.results.groups))
eq_(0, len(self.results.groups))
def test_remove_duplicates_with_ref_files(self):
g1,g2 = self.results.groups
g1, g2 = self.results.groups
self.objects[0].is_ref = True
self.objects[1].is_ref = True
self.results.remove_duplicates([self.objects[2]])
eq_(0,len(g1))
eq_(0, len(g1))
assert g1 not in self.results.groups
def test_make_ref(self):
g = self.results.groups[0]
d = g.dupes[0]
self.results.make_ref(d)
assert d is g.ref
def test_sort_groups(self):
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object to go ref.
g1,g2 = self.groups
g1, g2 = self.groups
self.results.sort_groups('size')
assert self.results.groups[0] is g2
assert self.results.groups[1] is g1
self.results.sort_groups('size', False)
assert self.results.groups[0] is g1
assert self.results.groups[1] is g2
def test_set_groups_when_sorted(self):
self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object to go ref.
self.results.sort_groups('size')
objects,matches,groups = GetTestGroups()
g1,g2 = groups
objects, matches, groups = GetTestGroups()
g1, g2 = groups
g1.switch_ref(objects[1])
self.results.groups = groups
assert self.results.groups[0] is g2
assert self.results.groups[1] is g1
def test_get_dupe_list(self):
eq_([self.objects[1],self.objects[2],self.objects[4]],self.results.dupes)
eq_([self.objects[1], self.objects[2], self.objects[4]], self.results.dupes)
def test_dupe_list_is_cached(self):
assert self.results.dupes is self.results.dupes
def test_dupe_list_cache_is_invalidated_when_needed(self):
o1,o2,o3,o4,o5 = self.objects
eq_([o2,o3,o5],self.results.dupes)
o1, o2, o3, o4, o5 = self.objects
eq_([o2, o3, o5], self.results.dupes)
self.results.make_ref(o2)
eq_([o1,o3,o5],self.results.dupes)
objects,matches,groups = GetTestGroups()
o1,o2,o3,o4,o5 = objects
eq_([o1, o3, o5], self.results.dupes)
objects, matches, groups = GetTestGroups()
o1, o2, o3, o4, o5 = objects
self.results.groups = groups
eq_([o2,o3,o5],self.results.dupes)
eq_([o2, o3, o5], self.results.dupes)
def test_dupe_list_sort(self):
o1,o2,o3,o4,o5 = self.objects
o1, o2, o3, o4, o5 = self.objects
o1.size = 5
o2.size = 4
o3.size = 3
o4.size = 2
o5.size = 1
self.results.sort_dupes('size')
eq_([o5,o3,o2],self.results.dupes)
eq_([o5, o3, o2], self.results.dupes)
self.results.sort_dupes('size', False)
eq_([o2,o3,o5],self.results.dupes)
eq_([o2, o3, o5], self.results.dupes)
def test_dupe_list_remember_sort(self):
o1,o2,o3,o4,o5 = self.objects
o1, o2, o3, o4, o5 = self.objects
o1.size = 5
o2.size = 4
o3.size = 3
@@ -173,40 +171,40 @@ class TestCaseResultsWithSomeGroups:
o5.size = 1
self.results.sort_dupes('size')
self.results.make_ref(o2)
eq_([o5,o3,o1],self.results.dupes)
eq_([o5, o3, o1], self.results.dupes)
def test_dupe_list_sort_delta_values(self):
o1,o2,o3,o4,o5 = self.objects
o1, o2, o3, o4, o5 = self.objects
o1.size = 10
o2.size = 2 #-8
o3.size = 3 #-7
o4.size = 20
o5.size = 1 #-19
self.results.sort_dupes('size', delta=True)
eq_([o5,o2,o3],self.results.dupes)
eq_([o5, o2, o3], self.results.dupes)
def test_sort_empty_list(self):
#There was an infinite loop when sorting an empty list.
app = DupeGuru()
r = app.results
r.sort_dupes('name')
eq_([],r.dupes)
eq_([], r.dupes)
def test_dupe_list_update_on_remove_duplicates(self):
o1,o2,o3,o4,o5 = self.objects
eq_(3,len(self.results.dupes))
o1, o2, o3, o4, o5 = self.objects
eq_(3, len(self.results.dupes))
self.results.remove_duplicates([o2])
eq_(2,len(self.results.dupes))
eq_(2, len(self.results.dupes))
def test_is_modified(self):
# Changing the groups sets the modified flag
assert self.results.is_modified
def test_is_modified_after_save_and_load(self):
# Saving/Loading a file sets the modified flag back to False
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]
f = io.BytesIO()
self.results.save_to_xml(f)
assert not self.results.is_modified
@@ -214,13 +212,13 @@ class TestCaseResultsWithSomeGroups:
f.seek(0)
self.results.load_from_xml(f, get_file)
assert not self.results.is_modified
def test_is_modified_after_removing_all_results(self):
# Removing all results sets the is_modified flag to false.
self.results.mark_all()
self.results.perform_on_marked(lambda x:None, True)
self.results.perform_on_marked(lambda x: None, True)
assert not self.results.is_modified
def test_group_of_duplicate_after_removal(self):
# removing a duplicate also removes it from the dupe:group map.
dupe = self.results.groups[1].dupes[0]
@@ -229,7 +227,7 @@ class TestCaseResultsWithSomeGroups:
assert self.results.get_group_of_duplicate(dupe) is None
# also remove group ref
assert self.results.get_group_of_duplicate(ref) is None
def test_dupe_list_sort_delta_values_nonnumeric(self):
# When sorting dupes in delta mode on a non-numeric column, our first sort criteria is if
# the string is the same as its ref.
@@ -239,7 +237,7 @@ class TestCaseResultsWithSomeGroups:
g2r.name = g2d1.name = "aaa"
self.results.sort_dupes('name', delta=True)
eq_("aaa", self.results.dupes[2].name)
def test_dupe_list_sort_delta_values_nonnumeric_case_insensitive(self):
# Non-numeric delta sorting comparison is case insensitive
g1r, g1d1, g1d2, g2r, g2d1 = self.objects
@@ -252,92 +250,92 @@ class TestCaseResultsWithSavedResults:
def setup_method(self, method):
self.app = DupeGuru()
self.results = self.app.results
self.objects,self.matches,self.groups = GetTestGroups()
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
self.f = io.BytesIO()
self.results.save_to_xml(self.f)
self.f.seek(0)
def test_is_modified(self):
# Saving a file sets the modified flag back to False
assert not self.results.is_modified
def test_is_modified_after_load(self):
# Loading a file sets the modified flag back to False
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]
self.results.groups = self.groups # sets the flag back
self.results.load_from_xml(self.f, get_file)
assert not self.results.is_modified
def test_is_modified_after_remove(self):
# Removing dupes sets the modified flag
self.results.remove_duplicates([self.results.groups[0].dupes[0]])
assert self.results.is_modified
def test_is_modified_after_make_ref(self):
# Making a dupe ref sets the modified flag
self.results.make_ref(self.results.groups[0].dupes[0])
assert self.results.is_modified
class TestCaseResultsMarkings:
def setup_method(self, method):
self.app = DupeGuru()
self.results = self.app.results
self.objects,self.matches,self.groups = GetTestGroups()
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
def test_stat_line(self):
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.mark(self.objects[1])
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.mark_invert()
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.mark_invert()
self.results.unmark(self.objects[1])
self.results.mark(self.objects[2])
self.results.mark(self.objects[4])
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.mark(self.objects[0]) #this is a ref, it can't be counted
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.groups = self.groups
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
def test_with_ref_duplicate(self):
self.objects[1].is_ref = True
self.results.groups = self.groups
assert not self.results.mark(self.objects[1])
self.results.mark(self.objects[2])
eq_("1 / 2 (1.00 B / 2.00 B) duplicates marked.",self.results.stat_line)
eq_("1 / 2 (1.00 B / 2.00 B) duplicates marked.", self.results.stat_line)
def test_perform_on_marked(self):
def log_object(o):
log.append(o)
return True
log = []
self.results.mark_all()
self.results.perform_on_marked(log_object,False)
self.results.perform_on_marked(log_object, False)
assert self.objects[1] in log
assert self.objects[2] in log
assert self.objects[4] in log
eq_(3,len(log))
eq_(3, len(log))
log = []
self.results.mark_none()
self.results.mark(self.objects[4])
self.results.perform_on_marked(log_object,True)
eq_(1,len(log))
self.results.perform_on_marked(log_object, True)
eq_(1, len(log))
assert self.objects[4] in log
eq_(1,len(self.results.groups))
eq_(1, len(self.results.groups))
def test_perform_on_marked_with_problems(self):
def log_object(o):
log.append(o)
if o is self.objects[1]:
raise EnvironmentError('foobar')
log = []
self.results.mark_all()
assert self.results.is_marked(self.objects[1])
@@ -352,55 +350,55 @@ class TestCaseResultsMarkings:
dupe, msg = self.results.problems[0]
assert dupe is self.objects[1]
eq_(msg, 'foobar')
def test_perform_on_marked_with_ref(self):
def log_object(o):
log.append(o)
return True
log = []
self.objects[0].is_ref = True
self.objects[1].is_ref = True
self.results.mark_all()
self.results.perform_on_marked(log_object,True)
self.results.perform_on_marked(log_object, True)
assert self.objects[1] not in log
assert self.objects[2] in log
assert self.objects[4] in log
eq_(2,len(log))
eq_(0,len(self.results.groups))
eq_(2, len(log))
eq_(0, len(self.results.groups))
def test_perform_on_marked_remove_objects_only_at_the_end(self):
def check_groups(o):
eq_(3,len(g1))
eq_(2,len(g2))
eq_(3, len(g1))
eq_(2, len(g2))
return True
g1,g2 = self.results.groups
g1, g2 = self.results.groups
self.results.mark_all()
self.results.perform_on_marked(check_groups,True)
eq_(0,len(g1))
eq_(0,len(g2))
eq_(0,len(self.results.groups))
self.results.perform_on_marked(check_groups, True)
eq_(0, len(g1))
eq_(0, len(g2))
eq_(0, len(self.results.groups))
def test_remove_duplicates(self):
g1 = self.results.groups[0]
self.results.mark(g1.dupes[0])
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.remove_duplicates([g1.dupes[1]])
eq_("1 / 2 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("1 / 2 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.remove_duplicates([g1.dupes[0]])
eq_("0 / 1 (0.00 B / 1.00 B) duplicates marked.",self.results.stat_line)
eq_("0 / 1 (0.00 B / 1.00 B) duplicates marked.", self.results.stat_line)
def test_make_ref(self):
g = self.results.groups[0]
d = g.dupes[0]
self.results.mark(d)
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.",self.results.stat_line)
eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line)
self.results.make_ref(d)
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.",self.results.stat_line)
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.", self.results.stat_line)
self.results.make_ref(d)
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.",self.results.stat_line)
eq_("0 / 3 (0.00 B / 3.00 B) duplicates marked.", self.results.stat_line)
def test_SaveXML(self):
self.results.mark(self.objects[1])
self.results.mark_invert()
@@ -417,11 +415,11 @@ class TestCaseResultsMarkings:
d1, d2 = g2.getiterator('file')
eq_('n', d1.get('marked'))
eq_('y', d2.get('marked'))
def test_LoadXML(self):
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
self.results.mark(self.objects[1])
self.results.mark_invert()
@@ -430,13 +428,13 @@ class TestCaseResultsMarkings:
f.seek(0)
app = DupeGuru()
r = Results(app)
r.load_from_xml(f,get_file)
r.load_from_xml(f, get_file)
assert not r.is_marked(self.objects[0])
assert not r.is_marked(self.objects[1])
assert r.is_marked(self.objects[2])
assert not r.is_marked(self.objects[3])
assert r.is_marked(self.objects[4])
class TestCaseResultsXML:
def setup_method(self, method):
@@ -444,13 +442,13 @@ class TestCaseResultsXML:
self.results = self.app.results
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
def get_file(self, path): # use this as a callback for load_from_xml
return [o for o in self.objects if o.path == path][0]
def test_save_to_xml(self):
self.objects[0].is_ref = True
self.objects[0].words = [['foo','bar']]
self.objects[0].words = [['foo', 'bar']]
f = io.BytesIO()
self.results.save_to_xml(f)
f.seek(0)
@ -460,34 +458,34 @@ class TestCaseResultsXML:
eq_(2, len(root))
eq_(2, len([c for c in root if c.tag == 'group']))
g1, g2 = root
eq_(6,len(g1))
eq_(3,len([c for c in g1 if c.tag == 'file']))
eq_(3,len([c for c in g1 if c.tag == 'match']))
eq_(6, len(g1))
eq_(3, len([c for c in g1 if c.tag == 'file']))
eq_(3, len([c for c in g1 if c.tag == 'match']))
d1, d2, d3 = [c for c in g1 if c.tag == 'file']
eq_(op.join('basepath','foo bar'),d1.get('path'))
eq_(op.join('basepath','bar bleh'),d2.get('path'))
eq_(op.join('basepath','foo bleh'),d3.get('path'))
eq_('y',d1.get('is_ref'))
eq_('n',d2.get('is_ref'))
eq_('n',d3.get('is_ref'))
eq_('foo,bar',d1.get('words'))
eq_('bar,bleh',d2.get('words'))
eq_('foo,bleh',d3.get('words'))
eq_(3,len(g2))
eq_(2,len([c for c in g2 if c.tag == 'file']))
eq_(1,len([c for c in g2 if c.tag == 'match']))
eq_(op.join('basepath', 'foo bar'), d1.get('path'))
eq_(op.join('basepath', 'bar bleh'), d2.get('path'))
eq_(op.join('basepath', 'foo bleh'), d3.get('path'))
eq_('y', d1.get('is_ref'))
eq_('n', d2.get('is_ref'))
eq_('n', d3.get('is_ref'))
eq_('foo,bar', d1.get('words'))
eq_('bar,bleh', d2.get('words'))
eq_('foo,bleh', d3.get('words'))
eq_(3, len(g2))
eq_(2, len([c for c in g2 if c.tag == 'file']))
eq_(1, len([c for c in g2 if c.tag == 'match']))
d1, d2 = [c for c in g2 if c.tag == 'file']
eq_(op.join('basepath','ibabtu'),d1.get('path'))
eq_(op.join('basepath','ibabtu'),d2.get('path'))
eq_('n',d1.get('is_ref'))
eq_('n',d2.get('is_ref'))
eq_('ibabtu',d1.get('words'))
eq_('ibabtu',d2.get('words'))
eq_(op.join('basepath', 'ibabtu'), d1.get('path'))
eq_(op.join('basepath', 'ibabtu'), d2.get('path'))
eq_('n', d1.get('is_ref'))
eq_('n', d2.get('is_ref'))
eq_('ibabtu', d1.get('words'))
eq_('ibabtu', d2.get('words'))
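
Spelled out, the document shape these assertions walk over: a root element (its tag is irrelevant on load, per test_LoadXML_missing_attributes_and_bogus_elements below) holding one group element per dupe group; each group holds file elements carrying path, comma-joined words and is_ref (plus marked, per the markings tests above), and match elements that refer to files by index within the group. A minimal builder for that shape with the same ElementTree API the tests use; match percentages are persisted too (see test_remember_match_percentage below), but since that attribute's name isn't visible here, it is left out:

import xml.etree.ElementTree as ET

root = ET.Element('results')        # root tag doesn't matter on load
group = ET.SubElement(root, 'group')
f1 = ET.SubElement(group, 'file')
f1.set('path', 'basepath/foo bar')
f1.set('words', 'foo,bar')          # words are comma-joined, no spaces
f1.set('is_ref', 'y')
f2 = ET.SubElement(group, 'file')
f2.set('path', 'basepath/bar bleh')
f2.set('words', 'bar,bleh')
f2.set('is_ref', 'n')
match = ET.SubElement(group, 'match')
match.set('first', '0')             # indexes into the group's file elements
match.set('second', '1')
print(ET.tostring(root))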
def test_LoadXML(self):
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]
self.objects[0].is_ref = True
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
f = io.BytesIO()
@ -495,76 +493,76 @@ class TestCaseResultsXML:
f.seek(0)
app = DupeGuru()
r = Results(app)
r.load_from_xml(f,get_file)
eq_(2,len(r.groups))
g1,g2 = r.groups
eq_(3,len(g1))
r.load_from_xml(f, get_file)
eq_(2, len(r.groups))
g1, g2 = r.groups
eq_(3, len(g1))
assert g1[0].is_ref
assert not g1[1].is_ref
assert not g1[2].is_ref
assert g1[0] is self.objects[0]
assert g1[1] is self.objects[1]
assert g1[2] is self.objects[2]
eq_(['foo','bar'],g1[0].words)
eq_(['bar','bleh'],g1[1].words)
eq_(['foo','bleh'],g1[2].words)
eq_(2,len(g2))
eq_(['foo', 'bar'], g1[0].words)
eq_(['bar', 'bleh'], g1[1].words)
eq_(['foo', 'bleh'], g1[2].words)
eq_(2, len(g2))
assert not g2[0].is_ref
assert not g2[1].is_ref
assert g2[0] is self.objects[3]
assert g2[1] is self.objects[4]
eq_(['ibabtu'],g2[0].words)
eq_(['ibabtu'],g2[1].words)
eq_(['ibabtu'], g2[0].words)
eq_(['ibabtu'], g2[1].words)
def test_LoadXML_with_filename(self, tmpdir):
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]
filename = str(tmpdir.join('dupeguru_results.xml'))
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
self.results.save_to_xml(filename)
app = DupeGuru()
r = Results(app)
r.load_from_xml(filename,get_file)
eq_(2,len(r.groups))
r.load_from_xml(filename, get_file)
eq_(2, len(r.groups))
def test_LoadXML_with_some_files_that_dont_exist_anymore(self):
def get_file(path):
if path.endswith('ibabtu 2'):
return None
return [f for f in self.objects if str(f.path) == path][0]
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
f = io.BytesIO()
self.results.save_to_xml(f)
f.seek(0)
app = DupeGuru()
r = Results(app)
r.load_from_xml(f,get_file)
eq_(1,len(r.groups))
eq_(3,len(r.groups[0]))
r.load_from_xml(f, get_file)
eq_(1, len(r.groups))
eq_(3, len(r.groups[0]))
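
The get_file callbacks repeated across these tests are load_from_xml()'s resolution hook: every stored path is handed to the callback, returning None drops that dupe, and a group left with fewer than two files disappears entirely, as asserted just above. A dict-based variant of the same callback (make_get_file is a hypothetical helper, not dupeGuru API), which also avoids the IndexError the [0]-indexing version would raise on an unknown path:

from collections import namedtuple

def make_get_file(files):
    """Build a load_from_xml() callback over an in-memory file list."""
    by_path = {str(f.path): f for f in files}

    def get_file(path):
        # None tells the loader the file is gone; that dupe is skipped
        return by_path.get(path)

    return get_file

F = namedtuple('F', 'path')
get_file = make_get_file([F('basepath/foo bar')])
assert get_file('basepath/foo bar') is not None
assert get_file('basepath/ibabtu 2') is None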
def test_LoadXML_missing_attributes_and_bogus_elements(self):
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]
root = ET.Element('foobar') #The root element shouldn't matter, really.
group_node = ET.SubElement(root, 'group')
dupe_node = ET.SubElement(group_node, 'file') #Perfectly correct file
dupe_node.set('path', op.join('basepath','foo bar'))
dupe_node.set('path', op.join('basepath', 'foo bar'))
dupe_node.set('is_ref', 'y')
dupe_node.set('words', 'foo,bar')
dupe_node = ET.SubElement(group_node, 'file') #is_ref missing, default to 'n'
dupe_node.set('path',op.join('basepath','foo bleh'))
dupe_node.set('words','foo,bleh')
dupe_node.set('path', op.join('basepath', 'foo bleh'))
dupe_node.set('words', 'foo,bleh')
dupe_node = ET.SubElement(group_node, 'file') #words are missing, valid.
dupe_node.set('path',op.join('basepath','bar bleh'))
dupe_node.set('path', op.join('basepath', 'bar bleh'))
dupe_node = ET.SubElement(group_node, 'file') #path is missing, invalid.
dupe_node.set('words','foo,bleh')
dupe_node.set('words', 'foo,bleh')
dupe_node = ET.SubElement(group_node, 'foobar') #Invalid element name
dupe_node.set('path',op.join('basepath','bar bleh'))
dupe_node.set('is_ref','y')
dupe_node.set('words','bar,bleh')
dupe_node.set('path', op.join('basepath', 'bar bleh'))
dupe_node.set('is_ref', 'y')
dupe_node.set('words', 'bar,bleh')
match_node = ET.SubElement(group_node, 'match') # match pointing to a bad index
match_node.set('first', '42')
match_node.set('second', '45')
@ -582,21 +580,21 @@ class TestCaseResultsXML:
app = DupeGuru()
r = Results(app)
r.load_from_xml(f, get_file)
eq_(1,len(r.groups))
eq_(3,len(r.groups[0]))
eq_(1, len(r.groups))
eq_(3, len(r.groups[0]))
def test_xml_non_ascii(self):
def get_file(path):
if path == op.join('basepath','\xe9foo bar'):
if path == op.join('basepath', '\xe9foo bar'):
return objects[0]
if path == op.join('basepath','bar bleh'):
if path == op.join('basepath', 'bar bleh'):
return objects[1]
objects = [NamedObject("\xe9foo bar",True),NamedObject("bar bleh",True)]
objects = [NamedObject("\xe9foo bar", True), NamedObject("bar bleh", True)]
matches = engine.getmatches(objects) # the two names share 'bar', so we get exactly 1 match
groups = engine.get_groups(matches) # and therefore 1 group
for g in groups:
g.prioritize(lambda x:objects.index(x)) #We want the dupes to be in the same order as the list is
g.prioritize(lambda x: objects.index(x)) # we want the dupes in the same order as in the objects list
app = DupeGuru()
results = Results(app)
results.groups = groups
@ -605,20 +603,20 @@ class TestCaseResultsXML:
f.seek(0)
app = DupeGuru()
r = Results(app)
r.load_from_xml(f,get_file)
r.load_from_xml(f, get_file)
g = r.groups[0]
eq_("\xe9foo bar",g[0].name)
eq_(['efoo','bar'],g[0].words)
eq_("\xe9foo bar", g[0].name)
eq_(['efoo', 'bar'], g[0].words)
def test_load_invalid_xml(self):
f = io.BytesIO()
f.write(b'<this is invalid')
f.seek(0)
app = DupeGuru()
r = Results(app)
r.load_from_xml(f,None)
eq_(0,len(r.groups))
r.load_from_xml(f, None)
eq_(0, len(r.groups))
def test_load_non_existant_xml(self):
app = DupeGuru()
r = Results(app)
@ -626,8 +624,8 @@ class TestCaseResultsXML:
r.load_from_xml('does_not_exist.xml', None)
except IOError:
self.fail()
eq_(0,len(r.groups))
eq_(0, len(r.groups))
def test_remember_match_percentage(self):
group = self.groups[0]
d1, d2, d3 = group
@ -652,7 +650,7 @@ class TestCaseResultsXML:
group.switch_ref(d2)
match = group.get_match_of(d3) #d2 - d3
eq_(46, match[2])
def test_save_and_load(self):
# previously, when reloading matches, they wouldn't be reloaded as namedtuples
f = io.BytesIO()
@ -660,17 +658,17 @@ class TestCaseResultsXML:
f.seek(0)
self.results.load_from_xml(f, self.get_file)
first(self.results.groups[0].matches).percentage
def test_apply_filter_works_on_paths(self):
# apply_filter() searches on the whole path, not just on the filename.
self.results.apply_filter('basepath')
eq_(len(self.results.groups), 2)
def test_save_xml_with_invalid_characters(self):
# Don't crash when saving files that have invalid xml characters in their path
self.objects[0].name = 'foo\x19'
self.results.save_to_xml(io.BytesIO()) # don't crash
class TestCaseResultsFilter:
def setup_method(self, method):
@ -679,40 +677,40 @@ class TestCaseResultsFilter:
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
self.results.apply_filter(r'foo')
def test_groups(self):
eq_(1, len(self.results.groups))
assert self.results.groups[0] is self.groups[0]
def test_dupes(self):
# Two objects match the filter. The first one is the ref, so only the 3rd object is supposed to be in dupes.
eq_(1, len(self.results.dupes))
assert self.results.dupes[0] is self.objects[2]
def test_cancel_filter(self):
self.results.apply_filter(None)
eq_(3, len(self.results.dupes))
eq_(2, len(self.results.groups))
def test_dupes_reconstructed_filtered(self):
# make_ref resets self.__dupes to None. When it's reconstructed, we want it filtered
dupe = self.results.dupes[0] #3rd object
self.results.make_ref(dupe)
eq_(1, len(self.results.dupes))
assert self.results.dupes[0] is self.objects[0]
def test_include_ref_dupes_in_filter(self):
# When only the ref of a group matches the filter, keep the group (it will have no dupes)
self.results.apply_filter(None)
self.results.apply_filter(r'foo bar')
eq_(1, len(self.results.groups))
eq_(0, len(self.results.dupes))
def test_filters_build_on_one_another(self):
self.results.apply_filter(r'bar')
eq_(1, len(self.results.groups))
eq_(0, len(self.results.dupes))
def test_stat_line(self):
expected = '0 / 1 (0.00 B / 1.00 B) duplicates marked. filter: foo'
eq_(expected, self.results.stat_line)
@ -722,7 +720,7 @@ class TestCaseResultsFilter:
self.results.apply_filter(None)
expected = '0 / 3 (0.00 B / 1.01 KB) duplicates marked.'
eq_(expected, self.results.stat_line)
def test_mark_count_is_filtered_as_well(self):
self.results.apply_filter(None)
# We don't want to perform mark_all() because we want the mark list to contain objects
@ -731,18 +729,18 @@ class TestCaseResultsFilter:
self.results.apply_filter(r'foo')
expected = '1 / 1 (1.00 B / 1.00 B) duplicates marked. filter: foo'
eq_(expected, self.results.stat_line)
def test_mark_all_only_affects_filtered_items(self):
# When performing actions like mark_all() and mark_none() in a filtered environment, only mark
# items that are actually in the filter.
self.results.mark_all()
self.results.apply_filter(None)
eq_(self.results.mark_count, 1)
def test_sort_groups(self):
self.results.apply_filter(None)
self.results.make_ref(self.objects[1]) # to have the 1024 B object as ref
g1,g2 = self.groups
g1, g2 = self.groups
self.results.apply_filter('a') # Matches both groups
self.results.sort_groups('size')
assert self.results.groups[0] is g2
@ -754,43 +752,43 @@ class TestCaseResultsFilter:
self.results.apply_filter('a')
assert self.results.groups[1] is g2
assert self.results.groups[0] is g1
def test_set_group(self):
#We want the new group to be filtered
self.objects, self.matches, self.groups = GetTestGroups()
self.results.groups = self.groups
eq_(1, len(self.results.groups))
assert self.results.groups[0] is self.groups[0]
def test_load_cancels_filter(self, tmpdir):
def get_file(path):
return [f for f in self.objects if str(f.path) == path][0]
filename = str(tmpdir.join('dupeguru_results.xml'))
self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
self.results.save_to_xml(filename)
app = DupeGuru()
r = Results(app)
r.apply_filter('foo')
r.load_from_xml(filename,get_file)
eq_(2,len(r.groups))
r.load_from_xml(filename, get_file)
eq_(2, len(r.groups))
def test_remove_dupe(self):
self.results.remove_duplicates([self.results.dupes[0]])
self.results.apply_filter(None)
eq_(2,len(self.results.groups))
eq_(2,len(self.results.dupes))
eq_(2, len(self.results.groups))
eq_(2, len(self.results.dupes))
self.results.apply_filter('ibabtu')
self.results.remove_duplicates([self.results.dupes[0]])
self.results.apply_filter(None)
eq_(1,len(self.results.groups))
eq_(1,len(self.results.dupes))
eq_(1, len(self.results.groups))
eq_(1, len(self.results.dupes))
def test_filter_is_case_insensitive(self):
self.results.apply_filter(None)
self.results.apply_filter('FOO')
eq_(1, len(self.results.dupes))
def test_make_ref_on_filtered_out_doesnt_mess_stats(self):
# When filtered, a group containing filtered-out dupes will display them as being the reference.
# When calling make_ref on such a dupe, the total size and dupecount stats get messed up
@ -804,7 +802,7 @@ class TestCaseResultsFilter:
self.results.apply_filter(None) # Now let's make sure our unfiltered results aren't fucked up
expected = '0 / 3 (0.00 B / 3.00 B) duplicates marked.'
eq_(expected, self.results.stat_line)
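
Condensed, the contract these tests establish for apply_filter(): the argument is a regular expression searched against the whole path, matching is case-insensitive, successive calls narrow the previous result, and None cancels all filters. A minimal model of just the matching part, leaving out the group/ref and stats bookkeeping tested above:

import re

class FilterModel:
    """Matching semantics of Results.apply_filter(), inferred from the tests."""
    def __init__(self, paths):
        self.paths = paths
        self.filtered = list(paths)

    def apply_filter(self, filter_str):
        if filter_str is None:          # None cancels all filters
            self.filtered = list(self.paths)
            return
        pattern = re.compile(filter_str, re.IGNORECASE)
        # filters build on one another: narrow the current subset
        self.filtered = [p for p in self.filtered if pattern.search(p)]

m = FilterModel(['basepath/foo bar', 'basepath/bar bleh', 'basepath/ibabtu'])
m.apply_filter('FOO')                   # case-insensitive, whole-path search
assert m.filtered == ['basepath/foo bar']
m.apply_filter(None)
m.apply_filter('bar')                   # matches 'foo bar' and 'bar bleh'
assert len(m.filtered) == 2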
class TestCaseResultsRefFile:
def setup_method(self, method):
@ -814,8 +812,8 @@ class TestCaseResultsRefFile:
self.objects[0].is_ref = True
self.objects[1].is_ref = True
self.results.groups = self.groups
def test_stat_line(self):
expected = '0 / 2 (0.00 B / 2.00 B) duplicates marked.'
eq_(expected, self.results.stat_line)

View File

@ -88,10 +88,10 @@ def test_priorize(fake_fileexists):
f[3].is_ref = True
r = s.get_dupe_groups(f)
g1, g2 = r
assert f[1] in (g1.ref,g2.ref)
assert f[0] in (g1.dupes[0],g2.dupes[0])
assert f[3] in (g1.ref,g2.ref)
assert f[2] in (g1.dupes[0],g2.dupes[0])
assert f[1] in (g1.ref, g2.ref)
assert f[0] in (g1.dupes[0], g2.dupes[0])
assert f[3] in (g1.ref, g2.ref)
assert f[2] in (g1.dupes[0], g2.dupes[0])
def test_content_scan(fake_fileexists):
s = Scanner()
@ -135,7 +135,7 @@ def test_min_match_perc_doesnt_matter_for_content_scan(fake_fileexists):
def test_content_scan_doesnt_put_md5_in_words_at_the_end(fake_fileexists):
s = Scanner()
s.scan_type = ScanType.Contents
f = [no('foo'),no('bar')]
f = [no('foo'), no('bar')]
f[0].md5 = f[0].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
f[1].md5 = f[1].md5partial = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
r = s.get_dupe_groups(f)
@ -209,7 +209,7 @@ def test_tag_scan(fake_fileexists):
o1.title = 'The Air Near My Fingers'
o2.artist = 'The White Stripes'
o2.title = 'The Air Near My Fingers'
r = s.get_dupe_groups([o1,o2])
r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1)
def test_tag_with_album_scan(fake_fileexists):
@ -228,7 +228,7 @@ def test_tag_with_album_scan(fake_fileexists):
o3.artist = 'The White Stripes'
o3.title = 'The Air Near My Fingers'
o3.album = 'foobar'
r = s.get_dupe_groups([o1,o2,o3])
r = s.get_dupe_groups([o1, o2, o3])
eq_(len(r), 1)
def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
@ -244,7 +244,7 @@ def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
o2.artist = 'The White Stripes - b'
o2.title = 'The Air Near My Fingers - b'
o2.album = 'Elephant - b'
r = s.get_dupe_groups([o1,o2])
r = s.get_dupe_groups([o1, o2])
eq_(len(r), 1)
def test_tag_scan_with_different_scanned(fake_fileexists):
@ -344,9 +344,9 @@ def test_ignore_list(fake_fileexists):
f2.path = Path('dir2/foobar')
f3.path = Path('dir3/foobar')
ignore_list = IgnoreList()
ignore_list.Ignore(str(f1.path),str(f2.path))
ignore_list.Ignore(str(f1.path),str(f3.path))
r = s.get_dupe_groups([f1,f2,f3], ignore_list=ignore_list)
ignore_list.Ignore(str(f1.path), str(f2.path))
ignore_list.Ignore(str(f1.path), str(f3.path))
r = s.get_dupe_groups([f1, f2, f3], ignore_list=ignore_list)
eq_(len(r), 1)
g = r[0]
eq_(len(g.dupes), 1)
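
As exercised here and in the app-level tests earlier, IgnoreList stores unordered pairs of path strings: Ignore(a, b) records a pair, AreIgnored() checks one in either order, len() counts pairs, and the scanner drops matches whose two paths are ignored (hence one group instead of two above). A behavioral stand-in using a set of frozensets; the real class additionally enforces unicode arguments and supports purging, per the earlier tests:

class IgnoreListModel:
    """Stand-in for core's IgnoreList, inferred from these tests."""
    def __init__(self):
        self._pairs = set()

    def Ignore(self, first, second):
        self._pairs.add(frozenset((first, second)))  # order doesn't matter

    def AreIgnored(self, first, second):
        return frozenset((first, second)) in self._pairs

    def __len__(self):
        return len(self._pairs)

il = IgnoreListModel()
il.Ignore('dir1/foobar', 'dir2/foobar')
assert il.AreIgnored('dir2/foobar', 'dir1/foobar')   # symmetric
assert len(il) == 1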
@ -367,9 +367,9 @@ def test_ignore_list_checks_for_unicode(fake_fileexists):
f2.path = Path('foo2\u00e9')
f3.path = Path('foo3\u00e9')
ignore_list = IgnoreList()
ignore_list.Ignore(str(f1.path),str(f2.path))
ignore_list.Ignore(str(f1.path),str(f3.path))
r = s.get_dupe_groups([f1,f2,f3], ignore_list=ignore_list)
ignore_list.Ignore(str(f1.path), str(f2.path))
ignore_list.Ignore(str(f1.path), str(f3.path))
r = s.get_dupe_groups([f1, f2, f3], ignore_list=ignore_list)
eq_(len(r), 1)
g = r[0]
eq_(len(g.dupes), 1)
@ -398,7 +398,7 @@ def test_size_threshold(fake_fileexists):
f2 = no('foo', 2, path='p2')
f3 = no('foo', 3, path='p3')
s.size_threshold = 2
groups = s.get_dupe_groups([f1,f2,f3])
groups = s.get_dupe_groups([f1, f2, f3])
eq_(len(groups), 1)
[group] = groups
eq_(len(group), 2)
@ -471,9 +471,11 @@ def test_dont_group_files_that_dont_exist(tmpdir):
p['file1'].open('w').write('foo')
p['file2'].open('w').write('foo')
file1, file2 = fs.get_files(p)
def getmatches(*args, **kw):
file2.path.remove()
return [Match(file1, file2, 100)]
s._getmatches = getmatches
assert not s.get_dupe_groups([file1, file2])

View File

@ -1,17 +1,13 @@
# -*- coding: utf-8 -*-
# Created By: Virgil Dupras
# Created On: 2009-10-23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.path import Path
from core.engine import getwords
from core.tests.scanner_test import NamedObject, no
from ..scanner import *
from core.tests.scanner_test import no
from ..scanner import ScannerME
def pytest_funcarg__fake_fileexists(request):
# This is a hack to avoid invalidating all previous tests since the scanner started to test

View File

@ -1,9 +1,7 @@
# Created By: Virgil Dupras
# Created On: 2006/09/01
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
# The commented out tests are tests for function that have been converted to pure C for speed
@ -11,26 +9,26 @@ from pytest import raises, skip
from hscommon.testutil import eq_
try:
from ..block import *
from ..block import avgdiff, getblocks2, NoBlocksError, DifferentBlockCountError
except ImportError:
skip("Can't import the block module, probably hasn't been compiled.")
def my_avgdiff(first, second, limit=768, min_iter=3): # this is so I don't have to rewrite every call
return avgdiff(first, second, limit, min_iter)
BLACK = (0,0,0)
RED = (0xff,0,0)
GREEN = (0,0xff,0)
BLUE = (0,0,0xff)
BLACK = (0, 0, 0)
RED = (0xff, 0, 0)
GREEN = (0, 0xff, 0)
BLUE = (0, 0, 0xff)
class FakeImage:
def __init__(self, size, data):
self.size = size
self.data = data
def getdata(self):
return self.data
def crop(self, box):
pixels = []
for i in range(box[1], box[3]):
@ -40,276 +38,275 @@ class FakeImage:
return FakeImage((box[2] - box[0], box[3] - box[1]), pixels)
def empty():
return FakeImage((0,0), [])
return FakeImage((0, 0), [])
def single_pixel(): #one red pixel
return FakeImage((1, 1), [(0xff,0,0)])
return FakeImage((1, 1), [(0xff, 0, 0)])
def four_pixels():
pixels = [RED,(0,0x80,0xff),(0x80,0,0),(0,0x40,0x80)]
pixels = [RED, (0, 0x80, 0xff), (0x80, 0, 0), (0, 0x40, 0x80)]
return FakeImage((2, 2), pixels)
class TestCasegetblock:
def test_single_pixel(self):
im = single_pixel()
[b] = getblocks2(im, 1)
eq_(RED,b)
eq_(RED, b)
def test_no_pixel(self):
im = empty()
eq_([], getblocks2(im, 1))
def test_four_pixels(self):
im = four_pixels()
[b] = getblocks2(im, 1)
meanred = (0xff + 0x80) // 4
meangreen = (0x80 + 0x40) // 4
meanblue = (0xff + 0x80) // 4
eq_((meanred,meangreen,meanblue),b)
eq_((meanred, meangreen, meanblue), b)
# class TCdiff(unittest.TestCase):
# def test_diff(self):
# b1 = (10, 20, 30)
# b2 = (1, 2, 3)
# eq_(9 + 18 + 27,diff(b1,b2))
#
# eq_(9 + 18 + 27, diff(b1, b2))
#
# def test_diff_negative(self):
# b1 = (10, 20, 30)
# b2 = (1, 2, 3)
# eq_(9 + 18 + 27,diff(b2,b1))
#
# eq_(9 + 18 + 27, diff(b2, b1))
#
# def test_diff_mixed_positive_and_negative(self):
# b1 = (1, 5, 10)
# b2 = (10, 1, 15)
# eq_(9 + 4 + 5,diff(b1,b2))
#
# eq_(9 + 4 + 5, diff(b1, b2))
#
# class TCgetblocks(unittest.TestCase):
# def test_empty_image(self):
# im = empty()
# blocks = getblocks(im,1)
# eq_(0,len(blocks))
#
# blocks = getblocks(im, 1)
# eq_(0, len(blocks))
#
# def test_one_block_image(self):
# im = four_pixels()
# blocks = getblocks2(im, 1)
# eq_(1,len(blocks))
# eq_(1, len(blocks))
# block = blocks[0]
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
#
# eq_((meanred, meangreen, meanblue), block)
#
# def test_not_enough_height_to_fit_a_block(self):
# im = FakeImage((2,1), [BLACK, BLACK])
# blocks = getblocks(im,2)
# eq_(0,len(blocks))
#
# im = FakeImage((2, 1), [BLACK, BLACK])
# blocks = getblocks(im, 2)
# eq_(0, len(blocks))
#
# def xtest_dont_include_leftovers(self):
# # this test is disabled because getblocks is not used and getblock is cdef'ed
# pixels = [
# RED,(0,0x80,0xff),BLACK,
# (0x80,0,0),(0,0x40,0x80),BLACK,
# BLACK,BLACK,BLACK
# RED, (0, 0x80, 0xff), BLACK,
# (0x80, 0, 0), (0, 0x40, 0x80), BLACK,
# BLACK, BLACK, BLACK
# ]
# im = FakeImage((3,3), pixels)
# blocks = getblocks(im,2)
# im = FakeImage((3, 3), pixels)
# blocks = getblocks(im, 2)
# block = blocks[0]
# #Because the block is smaller than the image, only blocksize must be considered.
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
#
# eq_((meanred, meangreen, meanblue), block)
#
# def xtest_two_blocks(self):
# # this test is disabled because getblocks is not used and getblock is cdef'ed
# pixels = [BLACK for i in xrange(4 * 2)]
# pixels[0] = RED
# pixels[1] = (0,0x80,0xff)
# pixels[4] = (0x80,0,0)
# pixels[5] = (0,0x40,0x80)
# pixels[1] = (0, 0x80, 0xff)
# pixels[4] = (0x80, 0, 0)
# pixels[5] = (0, 0x40, 0x80)
# im = FakeImage((4, 2), pixels)
# blocks = getblocks(im,2)
# eq_(2,len(blocks))
# blocks = getblocks(im, 2)
# eq_(2, len(blocks))
# block = blocks[0]
# #Because the block is smaller than the image, only blocksize must be considered.
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
# eq_(BLACK,blocks[1])
#
# eq_((meanred, meangreen, meanblue), block)
# eq_(BLACK, blocks[1])
#
# def test_four_blocks(self):
# pixels = [BLACK for i in xrange(4 * 4)]
# pixels[0] = RED
# pixels[1] = (0,0x80,0xff)
# pixels[4] = (0x80,0,0)
# pixels[5] = (0,0x40,0x80)
# pixels[1] = (0, 0x80, 0xff)
# pixels[4] = (0x80, 0, 0)
# pixels[5] = (0, 0x40, 0x80)
# im = FakeImage((4, 4), pixels)
# blocks = getblocks2(im, 2)
# eq_(4,len(blocks))
# eq_(4, len(blocks))
# block = blocks[0]
# #Because the block is smaller than the image, only blocksize must be considered.
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
# eq_(BLACK,blocks[1])
# eq_(BLACK,blocks[2])
# eq_(BLACK,blocks[3])
#
# eq_((meanred, meangreen, meanblue), block)
# eq_(BLACK, blocks[1])
# eq_(BLACK, blocks[2])
# eq_(BLACK, blocks[3])
#
class TestCasegetblocks2:
def test_empty_image(self):
im = empty()
blocks = getblocks2(im,1)
eq_(0,len(blocks))
blocks = getblocks2(im, 1)
eq_(0, len(blocks))
def test_one_block_image(self):
im = four_pixels()
blocks = getblocks2(im,1)
eq_(1,len(blocks))
blocks = getblocks2(im, 1)
eq_(1, len(blocks))
block = blocks[0]
meanred = (0xff + 0x80) // 4
meangreen = (0x80 + 0x40) // 4
meanblue = (0xff + 0x80) // 4
eq_((meanred,meangreen,meanblue),block)
eq_((meanred, meangreen, meanblue), block)
def test_four_blocks_all_black(self):
im = FakeImage((2, 2), [BLACK, BLACK, BLACK, BLACK])
blocks = getblocks2(im,2)
eq_(4,len(blocks))
blocks = getblocks2(im, 2)
eq_(4, len(blocks))
for block in blocks:
eq_(BLACK,block)
eq_(BLACK, block)
def test_two_pixels_image_horizontal(self):
pixels = [RED,BLUE]
pixels = [RED, BLUE]
im = FakeImage((2, 1), pixels)
blocks = getblocks2(im,2)
eq_(4,len(blocks))
eq_(RED,blocks[0])
eq_(BLUE,blocks[1])
eq_(RED,blocks[2])
eq_(BLUE,blocks[3])
blocks = getblocks2(im, 2)
eq_(4, len(blocks))
eq_(RED, blocks[0])
eq_(BLUE, blocks[1])
eq_(RED, blocks[2])
eq_(BLUE, blocks[3])
def test_two_pixels_image_vertical(self):
pixels = [RED,BLUE]
pixels = [RED, BLUE]
im = FakeImage((1, 2), pixels)
blocks = getblocks2(im,2)
eq_(4,len(blocks))
eq_(RED,blocks[0])
eq_(RED,blocks[1])
eq_(BLUE,blocks[2])
eq_(BLUE,blocks[3])
blocks = getblocks2(im, 2)
eq_(4, len(blocks))
eq_(RED, blocks[0])
eq_(RED, blocks[1])
eq_(BLUE, blocks[2])
eq_(BLUE, blocks[3])
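
Since the block module is compiled, the grid semantics these tests pin down deserve spelling out: getblocks2(im, n) yields n*n blocks ordered row by row, each block being the per-channel integer mean of the pixels under its grid cell, and cells narrower than a pixel fall back to the nearest pixel, which is what turns a 2x1 image into RED, BLUE, RED, BLUE. A pure-Python model inferred from these tests, not from the C source:

def getblocks2_model(im, block_count_per_side):
    """Pure-Python model of block.getblocks2(), inferred from the tests."""
    w, h = im.size
    if not (w and h):
        return []
    n = block_count_per_side
    pixels = im.getdata()
    blocks = []
    for j in range(n):
        for i in range(n):
            # pixel span under this cell, clamped to at least one pixel
            x0, x1 = i * w // n, max(i * w // n + 1, (i + 1) * w // n)
            y0, y1 = j * h // n, max(j * h // n + 1, (j + 1) * h // n)
            cell = [pixels[y * w + x] for y in range(y0, y1) for x in range(x0, x1)]
            totals = [sum(channel) for channel in zip(*cell)]
            blocks.append(tuple(t // len(cell) for t in totals))
    return blocks

class _Im:
    def __init__(self, size, data):
        self.size = size
        self.data = data

    def getdata(self):
        return self.data

assert getblocks2_model(_Im((2, 1), [(255, 0, 0), (0, 0, 255)]), 2) == [
    (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255)]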
class TestCaseavgdiff:
def test_empty(self):
with raises(NoBlocksError):
my_avgdiff([], [])
def test_two_blocks(self):
im = empty()
b1 = (5,10,15)
b2 = (255,250,245)
b3 = (0,0,0)
b4 = (255,0,255)
blocks1 = [b1,b2]
blocks2 = [b3,b4]
b1 = (5, 10, 15)
b2 = (255, 250, 245)
b3 = (0, 0, 0)
b4 = (255, 0, 255)
blocks1 = [b1, b2]
blocks2 = [b3, b4]
expected1 = 5 + 10 + 15
expected2 = 0 + 250 + 10
expected = (expected1 + expected2) // 2
eq_(expected, my_avgdiff(blocks1, blocks2))
def test_blocks_not_the_same_size(self):
b = (0,0,0)
b = (0, 0, 0)
with raises(DifferentBlockCountError):
my_avgdiff([b,b],[b])
my_avgdiff([b, b], [b])
def test_first_arg_is_empty_but_not_second(self):
#Don't return 0 (as when the 2 lists are empty), raise!
b = (0,0,0)
b = (0, 0, 0)
with raises(DifferentBlockCountError):
my_avgdiff([],[b])
my_avgdiff([], [b])
def test_limit(self):
ref = (0,0,0)
b1 = (10,10,10) #avg 30
b2 = (20,20,20) #avg 45
b3 = (30,30,30) #avg 60
blocks1 = [ref,ref,ref]
blocks2 = [b1,b2,b3]
eq_(45,my_avgdiff(blocks1,blocks2,44))
ref = (0, 0, 0)
b1 = (10, 10, 10) #avg 30
b2 = (20, 20, 20) #avg 45
b3 = (30, 30, 30) #avg 60
blocks1 = [ref, ref, ref]
blocks2 = [b1, b2, b3]
eq_(45, my_avgdiff(blocks1, blocks2, 44))
def test_min_iterations(self):
ref = (0,0,0)
b1 = (10,10,10) #avg 30
b2 = (20,20,20) #avg 45
b3 = (10,10,10) #avg 40
blocks1 = [ref,ref,ref]
blocks2 = [b1,b2,b3]
eq_(40,my_avgdiff(blocks1,blocks2,45 - 1,3))
ref = (0, 0, 0)
b1 = (10, 10, 10) #avg 30
b2 = (20, 20, 20) #avg 45
b3 = (10, 10, 10) #avg 40
blocks1 = [ref, ref, ref]
blocks2 = [b1, b2, b3]
eq_(40, my_avgdiff(blocks1, blocks2, 45 - 1, 3))
# Bah, I don't know why this test fails, but I don't think it matters very much
# def test_just_over_the_limit(self):
# #A score just over the limit might return exactly the limit due to truncating. We should
# #ceil() the result in this case.
# ref = (0,0,0)
# b1 = (10,0,0)
# b2 = (11,0,0)
# blocks1 = [ref,ref]
# blocks2 = [b1,b2]
# eq_(11,my_avgdiff(blocks1,blocks2,10))
#
# ref = (0, 0, 0)
# b1 = (10, 0, 0)
# b2 = (11, 0, 0)
# blocks1 = [ref, ref]
# blocks2 = [b1, b2]
# eq_(11, my_avgdiff(blocks1, blocks2, 10))
#
def test_return_at_least_1_at_the_slightest_difference(self):
ref = (0,0,0)
b1 = (1,0,0)
ref = (0, 0, 0)
b1 = (1, 0, 0)
blocks1 = [ref for i in range(250)]
blocks2 = [ref for i in range(250)]
blocks2[0] = b1
eq_(1,my_avgdiff(blocks1,blocks2))
eq_(1, my_avgdiff(blocks1, blocks2))
def test_return_0_if_there_is_no_difference(self):
ref = (0,0,0)
blocks1 = [ref,ref]
blocks2 = [ref,ref]
eq_(0,my_avgdiff(blocks1,blocks2))
ref = (0, 0, 0)
blocks1 = [ref, ref]
blocks2 = [ref, ref]
eq_(0, my_avgdiff(blocks1, blocks2))
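
Because avgdiff itself is C code, a pure-Python model consistent with every enabled test above is worth having on record. The early-exit rule (return limit + 1 once the running average exceeds limit, checked only after min_iter blocks) and the floor-to-1 rule are inferred from test_limit, test_min_iterations and the two tests just above, not read from the C source:

class NoBlocksError(Exception):
    pass

class DifferentBlockCountError(Exception):
    pass

def avgdiff_model(first, second, limit=768, min_iter=3):
    """Pure-Python model of block.avgdiff(), inferred from the tests."""
    if len(first) != len(second):
        raise DifferentBlockCountError()
    if not first:
        raise NoBlocksError()
    total = 0
    for i, (b1, b2) in enumerate(zip(first, second), 1):
        total += sum(abs(c1 - c2) for c1, c2 in zip(b1, b2))
        if i >= min_iter and total // i > limit:
            return limit + 1           # past the limit: bail out early
    avg = total // len(first)
    return max(avg, 1) if total else 0  # any difference at all reports >= 1

ref = (0, 0, 0)
assert avgdiff_model([ref] * 3, [(10, 10, 10), (20, 20, 20), (30, 30, 30)], 44) == 45
assert avgdiff_model([ref, ref], [ref, ref]) == 0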
# class TCmaxdiff(unittest.TestCase):
# def test_empty(self):
# self.assertRaises(NoBlocksError,maxdiff,[],[])
#
# self.assertRaises(NoBlocksError, maxdiff, [], [])
#
# def test_two_blocks(self):
# b1 = (5,10,15)
# b2 = (255,250,245)
# b3 = (0,0,0)
# b4 = (255,0,255)
# blocks1 = [b1,b2]
# blocks2 = [b3,b4]
# b1 = (5, 10, 15)
# b2 = (255, 250, 245)
# b3 = (0, 0, 0)
# b4 = (255, 0, 255)
# blocks1 = [b1, b2]
# blocks2 = [b3, b4]
# expected1 = 5 + 10 + 15
# expected2 = 0 + 250 + 10
# expected = max(expected1,expected2)
# eq_(expected,maxdiff(blocks1,blocks2))
#
# expected = max(expected1, expected2)
# eq_(expected, maxdiff(blocks1, blocks2))
#
# def test_blocks_not_the_same_size(self):
# b = (0,0,0)
# self.assertRaises(DifferentBlockCountError,maxdiff,[b,b],[b])
#
# b = (0, 0, 0)
# self.assertRaises(DifferentBlockCountError, maxdiff, [b, b], [b])
#
# def test_first_arg_is_empty_but_not_second(self):
# #Don't return 0 (as when the 2 lists are empty), raise!
# b = (0,0,0)
# self.assertRaises(DifferentBlockCountError,maxdiff,[],[b])
#
# b = (0, 0, 0)
# self.assertRaises(DifferentBlockCountError, maxdiff, [], [b])
#
# def test_limit(self):
# b1 = (5,10,15)
# b2 = (255,250,245)
# b3 = (0,0,0)
# b4 = (255,0,255)
# blocks1 = [b1,b2]
# blocks2 = [b3,b4]
# b1 = (5, 10, 15)
# b2 = (255, 250, 245)
# b3 = (0, 0, 0)
# b4 = (255, 0, 255)
# blocks1 = [b1, b2]
# blocks2 = [b3, b4]
# expected1 = 5 + 10 + 15
# expected2 = 0 + 250 + 10
# eq_(expected1,maxdiff(blocks1,blocks2,expected1 - 1))
#
# eq_(expected1, maxdiff(blocks1, blocks2, expected1 - 1))
#

View File

@ -1,9 +1,7 @@
# Created By: Virgil Dupras
# Created On: 2006/09/14
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import logging
@ -18,47 +16,47 @@ except ImportError:
class TestCasecolors_to_string:
def test_no_color(self):
eq_('',colors_to_string([]))
eq_('', colors_to_string([]))
def test_single_color(self):
eq_('000000',colors_to_string([(0,0,0)]))
eq_('010101',colors_to_string([(1,1,1)]))
eq_('0a141e',colors_to_string([(10,20,30)]))
eq_('000000', colors_to_string([(0, 0, 0)]))
eq_('010101', colors_to_string([(1, 1, 1)]))
eq_('0a141e', colors_to_string([(10, 20, 30)]))
def test_two_colors(self):
eq_('000102030405',colors_to_string([(0,1,2),(3,4,5)]))
eq_('000102030405', colors_to_string([(0, 1, 2), (3, 4, 5)]))
class TestCasestring_to_colors:
def test_empty(self):
eq_([],string_to_colors(''))
eq_([], string_to_colors(''))
def test_single_color(self):
eq_([(0,0,0)],string_to_colors('000000'))
eq_([(2,3,4)],string_to_colors('020304'))
eq_([(10,20,30)],string_to_colors('0a141e'))
eq_([(0, 0, 0)], string_to_colors('000000'))
eq_([(2, 3, 4)], string_to_colors('020304'))
eq_([(10, 20, 30)], string_to_colors('0a141e'))
def test_two_colors(self):
eq_([(10,20,30),(40,50,60)],string_to_colors('0a141e28323c'))
eq_([(10, 20, 30), (40, 50, 60)], string_to_colors('0a141e28323c'))
def test_incomplete_color(self):
# don't return anything if it's not a complete color
eq_([],string_to_colors('102'))
eq_([], string_to_colors('102'))
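
These two helpers are inverses: colors_to_string() packs each (r, g, b) tuple into six lowercase hex digits and concatenates, while string_to_colors() reads the string back three bytes at a time and drops a trailing partial color. Plausible pure-Python equivalents satisfying every assertion above (the shipped versions live next to Cache in core's cache module):

def colors_to_string(colors):
    # (10, 20, 30) -> '0a141e'; colors are simply concatenated
    return ''.join('%02x%02x%02x' % (r, g, b) for r, g, b in colors)

def string_to_colors(s):
    # six hex digits per color; an incomplete trailing color is dropped
    colors = []
    for i in range(0, len(s) - len(s) % 6, 6):
        colors.append(tuple(int(s[j:j + 2], 16) for j in range(i, i + 6, 2)))
    return colors

assert colors_to_string([(10, 20, 30)]) == '0a141e'
assert string_to_colors('0a141e28323c') == [(10, 20, 30), (40, 50, 60)]
assert string_to_colors('102') == []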
class TestCaseCache:
def test_empty(self):
c = Cache()
eq_(0,len(c))
eq_(0, len(c))
with raises(KeyError):
c['foo']
def test_set_then_retrieve_blocks(self):
c = Cache()
b = [(0,0,0),(1,2,3)]
b = [(0, 0, 0), (1, 2, 3)]
c['foo'] = b
eq_(b,c['foo'])
eq_(b, c['foo'])
def test_delitem(self):
c = Cache()
c['foo'] = ''
@ -66,37 +64,37 @@ class TestCaseCache:
assert 'foo' not in c
with raises(KeyError):
del c['foo']
def test_persistance(self, tmpdir):
DBNAME = tmpdir.join('hstest.db')
c = Cache(str(DBNAME))
c['foo'] = [(1,2,3)]
c['foo'] = [(1, 2, 3)]
del c
c = Cache(str(DBNAME))
eq_([(1,2,3)],c['foo'])
eq_([(1, 2, 3)], c['foo'])
def test_filter(self):
c = Cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c.filter(lambda p:p != 'bar') #only 'bar' is removed
eq_(2,len(c))
c.filter(lambda p: p != 'bar') #only 'bar' is removed
eq_(2, len(c))
assert 'foo' in c
assert 'baz' in c
assert 'bar' not in c
def test_clear(self):
c = Cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c.clear()
eq_(0,len(c))
eq_(0, len(c))
assert 'foo' not in c
assert 'baz' not in c
assert 'bar' not in c
def test_corrupted_db(self, tmpdir, monkeypatch):
# If we don't do this monkeypatching, we get a weird exception about trying to flush a
# closed file. I've tried setting logging level and stuff, but nothing worked. So, there we
@ -111,30 +109,30 @@ class TestCaseCache:
del c
c = Cache(dbname)
eq_(c['foo'], [(1, 2, 3)])
def test_by_id(self):
# it's possible to use the cache by referring to the files by their row_id
c = Cache()
b = [(0,0,0),(1,2,3)]
b = [(0, 0, 0), (1, 2, 3)]
c['foo'] = b
foo_id = c.get_id('foo')
eq_(c[foo_id], b)
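
The Cache under test is a dict-like, SQLite-backed mapping from file path to block list, with three extras exercised above: get_id() exposes the underlying row id so lookups can bypass the path, filter() keeps only the keys a predicate accepts, and clear() empties the store. A purely in-memory behavioral stand-in (the real class persists to the dbname passed to its constructor and, per test_corrupted_db, recovers from a corrupted database):

class CacheModel:
    """In-memory stand-in for the SQLite-backed Cache, mirroring the tested API."""
    def __init__(self):
        self._rows = {}      # row_id -> (key, blocks)
        self._ids = {}       # key -> row_id
        self._next_id = 1

    def __len__(self):
        return len(self._ids)

    def __contains__(self, key):
        return key in self._ids

    def __getitem__(self, key):
        if key in self._ids:
            key = self._ids[key]   # string keys resolve to their row id
        return self._rows[key][1]  # unknown key or id -> KeyError

    def __setitem__(self, key, blocks):
        if key not in self._ids:
            self._ids[key] = self._next_id
            self._next_id += 1
        self._rows[self._ids[key]] = (key, blocks)

    def __delitem__(self, key):
        del self._rows[self._ids.pop(key)]

    def get_id(self, key):
        return self._ids[key]

    def filter(self, func):
        for key in [k for k in self._ids if not func(k)]:
            del self[key]

    def clear(self):
        self._rows.clear()
        self._ids.clear()

c = CacheModel()
c['foo'] = [(1, 2, 3)]
assert c[c.get_id('foo')] == [(1, 2, 3)]
c.filter(lambda p: p != 'foo')         # drops 'foo'
assert len(c) == 0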
class TestCaseCacheSQLEscape:
def test_contains(self):
c = Cache()
assert "foo'bar" not in c
def test_getitem(self):
c = Cache()
with raises(KeyError):
c["foo'bar"]
def test_setitem(self):
c = Cache()
c["foo'bar"] = []
def test_delitem(self):
c = Cache()
c["foo'bar"] = []
@ -142,4 +140,4 @@ class TestCaseCacheSQLEscape:
del c["foo'bar"]
except KeyError:
assert False

View File

@ -11,7 +11,7 @@ deps =
-r{toxinidir}/requirements-extra.txt
[flake8]
exclude = .tox,env,build,hscommon,qtlib,cocoalib,cocoa,help,./get-pip.py,./qt/dg_rc.py,./core*/tests,qt/run_template.py,cocoa/run_template.py,./run.py,./pkg
exclude = .tox,env,build,hscommon,qtlib,cocoalib,cocoa,help,./qt/dg_rc.py,qt/run_template.py,cocoa/run_template.py,./run.py,./pkg
max-line-length = 120
ignore = W391,W293,E302,E261,E226,E227,W291,E262,E303,E265,E731
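
For the record, every code silenced here is a whitespace or comment-style rule, plus one lambda rule: W391 (blank line at end of file), W293 (whitespace on blank line), E302 (expected two blank lines between top-level definitions), E261 (at least two spaces before an inline comment), E226/E227 (missing whitespace around arithmetic and bitwise/shift operators), W291 (trailing whitespace), E262/E265 (inline/block comment should start with '# '), E303 (too many blank lines) and E731 (don't assign a lambda, use a def). That leaves checks like E231 (missing whitespace after ',') active, which is the bulk of what this commit fixes across the tests.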