Format files with black
- Format all files with black
- Update tox.ini flake8 arguments to be compatible
- Add black to requirements-extra.txt
- Reduce ignored flake8 rules and fix a few violations
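Black and flake8 disagree on a few defaults, which is why the commit message mentions updating the flake8 arguments in tox.ini: black wraps code at 88 characters (flake8 defaults to 79), and black's slice formatting trips flake8's E203 check. A black-compatible flake8 section typically looks like the sketch below; this is illustrative only, and the exact arguments this commit placed in tox.ini may differ.

    [flake8]
    ; black wraps at 88 characters, not flake8's default 79
    max-line-length = 88
    ; E203 (whitespace before ':') fires on black's slice formatting;
    ; black's own documentation recommends ignoring it
    extend-ignore = E203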
@@ -17,6 +17,7 @@ from .. import engine
 from .base import NamedObject, GetTestGroups, DupeGuru
 from ..results import Results
 
+
 class TestCaseResultsEmpty:
     def setup_method(self, method):
         self.app = DupeGuru()
@@ -24,8 +25,8 @@ class TestCaseResultsEmpty:
 
     def test_apply_invalid_filter(self):
         # If the applied filter is an invalid regexp, just ignore the filter.
-        self.results.apply_filter('[') # invalid
-        self.test_stat_line() # make sure that the stats line isn't saying we applied a '[' filter
+        self.results.apply_filter("[")  # invalid
+        self.test_stat_line()  # make sure that the stats line isn't saying we applied a '[' filter
 
     def test_stat_line(self):
         eq_("0 / 0 (0.00 B / 0.00 B) duplicates marked.", self.results.stat_line)
@@ -34,7 +35,7 @@ class TestCaseResultsEmpty:
         eq_(0, len(self.results.groups))
 
     def test_get_group_of_duplicate(self):
-        assert self.results.get_group_of_duplicate('foo') is None
+        assert self.results.get_group_of_duplicate("foo") is None
 
     def test_save_to_xml(self):
         f = io.BytesIO()
@@ -42,7 +43,7 @@ class TestCaseResultsEmpty:
         f.seek(0)
         doc = ET.parse(f)
         root = doc.getroot()
-        eq_('results', root.tag)
+        eq_("results", root.tag)
 
     def test_is_modified(self):
         assert not self.results.is_modified
@@ -59,10 +60,10 @@ class TestCaseResultsEmpty:
         # would have been some kind of feedback to the user, but the work involved for something
         # that simply never happens (I never received a report of this crash, I experienced it
         # while fooling around) is too much. Instead, use standard name conflict resolution.
-        folderpath = tmpdir.join('foo')
+        folderpath = tmpdir.join("foo")
         folderpath.mkdir()
-        self.results.save_to_xml(str(folderpath)) # no crash
-        assert tmpdir.join('[000] foo').check()
+        self.results.save_to_xml(str(folderpath))  # no crash
+        assert tmpdir.join("[000] foo").check()
 
 
 class TestCaseResultsWithSomeGroups:
@@ -116,18 +117,22 @@ class TestCaseResultsWithSomeGroups:
         assert d is g.ref
 
     def test_sort_groups(self):
-        self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object to go ref.
+        self.results.make_ref(
+            self.objects[1]
+        )  # We want to make the 1024 sized object to go ref.
         g1, g2 = self.groups
-        self.results.sort_groups('size')
+        self.results.sort_groups("size")
         assert self.results.groups[0] is g2
         assert self.results.groups[1] is g1
-        self.results.sort_groups('size', False)
+        self.results.sort_groups("size", False)
         assert self.results.groups[0] is g1
         assert self.results.groups[1] is g2
 
     def test_set_groups_when_sorted(self):
-        self.results.make_ref(self.objects[1]) #We want to make the 1024 sized object to go ref.
-        self.results.sort_groups('size')
+        self.results.make_ref(
+            self.objects[1]
+        )  # We want to make the 1024 sized object to go ref.
+        self.results.sort_groups("size")
         objects, matches, groups = GetTestGroups()
         g1, g2 = groups
         g1.switch_ref(objects[1])
@@ -158,9 +163,9 @@ class TestCaseResultsWithSomeGroups:
         o3.size = 3
         o4.size = 2
         o5.size = 1
-        self.results.sort_dupes('size')
+        self.results.sort_dupes("size")
         eq_([o5, o3, o2], self.results.dupes)
-        self.results.sort_dupes('size', False)
+        self.results.sort_dupes("size", False)
         eq_([o2, o3, o5], self.results.dupes)
 
     def test_dupe_list_remember_sort(self):
@@ -170,25 +175,25 @@ class TestCaseResultsWithSomeGroups:
         o3.size = 3
         o4.size = 2
         o5.size = 1
-        self.results.sort_dupes('size')
+        self.results.sort_dupes("size")
         self.results.make_ref(o2)
         eq_([o5, o3, o1], self.results.dupes)
 
     def test_dupe_list_sort_delta_values(self):
         o1, o2, o3, o4, o5 = self.objects
         o1.size = 10
-        o2.size = 2 #-8
-        o3.size = 3 #-7
+        o2.size = 2  # -8
+        o3.size = 3  # -7
         o4.size = 20
-        o5.size = 1 #-19
-        self.results.sort_dupes('size', delta=True)
+        o5.size = 1  # -19
+        self.results.sort_dupes("size", delta=True)
         eq_([o5, o2, o3], self.results.dupes)
 
     def test_sort_empty_list(self):
-        #There was an infinite loop when sorting an empty list.
+        # There was an infinite loop when sorting an empty list.
         app = DupeGuru()
         r = app.results
-        r.sort_dupes('name')
+        r.sort_dupes("name")
         eq_([], r.dupes)
 
     def test_dupe_list_update_on_remove_duplicates(self):
@@ -209,7 +214,7 @@ class TestCaseResultsWithSomeGroups:
         f = io.BytesIO()
         self.results.save_to_xml(f)
         assert not self.results.is_modified
-        self.results.groups = self.groups # sets the flag back
+        self.results.groups = self.groups  # sets the flag back
         f.seek(0)
         self.results.load_from_xml(f, get_file)
         assert not self.results.is_modified
@@ -236,7 +241,7 @@ class TestCaseResultsWithSomeGroups:
         # "aaa" makes our dupe go first in alphabetical order, but since we have the same value as
         # ref, we're going last.
         g2r.name = g2d1.name = "aaa"
-        self.results.sort_dupes('name', delta=True)
+        self.results.sort_dupes("name", delta=True)
         eq_("aaa", self.results.dupes[2].name)
 
     def test_dupe_list_sort_delta_values_nonnumeric_case_insensitive(self):
@@ -244,9 +249,10 @@ class TestCaseResultsWithSomeGroups:
         g1r, g1d1, g1d2, g2r, g2d1 = self.objects
         g2r.name = "AaA"
         g2d1.name = "aAa"
-        self.results.sort_dupes('name', delta=True)
+        self.results.sort_dupes("name", delta=True)
         eq_("aAa", self.results.dupes[2].name)
 
+
 class TestCaseResultsWithSavedResults:
     def setup_method(self, method):
         self.app = DupeGuru()
@@ -266,7 +272,7 @@ class TestCaseResultsWithSavedResults:
         def get_file(path):
             return [f for f in self.objects if str(f.path) == path][0]
 
-        self.results.groups = self.groups # sets the flag back
+        self.results.groups = self.groups  # sets the flag back
         self.results.load_from_xml(self.f, get_file)
         assert not self.results.is_modified
 
@@ -299,7 +305,7 @@ class TestCaseResultsMarkings:
         self.results.mark(self.objects[2])
         self.results.mark(self.objects[4])
         eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
-        self.results.mark(self.objects[0]) #this is a ref, it can't be counted
+        self.results.mark(self.objects[0])  # this is a ref, it can't be counted
         eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
         self.results.groups = self.groups
         eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
@@ -335,7 +341,7 @@ class TestCaseResultsMarkings:
         def log_object(o):
             log.append(o)
             if o is self.objects[1]:
-                raise EnvironmentError('foobar')
+                raise EnvironmentError("foobar")
 
         log = []
         self.results.mark_all()
@@ -350,7 +356,7 @@ class TestCaseResultsMarkings:
         eq_(len(self.results.problems), 1)
         dupe, msg = self.results.problems[0]
         assert dupe is self.objects[1]
-        eq_(msg, 'foobar')
+        eq_(msg, "foobar")
 
     def test_perform_on_marked_with_ref(self):
         def log_object(o):
@@ -408,20 +414,20 @@ class TestCaseResultsMarkings:
         f.seek(0)
         doc = ET.parse(f)
         root = doc.getroot()
-        g1, g2 = root.getiterator('group')
-        d1, d2, d3 = g1.getiterator('file')
-        eq_('n', d1.get('marked'))
-        eq_('n', d2.get('marked'))
-        eq_('y', d3.get('marked'))
-        d1, d2 = g2.getiterator('file')
-        eq_('n', d1.get('marked'))
-        eq_('y', d2.get('marked'))
+        g1, g2 = root.getiterator("group")
+        d1, d2, d3 = g1.getiterator("file")
+        eq_("n", d1.get("marked"))
+        eq_("n", d2.get("marked"))
+        eq_("y", d3.get("marked"))
+        d1, d2 = g2.getiterator("file")
+        eq_("n", d1.get("marked"))
+        eq_("y", d2.get("marked"))
 
     def test_LoadXML(self):
         def get_file(path):
             return [f for f in self.objects if str(f.path) == path][0]
 
-        self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
+        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
         self.results.mark(self.objects[1])
         self.results.mark_invert()
         f = io.BytesIO()
@@ -444,51 +450,51 @@ class TestCaseResultsXML:
         self.objects, self.matches, self.groups = GetTestGroups()
         self.results.groups = self.groups
 
-    def get_file(self, path): # use this as a callback for load_from_xml
+    def get_file(self, path):  # use this as a callback for load_from_xml
         return [o for o in self.objects if o.path == path][0]
 
     def test_save_to_xml(self):
         self.objects[0].is_ref = True
-        self.objects[0].words = [['foo', 'bar']]
+        self.objects[0].words = [["foo", "bar"]]
         f = io.BytesIO()
         self.results.save_to_xml(f)
         f.seek(0)
         doc = ET.parse(f)
         root = doc.getroot()
-        eq_('results', root.tag)
+        eq_("results", root.tag)
         eq_(2, len(root))
-        eq_(2, len([c for c in root if c.tag == 'group']))
+        eq_(2, len([c for c in root if c.tag == "group"]))
         g1, g2 = root
         eq_(6, len(g1))
-        eq_(3, len([c for c in g1 if c.tag == 'file']))
-        eq_(3, len([c for c in g1 if c.tag == 'match']))
-        d1, d2, d3 = [c for c in g1 if c.tag == 'file']
-        eq_(op.join('basepath', 'foo bar'), d1.get('path'))
-        eq_(op.join('basepath', 'bar bleh'), d2.get('path'))
-        eq_(op.join('basepath', 'foo bleh'), d3.get('path'))
-        eq_('y', d1.get('is_ref'))
-        eq_('n', d2.get('is_ref'))
-        eq_('n', d3.get('is_ref'))
-        eq_('foo,bar', d1.get('words'))
-        eq_('bar,bleh', d2.get('words'))
-        eq_('foo,bleh', d3.get('words'))
+        eq_(3, len([c for c in g1 if c.tag == "file"]))
+        eq_(3, len([c for c in g1 if c.tag == "match"]))
+        d1, d2, d3 = [c for c in g1 if c.tag == "file"]
+        eq_(op.join("basepath", "foo bar"), d1.get("path"))
+        eq_(op.join("basepath", "bar bleh"), d2.get("path"))
+        eq_(op.join("basepath", "foo bleh"), d3.get("path"))
+        eq_("y", d1.get("is_ref"))
+        eq_("n", d2.get("is_ref"))
+        eq_("n", d3.get("is_ref"))
+        eq_("foo,bar", d1.get("words"))
+        eq_("bar,bleh", d2.get("words"))
+        eq_("foo,bleh", d3.get("words"))
         eq_(3, len(g2))
-        eq_(2, len([c for c in g2 if c.tag == 'file']))
-        eq_(1, len([c for c in g2 if c.tag == 'match']))
-        d1, d2 = [c for c in g2 if c.tag == 'file']
-        eq_(op.join('basepath', 'ibabtu'), d1.get('path'))
-        eq_(op.join('basepath', 'ibabtu'), d2.get('path'))
-        eq_('n', d1.get('is_ref'))
-        eq_('n', d2.get('is_ref'))
-        eq_('ibabtu', d1.get('words'))
-        eq_('ibabtu', d2.get('words'))
+        eq_(2, len([c for c in g2 if c.tag == "file"]))
+        eq_(1, len([c for c in g2 if c.tag == "match"]))
+        d1, d2 = [c for c in g2 if c.tag == "file"]
+        eq_(op.join("basepath", "ibabtu"), d1.get("path"))
+        eq_(op.join("basepath", "ibabtu"), d2.get("path"))
+        eq_("n", d1.get("is_ref"))
+        eq_("n", d2.get("is_ref"))
+        eq_("ibabtu", d1.get("words"))
+        eq_("ibabtu", d2.get("words"))
 
     def test_LoadXML(self):
         def get_file(path):
             return [f for f in self.objects if str(f.path) == path][0]
 
         self.objects[0].is_ref = True
-        self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
+        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
         f = io.BytesIO()
         self.results.save_to_xml(f)
         f.seek(0)
@@ -504,23 +510,23 @@ class TestCaseResultsXML:
         assert g1[0] is self.objects[0]
         assert g1[1] is self.objects[1]
         assert g1[2] is self.objects[2]
-        eq_(['foo', 'bar'], g1[0].words)
-        eq_(['bar', 'bleh'], g1[1].words)
-        eq_(['foo', 'bleh'], g1[2].words)
+        eq_(["foo", "bar"], g1[0].words)
+        eq_(["bar", "bleh"], g1[1].words)
+        eq_(["foo", "bleh"], g1[2].words)
         eq_(2, len(g2))
         assert not g2[0].is_ref
         assert not g2[1].is_ref
         assert g2[0] is self.objects[3]
         assert g2[1] is self.objects[4]
-        eq_(['ibabtu'], g2[0].words)
-        eq_(['ibabtu'], g2[1].words)
+        eq_(["ibabtu"], g2[0].words)
+        eq_(["ibabtu"], g2[1].words)
 
     def test_LoadXML_with_filename(self, tmpdir):
         def get_file(path):
             return [f for f in self.objects if str(f.path) == path][0]
 
-        filename = str(tmpdir.join('dupeguru_results.xml'))
-        self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
+        filename = str(tmpdir.join("dupeguru_results.xml"))
+        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
         self.results.save_to_xml(filename)
         app = DupeGuru()
         r = Results(app)
@@ -529,11 +535,11 @@ class TestCaseResultsXML:
 
     def test_LoadXML_with_some_files_that_dont_exist_anymore(self):
        def get_file(path):
-            if path.endswith('ibabtu 2'):
+            if path.endswith("ibabtu 2"):
                 return None
             return [f for f in self.objects if str(f.path) == path][0]
 
-        self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
+        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
         f = io.BytesIO()
         self.results.save_to_xml(f)
         f.seek(0)
@@ -547,36 +553,36 @@ class TestCaseResultsXML:
         def get_file(path):
             return [f for f in self.objects if str(f.path) == path][0]
 
-        root = ET.Element('foobar') #The root element shouldn't matter, really.
-        group_node = ET.SubElement(root, 'group')
-        dupe_node = ET.SubElement(group_node, 'file') #Perfectly correct file
-        dupe_node.set('path', op.join('basepath', 'foo bar'))
-        dupe_node.set('is_ref', 'y')
-        dupe_node.set('words', 'foo, bar')
-        dupe_node = ET.SubElement(group_node, 'file') #is_ref missing, default to 'n'
-        dupe_node.set('path', op.join('basepath', 'foo bleh'))
-        dupe_node.set('words', 'foo, bleh')
-        dupe_node = ET.SubElement(group_node, 'file') #words are missing, valid.
-        dupe_node.set('path', op.join('basepath', 'bar bleh'))
-        dupe_node = ET.SubElement(group_node, 'file') #path is missing, invalid.
-        dupe_node.set('words', 'foo, bleh')
-        dupe_node = ET.SubElement(group_node, 'foobar') #Invalid element name
-        dupe_node.set('path', op.join('basepath', 'bar bleh'))
-        dupe_node.set('is_ref', 'y')
-        dupe_node.set('words', 'bar, bleh')
-        match_node = ET.SubElement(group_node, 'match') # match pointing to a bad index
-        match_node.set('first', '42')
-        match_node.set('second', '45')
-        match_node = ET.SubElement(group_node, 'match') # match with missing attrs
-        match_node = ET.SubElement(group_node, 'match') # match with non-int values
-        match_node.set('first', 'foo')
-        match_node.set('second', 'bar')
-        match_node.set('percentage', 'baz')
-        group_node = ET.SubElement(root, 'foobar') #invalid group
-        group_node = ET.SubElement(root, 'group') #empty group
+        root = ET.Element("foobar")  # The root element shouldn't matter, really.
+        group_node = ET.SubElement(root, "group")
+        dupe_node = ET.SubElement(group_node, "file")  # Perfectly correct file
+        dupe_node.set("path", op.join("basepath", "foo bar"))
+        dupe_node.set("is_ref", "y")
+        dupe_node.set("words", "foo, bar")
+        dupe_node = ET.SubElement(group_node, "file")  # is_ref missing, default to 'n'
+        dupe_node.set("path", op.join("basepath", "foo bleh"))
+        dupe_node.set("words", "foo, bleh")
+        dupe_node = ET.SubElement(group_node, "file")  # words are missing, valid.
+        dupe_node.set("path", op.join("basepath", "bar bleh"))
+        dupe_node = ET.SubElement(group_node, "file")  # path is missing, invalid.
+        dupe_node.set("words", "foo, bleh")
+        dupe_node = ET.SubElement(group_node, "foobar")  # Invalid element name
+        dupe_node.set("path", op.join("basepath", "bar bleh"))
+        dupe_node.set("is_ref", "y")
+        dupe_node.set("words", "bar, bleh")
+        match_node = ET.SubElement(group_node, "match")  # match pointing to a bad index
+        match_node.set("first", "42")
+        match_node.set("second", "45")
+        match_node = ET.SubElement(group_node, "match")  # match with missing attrs
+        match_node = ET.SubElement(group_node, "match")  # match with non-int values
+        match_node.set("first", "foo")
+        match_node.set("second", "bar")
+        match_node.set("percentage", "baz")
+        group_node = ET.SubElement(root, "foobar")  # invalid group
+        group_node = ET.SubElement(root, "group")  # empty group
         f = io.BytesIO()
         tree = ET.ElementTree(root)
-        tree.write(f, encoding='utf-8')
+        tree.write(f, encoding="utf-8")
         f.seek(0)
         app = DupeGuru()
         r = Results(app)
@@ -586,16 +592,18 @@ class TestCaseResultsXML:
 
     def test_xml_non_ascii(self):
         def get_file(path):
-            if path == op.join('basepath', '\xe9foo bar'):
+            if path == op.join("basepath", "\xe9foo bar"):
                 return objects[0]
-            if path == op.join('basepath', 'bar bleh'):
+            if path == op.join("basepath", "bar bleh"):
                 return objects[1]
 
         objects = [NamedObject("\xe9foo bar", True), NamedObject("bar bleh", True)]
-        matches = engine.getmatches(objects) #we should have 5 matches
-        groups = engine.get_groups(matches) #We should have 2 groups
+        matches = engine.getmatches(objects)  # we should have 5 matches
+        groups = engine.get_groups(matches)  # We should have 2 groups
         for g in groups:
-            g.prioritize(lambda x: objects.index(x)) #We want the dupes to be in the same order as the list is
+            g.prioritize(
+                lambda x: objects.index(x)
+            )  # We want the dupes to be in the same order as the list is
         app = DupeGuru()
         results = Results(app)
         results.groups = groups
@@ -607,11 +615,11 @@ class TestCaseResultsXML:
         r.load_from_xml(f, get_file)
         g = r.groups[0]
         eq_("\xe9foo bar", g[0].name)
-        eq_(['efoo', 'bar'], g[0].words)
+        eq_(["efoo", "bar"], g[0].words)
 
     def test_load_invalid_xml(self):
         f = io.BytesIO()
-        f.write(b'<this is invalid')
+        f.write(b"<this is invalid")
         f.seek(0)
         app = DupeGuru()
         r = Results(app)
@@ -623,7 +631,7 @@ class TestCaseResultsXML:
         app = DupeGuru()
         r = Results(app)
         with raises(IOError):
-            r.load_from_xml('does_not_exist.xml', None)
+            r.load_from_xml("does_not_exist.xml", None)
         eq_(0, len(r.groups))
 
     def test_remember_match_percentage(self):
@@ -643,12 +651,12 @@ class TestCaseResultsXML:
         results.load_from_xml(f, self.get_file)
         group = results.groups[0]
         d1, d2, d3 = group
-        match = group.get_match_of(d2) #d1 - d2
+        match = group.get_match_of(d2)  # d1 - d2
         eq_(42, match[2])
-        match = group.get_match_of(d3) #d1 - d3
+        match = group.get_match_of(d3)  # d1 - d3
         eq_(43, match[2])
         group.switch_ref(d2)
-        match = group.get_match_of(d3) #d2 - d3
+        match = group.get_match_of(d3)  # d2 - d3
         eq_(46, match[2])
 
     def test_save_and_load(self):
@@ -661,13 +669,13 @@ class TestCaseResultsXML:
 
     def test_apply_filter_works_on_paths(self):
         # apply_filter() searches on the whole path, not just on the filename.
-        self.results.apply_filter('basepath')
+        self.results.apply_filter("basepath")
         eq_(len(self.results.groups), 2)
 
     def test_save_xml_with_invalid_characters(self):
         # Don't crash when saving files that have invalid xml characters in their path
-        self.objects[0].name = 'foo\x19'
-        self.results.save_to_xml(io.BytesIO()) # don't crash
+        self.objects[0].name = "foo\x19"
+        self.results.save_to_xml(io.BytesIO())  # don't crash
 
 
 class TestCaseResultsFilter:
@@ -676,7 +684,7 @@ class TestCaseResultsFilter:
         self.results = self.app.results
         self.objects, self.matches, self.groups = GetTestGroups()
         self.results.groups = self.groups
-        self.results.apply_filter(r'foo')
+        self.results.apply_filter(r"foo")
 
     def test_groups(self):
         eq_(1, len(self.results.groups))
@@ -694,7 +702,7 @@ class TestCaseResultsFilter:
 
     def test_dupes_reconstructed_filtered(self):
         # make_ref resets self.__dupes to None. When it's reconstructed, we want it filtered
-        dupe = self.results.dupes[0] #3rd object
+        dupe = self.results.dupes[0]  # 3rd object
         self.results.make_ref(dupe)
         eq_(1, len(self.results.dupes))
         assert self.results.dupes[0] is self.objects[0]
@@ -702,23 +710,23 @@ class TestCaseResultsFilter:
     def test_include_ref_dupes_in_filter(self):
         # When only the ref of a group match the filter, include it in the group
         self.results.apply_filter(None)
-        self.results.apply_filter(r'foo bar')
+        self.results.apply_filter(r"foo bar")
         eq_(1, len(self.results.groups))
         eq_(0, len(self.results.dupes))
 
     def test_filters_build_on_one_another(self):
-        self.results.apply_filter(r'bar')
+        self.results.apply_filter(r"bar")
         eq_(1, len(self.results.groups))
         eq_(0, len(self.results.dupes))
 
     def test_stat_line(self):
-        expected = '0 / 1 (0.00 B / 1.00 B) duplicates marked. filter: foo'
+        expected = "0 / 1 (0.00 B / 1.00 B) duplicates marked. filter: foo"
         eq_(expected, self.results.stat_line)
-        self.results.apply_filter(r'bar')
-        expected = '0 / 0 (0.00 B / 0.00 B) duplicates marked. filter: foo --> bar'
+        self.results.apply_filter(r"bar")
+        expected = "0 / 0 (0.00 B / 0.00 B) duplicates marked. filter: foo --> bar"
         eq_(expected, self.results.stat_line)
         self.results.apply_filter(None)
-        expected = '0 / 3 (0.00 B / 1.01 KB) duplicates marked.'
+        expected = "0 / 3 (0.00 B / 1.01 KB) duplicates marked."
         eq_(expected, self.results.stat_line)
 
     def test_mark_count_is_filtered_as_well(self):
@@ -726,8 +734,8 @@ class TestCaseResultsFilter:
         # We don't want to perform mark_all() because we want the mark list to contain objects
         for dupe in self.results.dupes:
             self.results.mark(dupe)
-        self.results.apply_filter(r'foo')
-        expected = '1 / 1 (1.00 B / 1.00 B) duplicates marked. filter: foo'
+        self.results.apply_filter(r"foo")
+        expected = "1 / 1 (1.00 B / 1.00 B) duplicates marked. filter: foo"
         eq_(expected, self.results.stat_line)
 
     def test_mark_all_only_affects_filtered_items(self):
@@ -739,22 +747,22 @@ class TestCaseResultsFilter:
 
     def test_sort_groups(self):
         self.results.apply_filter(None)
-        self.results.make_ref(self.objects[1]) # to have the 1024 b obkect as ref
+        self.results.make_ref(self.objects[1])  # to have the 1024 b obkect as ref
         g1, g2 = self.groups
-        self.results.apply_filter('a') # Matches both group
-        self.results.sort_groups('size')
+        self.results.apply_filter("a")  # Matches both group
+        self.results.sort_groups("size")
         assert self.results.groups[0] is g2
         assert self.results.groups[1] is g1
         self.results.apply_filter(None)
         assert self.results.groups[0] is g2
         assert self.results.groups[1] is g1
-        self.results.sort_groups('size', False)
-        self.results.apply_filter('a')
+        self.results.sort_groups("size", False)
+        self.results.apply_filter("a")
         assert self.results.groups[1] is g2
         assert self.results.groups[0] is g1
 
     def test_set_group(self):
-        #We want the new group to be filtered
+        # We want the new group to be filtered
         self.objects, self.matches, self.groups = GetTestGroups()
         self.results.groups = self.groups
         eq_(1, len(self.results.groups))
@@ -764,12 +772,12 @@ class TestCaseResultsFilter:
         def get_file(path):
             return [f for f in self.objects if str(f.path) == path][0]
 
-        filename = str(tmpdir.join('dupeguru_results.xml'))
-        self.objects[4].name = 'ibabtu 2' #we can't have 2 files with the same path
+        filename = str(tmpdir.join("dupeguru_results.xml"))
+        self.objects[4].name = "ibabtu 2"  # we can't have 2 files with the same path
         self.results.save_to_xml(filename)
         app = DupeGuru()
         r = Results(app)
-        r.apply_filter('foo')
+        r.apply_filter("foo")
         r.load_from_xml(filename, get_file)
         eq_(2, len(r.groups))
 
@@ -778,7 +786,7 @@ class TestCaseResultsFilter:
         self.results.apply_filter(None)
         eq_(2, len(self.results.groups))
         eq_(2, len(self.results.dupes))
-        self.results.apply_filter('ibabtu')
+        self.results.apply_filter("ibabtu")
         self.results.remove_duplicates([self.results.dupes[0]])
         self.results.apply_filter(None)
         eq_(1, len(self.results.groups))
@@ -786,7 +794,7 @@ class TestCaseResultsFilter:
 
     def test_filter_is_case_insensitive(self):
         self.results.apply_filter(None)
-        self.results.apply_filter('FOO')
+        self.results.apply_filter("FOO")
         eq_(1, len(self.results.dupes))
 
     def test_make_ref_on_filtered_out_doesnt_mess_stats(self):
@@ -794,13 +802,15 @@ class TestCaseResultsFilter:
         # When calling make_ref on such a dupe, the total size and dupecount stats gets messed up
         # because they are *not* counted in the stats in the first place.
         g1, g2 = self.groups
-        bar_bleh = g1[1] # The "bar bleh" dupe is filtered out
+        bar_bleh = g1[1]  # The "bar bleh" dupe is filtered out
         self.results.make_ref(bar_bleh)
         # Now the stats should display *2* markable dupes (instead of 1)
-        expected = '0 / 2 (0.00 B / 2.00 B) duplicates marked. filter: foo'
+        expected = "0 / 2 (0.00 B / 2.00 B) duplicates marked. filter: foo"
         eq_(expected, self.results.stat_line)
-        self.results.apply_filter(None) # Now let's make sure our unfiltered results aren't fucked up
-        expected = '0 / 3 (0.00 B / 3.00 B) duplicates marked.'
+        self.results.apply_filter(
+            None
+        )  # Now let's make sure our unfiltered results aren't fucked up
+        expected = "0 / 3 (0.00 B / 3.00 B) duplicates marked."
         eq_(expected, self.results.stat_line)
 
 
@@ -814,6 +824,5 @@ class TestCaseResultsRefFile:
         self.results.groups = self.groups
 
     def test_stat_line(self):
-        expected = '0 / 2 (0.00 B / 2.00 B) duplicates marked.'
+        expected = "0 / 2 (0.00 B / 2.00 B) duplicates marked."
         eq_(expected, self.results.stat_line)
-