Integrated the jobprogress library into hscommon
I have a fix to make in it, and it's silly to pretend that this library is of any use to anybody outside HS apps. Bringing it back here will make things simpler.
This commit is contained in: commit ac32305532 (parent 87c2fa2573)
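In practice the integration is a package move: every client module switches from the standalone jobprogress package to the copy now bundled under hscommon. A minimal sketch of the change, assuming hscommon is on the import path:

    # Before this commit: imports from the standalone jobprogress package
    # from jobprogress import job
    # from jobprogress.performer import ThreadedJobPerformer

    # After this commit: the same modules live under hscommon
    from hscommon.jobprogress import job
    from hscommon.jobprogress.performer import ThreadedJobPerformer

    j = job.nulljob  # the no-op job used as a default where no progress reporting is needed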
build.py (5 changes)
@@ -110,8 +110,9 @@ def build_cocoa(edition, dev):
         'me': ['core_me'] + appscript_pkgs + ['hsaudiotag'],
         'pe': ['core_pe'] + appscript_pkgs,
     }[edition]
-    tocopy = ['core', 'hscommon', 'cocoa/inter', 'cocoalib/cocoa', 'jobprogress', 'objp',
-        'send2trash'] + specific_packages
+    tocopy = [
+        'core', 'hscommon', 'cocoa/inter', 'cocoalib/cocoa', 'objp', 'send2trash'
+    ] + specific_packages
     copy_packages(tocopy, pydep_folder, create_links=dev)
     sys.path.insert(0, 'build')
     extra_deps = None
@@ -113,5 +113,6 @@ def patch_threaded_job_performer():
     # _async_run, under cocoa, has to be run within an autorelease pool to prevent leaks.
     # You only need this patch if you use one of CocoaProxy's functions (which allocate objc
     # structures) inside a threaded job.
-    from jobprogress.performer import ThreadedJobPerformer
+    from hscommon.jobprogress.performer import ThreadedJobPerformer
     ThreadedJobPerformer._async_run = autoreleasepool(ThreadedJobPerformer._async_run)
@@ -15,7 +15,7 @@ import time
 import shutil
 
 from send2trash import send2trash
-from jobprogress import job
+from hscommon.jobprogress import job
 from hscommon.notify import Broadcaster
 from hscommon.path import Path
 from hscommon.conflict import smart_move, smart_copy
@@ -9,7 +9,7 @@
 from xml.etree import ElementTree as ET
 import logging
 
-from jobprogress import job
+from hscommon.jobprogress import job
 from hscommon.path import Path
 from hscommon.util import FileOrPath
 
@@ -15,7 +15,7 @@ from unicodedata import normalize
 
 from hscommon.util import flatten, multi_replace
 from hscommon.trans import tr
-from jobprogress import job
+from hscommon.jobprogress import job
 
 (WEIGHT_WORDS,
     MATCH_SIMILAR_WORDS,
@@ -12,7 +12,7 @@ import os
 import os.path as op
 from xml.etree import ElementTree as ET
 
-from jobprogress.job import nulljob
+from hscommon.jobprogress.job import nulljob
 from hscommon.conflict import get_conflicted_name
 from hscommon.util import flatten, nonone, FileOrPath, format_size
 from hscommon.trans import tr
@@ -10,7 +10,7 @@ import logging
 import re
 import os.path as op
 
-from jobprogress import job
+from hscommon.jobprogress import job
 from hscommon.util import dedupe, rem_file_ext, get_file_ext
 from hscommon.trans import tr
 
@@ -15,7 +15,7 @@ from hscommon.path import Path
 import hscommon.conflict
 import hscommon.util
 from hscommon.testutil import CallLogger, eq_, log_calls
-from jobprogress.job import Job
+from hscommon.jobprogress.job import Job
 
 from .base import DupeGuru, TestApp
 from .results_test import GetTestGroups
@@ -10,7 +10,7 @@ from hscommon.testutil import TestApp as TestAppBase, eq_, with_app
 from hscommon.path import Path
 from hscommon.util import get_file_ext, format_size
 from hscommon.gui.column import Column
-from jobprogress.job import nulljob, JobCancelled
+from hscommon.jobprogress.job import nulljob, JobCancelled
 
 from .. import engine
 from .. import prioritize
@@ -8,7 +8,7 @@
 
 import sys
 
-from jobprogress import job
+from hscommon.jobprogress import job
 from hscommon.util import first
 from hscommon.testutil import eq_, log_calls
 
@@ -6,7 +6,7 @@
 # which should be included with this package. The terms are also available at
 # http://www.hardcoded.net/licenses/bsd_license
 
-from jobprogress import job
+from hscommon.jobprogress import job
 from hscommon.path import Path
 from hscommon.testutil import eq_
 
@@ -12,7 +12,7 @@ from itertools import combinations
 
 from hscommon.util import extract
 from hscommon.trans import tr
-from jobprogress import job
+from hscommon.jobprogress import job
 
 from core.engine import Match
 from .block import avgdiff, DifferentBlockCountError, NoBlocksError
@@ -190,7 +190,12 @@ def getmatches(pictures, cache_path, threshold=75, match_scaled=False, j=job.nul
     pool.close()
 
     result = []
-    for ref_id, other_id, percentage in j.iter_with_progress(matches, tr("Verified %d/%d matches"), every=10):
+    myiter = j.iter_with_progress(
+        matches,
+        tr("Verified %d/%d matches"),
+        every=10
+    )
+    for ref_id, other_id, percentage in myiter:
         ref = id2picture[ref_id]
         other = id2picture[other_id]
         if percentage == 100 and ref.md5 != other.md5:
@@ -202,3 +207,4 @@ def getmatches(pictures, cache_path, threshold=75, match_scaled=False, j=job.nul
     return result
 
+multiprocessing.freeze_support()
 
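The refactored call above follows the job-enabled pattern used throughout core: the caller passes in a Job (or the nulljob sentinel defined below in hscommon/jobprogress/job.py), and the function reports progress through iter_with_progress. A minimal sketch of that pattern; the function below is illustrative, not dupeGuru code:

    from hscommon.jobprogress.job import nulljob

    def verify_matches(matches, j=nulljob):
        # With the default nulljob, iter_with_progress degrades to a plain
        # iterator and no progress callbacks are made.
        verified = []
        for m in j.iter_with_progress(matches, "Verified %d/%d matches", every=10):
            verified.append(m)
        return verified

    verify_matches(list(range(100)))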
hscommon/__init__.py (0 changes, Executable file → Normal file)
@@ -5,8 +5,7 @@
 # which should be included with this package. The terms are also available at
 # http://www.hardcoded.net/licenses/bsd_license
 
-from jobprogress.performer import ThreadedJobPerformer
-
+from ..jobprogress.performer import ThreadedJobPerformer
 from .base import GUIObject
 from .text_field import TextField
 
@@ -41,7 +40,7 @@ class ProgressWindowView:
 class ProgressWindow(GUIObject, ThreadedJobPerformer):
     """Cross-toolkit GUI-enabled progress window.
 
-    This class allows you to run a long running, `job enabled`_ function in a separate thread and
+    This class allows you to run a long running, job enabled function in a separate thread and
     allow the user to follow its progress with a progress dialog.
 
     To use it, you start your long-running job with :meth:`run` and then have your UI layer
@@ -49,13 +48,11 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
     :meth:`pulse` in the main thread because GUI toolkits usually only support calling UI-related
     functions from the main thread.
 
-    We subclass :class:`.GUIObject` and ``ThreadedJobPerformer`` (from the ``jobprogress`` library).
+    We subclass :class:`.GUIObject` and :class:`ThreadedJobPerformer`.
     Expected view: :class:`ProgressWindowView`.
 
     :param finishfunc: A function ``f(jobid)`` that is called when a job is completed. ``jobid`` is
         an arbitrary id passed to :meth:`run`.
-
-    .. _job enabled: https://pypi.python.org/pypi/jobprogress
     """
     def __init__(self, finish_func):
         # finish_func(jobid) is the function that is called when a job is completed.
@@ -105,8 +102,8 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
     def run(self, jobid, title, target, args=()):
         """Starts a threaded job.
 
-        The ``target`` function will be sent, as its first argument, a ``Job`` instance (from the
-        ``jobprogress`` library) which it can use to report on its progress.
+        The ``target`` function will be sent, as its first argument, a :class:`Job` instance which
+        it can use to report on its progress.
 
         :param jobid: Arbitrary identifier which will be passed to ``finish_func()`` at the end.
         :param title: A title for the task you're starting.
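As the updated docstring above says, run() hands the target function a Job instance as its first argument. A hedged sketch of what such a target looks like; the function name, the path list, and the way it is driven outside a GUI are illustrative assumptions, not dupeGuru code:

    from hscommon.jobprogress.job import Job

    def scan_paths(j, paths):
        # j is the Job that ProgressWindow.run() injects as the first argument
        seen = []
        for path in j.iter_with_progress(paths, "Scanned %d/%d paths"):
            seen.append(path)
        return seen

    # Outside a GUI, the same target can be driven with a plain Job and a callback;
    # the callback must return a truthy value or the job is cancelled.
    j = Job(1, lambda progress, desc='': True)
    scan_paths(j, ['a.txt', 'b.txt', 'c.txt'])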
hscommon/jobprogress/__init__.py (new file, 0 lines)
hscommon/jobprogress/job.py (new file, 160 lines)
@@ -0,0 +1,160 @@
# Created By: Virgil Dupras
# Created On: 2004/12/20
# Copyright 2011 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license

class JobCancelled(Exception):
    "The user has cancelled the job"

class JobInProgressError(Exception):
    "A job is already being performed, you can't perform more than one at the same time."

class JobCountError(Exception):
    "The number of jobs started has exceeded the number of jobs allowed"

class Job:
    """Manages a job's progression and reports it through a callback.

    Note that this class is not foolproof. For example, you could call
    start_subjob, and then call add_progress from the parent job, and nothing
    would stop you from doing it. However, it would mess up your progression
    because it is the sub job that is supposed to drive the progression.
    Another example would be to start a subjob, then start another, and call
    add_progress from the old subjob. Once again, it would mess up your progression.
    There are no guards because they would remove the lightweight aspect of the
    class (a Job would need to have a parent instead of just a callback,
    and the parent could be None. A lot of checks for nothing.).
    Another one is that nothing stops you from calling add_progress right after
    SkipJob.
    """
    #---Magic functions
    def __init__(self, job_proportions, callback):
        """Initialize the Job with 'jobcount' jobs. Start every job with
        start_job(). Every time the job progress is updated, 'callback' is called.
        'callback' takes a 'progress' int param and an optional 'desc'
        parameter. The callback must return False if the job must be cancelled.
        """
        if not hasattr(callback, '__call__'):
            raise TypeError("'callback' MUST be set when creating a Job")
        if isinstance(job_proportions, int):
            job_proportions = [1] * job_proportions
        self._job_proportions = list(job_proportions)
        self._jobcount = sum(job_proportions)
        self._callback = callback
        self._current_job = 0
        self._passed_jobs = 0
        self._progress = 0
        self._currmax = 1

    #---Private
    def _subjob_callback(self, progress, desc=''):
        """This is the callback passed to children jobs.
        """
        self.set_progress(progress, desc)
        return True  # if JobCancelled has to be raised, it will be at the highest level

    def _do_update(self, desc):
        """Calls the callback function with a % progress as a parameter.

        The parameter is an int in the 0-100 range.
        """
        if self._current_job:
            passed_progress = self._passed_jobs * self._currmax
            current_progress = self._current_job * self._progress
            total_progress = self._jobcount * self._currmax
            progress = ((passed_progress + current_progress) * 100) // total_progress
        else:
            progress = -1  # indeterminate
        # It's possible that callback doesn't support a desc arg
        result = self._callback(progress, desc) if desc else self._callback(progress)
        if not result:
            raise JobCancelled()

    #---Public
    def add_progress(self, progress=1, desc=''):
        self.set_progress(self._progress + progress, desc)

    def check_if_cancelled(self):
        self._do_update('')

    def iter_with_progress(self, sequence, desc_format=None, every=1):
        '''Iterate through sequence while automatically adding progress.
        '''
        desc = ''
        if desc_format:
            desc = desc_format % (0, len(sequence))
        self.start_job(len(sequence), desc)
        for i, element in enumerate(sequence, start=1):
            yield element
            if i % every == 0:
                if desc_format:
                    desc = desc_format % (i, len(sequence))
                self.add_progress(progress=every, desc=desc)
        if desc_format:
            desc = desc_format % (len(sequence), len(sequence))
        self.set_progress(100, desc)

    def start_job(self, max_progress=100, desc=''):
        """Begin work on the next job. You must not call start_job more than
        'jobcount' (in __init__) times.
        'max_progress' is the number of work units you are to perform.
        'desc' is the description of the job.
        """
        self._passed_jobs += self._current_job
        try:
            self._current_job = self._job_proportions.pop(0)
        except IndexError:
            raise JobCountError()
        self._progress = 0
        self._currmax = max(1, max_progress)
        self._do_update(desc)

    def start_subjob(self, job_proportions, desc=''):
        """Starts a sub job. Use this when you want to split a job into
        multiple smaller jobs. Pretty handy when starting a process where you
        know how many subjobs you will have, but don't know the work unit count
        for each of them.
        Returns the Job object.
        """
        self.start_job(100, desc)
        return Job(job_proportions, self._subjob_callback)

    def set_progress(self, progress, desc=''):
        """Sets the progress of the current job to 'progress', and calls the
        callback.
        """
        self._progress = progress
        if self._progress > self._currmax:
            self._progress = self._currmax
        if self._progress < 0:
            self._progress = 0
        self._do_update(desc)


class NullJob:
    def __init__(self, *args, **kwargs):
        pass

    def add_progress(self, *args, **kwargs):
        pass

    def check_if_cancelled(self):
        pass

    def iter_with_progress(self, sequence, *args, **kwargs):
        return iter(sequence)

    def start_job(self, *args, **kwargs):
        pass

    def start_subjob(self, *args, **kwargs):
        return NullJob()

    def set_progress(self, *args, **kwargs):
        pass


nulljob = NullJob()
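Since the callback-driven design above takes a moment to absorb, here is a small self-contained sketch of driving a Job directly; the two-phase split and the printed figures are illustrative only:

    from hscommon.jobprogress.job import Job

    def on_progress(progress, desc=''):
        print(progress, desc)   # progress is 0-100, or -1 for indeterminate
        return True             # a falsy return raises JobCancelled in the worker

    j = Job(2, on_progress)     # two jobs of equal proportion
    j.start_job(10, 'first phase')
    for _ in range(10):
        j.add_progress()        # reaches 50 once the first job is done

    sub = j.start_subjob([1, 3])    # second job, split 1:3 between two sub jobs
    sub.start_job(5)
    sub.add_progress(5)             # parent reports 62
    sub.start_job(5)
    sub.add_progress(5)             # parent reports 100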
hscommon/jobprogress/performer.py (new file, 72 lines)
@@ -0,0 +1,72 @@
# Created By: Virgil Dupras
# Created On: 2010-11-19
# Copyright 2011 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license

from threading import Thread
import sys

from .job import Job, JobInProgressError, JobCancelled

class ThreadedJobPerformer:
    """Run threaded jobs and track progress.

    To run a threaded job, first create a job with create_job(), then call run_threaded() with
    your work function as a parameter.

    Example:

        j = self.create_job()
        self.run_threaded(self.some_work_func, (arg1, arg2, j))
    """
    _job_running = False
    last_error = None

    #--- Protected
    def create_job(self):
        if self._job_running:
            raise JobInProgressError()
        self.last_progress = -1
        self.last_desc = ''
        self.job_cancelled = False
        return Job(1, self._update_progress)

    def _async_run(self, *args):
        target = args[0]
        args = tuple(args[1:])
        self._job_running = True
        self.last_error = None
        try:
            target(*args)
        except JobCancelled:
            pass
        except Exception as e:
            self.last_error = e
            self.last_traceback = sys.exc_info()[2]
        finally:
            self._job_running = False
            self.last_progress = None

    def reraise_if_error(self):
        """Reraises the error that happened in the thread, if any.

        Call this after the caller of run_threaded has detected that self._job_running returned
        to False.
        """
        if self.last_error is not None:
            raise self.last_error.with_traceback(self.last_traceback)

    def _update_progress(self, newprogress, newdesc=''):
        self.last_progress = newprogress
        if newdesc:
            self.last_desc = newdesc
        return not self.job_cancelled

    def run_threaded(self, target, args=()):
        if self._job_running:
            raise JobInProgressError()
        args = (target, ) + args
        Thread(target=self._async_run, args=args).start()
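The docstring's example assumes a subclass; here is a standalone, hedged sketch of the same flow, polling from the calling thread instead of a GUI timer (the worker function and the crude polling loop are illustrative only):

    import time
    from hscommon.jobprogress.performer import ThreadedJobPerformer

    def worker(j):
        for _ in j.iter_with_progress(range(5), "step %d/%d"):
            time.sleep(0.1)

    performer = ThreadedJobPerformer()
    j = performer.create_job()
    performer.run_threaded(worker, args=(j,))
    time.sleep(0.2)                      # crude: give the thread a moment to start
    while performer._job_running:
        print(performer.last_progress, performer.last_desc)
        time.sleep(0.1)
    performer.reraise_if_error()         # re-raise anything the thread caught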
hscommon/jobprogress/qt.py (new file, 52 lines)
@@ -0,0 +1,52 @@
# Created By: Virgil Dupras
# Created On: 2009-09-14
# Copyright 2011 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license

from PyQt4.QtCore import pyqtSignal, Qt, QTimer
from PyQt4.QtGui import QProgressDialog

from . import job, performer

class Progress(QProgressDialog, performer.ThreadedJobPerformer):
    finished = pyqtSignal(['QString'])

    def __init__(self, parent):
        flags = Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowSystemMenuHint
        QProgressDialog.__init__(self, '', "Cancel", 0, 100, parent, flags)
        self.setModal(True)
        self.setAutoReset(False)
        self.setAutoClose(False)
        self._timer = QTimer()
        self._jobid = ''
        self._timer.timeout.connect(self.updateProgress)

    def updateProgress(self):
        # the values might change before setValue happens
        last_progress = self.last_progress
        last_desc = self.last_desc
        if not self._job_running or last_progress is None:
            self._timer.stop()
            self.close()
            if not self.job_cancelled:
                self.finished.emit(self._jobid)
            return
        if self.wasCanceled():
            self.job_cancelled = True
            return
        if last_desc:
            self.setLabelText(last_desc)
        self.setValue(last_progress)

    def run(self, jobid, title, target, args=()):
        self._jobid = jobid
        self.reset()
        self.setLabelText('')
        self.run_threaded(target, args)
        self.setWindowTitle(title)
        self.show()
        self._timer.start(500)
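A hedged usage sketch for this dialog, assuming a PyQt4 environment; the worker, the job wiring through create_job(), and the quit-on-finish handling are assumptions for illustration, not code from dupeGuru's Qt layer (cancellation handling is omitted for brevity):

    import sys, time
    from PyQt4.QtGui import QApplication
    from hscommon.jobprogress.qt import Progress

    def worker(j):
        for _ in j.iter_with_progress(range(20), "step %d/%d"):
            time.sleep(0.1)

    app = QApplication(sys.argv)
    progress = Progress(None)
    progress.finished.connect(lambda jobid: app.quit())
    j = progress.create_job()            # initializes last_progress/job_cancelled
    progress.run('demo-job', "Working", worker, args=(j,))
    sys.exit(app.exec_())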
hscommon/path.py (0 changes, Executable file → Normal file)
@@ -129,7 +129,7 @@ def package_debian_distribution(edition, distribution):
     ed = lambda s: s.format(edition)
     destpath = op.join('build', 'dupeguru-{0}-{1}'.format(edition, version))
     srcpath = op.join(destpath, 'src')
-    packages = ['hscommon', 'core', ed('core_{0}'), 'qtlib', 'qt', 'send2trash', 'jobprogress']
+    packages = ['hscommon', 'core', ed('core_{0}'), 'qtlib', 'qt', 'send2trash']
     if edition == 'me':
         packages.append('hsaudiotag')
     copy_files_to_package(srcpath, packages, with_so=False)
@@ -171,7 +171,7 @@ def package_arch(edition):
     print("Packaging for Arch")
     ed = lambda s: s.format(edition)
     srcpath = op.join('build', ed('dupeguru-{}-arch'))
-    packages = ['hscommon', 'core', ed('core_{0}'), 'qtlib', 'qt', 'send2trash', 'jobprogress']
+    packages = ['hscommon', 'core', ed('core_{0}'), 'qtlib', 'qt', 'send2trash']
     if edition == 'me':
         packages.append('hsaudiotag')
     copy_files_to_package(srcpath, packages, with_so=True)
@@ -1,4 +1,3 @@
-jobprogress>=1.0.4
 Send2Trash>=1.3.0
 sphinx>=1.2.2
 polib>=1.0.4