Mirror of https://github.com/arsenetar/dupeguru.git, synced 2026-02-01 19:11:38 +00:00
Compare commits: 143147cb8e ... master (102 commits)

16aa6c21ff c32a1246b5 7f61330dda eb4d791434 e9948d9b3f c9aa6f1b7a 8f197ea7e1 3a97ba941a
e3bcf9d686 a81069be61 08154815d0 a95a9db08b 3d866cec9a 253dfd897c 6e87f53f91 95e04c4d82
e3a612a704 53d5ac06bf 13dd00c798 9f22835f73 85a4557525 70d956b4f8 007404f46a 4385b50825
4ef1d24351 03be82c0b0 332b814c00 f56bef67e1 8160fe4fcc 9ad84ade29 18f32fda19 99ec4e0f27
fe0e4bef91 322d29a996 c5a71f61b8 10405ad063 a257dbf0d5 7a4506ece3 aade6593ac 6d8b86b7eb
e41c91623c 46521c8af1 549eb7f153 8125e3ec97 8c5e18b980 d81759f77f c57042fdd2 057be0294a
81daddd072 1e651a1603 78f4145910 46d1afb566 a5e31f15f0 0cf6c9a1a2 6db2fa2be6 2dd2a801cc
83f5e80427 091cae0cc6 e30a135451 1db93fd142 48862b6414 c920412856 4448b999ab af1ae33598
265d10b261 f1153c85c0 1eee3fd7e4 1827827fdf db174d4e63 1f1dfa88dc 916c5204cf 71af825b37
97f490b8b7 d369bcddd7 360dceca7b 92b27801c3 b9aabb8545 d5eeab4a17 7865e4aeac 58863b1728
e382683f66 f7ed1c801c f587c7b5d8 40ff40bea8 7a44c72a0a 66aff9f74e 5451f55219 36280b01e6
18359c3ea6 0a4e61edf5 d73a85b82e 81c593399e 6a732a79a8 63dd4d4561 e0061d7bc1 c5818b1d1f
a470a8de25 a37b5b0eeb efd500ecc1 43fcc52291 50f5db1543 a5b0ccdd02
12 .github/FUNDING.yml vendored
@@ -1,13 +1 @@
# These are supported funding model platforms

github: arsenetar
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
9 .github/workflows/codeql-analysis.yml vendored
@@ -25,11 +25,10 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -44,7 +43,7 @@ jobs:
make modules
- if: matrix.language == 'python'
name: Autobuild
uses: github/codeql-action/autobuild@v1
uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
# Analysis
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
75 .github/workflows/default.yml vendored
@@ -4,71 +4,42 @@ name: Default CI/CD
on:
push:
branches: [master]
pull_request:
branches: [master]

jobs:
lint:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.10
uses: actions/setup-python@v2
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Set up Python 3.12
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt -r requirements-extra.txt
- name: Lint with flake8
run: |
flake8 .
format:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.10
uses: actions/setup-python@v2
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt -r requirements-extra.txt
- name: Check format with black
run: |
black .
python-version: "3.12"
- uses: pre-commit/action@v3.0.1
test:
needs: [lint, format]
needs: [pre-commit]
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: [3.7, 3.8, 3.9, "3.10"]
exclude:
- os: macos-latest
python-version: 3.7
- os: macos-latest
python-version: 3.8
- os: macos-latest
python-version: 3.9
os: [ubuntu-latest]
python-version: [3.8, 3.9, "3.10", "3.11", "3.12", "3.13", "3.14"]
include:
- os: windows-latest
python-version: 3.7
- os: windows-latest
python-version: 3.8
- os: windows-latest
python-version: 3.9
python-version: "3.12"
- os: macos-latest
python-version: "3.12"

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools
pip install -r requirements.txt -r requirements-extra.txt
- name: Build python modules
run: |
@@ -78,7 +49,17 @@ jobs:
pytest core hscommon
- name: Upload Artifacts
if: matrix.os == 'ubuntu-latest'
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: modules ${{ matrix.python-version }}
path: ${{ github.workspace }}/**/*.so
path: build/**/*.so
merge-artifacts:
needs: [test]
runs-on: ubuntu-latest
steps:
- name: Merge Artifacts
uses: actions/upload-artifact/merge@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: modules
pattern: modules*
delete-merged: true
26 .github/workflows/tx-push.yml vendored Normal file
@@ -0,0 +1,26 @@
# Push translation source to Transifex
name: Transifex Sync

on:
push:
branches:
- master
paths:
- locale/*.pot

env:
TX_VERSION: "v1.6.10"

jobs:
push-source:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Get Transifex Client
run: |
curl -o- https://raw.githubusercontent.com/transifex/cli/master/install.sh | bash -s -- $TX_VERSION
- name: Update & Push Translation Sources
env:
TX_TOKEN: ${{ secrets.TX_TOKEN }}
run: |
./tx push -s --use-git-timestamps
4 .gitignore vendored
@@ -87,8 +87,8 @@ cython_debug/
# Visual Studio Code
.vscode/*
!.vscode/settings.json
#!.vscode/tasks.json
#!.vscode/launch.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
24 .pre-commit-config.yaml Normal file
@@ -0,0 +1,24 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-yaml
- id: check-toml
- id: end-of-file-fixer
exclude: ".*.json"
- id: trailing-whitespace
- repo: https://github.com/psf/black
rev: 24.2.0
hooks:
- id: black
- repo: https://github.com/PyCQA/flake8
rev: 7.0.0
hooks:
- id: flake8
exclude: ^(.tox|env|build|dist|help|qt/dg_rc.py|pkg).*
- repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
rev: v9.11.0
hooks:
- id: commitlint
stages: [commit-msg]
additional_dependencies: ["@commitlint/config-conventional"]
@@ -1 +1 @@
sonar.python.version=3.7, 3.8, 3.9, 3.10
sonar.python.version=3.7, 3.8, 3.9, 3.10, 3.11
@@ -13,15 +13,8 @@ source_file = locale/core.pot
source_lang = en
type = PO

[o:voltaicideas:p:dupeguru-1:r:qtlib]
file_filter = qtlib/locale/<lang>/LC_MESSAGES/qtlib.po
source_file = qtlib/locale/qtlib.pot
source_lang = en
type = PO

[o:voltaicideas:p:dupeguru-1:r:ui]
file_filter = locale/<lang>/LC_MESSAGES/ui.po
source_file = locale/ui.pot
source_lang = en
type = PO
6 .vscode/extensions.json vendored
@@ -3,8 +3,10 @@
"recommendations": [
"redhat.vscode-yaml",
"ms-python.vscode-pylance",
"ms-python.python"
"ms-python.python",
"ms-python.black-formatter",
],
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
// List of extensions recommended by VS Code that should not be recommended for
// users of this workspace.
"unwantedRecommendations": []
}
17 .vscode/launch.json vendored Normal file
@@ -0,0 +1,17 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "DupuGuru",
"type": "debugpy",
"request": "launch",
"program": "run.py",
"console": "integratedTerminal",
"subProcess": true,
"justMyCode": false
},
]
}
13 .vscode/settings.json vendored
@@ -1,12 +1,17 @@
{
"python.formatting.provider": "black",
"cSpell.words": [
"Dupras",
"hscommon"
],
"editor.rulers": [
88,
120
],
"python.languageServer": "Pylance",
"yaml.schemaStore.enable": true,
"yaml.schemas": {
"https://json.schemastore.org/github-workflow.json": ".github/workflows/*.yml"
}
"[python]": {
"editor.formatOnSave": true,
"editor.defaultFormatter": "ms-python.black-formatter"
},
"python.testing.pytestEnabled": true
}
1 LICENSE
@@ -619,4 +619,3 @@ Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS
@@ -3,4 +3,3 @@ recursive-include core *.m
include run.py
graft locale
graft help
graft qtlib/locale
2 Makefile
@@ -35,7 +35,7 @@ endif
# Our build scripts are not very "make like" yet and perform their task in a bundle. For now, we
# use one of each file to act as a representative, a target, of these groups.

packages = hscommon qtlib core qt
packages = hscommon core qt
localedirs = $(wildcard locale/*/LC_MESSAGES)
pofiles = $(wildcard locale/*/LC_MESSAGES/*.po)
mofiles = $(patsubst %.po,%.mo,$(pofiles))
@@ -22,7 +22,6 @@ This folder contains the source for dupeGuru. Its documentation is in `help`, bu
* help: Help document, written for Sphinx.
* locale: .po files for localization.
* hscommon: A collection of helpers used across HS applications.
* qtlib: A collection of helpers used across Qt UI codebases of HS applications.

## How to build dupeGuru from source
19 build.py
@@ -10,7 +10,6 @@ from optparse import OptionParser
import shutil
from multiprocessing import Pool

from setuptools import sandbox
from hscommon import sphinxgen
from hscommon.build import (
add_to_pythonpath,
@@ -18,6 +17,7 @@ from hscommon.build import (
fix_qt_resource_file,
)
from hscommon import loc
import subprocess

def parse_args():
@@ -61,7 +61,7 @@ def parse_args():

def build_one_help(language):
print("Generating Help in {}".format(language))
print(f"Generating Help in {language}")
current_path = Path(".").absolute()
changelog_path = current_path.joinpath("help", "changelog")
tixurl = "https://github.com/arsenetar/dupeguru/issues/{}"
@@ -88,14 +88,8 @@ def build_help():
p.map(build_one_help, languages)

def build_qt_localizations():
loc.compile_all_po(Path("qtlib", "locale"))
loc.merge_locale_dir(Path("qtlib", "locale"), "locale")

def build_localizations():
loc.compile_all_po("locale")
build_qt_localizations()
locale_dest = Path("build", "locale")
if locale_dest.exists():
shutil.rmtree(locale_dest)
@@ -110,25 +104,21 @@ def build_updatepot():
loc.generate_pot(["core"], Path("locale", "columns.pot"), ["coltr"])
print("Building ui.pot")
loc.generate_pot(["qt"], Path("locale", "ui.pot"), ["tr"], merge=True)
print("Building qtlib.pot")
loc.generate_pot(["qtlib"], Path("qtlib", "locale", "qtlib.pot"), ["tr"])

def build_mergepot():
print("Updating .po files using .pot files")
loc.merge_pots_into_pos("locale")
loc.merge_pots_into_pos(Path("qtlib", "locale"))

def build_normpo():
loc.normalize_all_pos("locale")
loc.normalize_all_pos(Path("qtlib", "locale"))

def build_pe_modules():
print("Building PE Modules")
# Leverage setup.py to build modules
sandbox.run_setup("setup.py", ["build_ext", "--inplace"])
subprocess.check_call([sys.executable, "setup.py", "build_ext", "--inplace"])

def build_normal():
@@ -139,7 +129,8 @@ def build_normal():
print("Building localizations")
build_localizations()
print("Building Qt stuff")
print_and_do("pyrcc5 {0} > {1}".format(Path("qt", "dg.qrc"), Path("qt", "dg_rc.py")))
Path("qt", "dg_rc.py").unlink(missing_ok=True)
print_and_do("pyrcc5 {} > {}".format(Path("qt", "dg.qrc"), Path("qt", "dg_rc.py")))
fix_qt_resource_file(Path("qt", "dg_rc.py"))
build_help()
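Editor's note on the build_pe_modules hunk above: the sandboxed setuptools call is replaced with a plain subprocess invocation. A minimal sketch of that pattern, assuming only the standard library (the function name is illustrative):

    import subprocess
    import sys

    def build_ext_inplace():
        # Run "python setup.py build_ext --inplace" with the interpreter that is
        # running this script. check_call raises CalledProcessError on a non-zero
        # exit, so a failed build is not silently swallowed the way a sandboxed
        # in-process call can be.
        subprocess.check_call([sys.executable, "setup.py", "build_ext", "--inplace"])

Using sys.executable rather than a bare "python" keeps the child process on the same interpreter and virtualenv as the build script itself.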
17 commitlint.config.js Normal file
@@ -0,0 +1,17 @@
const Configuration = {
/*
* Resolve and load @commitlint/config-conventional from node_modules.
* Referenced packages must be installed
*/
extends: ['@commitlint/config-conventional'],
/*
* Any rules defined here will override rules from @commitlint/config-conventional
*/
rules: {
'header-max-length': [2, 'always', 72],
'subject-case': [2, 'always', 'sentence-case'],
'scope-enum': [2, 'always'],
},
};

module.exports = Configuration;
@@ -1,2 +1,2 @@
__version__ = "4.2.1"
__version__ = "4.3.1"
__appname__ = "dupeGuru"
63 core/app.py
@@ -4,6 +4,8 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import cProfile
import datetime
import os
import os.path as op
import logging
@@ -21,20 +23,20 @@ from hscommon.util import delete_if_empty, first, escape, nonone, allsame
from hscommon.trans import tr
from hscommon import desktop

from . import se, me, pe
from .pe.photo import get_delta_dimensions
from .util import cmp_value, fix_surrogate_encoding
from . import directories, results, export, fs, prioritize
from .ignore import IgnoreList
from .exclude import ExcludeDict as ExcludeList
from .scanner import ScanType
from .gui.deletion_options import DeletionOptions
from .gui.details_panel import DetailsPanel
from .gui.directory_tree import DirectoryTree
from .gui.ignore_list_dialog import IgnoreListDialog
from .gui.exclude_list_dialog import ExcludeListDialogCore
from .gui.problem_dialog import ProblemDialog
from .gui.stats_label import StatsLabel
from core import se, me, pe
from core.pe.photo import get_delta_dimensions
from core.util import cmp_value, fix_surrogate_encoding
from core import directories, results, export, fs, prioritize
from core.ignore import IgnoreList
from core.exclude import ExcludeDict as ExcludeList
from core.scanner import ScanType
from core.gui.deletion_options import DeletionOptions
from core.gui.details_panel import DetailsPanel
from core.gui.directory_tree import DirectoryTree
from core.gui.ignore_list_dialog import IgnoreListDialog
from core.gui.exclude_list_dialog import ExcludeListDialogCore
from core.gui.problem_dialog import ProblemDialog
from core.gui.stats_label import StatsLabel

HAD_FIRST_LAUNCH_PREFERENCE = "HadFirstLaunch"
DEBUG_MODE_PREFERENCE = "DebugMode"
@@ -124,15 +126,13 @@ class DupeGuru(Broadcaster):
NAME = PROMPT_NAME = "dupeGuru"

PICTURE_CACHE_TYPE = "sqlite" # set to 'shelve' for a ShelveCache

def __init__(self, view, portable=False):
if view.get_default(DEBUG_MODE_PREFERENCE):
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("Debug mode enabled")
Broadcaster.__init__(self)
self.view = view
self.appdata = desktop.special_folder_path(desktop.SpecialFolder.APPDATA, appname=self.NAME, portable=portable)
self.appdata = desktop.special_folder_path(desktop.SpecialFolder.APPDATA, portable=portable)
if not op.exists(self.appdata):
os.makedirs(self.appdata)
self.app_mode = AppMode.STANDARD
@@ -151,7 +151,8 @@ class DupeGuru(Broadcaster):
"clean_empty_dirs": False,
"ignore_hardlink_matches": False,
"copymove_dest_type": DestType.RELATIVE,
"picture_cache_type": self.PICTURE_CACHE_TYPE,
"include_exists_check": True,
"rehash_ignore_mtime": False,
}
self.selected_dupes = []
self.details_panel = DetailsPanel(self)
@@ -181,8 +182,7 @@ class DupeGuru(Broadcaster):
self.view.create_results_window()

def _get_picture_cache_path(self):
cache_type = self.options["picture_cache_type"]
cache_name = "cached_pictures.shelve" if cache_type == "shelve" else "cached_pictures.db"
cache_name = "cached_pictures.db"
return op.join(self.appdata, cache_name)

def _get_dupe_sort_key(self, dupe, get_group, key, delta):
@@ -248,7 +248,7 @@ class DupeGuru(Broadcaster):
ref = group.ref
linkfunc = os.link if use_hardlinks else os.symlink
linkfunc(str(ref.path), str_path)
self.clean_empty_dirs(dupe.path.parent())
self.clean_empty_dirs(dupe.path.parent)

def _create_file(self, path):
# We add fs.Folder to fileclasses in case the file we're loading contains folder paths.
@@ -262,7 +262,7 @@ class DupeGuru(Broadcaster):
try:
f._read_all_info(attrnames=self.METADATA_TO_READ)
return f
except EnvironmentError:
except OSError:
return None

def _get_export_data(self):
@@ -553,9 +553,15 @@ class DupeGuru(Broadcaster):
# a workaround to make the damn thing work.
exepath, args = match.groups()
path, exename = op.split(exepath)
subprocess.Popen(exename + args, shell=True, cwd=path)
p = subprocess.Popen(
exename + args, shell=True, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = p.stdout.read()
logging.info("Custom command %s %s: %s", exename, args, output)
else:
subprocess.Popen(dupe_cmd, shell=True)
p = subprocess.Popen(dupe_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = p.stdout.read()
logging.info("Custom command %s: %s", dupe_cmd, output)

def load(self):
"""Load directory selection and ignore list from files in appdata.
@@ -780,12 +786,13 @@ class DupeGuru(Broadcaster):
except OSError as e:
self.view.show_message(tr("Couldn't write to file: {}").format(str(e)))

def start_scanning(self):
def start_scanning(self, profile_scan=False):
"""Starts an async job to scan for duplicates.

Scans folders selected in :attr:`directories` and put the results in :attr:`results`
"""
scanner = self.SCANNER_CLASS()
fs.filesdb.ignore_mtime = self.options["rehash_ignore_mtime"] is True
if not self.directories.has_any_file():
self.view.show_message(tr("The selected directories contain no scannable file."))
return
@@ -800,6 +807,9 @@ class DupeGuru(Broadcaster):
self._results_changed()

def do(j):
if profile_scan:
pr = cProfile.Profile()
pr.enable()
j.set_progress(0, tr("Collecting files to scan"))
if scanner.scan_type == ScanType.FOLDERS:
files = list(self.directories.get_folders(folderclass=se.fs.Folder, j=j))
@@ -810,6 +820,9 @@ class DupeGuru(Broadcaster):
logging.info("Scanning %d files" % len(files))
self.results.groups = scanner.get_dupe_groups(files, self.ignore_list, j)
self.discarded_file_count = scanner.discarded_file_count
if profile_scan:
pr.disable()
pr.dump_stats(op.join(self.appdata, f"{datetime.datetime.now():%Y-%m-%d_%H-%M-%S}.profile"))

self._start_job(JobType.SCAN, do)
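Editor's note on the custom-command hunks above: the change switches from fire-and-forget Popen to capturing the command's output and logging it. A minimal sketch of the same capture pattern, with a hypothetical command; note that reading the pipe blocks until the child closes stdout:

    import logging
    import subprocess

    def run_and_log(cmd):
        # Merge stderr into stdout and capture both, as in the hunk above.
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = p.stdout.read()  # blocks until the command closes its output
        p.wait()
        logging.info("Custom command %s: %s", cmd, output)

    run_and_log("echo hello")  # hypothetical command, for illustration only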
@@ -13,7 +13,7 @@ from hscommon.jobprogress import job
from hscommon.util import FileOrPath
from hscommon.trans import tr

from . import fs
from core import fs

__all__ = [
"Directories",
@@ -84,66 +84,64 @@ class Directories:
for denied_path_re in self._exclude_list.compiled:
if denied_path_re.match(str(path.name)):
return DirectoryState.EXCLUDED
# return # We still use the old logic to force state on hidden dirs
return DirectoryState.NORMAL
# Override this in subclasses to specify the state of some special folders.
if path.name.startswith("."):
return DirectoryState.EXCLUDED
return DirectoryState.NORMAL

def _get_files(self, from_path, fileclasses, j):
for root, dirs, files in os.walk(str(from_path)):
j.check_if_cancelled()
root_path = Path(root)
state = self.get_state(root_path)
if state == DirectoryState.EXCLUDED and not any(
p.parts[: len(root_path.parts)] == root_path.parts for p in self.states
):
# Recursively get files from folders with lots of subfolder is expensive. However, there
# might be a subfolder in this path that is not excluded. What we want to do is to skim
# through self.states and see if we must continue, or we can stop right here to save time
del dirs[:]
try:
if state != DirectoryState.EXCLUDED:
# Old logic
if self._exclude_list is None or not self._exclude_list.mark_count:
found_files = [fs.get_file(root_path.joinpath(f), fileclasses=fileclasses) for f in files]
else:
found_files = []
# print(f"len of files: {len(files)} {files}")
for f in files:
if not self._exclude_list.is_excluded(root, f):
found_files.append(fs.get_file(root_path.joinpath(f), fileclasses=fileclasses))
found_files = [f for f in found_files if f is not None]
# In some cases, directories can be considered as files by dupeGuru, which is
# why we have this line below. In fact, there only one case: Bundle files under
# OS X... In other situations, this forloop will do nothing.
for d in dirs[:]:
f = fs.get_file(root_path.joinpath(d), fileclasses=fileclasses)
if f is not None:
found_files.append(f)
dirs.remove(d)
with os.scandir(from_path) as iter:
root_path = Path(from_path)
state = self.get_state(root_path)
# if we have no un-excluded dirs under this directory skip going deeper
skip_dirs = state == DirectoryState.EXCLUDED and not any(
p.parts[: len(root_path.parts)] == root_path.parts for p in self.states
)
count = 0
for item in iter:
j.check_if_cancelled()
try:
if item.is_dir():
if skip_dirs:
continue
yield from self._get_files(item.path, fileclasses, j)
continue
elif state == DirectoryState.EXCLUDED:
continue
# File excluding or not
if (
self._exclude_list is None
or not self._exclude_list.mark_count
or not self._exclude_list.is_excluded(str(from_path), item.name)
):
file = fs.get_file(item, fileclasses=fileclasses)
if file:
file.is_ref = state == DirectoryState.REFERENCE
count += 1
yield file
except (OSError, fs.InvalidPath):
pass
logging.debug(
"Collected %d files in folder %s",
len(found_files),
count,
str(root_path),
)
for file in found_files:
file.is_ref = state == DirectoryState.REFERENCE
yield file
except (EnvironmentError, fs.InvalidPath):
except OSError:
pass

def _get_folders(self, from_folder, j):
j.check_if_cancelled()
try:
for subfolder in from_folder.subfolders:
for folder in self._get_folders(subfolder, j):
yield folder
yield from self._get_folders(subfolder, j)
state = self.get_state(from_folder.path)
if state != DirectoryState.EXCLUDED:
from_folder.is_ref = state == DirectoryState.REFERENCE
logging.debug("Yielding Folder %r state: %d", from_folder, state)
yield from_folder
except (EnvironmentError, fs.InvalidPath):
except (OSError, fs.InvalidPath):
pass

# ---Public
@@ -175,7 +173,7 @@ class Directories:
subpaths = [p for p in path.glob("*") if p.is_dir()]
subpaths.sort(key=lambda x: x.name.lower())
return subpaths
except EnvironmentError:
except OSError:
return []

def get_files(self, fileclasses=None, j=job.nulljob):
@@ -189,7 +187,7 @@ class Directories:
for path in self._dirs:
for file in self._get_files(path, fileclasses=fileclasses, j=j):
file_count += 1
if type(j) != job.NullJob:
if not isinstance(j, job.NullJob):
j.set_progress(-1, tr("Collected {} files to scan").format(file_count))
yield file
@@ -205,7 +203,7 @@ class Directories:
from_folder = folderclass(path)
for folder in self._get_folders(from_folder, j):
folder_count += 1
if type(j) != job.NullJob:
if not isinstance(j, job.NullJob):
j.set_progress(-1, tr("Collected {} folders to scan").format(folder_count))
yield folder
@@ -217,19 +215,16 @@ class Directories:
# direct match? easy result.
if path in self.states:
return self.states[path]
state = self._default_state_for_path(path) or DirectoryState.NORMAL
state = self._default_state_for_path(path)
# Save non-default states in cache, necessary for _get_files()
if state != DirectoryState.NORMAL:
self.states[path] = state
return state

prevlen = 0
# we loop through the states to find the longest matching prefix
# if the parent has a state in cache, return that state
for p, s in self.states.items():
if p in path.parents and len(p.parts) > prevlen:
prevlen = len(p.parts)
state = s
# find the longest parent path that is in states and return that state if found
# NOTE: path.parents is ordered longest to shortest
for parent_path in path.parents:
if parent_path in self.states:
return self.states[parent_path]
return state

def has_any_file(self):
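Editor's note on the get_state rewrite above: the manual longest-prefix bookkeeping is replaced with a walk over path.parents, which pathlib documents as ordered from nearest ancestor down to the root, so the first hit in the states cache is automatically the longest matching prefix. A self-contained sketch with hypothetical states:

    from pathlib import Path

    states = {Path("/photos"): "NORMAL", Path("/photos/raw"): "EXCLUDED"}  # hypothetical cache

    def state_for(path):
        if path in states:
            return states[path]
        # Path.parents yields the nearest ancestor first, so the first match
        # is the longest prefix; no manual length comparisons are needed.
        for parent in path.parents:
            if parent in states:
                return states[parent]
        return "NORMAL"

    print(state_for(Path("/photos/raw/2021/img.cr2")))  # -> EXCLUDED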
@@ -166,7 +166,7 @@ def reduce_common_words(word_dict, threshold):
The exception to this removal are the objects where all the words of the object are common.
Because if we remove them, we will miss some duplicates!
"""
uncommon_words = set(word for word, objects in word_dict.items() if len(objects) < threshold)
uncommon_words = {word for word, objects in word_dict.items() if len(objects) < threshold}
for word, objects in list(word_dict.items()):
if len(objects) < threshold:
continue
@@ -303,12 +303,13 @@ def getmatches_by_contents(files, bigsize=0, j=job.nulljob):
# skip hashing for zero length files
result.append(Match(first, second, 100))
continue
if first.digest_partial == second.digest_partial:
# if digests are the same (and not None) then files match
if first.digest_partial is not None and first.digest_partial == second.digest_partial:
if bigsize > 0 and first.size > bigsize:
if first.digest_samples == second.digest_samples:
if first.digest_samples is not None and first.digest_samples == second.digest_samples:
result.append(Match(first, second, 100))
else:
if first.digest == second.digest:
if first.digest is not None and first.digest == second.digest:
result.append(Match(first, second, 100))
group_count += 1
j.add_progress(desc=PROGRESS_MESSAGE % (len(result), group_count))
@@ -409,7 +410,7 @@ class Group:

You can call this after the duplicate scanning process to free a bit of memory.
"""
discarded = set(m for m in self.matches if not all(obj in self.unordered for obj in [m.first, m.second]))
discarded = {m for m in self.matches if not all(obj in self.unordered for obj in [m.first, m.second])}
self.matches -= discarded
self.candidates = defaultdict(set)
return discarded
@@ -456,7 +457,7 @@ class Group:
self._matches_for_ref = None
if (len(self) > 1) and any(not getattr(item, "is_ref", False) for item in self):
if discard_matches:
self.matches = set(m for m in self.matches if item not in m)
self.matches = {m for m in self.matches if item not in m}
else:
self._clear()
except ValueError:
@@ -529,7 +530,7 @@ def get_groups(matches):
del dupe2group
del matches
# should free enough memory to continue
logging.warning("Memory Overflow. Groups: {0}".format(len(groups)))
logging.warning(f"Memory Overflow. Groups: {len(groups)}")
# Now that we have a group, we have to discard groups' matches and see if there're any "orphan"
# matches, that is, matches that were candidate in a group but that none of their 2 files were
# accepted in the group. With these orphan groups, it's safe to build additional groups
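Editor's note on the guarded digest comparisons above: a failed hash leaves the digest as None, and None == None is True, so without the "is not None" check two unreadable files would be reported as a 100% match. A tiny illustration of the failure mode:

    first_digest = None   # hashing failed or the file was unreadable
    second_digest = None

    # Buggy form: None == None is True, so two unhashable files "match".
    assert (first_digest == second_digest) is True

    # Guarded form, as in the hunk above: no match unless a real digest exists.
    is_match = first_digest is not None and first_digest == second_digest
    assert is_match is False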
@@ -2,7 +2,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .markable import Markable
from core.markable import Markable
from xml.etree import ElementTree as ET

# TODO: perhaps use regex module for better Unicode support? https://pypi.org/project/regex/
144 core/fs.py
@@ -13,6 +13,17 @@
import os

from math import floor
import logging
import sqlite3
from sys import platform
from threading import Lock
from typing import Any, AnyStr, Union, Callable

from pathlib import Path
from hscommon.util import nonone, get_file_ext

hasher: Callable
try:
import xxhash

@@ -22,15 +33,6 @@ except ImportError:

hasher = hashlib.md5

from math import floor
import logging
import sqlite3
from threading import Lock
from typing import Any, AnyStr, Union

from pathlib import Path
from hscommon.util import nonone, get_file_ext

__all__ = [
"File",
"Folder",
@@ -53,6 +55,9 @@ CHUNK_SIZE = 1024 * 1024 # 1 MiB
# Minimum size below which partial hashing is not used
MIN_FILE_SIZE = 3 * CHUNK_SIZE # 3MiB, because we take 3 samples

# Partial hashing offset and size
PARTIAL_OFFSET_SIZE = (0x4000, 0x4000)

class FSError(Exception):
cls_message = "An error has occured on '{name}' in '{parent}'"
@@ -96,60 +101,76 @@ class FilesDB:
schema_version = 1
schema_version_description = "Changed from md5 to xxhash if available."

create_table_query = "CREATE TABLE IF NOT EXISTS files (path TEXT PRIMARY KEY, size INTEGER, mtime_ns INTEGER, entry_dt DATETIME, digest BLOB, digest_partial BLOB, digest_samples BLOB)"
create_table_query = """CREATE TABLE IF NOT EXISTS files (path TEXT PRIMARY KEY, size INTEGER, mtime_ns INTEGER,
entry_dt DATETIME, digest BLOB, digest_partial BLOB, digest_samples BLOB)"""
drop_table_query = "DROP TABLE IF EXISTS files;"
select_query = "SELECT {key} FROM files WHERE path=:path AND size=:size and mtime_ns=:mtime_ns"
select_query_ignore_mtime = "SELECT {key} FROM files WHERE path=:path AND size=:size"
insert_query = """
INSERT INTO files (path, size, mtime_ns, entry_dt, {key}) VALUES (:path, :size, :mtime_ns, datetime('now'), :value)
INSERT INTO files (path, size, mtime_ns, entry_dt, {key})
VALUES (:path, :size, :mtime_ns, datetime('now'), :value)
ON CONFLICT(path) DO UPDATE SET size=:size, mtime_ns=:mtime_ns, entry_dt=datetime('now'), {key}=:value;
"""

ignore_mtime = False

def __init__(self):
self.conn = None
self.cur = None
self.lock = None

def connect(self, path: Union[AnyStr, os.PathLike]) -> None:
if platform.startswith("gnu0"):
self.conn = sqlite3.connect(path, check_same_thread=False, isolation_level=None)
else:
self.conn = sqlite3.connect(path, check_same_thread=False)
self.cur = self.conn.cursor()
self.lock = Lock()
self._check_upgrade()

def _check_upgrade(self) -> None:
with self.lock:
has_schema = self.cur.execute(
with self.lock, self.conn as conn:
has_schema = conn.execute(
"SELECT NAME FROM sqlite_master WHERE type='table' AND name='schema_version'"
).fetchall()
version = None
if has_schema:
version = self.cur.execute("SELECT version FROM schema_version ORDER BY version DESC").fetchone()[0]
version = conn.execute("SELECT version FROM schema_version ORDER BY version DESC").fetchone()[0]
else:
self.cur.execute("CREATE TABLE schema_version (version int PRIMARY KEY, description TEXT)")
conn.execute("CREATE TABLE schema_version (version int PRIMARY KEY, description TEXT)")
if version != self.schema_version:
self.cur.execute(self.drop_table_query)
self.cur.execute(
conn.execute(self.drop_table_query)
conn.execute(
"INSERT OR REPLACE INTO schema_version VALUES (:version, :description)",
{"version": self.schema_version, "description": self.schema_version_description},
)
self.cur.execute(self.create_table_query)
self.conn.commit()
conn.execute(self.create_table_query)

def clear(self) -> None:
with self.lock:
self.cur.execute(self.drop_table_query)
self.cur.execute(self.create_table_query)
with self.lock, self.conn as conn:
conn.execute(self.drop_table_query)
conn.execute(self.create_table_query)

def get(self, path: Path, key: str) -> Union[bytes, None]:
stat = path.stat()
size = stat.st_size
mtime_ns = stat.st_mtime_ns

with self.lock:
self.cur.execute(self.select_query.format(key=key), {"path": str(path), "size": size, "mtime_ns": mtime_ns})
result = self.cur.fetchone()
try:
with self.conn as conn:
if self.ignore_mtime:
cursor = conn.execute(
self.select_query_ignore_mtime.format(key=key), {"path": str(path), "size": size}
)
else:
cursor = conn.execute(
self.select_query.format(key=key),
{"path": str(path), "size": size, "mtime_ns": mtime_ns},
)
result = cursor.fetchone()
cursor.close()

if result:
return result[0]
except Exception as ex:
logging.warning(f"Couldn't get {key} for {path} w/{size}, {mtime_ns}: {ex}")

return None

@@ -157,12 +178,14 @@ class FilesDB:
stat = path.stat()
size = stat.st_size
mtime_ns = stat.st_mtime_ns

with self.lock:
self.cur.execute(
try:
with self.lock, self.conn as conn:
conn.execute(
self.insert_query.format(key=key),
{"path": str(path), "size": size, "mtime_ns": mtime_ns, "value": value},
)
except Exception as ex:
logging.warning(f"Couldn't put {key} for {path} w/{size}, {mtime_ns}: {ex}")

def commit(self) -> None:
with self.lock:
@@ -170,7 +193,6 @@ class FilesDB:

def close(self) -> None:
with self.lock:
self.cur.close()
self.conn.close()

@@ -184,15 +206,22 @@ class File:
# Slots for File make us save quite a bit of memory. In a memory test I've made with a lot of
# files, I saved 35% memory usage with "unread" files (no _read_info() call) and gains become
# even greater when we take into account read attributes (70%!). Yeah, it's worth it.
__slots__ = ("path", "is_ref", "words") + tuple(INITIAL_INFO.keys())
__slots__ = ("path", "unicode_path", "is_ref", "words") + tuple(INITIAL_INFO.keys())

def __init__(self, path):
self.path = path
for attrname in self.INITIAL_INFO:
setattr(self, attrname, NOT_SET)
if type(path) is os.DirEntry:
self.path = Path(path.path)
self.size = nonone(path.stat().st_size, 0)
self.mtime = nonone(path.stat().st_mtime, 0)
else:
self.path = path
if self.path:
self.unicode_path = str(self.path)

def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, str(self.path))
return f"<{self.__class__.__name__} {str(self.path)}>"

def __getattribute__(self, attrname):
result = object.__getattribute__(self, attrname)
@@ -223,14 +252,9 @@ class File:

def _calc_digest_partial(self):
# type: () -> bytes

# This offset is where we should start reading the file to get a partial hash
# For audio file, it should be where audio data starts
offset, size = (0x4000, 0x4000)

with self.path.open("rb") as fp:
fp.seek(offset)
partial_data = fp.read(size)
fp.seek(PARTIAL_OFFSET_SIZE[0])
partial_data = fp.read(PARTIAL_OFFSET_SIZE[1])
return hasher(partial_data).digest()

def _calc_digest_samples(self) -> bytes:
@@ -259,34 +283,29 @@ class File:
self.size = nonone(stats.st_size, 0)
self.mtime = nonone(stats.st_mtime, 0)
elif field == "digest_partial":
try:
self.digest_partial = filesdb.get(self.path, "digest_partial")
if self.digest_partial is None:
# If file is smaller than partial requirements just use the full digest
if self.size < PARTIAL_OFFSET_SIZE[0] + PARTIAL_OFFSET_SIZE[1]:
self.digest_partial = self.digest
else:
self.digest_partial = self._calc_digest_partial()
filesdb.put(self.path, "digest_partial", self.digest_partial)
except Exception as e:
logging.warning("Couldn't get digest_partial for %s: %s", self.path, e)
elif field == "digest":
try:
self.digest = filesdb.get(self.path, "digest")
if self.digest is None:
self.digest = self._calc_digest()
filesdb.put(self.path, "digest", self.digest)
except Exception as e:
logging.warning("Couldn't get digest for %s: %s", self.path, e)
elif field == "digest_samples":
size = self.size
# Might as well hash such small files entirely.
if size <= MIN_FILE_SIZE:
setattr(self, field, self.digest)
self.digest_samples = self.digest
return
try:
self.digest_samples = filesdb.get(self.path, "digest_samples")
if self.digest_samples is None:
self.digest_samples = self._calc_digest_samples()
filesdb.put(self.path, "digest_samples", self.digest_samples)
except Exception as e:
logging.warning(f"Couldn't get digest_samples for {self.path}: {e}")

def _read_all_info(self, attrnames=None):
"""Cache all possible info.
@@ -304,6 +323,14 @@ class File:
"""Returns whether this file wrapper class can handle ``path``."""
return not path.is_symlink() and path.is_file()

def exists(self) -> bool:
"""Safely check if the underlying file exists, treat error as non-existent"""
try:
return self.path.exists()
except OSError as ex:
logging.warning(f"Checking {self.path} raised: {ex}")
return False

def rename(self, newname):
if newname == self.name:
return
@@ -312,7 +339,7 @@ class File:
raise AlreadyExistsError(newname, self.path.parent)
try:
self.path.rename(destpath)
except EnvironmentError:
except OSError:
raise OperationError(self)
if not destpath.exists():
raise OperationError(self)
@@ -346,6 +373,7 @@ class Folder(File):

def __init__(self, path):
File.__init__(self, path)
self.size = NOT_SET
self._subfolders = None

def _all_items(self):
@@ -377,7 +405,8 @@ class Folder(File):
@property
def subfolders(self):
if self._subfolders is None:
subfolders = [p for p in self.path.glob("*") if not p.is_symlink() and p.is_dir()]
with os.scandir(self.path) as iter:
subfolders = [p for p in iter if not p.is_symlink() and p.is_dir()]
self._subfolders = [self.__class__(p) for p in subfolders]
return self._subfolders

@@ -408,10 +437,11 @@ def get_files(path, fileclasses=[File]):
assert all(issubclass(fileclass, File) for fileclass in fileclasses)
try:
result = []
for path in path.glob("*"):
file = get_file(path, fileclasses=fileclasses)
with os.scandir(path) as iter:
for item in iter:
file = get_file(item, fileclasses=fileclasses)
if file is not None:
result.append(file)
return result
except EnvironmentError:
except OSError:
raise InvalidPath(path)
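Editor's note on PARTIAL_OFFSET_SIZE above: the partial digest reads 0x4000 bytes starting at offset 0x4000, skipping typical header and metadata regions. A sketch of the idea; the xxh128 variant of xxhash is an assumption here, since the hunk only shows the md5 fallback, not which xxhash function the project picks:

    import hashlib

    try:
        import xxhash
        hasher = xxhash.xxh128  # assumed variant; the repo's actual choice is elided above
    except ImportError:
        hasher = hashlib.md5

    PARTIAL_OFFSET_SIZE = (0x4000, 0x4000)  # skip 16 KiB of headers, hash the next 16 KiB

    def partial_digest(filename):
        # Cheap pre-filter: two files with different partial digests cannot be
        # identical, so full-file hashing is only needed when these collide.
        with open(filename, "rb") as fp:
            fp.seek(PARTIAL_OFFSET_SIZE[0])
            data = fp.read(PARTIAL_OFFSET_SIZE[1])
        return hasher(data).digest()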
@@ -7,7 +7,7 @@
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon.gui.base import GUIObject
from .base import DupeGuruGUIObject
from core.gui.base import DupeGuruGUIObject

class DetailsPanel(GUIObject, DupeGuruGUIObject):

@@ -8,8 +8,8 @@
from hscommon.gui.tree import Tree, Node

from ..directories import DirectoryState
from .base import DupeGuruGUIObject
from core.directories import DirectoryState
from core.gui.base import DupeGuruGUIObject

STATE_ORDER = [DirectoryState.NORMAL, DirectoryState.REFERENCE, DirectoryState.EXCLUDED]

@@ -5,7 +5,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .exclude_list_table import ExcludeListTable
from core.gui.exclude_list_table import ExcludeListTable
from core.exclude import has_sep
from os import sep
import logging

@@ -2,7 +2,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import DupeGuruGUIObject
from core.gui.base import DupeGuruGUIObject
from hscommon.gui.table import GUITable, Row
from hscommon.gui.column import Column, Columns
from hscommon.trans import trget

@@ -6,7 +6,7 @@
# http://www.gnu.org/licenses/gpl-3.0.html

from hscommon.trans import tr
from .ignore_list_table import IgnoreListTable
from core.gui.ignore_list_table import IgnoreListTable

class IgnoreListDialog:

@@ -8,7 +8,7 @@
from hscommon import desktop

from .problem_table import ProblemTable
from core.gui.problem_table import ProblemTable

class ProblemDialog:

@@ -11,7 +11,7 @@ from operator import attrgetter
from hscommon.gui.table import GUITable, Row
from hscommon.gui.column import Columns

from .base import DupeGuruGUIObject
from core.gui.base import DupeGuruGUIObject

class DupeRow(Row):

@@ -6,7 +6,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import DupeGuruGUIObject
from core.gui.base import DupeGuruGUIObject

class StatsLabel(DupeGuruGUIObject):

@@ -1 +1 @@
from . import fs, prioritize, result_table, scanner # noqa
from core.me import fs, prioritize, result_table, scanner # noqa

@@ -1,8 +1,7 @@
from . import ( # noqa
from core.pe import ( # noqa
block,
cache,
exif,
iphoto_plist,
matchblock,
matchexif,
photo,

@@ -6,7 +6,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ._block import NoBlocksError, DifferentBlockCountError, avgdiff, getblocks2 # NOQA
from core.pe._block import NoBlocksError, DifferentBlockCountError, avgdiff, getblocks2 # NOQA

# Converted to C
# def getblock(image):
13 core/pe/block.pyi Normal file
@@ -0,0 +1,13 @@
from typing import Tuple, List, Union, Sequence

_block = Tuple[int, int, int]

class NoBlocksError(Exception): ... # noqa: E302, E701
class DifferentBlockCountError(Exception): ... # noqa E701

def getblock(image: object) -> Union[_block, None]: ... # noqa: E302
def getblocks2(image: object, block_count_per_side: int) -> Union[List[_block], None]: ...
def diff(first: _block, second: _block) -> int: ...
def avgdiff( # noqa: E302
first: Sequence[_block], second: Sequence[_block], limit: int = 768, min_iterations: int = 1
) -> Union[int, None]: ...
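Editor's note: the stub above pins down the C module's signatures. A hedged sketch of how a caller might combine them; the helper name, block count, and threshold are illustrative, not dupeGuru's actual matching logic:

    from core.pe import block  # C module; signatures as in the stub above

    def pictures_match(image1, image2, block_count_per_side=15, threshold=75):
        # block_count_per_side and threshold are hypothetical example values.
        blocks1 = block.getblocks2(image1, block_count_per_side)
        blocks2 = block.getblocks2(image2, block_count_per_side)
        if not blocks1 or not blocks2:  # getblocks2 may return None per the stub
            return False
        try:
            avg = block.avgdiff(blocks1, blocks2, limit=threshold)
        except (block.NoBlocksError, block.DifferentBlockCountError):
            return False
        # avgdiff may return None per the stub; treat that as "no match".
        return avg is not None and avg <= threshold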
@@ -4,24 +4,13 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ._cache import string_to_colors # noqa
from core.pe._cache import bytes_to_colors # noqa

def colors_to_string(colors):
"""Transform the 3 sized tuples 'colors' into a hex string.
def colors_to_bytes(colors):
"""Transform the 3 sized tuples 'colors' into a bytes string.

[(0,100,255)] --> 0064ff
[(1,2,3),(4,5,6)] --> 010203040506
[(0,100,255)] --> b'\x00d\xff'
[(1,2,3),(4,5,6)] --> b'\x01\x02\x03\x04\x05\x06'
"""
return "".join("%02x%02x%02x" % (r, g, b) for r, g, b in colors)

# This function is an important bottleneck of dupeGuru PE. It has been converted to C.
# def string_to_colors(s):
# """Transform the string 's' in a list of 3 sized tuples.
# """
# result = []
# for i in xrange(0, len(s), 6):
# number = int(s[i:i+6], 16)
# result.append((number >> 16, (number >> 8) & 0xff, number & 0xff))
# return result
return b"".join(map(bytes, colors))
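Editor's note: colors_to_bytes above packs each (r, g, b) tuple into three raw bytes, halving storage versus the old 6-hex-digit-per-color encoding. A pure-Python round trip of the same encoding; the real decoder, bytes_to_colors, lives in the C module core.pe._cache, so the decoder below is only a stand-in:

    def colors_to_bytes(colors):
        # bytes((1, 2, 3)) -> b'\x01\x02\x03'; joining packs the whole list.
        return b"".join(map(bytes, colors))

    def bytes_to_colors_py(s):
        # Stand-in for the C decoder: three bytes per color tuple.
        return [(s[i], s[i + 1], s[i + 2]) for i in range(0, len(s), 3)]

    blocks = [(0, 100, 255), (1, 2, 3)]
    assert colors_to_bytes(blocks) == b"\x00d\xff\x01\x02\x03"
    assert bytes_to_colors_py(colors_to_bytes(blocks)) == blocks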
6 core/pe/cache.pyi Normal file
@@ -0,0 +1,6 @@
from typing import Union, Tuple, List

_block = Tuple[int, int, int]

def colors_to_bytes(colors: List[_block]) -> bytes: ... # noqa: E302
def bytes_to_colors(s: bytes) -> Union[List[_block], None]: ...
@@ -1,141 +0,0 @@
# Copyright 2016 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import os
import os.path as op
import shelve
import tempfile
from collections import namedtuple

from .cache import string_to_colors, colors_to_string

def wrap_path(path):
return "path:{}".format(path)

def unwrap_path(key):
return key[5:]

def wrap_id(path):
return "id:{}".format(path)

def unwrap_id(key):
return int(key[3:])

CacheRow = namedtuple("CacheRow", "id path blocks mtime")

class ShelveCache:
"""A class to cache picture blocks in a shelve backend."""

def __init__(self, db=None, readonly=False):
self.istmp = db is None
if self.istmp:
self.dtmp = tempfile.mkdtemp()
self.ftmp = db = op.join(self.dtmp, "tmpdb")
flag = "r" if readonly else "c"
self.shelve = shelve.open(db, flag)
self.maxid = self._compute_maxid()

def __contains__(self, key):
return wrap_path(key) in self.shelve

def __delitem__(self, key):
row = self.shelve[wrap_path(key)]
del self.shelve[wrap_path(key)]
del self.shelve[wrap_id(row.id)]

def __getitem__(self, key):
if isinstance(key, int):
skey = self.shelve[wrap_id(key)]
else:
skey = wrap_path(key)
return string_to_colors(self.shelve[skey].blocks)

def __iter__(self):
return (unwrap_path(k) for k in self.shelve if k.startswith("path:"))

def __len__(self):
return sum(1 for k in self.shelve if k.startswith("path:"))

def __setitem__(self, path_str, blocks):
blocks = colors_to_string(blocks)
if op.exists(path_str):
mtime = int(os.stat(path_str).st_mtime)
else:
mtime = 0
if path_str in self:
rowid = self.shelve[wrap_path(path_str)].id
else:
rowid = self._get_new_id()
row = CacheRow(rowid, path_str, blocks, mtime)
self.shelve[wrap_path(path_str)] = row
self.shelve[wrap_id(rowid)] = wrap_path(path_str)

def _compute_maxid(self):
return max((unwrap_id(k) for k in self.shelve if k.startswith("id:")), default=1)

def _get_new_id(self):
self.maxid += 1
return self.maxid

def clear(self):
self.shelve.clear()

def close(self):
if self.shelve is not None:
self.shelve.close()
if self.istmp:
os.remove(self.ftmp)
os.rmdir(self.dtmp)
self.shelve = None

def filter(self, func):
to_delete = [key for key in self if not func(key)]
for key in to_delete:
del self[key]

def get_id(self, path):
if path in self:
return self.shelve[wrap_path(path)].id
else:
raise ValueError(path)

def get_multiple(self, rowids):
for rowid in rowids:
try:
skey = self.shelve[wrap_id(rowid)]
except KeyError:
continue
yield (rowid, string_to_colors(self.shelve[skey].blocks))

def purge_outdated(self):
"""Go through the cache and purge outdated records.

A record is outdated if the picture doesn't exist or if its mtime is greater than the one in
the db.
"""
todelete = []
for path in self:
row = self.shelve[wrap_path(path)]
if row.mtime and op.exists(path):
picture_mtime = os.stat(path).st_mtime
if int(picture_mtime) <= row.mtime:
# not outdated
continue
todelete.append(path)
for path in todelete:
try:
del self[path]
except KeyError:
# I have no idea why a KeyError sometimes happen, but it does, as we can see in
# #402 and #439. I don't think it hurts to silently ignore the error, so that's
# what we do
pass
@@ -9,12 +9,24 @@ import os.path as op
import logging
import sqlite3 as sqlite

from .cache import string_to_colors, colors_to_string
from core.pe.cache import bytes_to_colors, colors_to_bytes


class SqliteCache:
    """A class to cache picture blocks in a sqlite backend."""

    schema_version = 2
    schema_version_description = "Added blocks for all 8 orientations."

    create_table_query = (
        "CREATE TABLE IF NOT EXISTS "
        "pictures(path TEXT, mtime_ns INTEGER, blocks BLOB, blocks2 BLOB, blocks3 BLOB, "
        "blocks4 BLOB, blocks5 BLOB, blocks6 BLOB, blocks7 BLOB, blocks8 BLOB)"
    )
    create_index_query = "CREATE INDEX IF NOT EXISTS idx_path on pictures (path)"
    drop_table_query = "DROP TABLE IF EXISTS pictures"
    drop_index_query = "DROP INDEX IF EXISTS idx_path"

    def __init__(self, db=":memory:", readonly=False):
        # readonly is not used in the sqlite version of the cache
        self.dbname = db
@@ -35,12 +47,20 @@ class SqliteCache:
    # Optimized
    def __getitem__(self, key):
        if isinstance(key, int):
            sql = "select blocks from pictures where rowid = ?"
            sql = (
                "select blocks, blocks2, blocks3, blocks4, blocks5, blocks6, blocks7, blocks8 "
                "from pictures "
                "where rowid = ?"
            )
        else:
            sql = "select blocks from pictures where path = ?"
        result = self.con.execute(sql, [key]).fetchone()
        if result:
            result = string_to_colors(result[0])
            sql = (
                "select blocks, blocks2, blocks3, blocks4, blocks5, blocks6, blocks7, blocks8 "
                "from pictures "
                "where path = ?"
            )
        blocks = self.con.execute(sql, [key]).fetchone()
        if blocks:
            result = [bytes_to_colors(block) for block in blocks]
            return result
        else:
            raise KeyError(key)
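As a usage sketch (assuming a blocks value shaped like the tests later in this diff, i.e. a list of 8 per-orientation block lists), the cache can be addressed either by path or by rowid:

cache = SqliteCache(":memory:")
cache["/pics/a.jpg"] = [[(0, 0, 0)]] * 8  # one block list per EXIF orientation
assert cache["/pics/a.jpg"] == cache[cache.get_id("/pics/a.jpg")]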
@@ -56,35 +76,33 @@ class SqliteCache:
        return result[0][0]

    def __setitem__(self, path_str, blocks):
        blocks = colors_to_string(blocks)
        blocks = [colors_to_bytes(block) for block in blocks]
        if op.exists(path_str):
            mtime = int(os.stat(path_str).st_mtime)
        else:
            mtime = 0
        if path_str in self:
            sql = "update pictures set blocks = ?, mtime = ? where path = ?"
            sql = (
                "update pictures set blocks = ?, blocks2 = ?, blocks3 = ?, blocks4 = ?, blocks5 = ?, blocks6 = ?, "
                "blocks7 = ?, blocks8 = ?, mtime_ns = ? "
                "where path = ?"
            )
        else:
            sql = "insert into pictures(blocks,mtime,path) values(?,?,?)"
            sql = (
                "insert into pictures(blocks,blocks2,blocks3,blocks4,blocks5,blocks6,blocks7,blocks8,mtime_ns,path) "
                "values(?,?,?,?,?,?,?,?,?,?)"
            )
        try:
            self.con.execute(sql, [blocks, mtime, path_str])
            self.con.execute(sql, blocks + [mtime, path_str])
        except sqlite.OperationalError:
            logging.warning("Picture cache could not set value for key %r", path_str)
        except sqlite.DatabaseError as e:
            logging.warning("DatabaseError while setting value for key %r: %s", path_str, str(e))

    def _create_con(self, second_try=False):
        def create_tables():
            logging.debug("Creating picture cache tables.")
            self.con.execute("drop table if exists pictures")
            self.con.execute("drop index if exists idx_path")
            self.con.execute("create table pictures(path TEXT, mtime INTEGER, blocks TEXT)")
            self.con.execute("create index idx_path on pictures (path)")

        self.con = sqlite.connect(self.dbname, isolation_level=None)
        try:
            self.con.execute("select path, mtime, blocks from pictures where 1=2")
        except sqlite.OperationalError:  # new db
            create_tables()
            self.con = sqlite.connect(self.dbname, isolation_level=None)
            self._check_upgrade()
        except sqlite.DatabaseError as e:  # corrupted db
            if second_try:
                raise  # Something really strange is happening
@@ -93,6 +111,25 @@ class SqliteCache:
            os.remove(self.dbname)
            self._create_con(second_try=True)
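The corrupted-database branch above implements a retry-once pattern: delete the bad file and rebuild, but give up if the rebuilt database fails too. A generic sketch of the same idea (opener is a hypothetical connection factory):

import os
import sqlite3 as sqlite

def connect_with_recovery(dbname, opener, second_try=False):
    try:
        return opener(dbname)
    except sqlite.DatabaseError:
        if second_try:
            raise  # the rebuilt db failed too; something really strange is happening
        os.remove(dbname)
        return connect_with_recovery(dbname, opener, second_try=True)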
    def _check_upgrade(self) -> None:
        with self.con as conn:
            has_schema = conn.execute(
                "SELECT NAME FROM sqlite_master WHERE type='table' AND name='schema_version'"
            ).fetchall()
            version = None
            if has_schema:
                version = conn.execute("SELECT version FROM schema_version ORDER BY version DESC").fetchone()[0]
            else:
                conn.execute("CREATE TABLE schema_version (version int PRIMARY KEY, description TEXT)")
            if version != self.schema_version:
                conn.execute(self.drop_table_query)
                conn.execute(
                    "INSERT OR REPLACE INTO schema_version VALUES (:version, :description)",
                    {"version": self.schema_version, "description": self.schema_version_description},
                )
                conn.execute(self.create_table_query)
                conn.execute(self.create_index_query)
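_check_upgrade is a compact schema-migration gate: a one-row schema_version table is compared against the class constant, and on any mismatch the pictures table is simply dropped and recreated, which is acceptable because cached blocks can always be recomputed. The same pattern in isolation (table names and SQL are placeholders mirroring the code above):

def ensure_schema(conn, wanted_version, description, create_table_sql, create_index_sql):
    conn.execute("CREATE TABLE IF NOT EXISTS schema_version (version int PRIMARY KEY, description TEXT)")
    row = conn.execute("SELECT version FROM schema_version ORDER BY version DESC").fetchone()
    if row is None or row[0] != wanted_version:
        # Version mismatch: rebuild the cache table from scratch.
        conn.execute("DROP TABLE IF EXISTS pictures")
        conn.execute(
            "INSERT OR REPLACE INTO schema_version VALUES (?, ?)",
            (wanted_version, description),
        )
        conn.execute(create_table_sql)
        conn.execute(create_index_sql)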
    def clear(self):
        self.close()
        if self.dbname != ":memory:":
@@ -118,9 +155,28 @@ class SqliteCache:
            raise ValueError(path)

    def get_multiple(self, rowids):
        sql = "select rowid, blocks from pictures where rowid in (%s)" % ",".join(map(str, rowids))
        ids = ",".join(map(str, rowids))
        sql = (
            "select rowid, blocks, blocks2, blocks3, blocks4, blocks5, blocks6, blocks7, blocks8 "
            f"from pictures where rowid in ({ids})"
        )
        cur = self.con.execute(sql)
        return ((rowid, string_to_colors(blocks)) for rowid, blocks in cur)
        return (
            (
                rowid,
                [
                    bytes_to_colors(blocks),
                    bytes_to_colors(blocks2),
                    bytes_to_colors(blocks3),
                    bytes_to_colors(blocks4),
                    bytes_to_colors(blocks5),
                    bytes_to_colors(blocks6),
                    bytes_to_colors(blocks7),
                    bytes_to_colors(blocks8),
                ],
            )
            for rowid, blocks, blocks2, blocks3, blocks4, blocks5, blocks6, blocks7, blocks8 in cur
        )
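get_multiple interpolates the rowids directly into the IN clause, which is safe here because they are integers coming from the cache itself. If the keys could ever be externally supplied strings, a placeholder-based variant would be the cautious choice (a sketch, not the project's code):

placeholders = ",".join("?" * len(rowids))
sql = f"select rowid, blocks from pictures where rowid in ({placeholders})"
cur = con.execute(sql, list(rowids))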
    def purge_outdated(self):
        """Go through the cache and purge outdated records.
@@ -129,12 +185,12 @@ class SqliteCache:
        the db.
        """
        todelete = []
        sql = "select rowid, path, mtime from pictures"
        sql = "select rowid, path, mtime_ns from pictures"
        cur = self.con.execute(sql)
        for rowid, path_str, mtime in cur:
            if mtime and op.exists(path_str):
        for rowid, path_str, mtime_ns in cur:
            if mtime_ns and op.exists(path_str):
                picture_mtime = os.stat(path_str).st_mtime
                if int(picture_mtime) <= mtime:
                if int(picture_mtime) <= mtime_ns:
                    # not outdated
                    continue
            todelete.append(rowid)
@@ -1,33 +0,0 @@
# Created By: Virgil Dupras
# Created On: 2014-03-15
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import plistlib


class IPhotoPlistParser(plistlib._PlistParser):
    """A parser for iPhoto plists.

    iPhoto plists tend to be malformed, so we have to subclass the built-in parser to be a bit more
    lenient.
    """

    def __init__(self):
        plistlib._PlistParser.__init__(self, use_builtin_types=True, dict_type=dict)
        # For debugging purposes, we remember the last bit of data to be analyzed so that we can
        # log it in case of an exception
        self.lastdata = ""

    def get_data(self):
        self.lastdata = plistlib._PlistParser.get_data(self)
        return self.lastdata

    def end_integer(self):
        try:
            self.add_object(int(self.get_data()))
        except ValueError:
            self.add_object(0)
@@ -15,7 +15,8 @@ from hscommon.trans import tr
from hscommon.jobprogress import job

from core.engine import Match
from .block import avgdiff, DifferentBlockCountError, NoBlocksError
from core.pe.block import avgdiff, DifferentBlockCountError, NoBlocksError
from core.pe.cache_sqlite import SqliteCache

# OPTIMIZATION NOTES:
# The bottleneck of the matching phase is CPU, which is why we use multiprocessing. However, another
@@ -27,7 +28,7 @@ from .block import avgdiff, DifferentBlockCountError, NoBlocksError
# to files in other chunks. So chunkifying doesn't save us any actual comparison, but the advantage
# is that instead of reading blocks from disk number_of_files**2 times, we read it
# number_of_files*number_of_chunks times.
# Determining the right chunk size is tricky, bceause if it's too big, too many blocks will be in
# Determining the right chunk size is tricky, because if it's too big, too many blocks will be in
# memory at the same time and we might end up with memory trashing, which is awfully slow. So,
# because our *real* bottleneck is CPU, the chunk size must simply be enough so that the CPU isn't
# starved by Disk IOs.
@@ -50,17 +51,10 @@ except Exception:


def get_cache(cache_path, readonly=False):
    if cache_path.endswith("shelve"):
        from .cache_shelve import ShelveCache

        return ShelveCache(cache_path, readonly=readonly)
    else:
        from .cache_sqlite import SqliteCache

        return SqliteCache(cache_path, readonly=readonly)

def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
def prepare_pictures(pictures, cache_path, with_dimensions, match_rotated, j=job.nulljob):
    # The MemoryError handlers in there use logging without first caring about whether or not
    # there is enough memory left to carry on the operation because it is assumed that the
    # MemoryError happens when trying to read an image file, which is freed from memory by the
@@ -78,16 +72,21 @@ def prepare_pictures(pictures, cache_path, with_dimensions, j=job.nulljob):
                # entry in iPhoto library.
                logging.warning("We have a picture with a null path here")
                continue
            picture.unicode_path = str(picture.path)
            logging.debug("Analyzing picture at %s", picture.unicode_path)
            if with_dimensions:
                picture.dimensions  # pre-read dimensions
            try:
                if picture.unicode_path not in cache:
                    blocks = picture.get_blocks(BLOCK_COUNT_PER_SIDE)
                if picture.unicode_path not in cache or (
                    match_rotated and any(block == [] for block in cache[picture.unicode_path])
                ):
                    if match_rotated:
                        blocks = [picture.get_blocks(BLOCK_COUNT_PER_SIDE, orientation) for orientation in range(1, 9)]
                    else:
                        blocks = [[]] * 8
                        blocks[max(picture.get_orientation() - 1, 0)] = picture.get_blocks(BLOCK_COUNT_PER_SIDE)
                    cache[picture.unicode_path] = blocks
                prepared.append(picture)
            except (IOError, ValueError) as e:
            except (OSError, ValueError) as e:
                logging.warning(str(e))
            except MemoryError:
                logging.warning(
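prepare_pictures trades CPU for cache coverage depending on match_rotated: either all 8 EXIF orientation variants are computed up front, or only the slot for the picture's own orientation is filled and the other seven are left as [] markers that a later rotated scan can detect and backfill. Restated on its own, using the names above (a sketch, not the project's code):

EXIF_ORIENTATIONS = range(1, 9)  # 1 = upright, 2-8 = mirrored/rotated variants

def blocks_for_cache(picture, block_count, match_rotated):
    if match_rotated:
        # Pay for all eight variants up front.
        return [picture.get_blocks(block_count, o) for o in EXIF_ORIENTATIONS]
    # Otherwise fill only the picture's own orientation slot; orientation 0
    # (unknown) falls back to slot 0.
    blocks = [[]] * 8
    blocks[max(picture.get_orientation() - 1, 0)] = picture.get_blocks(block_count)
    return blocks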
@@ -125,13 +124,13 @@ def get_match(first, second, percentage):
    return Match(first, second, percentage)


def async_compare(ref_ids, other_ids, dbname, threshold, picinfo):
def async_compare(ref_ids, other_ids, dbname, threshold, picinfo, match_rotated=False):
    # The list of ids in ref_ids has to be compared to the list of ids in other_ids. other_ids
    # can be None. In this case, ref_ids has to be compared with itself
    # picinfo is a dictionary {pic_id: (dimensions, is_ref)}
    cache = get_cache(dbname, readonly=True)
    limit = 100 - threshold
    ref_pairs = list(cache.get_multiple(ref_ids))
    ref_pairs = list(cache.get_multiple(ref_ids))  # (rowid, [b, b2, ..., b8])
    if other_ids is not None:
        other_pairs = list(cache.get_multiple(other_ids))
        comparisons_to_do = [(r, o) for r in ref_pairs for o in other_pairs]
@@ -144,22 +143,35 @@ def async_compare(ref_ids, other_ids, dbname, threshold, picinfo):
        if ref_is_ref and other_is_ref:
            continue
        if ref_dimensions != other_dimensions:
            if match_rotated:
                rotated_ref_dimensions = (ref_dimensions[1], ref_dimensions[0])
                if rotated_ref_dimensions != other_dimensions:
                    continue
            else:
                continue

        orientation_range = 1
        if match_rotated:
            orientation_range = 8

        for orientation_ref in range(orientation_range):
            try:
                diff = avgdiff(ref_blocks, other_blocks, limit, MIN_ITERATIONS)
                diff = avgdiff(ref_blocks[orientation_ref], other_blocks[0], limit, MIN_ITERATIONS)
                percentage = 100 - diff
            except (DifferentBlockCountError, NoBlocksError):
                percentage = 0
            if percentage >= threshold:
                results.append((ref_id, other_id, percentage))
                break

    cache.close()
    return results
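async_compare is built to run inside worker processes: each worker reopens the cache read-only by path (sqlite connections cannot be shared across processes) and returns plain tuples. A hedged sketch of the fan-out that getmatches performs with multiprocessing, with the chunk pairing simplified (fan_out and chunk_pairs are illustrative names):

import multiprocessing

def fan_out(chunk_pairs, cache_path, threshold, picinfo, match_rotated):
    with multiprocessing.Pool() as pool:
        jobs = [
            pool.apply_async(async_compare, (ref_ids, other_ids, cache_path, threshold, picinfo, match_rotated))
            for ref_ids, other_ids in chunk_pairs
        ]
        # Each job returns (ref_id, other_id, percentage) tuples.
        return [match for job in jobs for match in job.get()]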
def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljob):
def getmatches(pictures, cache_path, threshold, match_scaled=False, match_rotated=False, j=job.nulljob):
    def get_picinfo(p):
        if match_scaled:
            return (None, p.is_ref)
            return ((None, None), p.is_ref)
        else:
            return (p.dimensions, p.is_ref)

@@ -181,7 +193,7 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljo
        j.set_progress(comparison_count, progress_msg)

    j = j.start_subjob([3, 7])
    pictures = prepare_pictures(pictures, cache_path, with_dimensions=not match_scaled, j=j)
    pictures = prepare_pictures(pictures, cache_path, not match_scaled, match_rotated, j=j)
    j = j.start_subjob([9, 1], tr("Preparing for matching"))
    cache = get_cache(cache_path)
    id2picture = {}
@@ -211,7 +223,7 @@ def getmatches(pictures, cache_path, threshold, match_scaled=False, j=job.nulljo
            picinfo.update({p.cache_id: get_picinfo(p) for p in other_chunk})
        else:
            other_ids = None
        args = (ref_ids, other_ids, cache_path, threshold, picinfo)
        args = (ref_ids, other_ids, cache_path, threshold, picinfo, match_rotated)
        async_results.append(pool.apply_async(async_compare, args))
        collect_results()
    collect_results(collect_all=True)
@@ -2,58 +2,36 @@
 * Created On: 2010-01-30
 * Copyright 2014 Hardcoded Software (http://www.hardcoded.net)
 *
 * This software is licensed under the "BSD" License as described in the "LICENSE" file,
 * which should be included with this package. The terms are also available at
 * http://www.hardcoded.net/licenses/bsd_license
 * This software is licensed under the "BSD" License as described in the
 * "LICENSE" file, which should be included with this package. The terms are
 * also available at http://www.hardcoded.net/licenses/bsd_license
 */

#include "common.h"

/* I know that there strtol out there, but it requires a pointer to
 * a char, which would in turn require me to buffer my chars around,
 * making the whole process slower.
 */
static long
xchar_to_long(char c)
{
    if ((c >= 48) && (c <= 57)) { /* 0-9 */
        return c - 48;
    }
    else if ((c >= 65) && (c <= 70)) { /* A-F */
        return c - 55;
    }
    else if ((c >= 97) && (c <= 102)) { /* a-f */
        return c - 87;
    }
    return 0;
}

static PyObject*
cache_string_to_colors(PyObject *self, PyObject *args)
{
    char *s;
    Py_ssize_t char_count, color_count, i;
static PyObject *cache_bytes_to_colors(PyObject *self, PyObject *args) {
    char *y;
    Py_ssize_t char_count, i, color_count;
    PyObject *result;
    unsigned long r, g, b;
    Py_ssize_t ci;
    PyObject *color_tuple;

    if (!PyArg_ParseTuple(args, "s#", &s, &char_count)) {
    if (!PyArg_ParseTuple(args, "y#", &y, &char_count)) {
        return NULL;
    }

    color_count = (char_count / 6);
    color_count = char_count / 3;
    result = PyList_New(color_count);
    if (result == NULL) {
        return NULL;
    }

    for (i=0; i<color_count; i++) {
        long r, g, b;
        Py_ssize_t ci;
        PyObject *color_tuple;

        ci = i * 6;
        r = (xchar_to_long(s[ci]) << 4) + xchar_to_long(s[ci+1]);
        g = (xchar_to_long(s[ci+2]) << 4) + xchar_to_long(s[ci+3]);
        b = (xchar_to_long(s[ci+4]) << 4) + xchar_to_long(s[ci+5]);
    for (i = 0; i < color_count; i++) {
        ci = i * 3;
        r = (unsigned char)y[ci];
        g = (unsigned char)y[ci + 1];
        b = (unsigned char)y[ci + 2];

        color_tuple = inttuple(3, r, g, b);
        if (color_tuple == NULL) {
@@ -67,13 +45,12 @@ cache_string_to_colors(PyObject *self, PyObject *args)
}

static PyMethodDef CacheMethods[] = {
    {"string_to_colors", cache_string_to_colors, METH_VARARGS,
     "Transform the string 's' in a list of 3 sized tuples."},
    {"bytes_to_colors", cache_bytes_to_colors, METH_VARARGS,
     "Transform the bytes 's' into a list of 3 sized tuples."},
    {NULL, NULL, 0, NULL} /* Sentinel */
};

static struct PyModuleDef CacheDef = {
    PyModuleDef_HEAD_INIT,
static struct PyModuleDef CacheDef = {PyModuleDef_HEAD_INIT,
    "_cache",
    NULL,
    -1,
@@ -81,12 +58,9 @@ static struct PyModuleDef CacheDef = {
    NULL,
    NULL,
    NULL,
    NULL
};
    NULL};

PyObject *
PyInit__cache(void)
{
PyObject *PyInit__cache(void) {
    PyObject *m = PyModule_Create(&CacheDef);
    if (m == NULL) {
        return NULL;
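The module's color codec moves from 6 hex characters per color to 3 raw bytes per color. The compiled function above decodes the new format; for reference, an equivalent pure-Python sketch of both directions (the real colors_to_bytes/bytes_to_colors live in core.pe.cache, so this is illustrative only):

def colors_to_bytes(colors):
    # [(r, g, b), ...] -> raw bytes, one byte per channel
    return bytes(channel for color in colors for channel in color)

def bytes_to_colors(raw):
    # Inverse: each complete 3-byte group becomes one (r, g, b) tuple;
    # a trailing incomplete group is dropped, matching the tests later in this diff.
    return [tuple(raw[i * 3:i * 3 + 3]) for i in range(len(raw) // 3)]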
@@ -32,7 +32,7 @@ PyObject* inttuple(int n, ...)
    result = PyTuple_New(n);

    for (i=0; i<n; i++) {
        pnumber = PyLong_FromLong(va_arg(numbers, long));
        pnumber = PyLong_FromUnsignedLong(va_arg(numbers, long));
        if (pnumber == NULL) {
            Py_DECREF(result);
            return NULL;
@@ -9,7 +9,7 @@ from hscommon.util import get_file_ext, format_size

from core.util import format_timestamp, format_perc, format_dupe_count
from core import fs
from . import exif
from core.pe import exif

# This global value is set by the platform-specific subclasser of the Photo base class
PLAT_SPECIFIC_PHOTO_CLASS = None
@@ -29,7 +29,7 @@ class Photo(fs.File):
    __slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys())

    # These extensions are supported on all platforms
    HANDLED_EXTS = {"png", "jpg", "jpeg", "gif", "bmp", "tiff", "tif"}
    HANDLED_EXTS = {"png", "jpg", "jpeg", "gif", "bmp", "tiff", "tif", "webp"}

    def _plat_get_dimensions(self):
        raise NotImplementedError()
@@ -37,7 +37,7 @@ class Photo(fs.File):
    def _plat_get_blocks(self, block_count_per_side, orientation):
        raise NotImplementedError()

    def _get_orientation(self):
    def get_orientation(self):
        if not hasattr(self, "_cached_orientation"):
            try:
                with self.path.open("rb") as fp:
@@ -95,10 +95,13 @@ class Photo(fs.File):
        fs.File._read_info(self, field)
        if field == "dimensions":
            self.dimensions = self._plat_get_dimensions()
            if self._get_orientation() in {5, 6, 7, 8}:
            if self.get_orientation() in {5, 6, 7, 8}:
                self.dimensions = (self.dimensions[1], self.dimensions[0])
        elif field == "exif_timestamp":
            self.exif_timestamp = self._get_exif_timestamp()

    def get_blocks(self, block_count_per_side):
        return self._plat_get_blocks(block_count_per_side, self._get_orientation())
    def get_blocks(self, block_count_per_side, orientation: int = None):
        if orientation is None:
            return self._plat_get_blocks(block_count_per_side, self.get_orientation())
        else:
            return self._plat_get_blocks(block_count_per_side, orientation)
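A short usage sketch of the widened get_blocks() signature (photo stands for any Photo subclass instance; BLOCK_COUNT_PER_SIDE as in matchblock):

own = photo.get_blocks(BLOCK_COUNT_PER_SIDE)  # picture's own EXIF orientation
forced = photo.get_blocks(BLOCK_COUNT_PER_SIDE, orientation=6)  # force one of the 8 EXIF codes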
@@ -8,12 +8,13 @@ from hscommon.trans import tr

from core.scanner import Scanner, ScanType, ScanOption

from . import matchblock, matchexif
from core.pe import matchblock, matchexif


class ScannerPE(Scanner):
    cache_path = None
    match_scaled = False
    match_rotated = False

    @staticmethod
    def get_scan_options():
@@ -29,6 +30,7 @@ class ScannerPE(Scanner):
                cache_path=self.cache_path,
                threshold=self.min_match_percentage,
                match_scaled=self.match_scaled,
                match_rotated=self.match_rotated,
                j=j,
            )
        elif self.scan_type == ScanType.EXIFTIMESTAMP:
@@ -43,7 +43,7 @@ class Criterion:

    @property
    def display(self):
        return "{} ({})".format(self.category.NAME, self.display_value)
        return f"{self.category.NAME} ({self.display_value})"


class ValueListCategory(CriterionCategory):
@@ -96,6 +96,8 @@ class FilenameCategory(CriterionCategory):
    DOESNT_END_WITH_NUMBER = 1
    LONGEST = 2
    SHORTEST = 3
    LONGEST_PATH = 4
    SHORTEST_PATH = 5

    def format_criterion_value(self, value):
        return {
@@ -103,6 +105,8 @@ class FilenameCategory(CriterionCategory):
            self.DOESNT_END_WITH_NUMBER: tr("Doesn't end with number"),
            self.LONGEST: tr("Longest"),
            self.SHORTEST: tr("Shortest"),
            self.LONGEST_PATH: tr("Longest Path"),
            self.SHORTEST_PATH: tr("Shortest Path"),
        }[value]

    def extract_value(self, dupe):
@@ -116,6 +120,10 @@ class FilenameCategory(CriterionCategory):
                return 0 if ends_with_digit else 1
            else:
                return 1 if ends_with_digit else 0
        elif crit_value == self.LONGEST_PATH:
            return len(str(dupe.folder_path)) * -1
        elif crit_value == self.SHORTEST_PATH:
            return len(str(dupe.folder_path))
        else:
            value = len(value)
            if crit_value == self.LONGEST:
@@ -130,6 +138,8 @@ class FilenameCategory(CriterionCategory):
                self.DOESNT_END_WITH_NUMBER,
                self.LONGEST,
                self.SHORTEST,
                self.LONGEST_PATH,
                self.SHORTEST_PATH,
            ]
        ]
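LONGEST_PATH relies on a small sort-key trick: returning the negated length means a plain ascending sort ranks the longest path first, so one comparator serves both directions. A tiny illustration:

paths = ["/a/b/c/d.jpg", "/x.jpg", "/a/b.jpg"]
# Ascending sort on the negated length == descending sort on the length.
longest_first = sorted(paths, key=lambda p: len(p) * -1)
assert longest_first[0] == "/a/b/c/d.jpg"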
@@ -10,6 +10,7 @@ import logging
import re
import os
import os.path as op
from errno import EISDIR, EACCES
from xml.etree import ElementTree as ET

from hscommon.jobprogress.job import nulljob
@@ -17,8 +18,8 @@ from hscommon.conflict import get_conflicted_name
from hscommon.util import flatten, nonone, FileOrPath, format_size
from hscommon.trans import tr

from . import engine
from .markable import Markable
from core import engine
from core.markable import Markable


class Results(Markable):
@@ -191,7 +192,7 @@ class Results(Markable):
        self.__filters.append(filter_str)
        if self.__filtered_dupes is None:
            self.__filtered_dupes = flatten(g[:] for g in self.groups)
        self.__filtered_dupes = set(dupe for dupe in self.__filtered_dupes if filter_re.search(str(dupe.path)))
        self.__filtered_dupes = {dupe for dupe in self.__filtered_dupes if filter_re.search(str(dupe.path))}
        filtered_groups = set()
        for dupe in self.__filtered_dupes:
            filtered_groups.add(self.get_group_of_duplicate(dupe))
@@ -301,7 +302,7 @@ class Results(Markable):
            try:
                func(dupe)
                to_remove.append(dupe)
            except (EnvironmentError, UnicodeEncodeError) as e:
            except (OSError, UnicodeEncodeError) as e:
                self.problems.append((dupe, str(e)))
        if remove_from_results:
            self.remove_duplicates(to_remove)
@@ -324,7 +325,7 @@ class Results(Markable):
        del self.__group_of_duplicate[dupe]
        self._remove_mark_flag(dupe)
        self.__total_count -= 1
        self.__total_size -= dupe.size
        self.__total_size = max(0, self.__total_size - dupe.size)
        if not group:
            del self.__group_of_duplicate[ref]
            self.__groups.remove(group)
@@ -374,10 +375,10 @@ class Results(Markable):

        try:
            do_write(outfile)
        except IOError as e:
            # If our IOError is because dest is already a directory, we want to handle that. 21 is
            # the code we get on OS X and Linux, 13 is what we get on Windows.
            if e.errno in {21, 13}:
        except OSError as e:
            # If our OSError is because dest is already a directory, we want to handle that. 21 is
            # the code we get on OS X and Linux (EISDIR), 13 is what we get on Windows (EACCES).
            if e.errno in (EISDIR, EACCES):
                p = str(outfile)
                dirname, basename = op.split(p)
                otherfiles = os.listdir(dirname)
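Replacing the magic numbers 21 and 13 with errno constants keeps the intent readable. The general shape of that handler, extracted (write_results and pick_alternate_name are hypothetical stand-ins for do_write and the conflict-name logic):

from errno import EISDIR, EACCES

try:
    write_results(outfile)
except OSError as e:
    if e.errno in (EISDIR, EACCES):  # dest is actually a directory
        outfile = pick_alternate_name(outfile)
    else:
        raise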
@@ -13,7 +13,7 @@ from hscommon.jobprogress import job
from hscommon.util import dedupe, rem_file_ext, get_file_ext
from hscommon.trans import tr

from . import engine
from core import engine

# It's quite ugly to have scan types from all editions all put in the same class, but because there's
# there will be some nasty bugs popping up (ScanType is used in core when it should exclusively be
@@ -87,8 +87,6 @@ class Scanner:
            }
        ):
            j = j.start_subjob([2, 8])
            for f in j.iter_with_progress(files, tr("Read size of %d/%d files")):
                f.size  # pre-read, makes a smoother progress if read here (especially for bundles)
        if self.size_threshold:
            files = [f for f in files if f.size >= self.size_threshold]
        if self.large_size_threshold:
@@ -171,7 +169,10 @@ class Scanner:
            matches = [m for m in matches if m.first.path not in toremove or m.second.path not in toremove]
        if not self.mix_file_kind:
            matches = [m for m in matches if get_file_ext(m.first.name) == get_file_ext(m.second.name)]
        matches = [m for m in matches if m.first.path.exists() and m.second.path.exists()]
        if self.include_exists_check:
            matches = [m for m in matches if m.first.exists() and m.second.exists()]
        # Contents already handles ref checks, other scan types might not catch during scan
        if self.scan_type != ScanType.CONTENTS:
            matches = [m for m in matches if not (m.first.is_ref and m.second.is_ref)]
        if ignore_list:
            matches = [m for m in matches if not ignore_list.are_ignored(str(m.first.path), str(m.second.path))]
@@ -212,3 +213,4 @@ class Scanner:
    large_size_threshold = 0
    big_file_size_threshold = 0
    word_weighting = False
    include_exists_check = True

@@ -1 +1 @@
from . import fs, result_table, scanner  # noqa
from core.se import fs, result_table, scanner  # noqa
@@ -7,6 +7,7 @@
import os
import os.path as op
import logging
import tempfile

import pytest
from pathlib import Path
@@ -15,10 +16,10 @@ import hscommon.util
from hscommon.testutil import eq_, log_calls
from hscommon.jobprogress.job import Job

from .base import TestApp
from .results_test import GetTestGroups
from .. import app, fs, engine
from ..scanner import ScanType
from core.tests.base import TestApp
from core.tests.results_test import GetTestGroups
from core import app, fs, engine
from core.scanner import ScanType


def add_fake_files_to_directories(directories, files):
@@ -68,10 +69,11 @@ class TestCaseDupeGuru:
        dgapp = TestApp().app
        dgapp.directories.add_path(p)
        [f] = dgapp.directories.get_files()
        dgapp.copy_or_move(f, True, "some_destination", 0)
        with tempfile.TemporaryDirectory() as tmp_dir:
            dgapp.copy_or_move(f, True, tmp_dir, 0)
        eq_(1, len(hscommon.conflict.smart_copy.calls))
        call = hscommon.conflict.smart_copy.calls[0]
        eq_(call["dest_path"], Path("some_destination", "foo"))
        eq_(call["dest_path"], Path(tmp_dir, "foo"))
        eq_(call["source_path"], f.path)

    def test_copy_or_move_clean_empty_dirs(self, tmpdir, monkeypatch):
@@ -95,7 +97,7 @@ class TestCaseDupeGuru:

        # At some point, any() was used in a wrong way that made Scan() wrongly return 1
        app = TestApp().app
        f1, f2 = [FakeFile("foo") for _ in range(2)]
        f1, f2 = (FakeFile("foo") for _ in range(2))
        f1.is_ref, f2.is_ref = (False, False)
        assert not (bool(f1) and bool(f2))
        add_fake_files_to_directories(app.directories, [f1, f2])

@@ -10,12 +10,11 @@ from hscommon.util import get_file_ext, format_size
from hscommon.gui.column import Column
from hscommon.jobprogress.job import nulljob, JobCancelled

from .. import engine
from .. import prioritize
from ..engine import getwords
from ..app import DupeGuru as DupeGuruBase
from ..gui.result_table import ResultTable as ResultTableBase
from ..gui.prioritize_dialog import PrioritizeDialog
from core import engine, prioritize
from core.engine import getwords
from core.app import DupeGuru as DupeGuruBase
from core.gui.result_table import ResultTable as ResultTableBase
from core.gui.prioritize_dialog import PrioritizeDialog


class DupeGuruView:

@@ -9,7 +9,7 @@ from pytest import raises, skip
from hscommon.testutil import eq_

try:
    from ..pe.block import avgdiff, getblocks2, NoBlocksError, DifferentBlockCountError
    from core.pe.block import avgdiff, getblocks2, NoBlocksError, DifferentBlockCountError
except ImportError:
    skip("Can't import the block module, probably hasn't been compiled.")
@@ -10,41 +10,41 @@ from pytest import raises, skip
from hscommon.testutil import eq_

try:
    from ..pe.cache import colors_to_string, string_to_colors
    from ..pe.cache_sqlite import SqliteCache
    from ..pe.cache_shelve import ShelveCache
    from core.pe.cache import colors_to_bytes, bytes_to_colors
    from core.pe.cache_sqlite import SqliteCache
except ImportError:
    skip("Can't import the cache module, probably hasn't been compiled.")


class TestCaseColorsToString:
    def test_no_color(self):
        eq_("", colors_to_string([]))
        eq_(b"", colors_to_bytes([]))

    def test_single_color(self):
        eq_("000000", colors_to_string([(0, 0, 0)]))
        eq_("010101", colors_to_string([(1, 1, 1)]))
        eq_("0a141e", colors_to_string([(10, 20, 30)]))
        eq_(b"\x00\x00\x00", colors_to_bytes([(0, 0, 0)]))
        eq_(b"\x01\x01\x01", colors_to_bytes([(1, 1, 1)]))
        eq_(b"\x0a\x14\x1e", colors_to_bytes([(10, 20, 30)]))

    def test_two_colors(self):
        eq_("000102030405", colors_to_string([(0, 1, 2), (3, 4, 5)]))
        eq_(b"\x00\x01\x02\x03\x04\x05", colors_to_bytes([(0, 1, 2), (3, 4, 5)]))


class TestCaseStringToColors:
    def test_empty(self):
        eq_([], string_to_colors(""))
        eq_([], bytes_to_colors(b""))

    def test_single_color(self):
        eq_([(0, 0, 0)], string_to_colors("000000"))
        eq_([(2, 3, 4)], string_to_colors("020304"))
        eq_([(10, 20, 30)], string_to_colors("0a141e"))
        eq_([(0, 0, 0)], bytes_to_colors(b"\x00\x00\x00"))
        eq_([(2, 3, 4)], bytes_to_colors(b"\x02\x03\x04"))
        eq_([(10, 20, 30)], bytes_to_colors(b"\x0a\x14\x1e"))

    def test_two_colors(self):
        eq_([(10, 20, 30), (40, 50, 60)], string_to_colors("0a141e28323c"))
        eq_([(10, 20, 30), (40, 50, 60)], bytes_to_colors(b"\x0a\x14\x1e\x28\x32\x3c"))

    def test_incomplete_color(self):
        # don't return anything if it's not a complete color
        eq_([], string_to_colors("102"))
        eq_([], bytes_to_colors(b"\x01"))
        eq_([(1, 2, 3)], bytes_to_colors(b"\x01\x02\x03\x04"))


class BaseTestCaseCache:
@@ -59,13 +59,13 @@ class BaseTestCaseCache:

    def test_set_then_retrieve_blocks(self):
        c = self.get_cache()
        b = [(0, 0, 0), (1, 2, 3)]
        b = [[(0, 0, 0), (1, 2, 3)]] * 8
        c["foo"] = b
        eq_(b, c["foo"])

    def test_delitem(self):
        c = self.get_cache()
        c["foo"] = ""
        c["foo"] = [[]] * 8
        del c["foo"]
        assert "foo" not in c
        with raises(KeyError):
@@ -74,16 +74,16 @@ class BaseTestCaseCache:
    def test_persistance(self, tmpdir):
        DBNAME = tmpdir.join("hstest.db")
        c = self.get_cache(str(DBNAME))
        c["foo"] = [(1, 2, 3)]
        c["foo"] = [[(1, 2, 3)]] * 8
        del c
        c = self.get_cache(str(DBNAME))
        eq_([(1, 2, 3)], c["foo"])
        eq_([[(1, 2, 3)]] * 8, c["foo"])

    def test_filter(self):
        c = self.get_cache()
        c["foo"] = ""
        c["bar"] = ""
        c["baz"] = ""
        c["foo"] = [[]] * 8
        c["bar"] = [[]] * 8
        c["baz"] = [[]] * 8
        c.filter(lambda p: p != "bar")  # only 'bar' is removed
        eq_(2, len(c))
        assert "foo" in c
@@ -92,9 +92,9 @@ class BaseTestCaseCache:

    def test_clear(self):
        c = self.get_cache()
        c["foo"] = ""
        c["bar"] = ""
        c["baz"] = ""
        c["foo"] = [[]] * 8
        c["bar"] = [[]] * 8
        c["baz"] = [[]] * 8
        c.clear()
        eq_(0, len(c))
        assert "foo" not in c
@@ -104,7 +104,7 @@ class BaseTestCaseCache:
    def test_by_id(self):
        # it's possible to use the cache by referring to the files by their row_id
        c = self.get_cache()
        b = [(0, 0, 0), (1, 2, 3)]
        b = [[(0, 0, 0), (1, 2, 3)]] * 8
        c["foo"] = b
        foo_id = c.get_id("foo")
        eq_(c[foo_id], b)
@@ -127,15 +127,10 @@ class TestCaseSqliteCache(BaseTestCaseCache):
            fp.write("invalid sqlite content")
        fp.close()
        c = self.get_cache(dbname)  # should not raise a DatabaseError
        c["foo"] = [(1, 2, 3)]
        c["foo"] = [[(1, 2, 3)]] * 8
        del c
        c = self.get_cache(dbname)
        eq_(c["foo"], [(1, 2, 3)])


class TestCaseShelveCache(BaseTestCaseCache):
    def get_cache(self, dbname=None):
        return ShelveCache(dbname)
        eq_(c["foo"], [[(1, 2, 3)]] * 8)


class TestCaseCacheSQLEscape:
@@ -157,7 +152,7 @@ class TestCaseCacheSQLEscape:

    def test_delitem(self):
        c = self.get_cache()
        c["foo'bar"] = []
        c["foo'bar"] = [[]] * 8
        try:
            del c["foo'bar"]
        except KeyError:
@@ -14,14 +14,14 @@ from pathlib import Path
from hscommon.testutil import eq_
from hscommon.plat import ISWINDOWS

from ..fs import File
from ..directories import (
from core.fs import File
from core.directories import (
    Directories,
    DirectoryState,
    AlreadyThereError,
    InvalidPathError,
)
from ..exclude import ExcludeList, ExcludeDict
from core.exclude import ExcludeList, ExcludeDict


def create_fake_fs(rootpath):
@@ -326,6 +326,7 @@ def test_default_path_state_override(tmpdir):
        def _default_state_for_path(self, path):
            if "foobar" in path.parts:
                return DirectoryState.EXCLUDED
            return DirectoryState.NORMAL

    d = MyDirectories()
    p1 = Path(str(tmpdir))
@@ -10,9 +10,9 @@ from hscommon.jobprogress import job
from hscommon.util import first
from hscommon.testutil import eq_, log_calls

from .base import NamedObject
from .. import engine
from ..engine import (
from core.tests.base import NamedObject
from core import engine
from core.engine import (
    get_match,
    getwords,
    Group,
@@ -71,7 +71,10 @@ class TestCasegetwords:

    def test_unicode(self):
        eq_(["e", "c", "0", "a", "o", "u", "e", "u"], getwords("é ç 0 à ö û è ¤ ù"))
        eq_(["02", "君のこころは輝いてるかい?", "国木田花丸", "solo", "ver"], getwords("02 君のこころは輝いてるかい? 国木田花丸 Solo Ver"))
        eq_(
            ["02", "君のこころは輝いてるかい?", "国木田花丸", "solo", "ver"],
            getwords("02 君のこころは輝いてるかい? 国木田花丸 Solo Ver"),
        )

    def test_splitter_chars(self):
        eq_(
@@ -271,9 +274,9 @@ class TestCaseBuildWordDict:
class TestCaseMergeSimilarWords:
    def test_some_similar_words(self):
        d = {
            "foobar": set([1]),
            "foobar1": set([2]),
            "foobar2": set([3]),
            "foobar": {1},
            "foobar1": {2},
            "foobar2": {3},
        }
        merge_similar_words(d)
        eq_(1, len(d))
@@ -283,8 +286,8 @@ class TestCaseReduceCommonWords:
class TestCaseReduceCommonWords:
    def test_typical(self):
        d = {
            "foo": set([NamedObject("foo bar", True) for _ in range(50)]),
            "bar": set([NamedObject("foo bar", True) for _ in range(49)]),
            "foo": {NamedObject("foo bar", True) for _ in range(50)},
            "bar": {NamedObject("foo bar", True) for _ in range(49)},
        }
        reduce_common_words(d, 50)
        assert "foo" not in d
@@ -293,7 +296,7 @@ class TestCaseReduceCommonWords:
    def test_dont_remove_objects_with_only_common_words(self):
        d = {
            "common": set([NamedObject("common uncommon", True) for _ in range(50)] + [NamedObject("common", True)]),
            "uncommon": set([NamedObject("common uncommon", True)]),
            "uncommon": {NamedObject("common uncommon", True)},
        }
        reduce_common_words(d, 50)
        eq_(1, len(d["common"]))
@@ -302,7 +305,7 @@ class TestCaseReduceCommonWords:
    def test_values_still_are_set_instances(self):
        d = {
            "common": set([NamedObject("common uncommon", True) for _ in range(50)] + [NamedObject("common", True)]),
            "uncommon": set([NamedObject("common uncommon", True)]),
            "uncommon": {NamedObject("common uncommon", True)},
        }
        reduce_common_words(d, 50)
        assert isinstance(d["common"], set)
@@ -312,9 +315,9 @@ class TestCaseReduceCommonWords:
        # If a word has been removed by the reduce, an object in a subsequent common word that
        # contains the word that has been removed would cause a KeyError.
        d = {
            "foo": set([NamedObject("foo bar baz", True) for _ in range(50)]),
            "bar": set([NamedObject("foo bar baz", True) for _ in range(50)]),
            "baz": set([NamedObject("foo bar baz", True) for _ in range(49)]),
            "foo": {NamedObject("foo bar baz", True) for _ in range(50)},
            "bar": {NamedObject("foo bar baz", True) for _ in range(50)},
            "baz": {NamedObject("foo bar baz", True) for _ in range(49)},
        }
        try:
            reduce_common_words(d, 50)
@@ -328,7 +331,7 @@ class TestCaseReduceCommonWords:
            o.words = [["foo", "bar"], ["baz"]]
            return o

        d = {"foo": set([create_it() for _ in range(50)])}
        d = {"foo": {create_it() for _ in range(50)}}
        try:
            reduce_common_words(d, 50)
        except TypeError:
@@ -343,7 +346,7 @@ class TestCaseReduceCommonWords:
        d = {
            "foo": set([NamedObject("foo bar baz", True) for _ in range(49)] + [only_common]),
            "bar": set([NamedObject("foo bar baz", True) for _ in range(49)] + [only_common]),
            "baz": set([NamedObject("foo bar baz", True) for _ in range(49)]),
            "baz": {NamedObject("foo bar baz", True) for _ in range(49)},
        }
        reduce_common_words(d, 50)
        eq_(1, len(d["foo"]))
@@ -884,7 +887,7 @@ class TestCaseGetGroups:
        # If, with a (A, B, C, D) set, all match with A, but C and D don't match with B and that the
        # (A, B) match is the highest (thus resulting in an (A, B) group), still match C and D
        # in a separate group instead of discarding them.
        A, B, C, D = [NamedObject() for _ in range(4)]
        A, B, C, D = (NamedObject() for _ in range(4))
        m1 = Match(A, B, 90)  # This is the strongest "A" match
        m2 = Match(A, C, 80)  # Because C doesn't match with B, it won't be in the group
        m3 = Match(A, D, 80)  # Same thing for D
@@ -10,8 +10,8 @@ from xml.etree import ElementTree as ET
from hscommon.testutil import eq_
from hscommon.plat import ISWINDOWS

from .base import DupeGuru
from ..exclude import ExcludeList, ExcludeDict, default_regexes, AlreadyThereException
from core.tests.base import DupeGuru
from core.exclude import ExcludeList, ExcludeDict, default_regexes, AlreadyThereException

from re import error

@@ -289,8 +289,8 @@ class TestCaseListEmptyUnion(TestCaseListEmpty):
        compiled = [x for x in self.exclude_list.compiled]
        assert regex not in compiled
        # Need to escape both to get the same strings after compilation
        compiled_escaped = set([x.encode("unicode-escape").decode() for x in compiled[0].pattern.split("|")])
        default_escaped = set([x.encode("unicode-escape").decode() for x in default_regexes])
        compiled_escaped = {x.encode("unicode-escape").decode() for x in compiled[0].pattern.split("|")}
        default_escaped = {x.encode("unicode-escape").decode() for x in default_regexes}
        assert compiled_escaped == default_escaped
        eq_(len(default_regexes), len(compiled[0].pattern.split("|")))

@@ -366,8 +366,8 @@ class TestCaseDictEmptyUnion(TestCaseDictEmpty):
        compiled = [x for x in self.exclude_list.compiled]
        assert regex not in compiled
        # Need to escape both to get the same strings after compilation
        compiled_escaped = set([x.encode("unicode-escape").decode() for x in compiled[0].pattern.split("|")])
        default_escaped = set([x.encode("unicode-escape").decode() for x in default_regexes])
        compiled_escaped = {x.encode("unicode-escape").decode() for x in compiled[0].pattern.split("|")}
        default_escaped = {x.encode("unicode-escape").decode() for x in default_regexes}
        assert compiled_escaped == default_escaped
        eq_(len(default_regexes), len(compiled[0].pattern.split("|")))
@@ -6,6 +6,16 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import typing
from os import urandom

from pathlib import Path
from hscommon.testutil import eq_
from core.tests.directories_test import create_fake_fs

from core import fs

hasher: typing.Callable
try:
    import xxhash

@@ -15,14 +25,6 @@ except ImportError:

    hasher = hashlib.md5

from os import urandom

from pathlib import Path
from hscommon.testutil import eq_
from core.tests.directories_test import create_fake_fs

from .. import fs


def create_fake_fs_with_random_data(rootpath):
    rootpath = rootpath.joinpath("fs")
@@ -10,7 +10,7 @@ from xml.etree import ElementTree as ET
from pytest import raises
from hscommon.testutil import eq_

from ..ignore import IgnoreList
from core.ignore import IgnoreList


def test_empty():

@@ -6,7 +6,7 @@

from hscommon.testutil import eq_

from ..markable import MarkableList, Markable
from core.markable import MarkableList, Markable


def gen():

@@ -9,8 +9,8 @@
import os.path as op
from itertools import combinations

from .base import TestApp, NamedObject, with_app, eq_
from ..engine import Group, Match
from core.tests.base import TestApp, NamedObject, with_app, eq_
from core.engine import Group, Match

no = NamedObject


@@ -6,7 +6,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import TestApp, GetTestGroups
from core.tests.base import TestApp, GetTestGroups


def app_with_results():

@@ -12,10 +12,9 @@ from xml.etree import ElementTree as ET
from pytest import raises
from hscommon.testutil import eq_
from hscommon.util import first

from .. import engine
from .base import NamedObject, GetTestGroups, DupeGuru
from ..results import Results
from core import engine
from core.tests.base import NamedObject, GetTestGroups, DupeGuru
from core.results import Results


class TestCaseResultsEmpty:
@@ -337,7 +336,7 @@ class TestCaseResultsMarkings:
        def log_object(o):
            log.append(o)
            if o is self.objects[1]:
                raise EnvironmentError("foobar")
                raise OSError("foobar")

        log = []
        self.results.mark_all()
@@ -464,7 +463,7 @@ class TestCaseResultsXML:
        eq_(6, len(g1))
        eq_(3, len([c for c in g1 if c.tag == "file"]))
        eq_(3, len([c for c in g1 if c.tag == "match"]))
        d1, d2, d3 = [c for c in g1 if c.tag == "file"]
        d1, d2, d3 = (c for c in g1 if c.tag == "file")
        eq_(op.join("basepath", "foo bar"), d1.get("path"))
        eq_(op.join("basepath", "bar bleh"), d2.get("path"))
        eq_(op.join("basepath", "foo bleh"), d3.get("path"))
@@ -477,7 +476,7 @@ class TestCaseResultsXML:
        eq_(3, len(g2))
        eq_(2, len([c for c in g2 if c.tag == "file"]))
        eq_(1, len([c for c in g2 if c.tag == "match"]))
        d1, d2 = [c for c in g2 if c.tag == "file"]
        d1, d2 = (c for c in g2 if c.tag == "file")
        eq_(op.join("basepath", "ibabtu"), d1.get("path"))
        eq_(op.join("basepath", "ibabtu"), d2.get("path"))
        eq_("n", d1.get("is_ref"))
@@ -10,13 +10,14 @@ from hscommon.jobprogress import job
from pathlib import Path
from hscommon.testutil import eq_

from .. import fs
from ..engine import getwords, Match
from ..ignore import IgnoreList
from ..scanner import Scanner, ScanType
from ..me.scanner import ScannerME
from core import fs
from core.engine import getwords, Match
from core.ignore import IgnoreList
from core.scanner import Scanner, ScanType
from core.me.scanner import ScannerME


# TODO update this to be able to inherit from fs.File
class NamedObject:
    def __init__(self, name="foobar", size=1, path=None):
        if path is None:
@@ -29,7 +30,10 @@ class NamedObject:
        self.words = getwords(name)

    def __repr__(self):
        return "<NamedObject %r %r>" % (self.name, self.path)
        return "<NamedObject {!r} {!r}>".format(self.name, self.path)

    def exists(self):
        return self.path.exists()


no = NamedObject
@@ -238,12 +242,12 @@ def test_content_scan_doesnt_put_digest_in_words_at_the_end(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.CONTENTS
    f = [no("foo"), no("bar")]
    f[0].digest = f[0].digest_partial = f[
        0
    ].digest_samples = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    f[1].digest = f[1].digest_partial = f[
        1
    ].digest_samples = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    f[0].digest = f[0].digest_partial = f[0].digest_samples = (
        "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    )
    f[1].digest = f[1].digest_partial = f[1].digest_samples = (
        "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    )
    r = s.get_dupe_groups(f)
    # FIXME looks like we are missing something here?
    r[0]
@@ -336,7 +340,7 @@ def test_tag_scan(fake_fileexists):
def test_tag_with_album_scan(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.TAG
    s.scanned_tags = set(["artist", "album", "title"])
    s.scanned_tags = {"artist", "album", "title"}
    o1 = no("foo")
    o2 = no("bar")
    o3 = no("bleh")
@@ -356,7 +360,7 @@ def test_tag_with_album_scan(fake_fileexists):
def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.TAG
    s.scanned_tags = set(["artist", "album", "title"])
    s.scanned_tags = {"artist", "album", "title"}
    s.min_match_percentage = 50
    o1 = no("foo")
    o2 = no("bar")
@@ -373,7 +377,7 @@ def test_that_dash_in_tags_dont_create_new_fields(fake_fileexists):
def test_tag_scan_with_different_scanned(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.TAG
    s.scanned_tags = set(["track", "year"])
    s.scanned_tags = {"track", "year"}
    o1 = no("foo")
    o2 = no("bar")
    o1.artist = "The White Stripes"
@@ -391,7 +395,7 @@ def test_tag_scan_with_different_scanned(fake_fileexists):
def test_tag_scan_only_scans_existing_tags(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.TAG
    s.scanned_tags = set(["artist", "foo"])
    s.scanned_tags = {"artist", "foo"}
    o1 = no("foo")
    o2 = no("bar")
    o1.artist = "The White Stripes"
@@ -405,7 +409,7 @@ def test_tag_scan_only_scans_existing_tags(fake_fileexists):
def test_tag_scan_converts_to_str(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.TAG
    s.scanned_tags = set(["track"])
    s.scanned_tags = {"track"}
    o1 = no("foo")
    o2 = no("bar")
    o1.track = 42
@@ -420,7 +424,7 @@ def test_tag_scan_converts_to_str(fake_fileexists):
def test_tag_scan_non_ascii(fake_fileexists):
    s = Scanner()
    s.scan_type = ScanType.TAG
    s.scanned_tags = set(["title"])
    s.scanned_tags = {"title"}
    o1 = no("foo")
    o2 = no("bar")
    o1.title = "foobar\u00e9"
@@ -1,3 +1,21 @@
=== 4.3.1 (2022-07-08)

* Fix issue where cache db exceptions could prevent files being hashed (#1015)
* Add extra guard for non-zero length files without digests to prevent false duplicates
* Update Italian translations

=== 4.3.0 (2022-07-01)

* Redirect stdout from custom command to the log files (#1008)
* Update translations
* Fix typo in debian control file (#989)
* Add option to profile scans
* Update fs.py to optimize stat() calls
* Fix error when deleting after scan (#988)
* Update directory scanning to use os.scandir() and DirEntry objects
* Improve performance of Directories.get_state()
* Migrate from hscommon.path to pathlib
* Switch file hashing to xxhash with fallback to md5
* Add update check feature to about box

=== 4.2.1 (2022-03-25)

* Default to English on unsupported system language (#976)
* Fix image viewer zoom datatype issue (#978)
@@ -24,7 +24,7 @@ Development process
* `Issue Tracker`_
* `Issue labels meaning`_

dupeGuru's source code is on Github and thus managed in a Git repository. At all times, you should
dupeGuru's source code is on GitHub and thus managed in a Git repository. At all times, you should
be able to build from source a fresh checkout of the ``master`` branch using instructions from the
``README.md`` file at the root of this project. If you can't, it's a bug. Please report it.

@@ -61,7 +61,7 @@ It's the same thing with feature requests. Description of a feature request, whe
already been given to how such a feature would fit in the current design, are precious to developers
and help them figure out a clear roadmap for the project.

So, even if you're not a developer, you can always open a Github account and create/comment issues.
So, even if you're not a developer, you can always open a GitHub account and create/comment issues.
Your contribution will be much appreciated.

**Documentation**. This is a bit trickier because dupeGuru's documentation is written with a rather
@@ -15,4 +15,3 @@ hscommon.gui.progress_window
.. autoclass:: ProgressWindowView
    :members:
    :private-members:

@@ -15,4 +15,3 @@ hscommon.gui.tree
.. autoclass:: Node
    :members:
    :private-members:

@@ -13,4 +13,3 @@ hscommon
    util
    jobprogress/*
    gui/*

@@ -14,4 +14,3 @@ hscommon.jobprogress.job

.. autoclass:: NullJob
    :members:

@@ -9,4 +9,3 @@ hscommon.jobprogress.performer

.. autoclass:: ThreadedJobPerformer
    :members:
@@ -30,8 +30,8 @@ that makes sure that you will **always** keep at least one member of the duplica
How can I report a bug or suggest a feature?
-------------------------------------------

dupeGuru is hosted on `Github`_ and it's also where issues are tracked. The best way to report a
bug or suggest a feature is to sign up on Github and `open an issue`_.
dupeGuru is hosted on `GitHub`_ and it's also where issues are tracked. The best way to report a
bug or suggest a feature is to sign up on GitHub and `open an issue`_.

The mark box of a file I want to delete is disabled. What must I do?
--------------------------------------------------------------------
@@ -176,6 +176,5 @@ Preferences are stored elsewhere:
* Linux: ``~/.config/Hardcoded Software/dupeGuru.conf``
* Mac OS X: In the built-in ``defaults`` system, as ``com.hardcoded-software.dupeguru``

.. _Github: https://github.com/arsenetar/dupeguru
.. _GitHub: https://github.com/arsenetar/dupeguru
.. _open an issue: https://github.com/arsenetar/dupeguru/wiki/issue-labels
@@ -14,6 +14,10 @@ Preferences
    If you check this box, pictures of different dimensions will be allowed in the same
    duplicate group.

**Match pictures of different rotations:**
    If you check this box, pictures of different rotations will be allowed in the same
    duplicate group.

.. _filter-hardness:

**Filter Hardness:**

@@ -12,4 +12,3 @@
* Եթե համոզված եք, որ կրկնօրինակը արդյունքներում կա, ապա սեղմեք **Խմբագրել-->Նշել բոլորը**, և ապա **Գործողություններ-->Ուղարկել Նշվածը Աղբարկղ**:

Սա միայն բազային ստուգում է: Կան բազմաթիվ կարգավորումներ, որոնք հնարավորություն են տալիս նշելու տարբեր արդյունքներ և մի քանի եղանակներ արդյունքների փոփոխման: Մանրամասների համար կարդացեք Օգնության ֆայլը:

@@ -23,4 +23,3 @@ dupeGuru-ը փորձում է որոշել, թե որ կրկնօրինակներ
մեծագույն ֆայլը և եթե երկու կամ ավելի ֆայլեր ունեն նույն չափը, ապա մեկը ունի ֆայլի անուն, որը
չի ավարտվում թվով, կօգտագործվի: Երբ փաստարկի արդյունքը կապված է, կարգը, որի սխալները
նախկինում էին, խումբը պետք է օգտագործվի:

@@ -114,4 +114,3 @@
Якщо все це не так, `контакт УГ підтримки <http://www.hardcoded.net/support>`_, ми зрозуміти це.

.. todo:: This FAQ question is outdated, see english version.
5
hscommon/.gitignore
vendored
@@ -1,5 +0,0 @@
*.pyc
*.mo
*.so
.DS_Store
/docs_html
@@ -9,6 +9,7 @@
"""This module is a collection of functions to help in HS apps build process.
"""

from argparse import ArgumentParser
import os
import sys
import os.path as op
@@ -20,18 +21,19 @@ import re
import importlib
from datetime import datetime
import glob
from typing import Any, AnyStr, Callable, Dict, List, Union

from .plat import ISWINDOWS
from hscommon.plat import ISWINDOWS


def print_and_do(cmd):
def print_and_do(cmd: str) -> int:
    """Prints ``cmd`` and executes it in the shell."""
    print(cmd)
    p = Popen(cmd, shell=True)
    return p.wait()


def _perform(src, dst, action, actionname):
def _perform(src: os.PathLike, dst: os.PathLike, action: Callable, actionname: str) -> None:
    if not op.lexists(src):
        print("Copying %s failed: it doesn't exist." % src)
        return
@@ -40,34 +42,26 @@ def _perform(src, dst, action, actionname):
            shutil.rmtree(dst)
        else:
            os.remove(dst)
    print("%s %s --> %s" % (actionname, src, dst))
    print("{} {} --> {}".format(actionname, src, dst))
    action(src, dst)


def copy_file_or_folder(src, dst):
def copy_file_or_folder(src: os.PathLike, dst: os.PathLike) -> None:
    if op.isdir(src):
        shutil.copytree(src, dst, symlinks=True)
    else:
        shutil.copy(src, dst)


def move(src, dst):
def move(src: os.PathLike, dst: os.PathLike) -> None:
    _perform(src, dst, os.rename, "Moving")


def copy(src, dst):
def copy(src: os.PathLike, dst: os.PathLike) -> None:
    _perform(src, dst, copy_file_or_folder, "Copying")


def symlink(src, dst):
    _perform(src, dst, os.symlink, "Symlinking")


def hardlink(src, dst):
    _perform(src, dst, os.link, "Hardlinking")


def _perform_on_all(pattern, dst, action):
def _perform_on_all(pattern: AnyStr, dst: os.PathLike, action: Callable) -> None:
    # pattern is a glob pattern, example "folder/foo*". The file is moved directly in dst, no folder
    # structure from src is kept.
    filenames = glob.glob(pattern)
@@ -76,42 +70,35 @@ def _perform_on_all(pattern, dst, action):
        action(fn, destpath)


def move_all(pattern, dst):
def move_all(pattern: AnyStr, dst: os.PathLike) -> None:
    _perform_on_all(pattern, dst, move)


def copy_all(pattern, dst):
def copy_all(pattern: AnyStr, dst: os.PathLike) -> None:
    _perform_on_all(pattern, dst, copy)


def ensure_empty_folder(path):
    """Make sure that the path exists and that it's an empty folder."""
    if op.exists(path):
        shutil.rmtree(path)
    os.mkdir(path)


def filereplace(filename, outfilename=None, **kwargs):
def filereplace(filename: os.PathLike, outfilename: Union[os.PathLike, None] = None, **kwargs) -> None:
    """Reads `filename`, replaces all {variables} in kwargs, and writes the result to `outfilename`."""
    if outfilename is None:
        outfilename = filename
    fp = open(filename, "rt", encoding="utf-8")
    fp = open(filename, encoding="utf-8")
    contents = fp.read()
    fp.close()
    # We can't use str.format() because in some files, there might be {} characters that mess with it.
    for key, item in kwargs.items():
        contents = contents.replace("{{{}}}".format(key), item)
        contents = contents.replace(f"{{{key}}}", item)
    fp = open(outfilename, "wt", encoding="utf-8")
    fp.write(contents)
    fp.close()
|
||||
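The reason filereplace() substitutes each "{key}" token in a loop instead of calling str.format() is spelled out in the comment above: templates may contain literal braces that format() would reject. A minimal sketch of the same strategy, with illustrative values that are not part of the commit:

template = "body { color: red } version={version}"
replacements = {"version": "4.2.0"}

contents = template
for key, item in replacements.items():
    # f"{{{key}}}" renders as "{version}", so only known keys are touched
    # and stray braces (the CSS block here) survive intact.
    contents = contents.replace(f"{{{key}}}", item)

print(contents)  # body { color: red } version=4.2.0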
def get_module_version(modulename):
def get_module_version(modulename: str) -> str:
    mod = importlib.import_module(modulename)
    return mod.__version__


def setup_package_argparser(parser):
def setup_package_argparser(parser: ArgumentParser):
    parser.add_argument(
        "--sign",
        dest="sign_identity",
@@ -138,13 +125,13 @@ def setup_package_argparser(parser):


# `args` come from an ArgumentParser updated with setup_package_argparser()
def package_cocoa_app_in_dmg(app_path, destfolder, args):
def package_cocoa_app_in_dmg(app_path: os.PathLike, destfolder: os.PathLike, args) -> None:
    # Rather than signing our app in XCode during the build phase, we sign it during the package
    # phase because running the app before packaging can modify it and we want to be sure to have
    # a valid signature.
    if args.sign_identity:
        sign_identity = "Developer ID Application: {}".format(args.sign_identity)
        result = print_and_do('codesign --force --deep --sign "{}" "{}"'.format(sign_identity, app_path))
        sign_identity = f"Developer ID Application: {args.sign_identity}"
        result = print_and_do(f'codesign --force --deep --sign "{sign_identity}" "{app_path}"')
        if result != 0:
            print("ERROR: Signing failed. Aborting packaging.")
            return
@@ -154,29 +141,32 @@ def package_cocoa_app_in_dmg(app_path, destfolder, args):
    build_dmg(app_path, destfolder)


def build_dmg(app_path, destfolder):
def build_dmg(app_path: os.PathLike, destfolder: os.PathLike) -> None:
    """Builds a DMG volume with application at ``app_path`` and puts it in ``dest_path``.

    The name of the resulting DMG volume is determined by the app's name and version.
    """
    print(repr(op.join(app_path, "Contents", "Info.plist")))
    plist = plistlib.readPlist(op.join(app_path, "Contents", "Info.plist"))
    with open(op.join(app_path, "Contents", "Info.plist"), "rb") as fp:
        plist = plistlib.load(fp)
    workpath = tempfile.mkdtemp()
    dmgpath = op.join(workpath, plist["CFBundleName"])
    os.mkdir(dmgpath)
    print_and_do('cp -R "%s" "%s"' % (app_path, dmgpath))
    print_and_do('cp -R "{}" "{}"'.format(app_path, dmgpath))
    print_and_do('ln -s /Applications "%s"' % op.join(dmgpath, "Applications"))
    dmgname = "%s_osx_%s.dmg" % (
    dmgname = "{}_osx_{}.dmg".format(
        plist["CFBundleName"].lower().replace(" ", "_"),
        plist["CFBundleVersion"].replace(".", "_"),
    )
    print("Building %s" % dmgname)
    # UDBZ = bzip compression. UDZO (zip compression) was used before, but it compresses much less.
    print_and_do('hdiutil create "%s" -format UDBZ -nocrossdev -srcdir "%s"' % (op.join(destfolder, dmgname), dmgpath))
    print_and_do(
        'hdiutil create "{}" -format UDBZ -nocrossdev -srcdir "{}"'.format(op.join(destfolder, dmgname), dmgpath)
    )
    print("Build Complete")
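The build_dmg() change also migrates off plistlib.readPlist(), which was deprecated in Python 3.4 and removed in 3.9. The replacement pattern, shown standalone (the path is illustrative):

import plistlib

# Old API, gone in Python 3.9:
#   plist = plistlib.readPlist(info_path)
# New API: open in binary mode and parse with plistlib.load().
with open("Info.plist", "rb") as fp:
    plist = plistlib.load(fp)
print(plist["CFBundleName"])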
def add_to_pythonpath(path):
def add_to_pythonpath(path: os.PathLike) -> None:
    """Adds ``path`` to both ``PYTHONPATH`` env and ``sys.path``."""
    abspath = op.abspath(path)
    pythonpath = os.environ.get("PYTHONPATH", "")
@@ -189,7 +179,12 @@ def add_to_pythonpath(path):
# This is a method to hack around those freakingly tricky data inclusion/exclusion rules
# in setuptools. We copy the packages *without data* in a build folder and then build the plugin
# from there.
def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
def copy_packages(
    packages_names: List[str],
    dest: os.PathLike,
    create_links: bool = False,
    extra_ignores: Union[List[str], None] = None,
) -> None:
    """Copy python packages ``packages_names`` to ``dest``, without spurious data.

    Copy will happen without tests, testdata, mercurial data or C extension module source with it.
@@ -216,7 +211,7 @@ def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
            os.unlink(dest_path)
        else:
            shutil.rmtree(dest_path)
        print("Copying package at {0} to {1}".format(source_path, dest_path))
        print(f"Copying package at {source_path} to {dest_path}")
        if create_links:
            os.symlink(op.abspath(source_path), dest_path)
        else:
@@ -227,13 +222,13 @@ def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):


def build_debian_changelog(
    changelogpath,
    destfile,
    pkgname,
    from_version=None,
    distribution="precise",
    fix_version=None,
):
    changelogpath: os.PathLike,
    destfile: os.PathLike,
    pkgname: str,
    from_version: Union[str, None] = None,
    distribution: str = "precise",
    fix_version: Union[str, None] = None,
) -> None:
    """Builds a debian changelog out of a YAML changelog.

    Use fix_version to patch the top changelog to that version (if, for example, there was a
@@ -286,7 +281,7 @@ def build_debian_changelog(
re_changelog_header = re.compile(r"=== ([\d.b]*) \(([\d\-]*)\)")


def read_changelog_file(filename):
def read_changelog_file(filename: os.PathLike) -> List[Dict[str, Any]]:
    def iter_by_three(it):
        while True:
            try:
@@ -297,7 +292,7 @@ def read_changelog_file(filename):
                return
            yield version, date, description

    with open(filename, "rt", encoding="utf-8") as fp:
    with open(filename, encoding="utf-8") as fp:
        contents = fp.read()
    splitted = re_changelog_header.split(contents)[1:]  # the first item is empty
    result = []
@@ -313,7 +308,7 @@ def read_changelog_file(filename):
    return result


def fix_qt_resource_file(path):
def fix_qt_resource_file(path: os.PathLike) -> None:
    # pyrcc5 under Windows, if the locale is non-english, can produce a source file with a date
    # containing accented characters. If it does, the encoding is wrong and it prevents the file
    # from being correctly frozen by cx_freeze. To work around that, we open the file, strip all
@@ -1,30 +0,0 @@
# Copyright 2016 Virgil Dupras

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import argparse

from setuptools import setup, Extension


def get_parser():
    parser = argparse.ArgumentParser(description="Build an arbitrary Python extension.")
    parser.add_argument("source_files", nargs="+", help="List of source files to compile")
    parser.add_argument("name", nargs=1, help="Name of the resulting extension")
    return parser


def main():
    args = get_parser().parse_args()
    print("Building {}...".format(args.name[0]))
    ext = Extension(args.name[0], args.source_files)
    setup(
        script_args=["build_ext", "--inplace"],
        ext_modules=[ext],
    )


if __name__ == "__main__":
    main()
@@ -14,7 +14,9 @@ import re
import os
import shutil

from errno import EISDIR, EACCES
from pathlib import Path
from typing import Callable, List

# This matches [123], but not [12] (3 digits being the minimum).
# It also matches [1234] [12345] etc..
@@ -22,7 +24,7 @@ from pathlib import Path
re_conflict = re.compile(r"^\[\d{3}\d*\] ")


def get_conflicted_name(other_names, name):
def get_conflicted_name(other_names: List[str], name: str) -> str:
    """Returns name with a ``[000]`` number in front of it.

    The number between brackets depends on how many conflicted filenames
@@ -39,7 +41,7 @@ def get_conflicted_name(other_names, name):
        i += 1


def get_unconflicted_name(name):
def get_unconflicted_name(name: str) -> str:
    """Returns ``name`` without ``[]`` brackets.

    Brackets which, of course, might have been added by :func:`get_conflicted_name`.
@@ -47,12 +49,12 @@ def get_unconflicted_name(name):
    return re_conflict.sub("", name, 1)


def is_conflicted(name):
def is_conflicted(name: str) -> bool:
    """Returns whether ``name`` is prepended with a bracketed number."""
    return re_conflict.match(name) is not None
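For context, here is how the conflict helpers behave, using the same regex as above (values are illustrative, not from the commit):

import re

re_conflict = re.compile(r"^\[\d{3}\d*\] ")

assert re_conflict.match("[000] report.txt") is not None  # conflicted
assert re_conflict.match("[12] report.txt") is None       # needs 3+ digits
# Stripping one leading marker recovers the unconflicted name:
assert re_conflict.sub("", "[001] report.txt", 1) == "report.txt"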
def _smart_move_or_copy(operation, source_path: Path, dest_path: Path):
def _smart_move_or_copy(operation: Callable, source_path: Path, dest_path: Path) -> None:
    """Use move() or copy() to move and copy file with the conflict management."""
    if dest_path.is_dir() and not source_path.is_dir():
        dest_path = dest_path.joinpath(source_path.name)
@@ -64,20 +66,18 @@ def _smart_move_or_copy(operation, source_path: Path, dest_path: Path):
    operation(str(source_path), str(dest_path))


def smart_move(source_path, dest_path):
def smart_move(source_path: Path, dest_path: Path) -> None:
    """Same as :func:`smart_copy`, but it moves files instead."""
    _smart_move_or_copy(shutil.move, source_path, dest_path)


def smart_copy(source_path, dest_path):
def smart_copy(source_path: Path, dest_path: Path) -> None:
    """Copies ``source_path`` to ``dest_path``, recursively and with conflict resolution."""
    try:
        _smart_move_or_copy(shutil.copy, source_path, dest_path)
    except IOError as e:
        if e.errno in {
            21,
            13,
        }:  # it's a directory, code is 21 on OS X / Linux and 13 on Windows
    except OSError as e:
        # It's a directory, code is 21 on OS X / Linux (EISDIR) and 13 on Windows (EACCES)
        if e.errno in (EISDIR, EACCES):
            _smart_move_or_copy(shutil.copytree, source_path, dest_path)
        else:
            raise
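Swapping the magic numbers 21 and 13 for errno.EISDIR and errno.EACCES keeps behavior identical while naming the intent, and the move from IOError to OSError is behavior-preserving since IOError is an alias of OSError on Python 3. The same fallback pattern in isolation (paths are hypothetical):

import shutil
from errno import EACCES, EISDIR

def copy_file_or_tree(src: str, dst: str) -> None:
    # Try a plain file copy; if the OS reports that src is a directory
    # (EISDIR on Unix, EACCES on Windows), fall back to a recursive copy.
    try:
        shutil.copy(src, dst)
    except OSError as e:
        if e.errno in (EISDIR, EACCES):
            shutil.copytree(src, dst)
        else:
            raise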
@@ -6,31 +6,33 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from enum import Enum
from os import PathLike
import os.path as op
import logging


class SpecialFolder:
class SpecialFolder(Enum):
    APPDATA = 1
    CACHE = 2


def open_url(url):
def open_url(url: str) -> None:
    """Open ``url`` with the default browser."""
    _open_url(url)


def open_path(path):
def open_path(path: PathLike) -> None:
    """Open ``path`` with its associated application."""
    _open_path(str(path))


def reveal_path(path):
def reveal_path(path: PathLike) -> None:
    """Open the folder containing ``path`` with the default file browser."""
    _reveal_path(str(path))


def special_folder_path(special_folder, appname=None, portable=False):
def special_folder_path(special_folder: SpecialFolder, portable: bool = False) -> str:
    """Returns the path of ``special_folder``.

    ``special_folder`` is a SpecialFolder.* const. The result is the special folder for the current
@@ -38,25 +40,25 @@ def special_folder_path(special_folder, appname=None, portable=False):

    You can override the application name with ``appname``. This argument is ignored under Qt.
    """
    return _special_folder_path(special_folder, appname, portable=portable)
    return _special_folder_path(special_folder, portable=portable)


try:
    from PyQt5.QtCore import QUrl, QStandardPaths
    from PyQt5.QtGui import QDesktopServices
    from qtlib.util import get_appdata
    from qt.util import get_appdata
    from core.util import executable_folder
    from hscommon.plat import ISWINDOWS, ISOSX
    import subprocess

    def _open_url(url):
    def _open_url(url: str) -> None:
        QDesktopServices.openUrl(QUrl(url))

    def _open_path(path):
    def _open_path(path: str) -> None:
        url = QUrl.fromLocalFile(str(path))
        QDesktopServices.openUrl(url)

    def _reveal_path(path):
    def _reveal_path(path: str) -> None:
        if ISWINDOWS:
            subprocess.run(["explorer", "/select,", op.abspath(path)])
        elif ISOSX:
@@ -64,7 +66,7 @@ try:
        else:
            _open_path(op.dirname(str(path)))

    def _special_folder_path(special_folder, appname=None, portable=False):
    def _special_folder_path(special_folder: SpecialFolder, portable: bool = False) -> str:
        if special_folder == SpecialFolder.CACHE:
            if ISWINDOWS and portable:
                folder = op.join(executable_folder(), "cache")
@@ -79,13 +81,17 @@ except ImportError:
    # weird situation. Let's just have dummy fallbacks.
    logging.warning("Can't setup desktop functions!")

    def _open_path(path):
    def _open_url(url: str) -> None:
        # Dummy for tests
        pass

    def _reveal_path(path):
    def _open_path(path: str) -> None:
        # Dummy for tests
        pass

    def _special_folder_path(special_folder, appname=None, portable=False):
    def _reveal_path(path: str) -> None:
        # Dummy for tests
        pass

    def _special_folder_path(special_folder: SpecialFolder, portable: bool = False) -> str:
        return "/tmp"
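Making SpecialFolder an Enum means members no longer compare equal to raw ints, which is exactly what the special_folder == SpecialFolder.CACHE checks rely on. A quick sketch of the difference:

from enum import Enum

class SpecialFolder(Enum):
    APPDATA = 1
    CACHE = 2

assert SpecialFolder.CACHE != 2                  # members are not their values
assert SpecialFolder.CACHE == SpecialFolder.CACHE
assert SpecialFolder.CACHE.value == 2            # the value is still reachable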
@@ -36,11 +36,11 @@ class GUIObject:
    ``multibind`` flag to ``True`` and the safeguard will be disabled.
    """

    def __init__(self, multibind=False):
    def __init__(self, multibind: bool = False) -> None:
        self._view = None
        self._multibind = multibind

    def _view_updated(self):
    def _view_updated(self) -> None:
        """(Virtual) Called after :attr:`view` has been set.

        Doing nothing by default, this method is called after :attr:`view` has been set (it isn't
@@ -48,7 +48,7 @@ class GUIObject:
        (which is often the whole of the initialization code).
        """

    def has_view(self):
    def has_view(self) -> bool:
        return (self._view is not None) and (not isinstance(self._view, NoopGUI))

    @property
@@ -67,7 +67,7 @@ class GUIObject:
        return self._view

    @view.setter
    def view(self, value):
    def view(self, value) -> None:
        if self._view is None and value is None:
            # Initial view assignment
            return
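The GUIObject/view split used throughout these modules is a thin model-view bridge: the model owns state and mirrors changes onto whatever view is attached. A minimal sketch of the idea with hypothetical classes (not from the commit):

class FakeLabelView:
    def refresh(self, text: str) -> None:
        print(f"view now shows: {text}")

class LabelModel:
    def __init__(self) -> None:
        self.view = None
        self._text = ""

    def set_text(self, text: str) -> None:
        self._text = text
        if self.view is not None:  # mirror the change on the view side
            self.view.refresh(self._text)

model = LabelModel()
model.view = FakeLabelView()
model.set_text("hello")  # prints "view now shows: hello"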
@@ -7,8 +7,10 @@
# http://www.gnu.org/licenses/gpl-3.0.html

import copy
from typing import Any, List, Tuple, Union

from .base import GUIObject
from hscommon.gui.base import GUIObject
from hscommon.gui.table import GUITable


class Column:
@@ -17,7 +19,7 @@ class Column:
    These attributes are then used to correctly configure the column on the "view" side.
    """

    def __init__(self, name, display="", visible=True, optional=False):
    def __init__(self, name: str, display: str = "", visible: bool = True, optional: bool = False) -> None:
        #: "programmatical" (not for display) name. Used as a reference in a couple of places, such
        #: as :meth:`Columns.column_by_name`.
        self.name = name
@@ -52,14 +54,14 @@ class ColumnsView:
    callbacks.
    """

    def restore_columns(self):
    def restore_columns(self) -> None:
        """Update all columns according to the model.

        When this is called, our view has to update the columns title, order and visibility of all
        columns.
        """

    def set_column_visible(self, colname, visible):
    def set_column_visible(self, colname: str, visible: bool) -> None:
        """Update visibility of column ``colname``.

        Called when the user toggles the visibility of a column, we must update the column
@@ -73,13 +75,13 @@ class PrefAccessInterface:
    *Not actually used in the code. For documentation purposes only.*
    """

    def get_default(self, key, fallback_value):
    def get_default(self, key: str, fallback_value: Union[Any, None]) -> Any:
        """Retrieve the value for ``key`` in the currently running app's preference store.

        If the key doesn't exist, return ``fallback_value``.
        """

    def set_default(self, key, value):
    def set_default(self, key: str, value: Any) -> None:
        """Set the value ``value`` for ``key`` in the currently running app's preference store."""


@@ -104,65 +106,65 @@ class Columns(GUIObject):
    have that same prefix.
    """

    def __init__(self, table, prefaccess=None, savename=None):
    def __init__(self, table: GUITable, prefaccess=None, savename: Union[str, None] = None):
        GUIObject.__init__(self)
        self.table = table
        self.prefaccess = prefaccess
        self.savename = savename
        # We use copy here for test isolation. If we don't, changing a column affects all tests.
        self.column_list = list(map(copy.copy, table.COLUMNS))
        self.column_list: List[Column] = list(map(copy.copy, table.COLUMNS))
        for i, column in enumerate(self.column_list):
            column.logical_index = i
            column.ordered_index = i
        self.coldata = {col.name: col for col in self.column_list}

    # --- Private
    def _get_colname_attr(self, colname, attrname, default):
    def _get_colname_attr(self, colname: str, attrname: str, default: Any) -> Any:
        try:
            return getattr(self.coldata[colname], attrname)
        except KeyError:
            return default

    def _set_colname_attr(self, colname, attrname, value):
    def _set_colname_attr(self, colname: str, attrname: str, value: Any) -> None:
        try:
            col = self.coldata[colname]
            setattr(col, attrname, value)
        except KeyError:
            pass

    def _optional_columns(self):
    def _optional_columns(self) -> List[Column]:
        return [c for c in self.column_list if c.optional]

    # --- Override
    def _view_updated(self):
    def _view_updated(self) -> None:
        self.restore_columns()

    # --- Public
    def column_by_index(self, index):
    def column_by_index(self, index: int):
        """Return the :class:`Column` having the :attr:`~Column.logical_index` ``index``."""
        return self.column_list[index]

    def column_by_name(self, name):
    def column_by_name(self, name: str):
        """Return the :class:`Column` having the :attr:`~Column.name` ``name``."""
        return self.coldata[name]

    def columns_count(self):
    def columns_count(self) -> int:
        """Returns the number of columns in our set."""
        return len(self.column_list)

    def column_display(self, colname):
    def column_display(self, colname: str) -> str:
        """Returns display name for column named ``colname``, or ``''`` if there's none."""
        return self._get_colname_attr(colname, "display", "")

    def column_is_visible(self, colname):
    def column_is_visible(self, colname: str) -> bool:
        """Returns visibility for column named ``colname``, or ``True`` if there's none."""
        return self._get_colname_attr(colname, "visible", True)

    def column_width(self, colname):
    def column_width(self, colname: str) -> int:
        """Returns width for column named ``colname``, or ``0`` if there's none."""
        return self._get_colname_attr(colname, "width", 0)

    def columns_to_right(self, colname):
    def columns_to_right(self, colname: str) -> List[str]:
        """Returns the list of all columns to the right of ``colname``.

        "right" meaning "having a higher :attr:`Column.ordered_index`" in our left-to-right
@@ -172,7 +174,7 @@ class Columns(GUIObject):
        index = column.ordered_index
        return [col.name for col in self.column_list if (col.visible and col.ordered_index > index)]

    def menu_items(self):
    def menu_items(self) -> List[Tuple[str, bool]]:
        """Returns a list of items convenient for quick visibility menu generation.

        Returns a list of ``(display_name, is_marked)`` items for each optional column in the
@@ -184,7 +186,7 @@ class Columns(GUIObject):
        """
        return [(c.display, c.visible) for c in self._optional_columns()]
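menu_items() and toggle_menu_item() exist so a frontend can build a column-visibility menu without knowing anything about columns. A hedged sketch of how a toolkit might consume them (the menu API here is hypothetical):

# `columns` is a Columns instance; `menu` stands in for a toolkit menu.
for index, (display_name, is_marked) in enumerate(columns.menu_items()):
    item = menu.add_checkable_item(display_name, checked=is_marked)
    # toggle_menu_item() flips visibility and returns the new state,
    # which drives the checkmark.
    item.on_click = lambda i=index: menu.set_checked(i, columns.toggle_menu_item(i))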
    def move_column(self, colname, index):
    def move_column(self, colname: str, index: int) -> None:
        """Moves column ``colname`` to ``index``.

        The column will be placed just in front of the column currently having that index, or to the
@@ -195,7 +197,7 @@ class Columns(GUIObject):
        colnames.insert(index, colname)
        self.set_column_order(colnames)

    def reset_to_defaults(self):
    def reset_to_defaults(self) -> None:
        """Reset all columns' width and visibility to their default values."""
        self.set_column_order([col.name for col in self.column_list])
        for col in self._optional_columns():
@@ -203,11 +205,11 @@ class Columns(GUIObject):
            col.width = col.default_width
        self.view.restore_columns()

    def resize_column(self, colname, newwidth):
    def resize_column(self, colname: str, newwidth: int) -> None:
        """Set column ``colname``'s width to ``newwidth``."""
        self._set_colname_attr(colname, "width", newwidth)

    def restore_columns(self):
    def restore_columns(self) -> None:
        """Restores column persistent attributes from the last :meth:`save_columns`."""
        if not (self.prefaccess and self.savename and self.coldata):
        if (not self.savename) and (self.coldata):
@@ -216,7 +218,7 @@ class Columns(GUIObject):
            self.view.restore_columns()
            return
        for col in self.column_list:
            pref_name = "{}.Columns.{}".format(self.savename, col.name)
            pref_name = f"{self.savename}.Columns.{col.name}"
            coldata = self.prefaccess.get_default(pref_name, fallback_value={})
            if "index" in coldata:
                col.ordered_index = coldata["index"]
@@ -226,18 +228,19 @@ class Columns(GUIObject):
                col.visible = coldata["visible"]
        self.view.restore_columns()

    def save_columns(self):
    def save_columns(self) -> None:
        """Save column attributes in persistent storage for restoration in :meth:`restore_columns`."""
        if not (self.prefaccess and self.savename and self.coldata):
            return
        for col in self.column_list:
            pref_name = "{}.Columns.{}".format(self.savename, col.name)
            pref_name = f"{self.savename}.Columns.{col.name}"
            coldata = {"index": col.ordered_index, "width": col.width}
            if col.optional:
                coldata["visible"] = col.visible
            self.prefaccess.set_default(pref_name, coldata)

    def set_column_order(self, colnames):
    # TODO annotate colnames
    def set_column_order(self, colnames) -> None:
        """Change the columns order so it matches the order in ``colnames``.

        :param colnames: A list of column names in the desired order.
@@ -247,17 +250,17 @@ class Columns(GUIObject):
            col = self.coldata[colname]
            col.ordered_index = i

    def set_column_visible(self, colname, visible):
    def set_column_visible(self, colname: str, visible: bool) -> None:
        """Set the visibility of column ``colname``."""
        self.table.save_edits()  # the table on the GUI side will stop editing when the columns change
        self._set_colname_attr(colname, "visible", visible)
        self.view.set_column_visible(colname, visible)

    def set_default_width(self, colname, width):
    def set_default_width(self, colname: str, width: int) -> None:
        """Set the default width of column ``colname``."""
        self._set_colname_attr(colname, "default_width", width)

    def toggle_menu_item(self, index):
    def toggle_menu_item(self, index: int) -> bool:
        """Toggles the visibility of an optional column.

        You know, that optional column menu you've generated in :meth:`menu_items`? Well, ``index``
@@ -271,11 +274,11 @@ class Columns(GUIObject):

    # --- Properties
    @property
    def ordered_columns(self):
    def ordered_columns(self) -> List[Column]:
        """List of :class:`Column` in visible order."""
        return [col for col in sorted(self.column_list, key=lambda col: col.ordered_index)]

    @property
    def colnames(self):
    def colnames(self) -> List[str]:
        """List of column names in visible order."""
        return [col.name for col in self.ordered_columns]
@@ -4,9 +4,10 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ..jobprogress.performer import ThreadedJobPerformer
from .base import GUIObject
from .text_field import TextField
from typing import Callable, Tuple, Union
from hscommon.jobprogress.performer import ThreadedJobPerformer
from hscommon.gui.base import GUIObject
from hscommon.gui.text_field import TextField


class ProgressWindowView:
@@ -20,13 +21,13 @@ class ProgressWindowView:
    It's also expected to call :meth:`ProgressWindow.cancel` when the cancel button is clicked.
    """

    def show(self):
    def show(self) -> None:
        """Show the dialog."""

    def close(self):
    def close(self) -> None:
        """Close the dialog."""

    def set_progress(self, progress):
    def set_progress(self, progress: int) -> None:
        """Set the progress of the progress bar to ``progress``.

        Not all jobs are equally responsive on their job progress report and it is recommended that
@@ -60,7 +61,11 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
    called as if the job terminated normally.
    """

    def __init__(self, finish_func, error_func=None):
    def __init__(
        self,
        finish_func: Callable[[Union[str, None]], None],
        error_func: Callable[[Union[str, None], Exception], bool] = None,
    ) -> None:
        # finish_func(jobid) is the function that is called when a job is completed.
        GUIObject.__init__(self)
        ThreadedJobPerformer.__init__(self)
@@ -71,9 +76,9 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
        #: :class:`.TextField`. It contains the job textual update that the function might yield
        #: during its course.
        self.progressdesc_textfield = TextField()
        self.jobid = None
        self.jobid: Union[str, None] = None

    def cancel(self):
    def cancel(self) -> None:
        """Call for a user-initiated job cancellation."""
        # The UI is sometimes a bit buggy and calls cancel() on self.view.close(). We just want to
        # make sure that this doesn't lead us to think that the user actually cancelled the task, so
@@ -81,7 +86,7 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
        if self._job_running:
            self.job_cancelled = True

    def pulse(self):
    def pulse(self) -> None:
        """Update progress reports in the GUI.

        Call this regularly from the GUI main run loop. The values might change before
@@ -111,7 +116,7 @@ class ProgressWindow(GUIObject, ThreadedJobPerformer):
            self.progressdesc_textfield.text = last_desc
        self.view.set_progress(last_progress)

    def run(self, jobid, title, target, args=()):
    def run(self, jobid: str, title: str, target: Callable, args: Tuple = ()):
        """Starts a threaded job.

        The ``target`` function will be sent, as its first argument, a :class:`.Job` instance which
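Driving a ProgressWindow follows a fixed rhythm: run() starts the worker thread and pulse() is called from the GUI event loop until the job ends. A sketch under those assumptions (the worker and timing are illustrative):

def long_task(job, n_items):
    # run() prepends a Job instance as the target's first argument.
    job.start_job(n_items, "Crunching...")
    for _ in range(n_items):
        job.add_progress()  # raises JobCancelled if the user cancelled

def on_finish(jobid):
    print(f"job {jobid} done")

window = ProgressWindow(on_finish)
window.run("crunch", "Crunching numbers", long_task, args=(1000,))
# A GUI timer would now call window.pulse() every ~50 ms to refresh the
# progress bar until the worker thread finishes.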
@@ -8,7 +8,7 @@

from collections.abc import Sequence, MutableSequence

from .base import GUIObject
from hscommon.gui.base import GUIObject


class Selectable(Sequence):
@@ -8,9 +8,10 @@

from collections.abc import MutableSequence
from collections import namedtuple
from typing import Any, List, Tuple, Union

from .base import GUIObject
from .selectable_list import Selectable
from hscommon.gui.base import GUIObject
from hscommon.gui.selectable_list import Selectable


# We used to directly subclass list, but it caused problems at some point with deepcopy
@@ -27,12 +28,16 @@ class Table(MutableSequence, Selectable):
    Subclasses :class:`.Selectable`.
    """

    def __init__(self):
        Selectable.__init__(self)
        self._rows = []
        self._header = None
        self._footer = None
    # Should be List[Column], but have circular import...
    COLUMNS: List = []

    def __init__(self) -> None:
        Selectable.__init__(self)
        self._rows: List["Row"] = []
        self._header: Union["Row", None] = None
        self._footer: Union["Row", None] = None

    # TODO type hint for key
    def __delitem__(self, key):
        self._rows.__delitem__(key)
        if self._header is not None and ((not self) or (self[0] is not self._header)):
@@ -41,16 +46,18 @@ class Table(MutableSequence, Selectable):
            self._footer = None
        self._check_selection_range()

    def __getitem__(self, key):
    # TODO type hint for key
    def __getitem__(self, key) -> Any:
        return self._rows.__getitem__(key)

    def __len__(self):
    def __len__(self) -> int:
        return len(self._rows)

    def __setitem__(self, key, value):
    # TODO type hint for key
    def __setitem__(self, key, value: Any) -> None:
        self._rows.__setitem__(key, value)

    def append(self, item):
    def append(self, item: "Row") -> None:
        """Appends ``item`` at the end of the table.

        If there's a footer, the item is inserted before it.
@@ -60,7 +67,7 @@ class Table(MutableSequence, Selectable):
        else:
            self._rows.append(item)

    def insert(self, index, item):
    def insert(self, index: int, item: "Row") -> None:
        """Inserts ``item`` at ``index`` in the table.

        If there's a header, will make sure we don't insert before it, and if there's a footer, will
@@ -72,7 +79,7 @@ class Table(MutableSequence, Selectable):
            index = len(self) - 1
        self._rows.insert(index, item)

    def remove(self, row):
    def remove(self, row: "Row") -> None:
        """Removes ``row`` from table.

        If ``row`` is a header or footer, that header or footer will be set to ``None``.
@@ -84,7 +91,7 @@ class Table(MutableSequence, Selectable):
        self._rows.remove(row)
        self._check_selection_range()

    def sort_by(self, column_name, desc=False):
    def sort_by(self, column_name: str, desc: bool = False) -> None:
        """Sort table by ``column_name``.

        Sort key for each row is computed from :meth:`Row.sort_key_for_column`.
@@ -105,7 +112,7 @@ class Table(MutableSequence, Selectable):

    # --- Properties
    @property
    def footer(self):
    def footer(self) -> Union["Row", None]:
        """If set, a row that always stays at the bottom of the table.

        :class:`Row`. *get/set*.
@@ -128,7 +135,7 @@ class Table(MutableSequence, Selectable):
        return self._footer

    @footer.setter
    def footer(self, value):
    def footer(self, value: Union["Row", None]) -> None:
        if self._footer is not None:
            self._rows.pop()
        if value is not None:
@@ -136,7 +143,7 @@ class Table(MutableSequence, Selectable):
        self._footer = value

    @property
    def header(self):
    def header(self) -> Union["Row", None]:
        """If set, a row that always stays at the top of the table.

        See :attr:`footer` for details.
@@ -144,7 +151,7 @@ class Table(MutableSequence, Selectable):
        return self._header

    @header.setter
    def header(self, value):
    def header(self, value: Union["Row", None]) -> None:
        if self._header is not None:
            self._rows.pop(0)
        if value is not None:
@@ -152,7 +159,7 @@ class Table(MutableSequence, Selectable):
        self._header = value

    @property
    def row_count(self):
    def row_count(self) -> int:
        """Number of rows in the table (without counting header and footer).

        *int*. *read-only*.
@@ -165,7 +172,7 @@ class Table(MutableSequence, Selectable):
        return result

    @property
    def rows(self):
    def rows(self) -> List["Row"]:
        """List of rows in the table, excluding header and footer.

        List of :class:`Row`. *read-only*.
@@ -179,7 +186,7 @@ class Table(MutableSequence, Selectable):
        return self[start:end]

    @property
    def selected_row(self):
    def selected_row(self) -> "Row":
        """Selected row according to :attr:`Selectable.selected_index`.

        :class:`Row`. *get/set*.
@@ -190,14 +197,14 @@ class Table(MutableSequence, Selectable):
        return self[self.selected_index] if self.selected_index is not None else None

    @selected_row.setter
    def selected_row(self, value):
    def selected_row(self, value: int) -> None:
        try:
            self.selected_index = self.index(value)
        except ValueError:
            pass

    @property
    def selected_rows(self):
    def selected_rows(self) -> List["Row"]:
        """List of selected rows based on :attr:`.selected_indexes`.

        List of :class:`Row`. *read-only*.
@@ -219,20 +226,20 @@ class GUITableView:
    Whenever the user changes the selection, we expect the view to call :meth:`Table.select`.
    """

    def refresh(self):
    def refresh(self) -> None:
        """Refreshes the contents of the table widget.

        Ensures that the contents of the table widget is synced with the model. This includes
        selection.
        """

    def start_editing(self):
    def start_editing(self) -> None:
        """Start editing the currently selected row.

        Begin whatever inline editing support that the view supports.
        """

    def stop_editing(self):
    def stop_editing(self) -> None:
        """Stop editing if there's an inline editing in effect.

        There's no "aborting" implied in this call, so it's appropriate to send whatever the user
@@ -260,33 +267,33 @@ class GUITable(Table, GUIObject):
    :class:`GUITableView`.
    """

    def __init__(self):
    def __init__(self) -> None:
        GUIObject.__init__(self)
        Table.__init__(self)
        #: The row being currently edited by the user. ``None`` if no edit is taking place.
        self.edited = None
        self._sort_descriptor = None
        self.edited: Union["Row", None] = None
        self._sort_descriptor: Union[SortDescriptor, None] = None

    # --- Virtual
    def _do_add(self):
    def _do_add(self) -> Tuple["Row", int]:
        """(Virtual) Creates a new row, adds it in the table.

        Returns ``(row, insert_index)``.
        """
        raise NotImplementedError()

    def _do_delete(self):
    def _do_delete(self) -> None:
        """(Virtual) Delete the selected rows."""
        pass

    def _fill(self):
    def _fill(self) -> None:
        """(Virtual/Required) Fills the table with all the rows that this table is supposed to have.

        Called by :meth:`refresh`. Does nothing by default.
        """
        pass

    def _is_edited_new(self):
    def _is_edited_new(self) -> bool:
        """(Virtual) Returns whether the currently edited row should be considered "new".

        This is used in :meth:`cancel_edits` to know whether the cancellation of the edit means a
@@ -315,7 +322,7 @@ class GUITable(Table, GUIObject):
            self.select([len(self) - 1])

    # --- Public
    def add(self):
    def add(self) -> None:
        """Add a new row in edit mode.

        Requires :meth:`do_add` to be implemented. The newly added row will be selected and in edit
@@ -334,7 +341,7 @@ class GUITable(Table, GUIObject):
        self.edited = row
        self.view.start_editing()

    def can_edit_cell(self, column_name, row_index):
    def can_edit_cell(self, column_name: str, row_index: int) -> bool:
        """Returns whether the cell at ``row_index`` and ``column_name`` can be edited.

        A row is, by default, editable as soon as it has an attr with the same name as `column`.
@@ -346,7 +353,7 @@ class GUITable(Table, GUIObject):
        row = self[row_index]
        return row.can_edit_cell(column_name)

    def cancel_edits(self):
    def cancel_edits(self) -> None:
        """Cancels the current edit operation.

        If there's an :attr:`edited` row, it will be re-initialized (with :meth:`Row.load`).
@@ -364,7 +371,7 @@ class GUITable(Table, GUIObject):
        self.edited = None
        self.view.refresh()

    def delete(self):
    def delete(self) -> None:
        """Delete the currently selected rows.

        Requires :meth:`_do_delete` for this to have any effect on the model. Cancels editing if
@@ -377,7 +384,7 @@ class GUITable(Table, GUIObject):
        if self:
            self._do_delete()

    def refresh(self, refresh_view=True):
    def refresh(self, refresh_view: bool = True) -> None:
        """Empty the table and re-create its rows.

        :meth:`_fill` is called after we emptied the table to create our rows. Previous sort order
@@ -399,7 +406,7 @@ class GUITable(Table, GUIObject):
        if refresh_view:
            self.view.refresh()

    def save_edits(self):
    def save_edits(self) -> None:
        """Commit user edits to the model.

        This is done by calling :meth:`Row.save`.
@@ -410,7 +417,7 @@ class GUITable(Table, GUIObject):
        self.edited = None
        row.save()

    def sort_by(self, column_name, desc=False):
    def sort_by(self, column_name: str, desc: bool = False) -> None:
        """Sort table by ``column_name``.

        Overrides :meth:`Table.sort_by`. After having performed sorting, calls
@@ -450,18 +457,18 @@ class Row:
    Of course, this is only default behavior. This can be overridden.
    """

    def __init__(self, table):
        super(Row, self).__init__()
    def __init__(self, table: GUITable) -> None:
        super().__init__()
        self.table = table

    def _edit(self):
    def _edit(self) -> None:
        if self.table.edited is self:
            return
        assert self.table.edited is None
        self.table.edited = self

    # --- Virtual
    def can_edit(self):
    def can_edit(self) -> bool:
        """(Virtual) Whether the whole row can be edited.

        By default, always returns ``True``. This is for the *whole* row. For individual cells, it's
@@ -469,7 +476,7 @@ class Row:
        """
        return True

    def load(self):
    def load(self) -> None:
        """(Virtual/Required) Loads up values from the model to be presented in the table.

        Usually, our model instances contain values that are not quite ready for display. If you
@@ -478,7 +485,7 @@ class Row:
        """
        raise NotImplementedError()

    def save(self):
    def save(self) -> None:
        """(Virtual/Required) Saves user edits into your model.

        If your table is editable, this is called when the user commits his changes. Usually, these
@@ -487,7 +494,7 @@ class Row:
        """
        raise NotImplementedError()

    def sort_key_for_column(self, column_name):
    def sort_key_for_column(self, column_name: str) -> Any:
        """(Virtual) Return the value that is to be used to sort by column ``column_name``.

        By default, looks for an attribute with the same name as ``column_name``, but with an
@@ -500,7 +507,7 @@ class Row:
        return getattr(self, column_name)

    # --- Public
    def can_edit_cell(self, column_name):
    def can_edit_cell(self, column_name: str) -> bool:
        """Returns whether cell for column ``column_name`` can be edited.

        By default, the check is done in many steps:
@@ -530,7 +537,7 @@ class Row:
            return False
        return bool(getattr(prop, "fset", None))

    def get_cell_value(self, attrname):
    def get_cell_value(self, attrname: str) -> Any:
        """Get cell value for ``attrname``.

        By default, does a simple ``getattr()``, but it is used to allow subclasses to have
@@ -540,7 +547,7 @@ class Row:
            attrname = "from_"
        return getattr(self, attrname)

    def set_cell_value(self, attrname, value):
    def set_cell_value(self, attrname: str, value: Any) -> None:
        """Set cell value to ``value`` for ``attrname``.

        By default, does a simple ``setattr()``, but it is used to allow subclasses to have
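The Row contract, load() to pull model values in and save() to push edits back, is all a concrete table has to supply. A minimal sketch of a GUITable subclass under that contract (the contact model is hypothetical):

class ContactRow(Row):
    def __init__(self, table, contact):
        super().__init__(table)
        self.contact = contact
        self.load()

    def load(self):  # model -> display values
        self.name = self.contact.name

    def save(self):  # commit user edits back to the model
        self.contact.name = self.name

class ContactTable(GUITable):
    COLUMNS = [Column("name", display="Name")]

    def __init__(self, contacts):
        super().__init__()
        self.contacts = contacts

    def _fill(self):  # called by refresh() after the table is emptied
        for contact in self.contacts:
            self.append(ContactRow(self, contact))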
@@ -5,8 +5,8 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from .base import GUIObject
from ..util import nonone
from hscommon.gui.base import GUIObject
from hscommon.util import nonone


class TextFieldView:
@@ -6,7 +6,7 @@

from collections.abc import MutableSequence

from .base import GUIObject
from hscommon.gui.base import GUIObject


class Node(MutableSequence):
@@ -77,8 +77,7 @@ class Node(MutableSequence):
        if include_self and predicate(self):
            yield self
        for child in self:
            for found in child.findall(predicate, include_self=True):
                yield found
            yield from child.findall(predicate, include_self=True)

    def get_node(self, index_path):
        """Returns the node at ``index_path``.
@@ -7,6 +7,9 @@
# http://www.gnu.org/licenses/gpl-3.0.html


from typing import Any, Callable, Generator, List, Union


class JobCancelled(Exception):
    "The user has cancelled the job"

@@ -36,7 +39,7 @@ class Job:
    """

    # ---Magic functions
    def __init__(self, job_proportions, callback):
    def __init__(self, job_proportions: Union[List[int], int], callback: Callable) -> None:
        """Initialize the Job with 'jobcount' jobs. Start every job with
        start_job(). Every time the job progress is updated, 'callback' is called.
        'callback' takes a 'progress' int param, and an optional 'desc'
@@ -55,12 +58,12 @@ class Job:
        self._currmax = 1

    # ---Private
    def _subjob_callback(self, progress, desc=""):
    def _subjob_callback(self, progress: int, desc: str = "") -> bool:
        """This is the callback passed to children jobs."""
        self.set_progress(progress, desc)
        return True  # if JobCancelled has to be raised, it will be at the highest level

    def _do_update(self, desc):
    def _do_update(self, desc: str) -> None:
        """Calls the callback function with a % progress as a parameter.

        The parameter is an int in the 0-100 range.
@@ -78,13 +81,16 @@ class Job:
            raise JobCancelled()

    # ---Public
    def add_progress(self, progress=1, desc=""):
    def add_progress(self, progress: int = 1, desc: str = "") -> None:
        self.set_progress(self._progress + progress, desc)

    def check_if_cancelled(self):
    def check_if_cancelled(self) -> None:
        self._do_update("")

    def iter_with_progress(self, iterable, desc_format=None, every=1, count=None):
    # TODO type hint iterable
    def iter_with_progress(
        self, iterable, desc_format: Union[str, None] = None, every: int = 1, count: Union[int, None] = None
    ) -> Generator[Any, None, None]:
        """Iterate through ``iterable`` while automatically adding progress.

        WARNING: We need our iterable's length. If ``iterable`` is not a sequence (that is,
@@ -107,7 +113,7 @@ class Job:
            desc = desc_format % (count, count)
        self.set_progress(100, desc)

    def start_job(self, max_progress=100, desc=""):
    def start_job(self, max_progress: int = 100, desc: str = "") -> None:
        """Begin work on the next job. You must not call start_job more than
        'jobcount' (in __init__) times.
        'max' is the job units you are to perform.
@@ -122,7 +128,7 @@ class Job:
        self._currmax = max(1, max_progress)
        self._do_update(desc)

    def start_subjob(self, job_proportions, desc=""):
    def start_subjob(self, job_proportions: Union[List[int], int], desc: str = "") -> "Job":
        """Starts a sub job. Use this when you want to split a job into
        multiple smaller jobs. Pretty handy when starting a process where you
        know how many subjobs you will have, but don't know the work unit count
@@ -132,7 +138,7 @@ class Job:
        self.start_job(100, desc)
        return Job(job_proportions, self._subjob_callback)

    def set_progress(self, progress, desc=""):
    def set_progress(self, progress: int, desc: str = "") -> None:
        """Sets the progress of the current job to 'progress', and calls the
        callback
        """
@@ -142,30 +148,27 @@ class Job:
        self._do_update(desc)


class NullJob:
    def __init__(self, *args, **kwargs):
class NullJob(Job):
    def __init__(self, *args, **kwargs) -> None:
        # Null job does nothing
        pass

    def add_progress(self, *args, **kwargs):
    def add_progress(self, *args, **kwargs) -> None:
        # Null job does nothing
        pass

    def check_if_cancelled(self):
    def check_if_cancelled(self) -> None:
        # Null job does nothing
        pass

    def iter_with_progress(self, sequence, *args, **kwargs):
        return iter(sequence)

    def start_job(self, *args, **kwargs):
    def start_job(self, *args, **kwargs) -> None:
        # Null job does nothing
        pass

    def start_subjob(self, *args, **kwargs):
    def start_subjob(self, *args, **kwargs) -> "NullJob":
        return NullJob()

    def set_progress(self, *args, **kwargs):
    def set_progress(self, *args, **kwargs) -> None:
        # Null job does nothing
        pass
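Job's design maps a fixed 0-100 range onto weighted sub-jobs, which is what the job_proportions argument expresses. A sketch with illustrative weights and callback:

def report(progress: int, desc: str = "") -> bool:
    print(f"{progress}% {desc}")
    return True  # returning a falsy value signals cancellation

job = Job([1, 3], report)        # two phases, weighted 25% / 75%
job.start_job(10, "scanning")    # phase 1: 10 work units -> 0% to 25%
for _ in range(10):
    job.add_progress()
job.start_job(20, "matching")    # phase 2: 20 work units -> 25% to 100%
for _ in range(20):
    job.add_progress()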
@@ -8,8 +8,9 @@

from threading import Thread
import sys
from typing import Callable, Tuple, Union

from .job import Job, JobInProgressError, JobCancelled
from hscommon.jobprogress.job import Job, JobInProgressError, JobCancelled


class ThreadedJobPerformer:
@@ -28,15 +29,15 @@ class ThreadedJobPerformer:
    last_error = None

    # --- Protected
    def create_job(self):
    def create_job(self) -> Job:
        if self._job_running:
            raise JobInProgressError()
        self.last_progress = -1
        self.last_progress: Union[int, None] = -1
        self.last_desc = ""
        self.job_cancelled = False
        return Job(1, self._update_progress)

    def _async_run(self, *args):
    def _async_run(self, *args) -> None:
        target = args[0]
        args = tuple(args[1:])
        self._job_running = True
@@ -52,7 +53,7 @@ class ThreadedJobPerformer:
        self._job_running = False
        self.last_progress = None

    def reraise_if_error(self):
    def reraise_if_error(self) -> None:
        """Reraises the error that happened in the thread if any.

        Call this after the caller of run_threaded detected that self._job_running returned to False
@@ -60,13 +61,13 @@ class ThreadedJobPerformer:
        if self.last_error is not None:
            raise self.last_error.with_traceback(self.last_traceback)

    def _update_progress(self, newprogress, newdesc=""):
    def _update_progress(self, newprogress: int, newdesc: str = "") -> bool:
        self.last_progress = newprogress
        if newdesc:
            self.last_desc = newdesc
        return not self.job_cancelled

    def run_threaded(self, target, args=()):
    def run_threaded(self, target: Callable, args: Tuple = ()) -> None:
        if self._job_running:
            raise JobInProgressError()
        args = (target,) + args
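The performer's contract is poll-based: create a job, hand it to run_threaded() along with the target, then watch last_progress and _job_running from the main loop, calling reraise_if_error() once the thread stops. A usage sketch (the worker is illustrative):

import time

def worker(job):
    job.start_job(100, "working")
    for _ in range(100):
        job.add_progress()
        time.sleep(0.01)

performer = ThreadedJobPerformer()
job = performer.create_job()
performer.run_threaded(worker, args=(job,))
while performer._job_running:     # a GUI would poll this from a timer
    print(f"progress: {performer.last_progress}")
    time.sleep(0.1)
performer.reraise_if_error()      # surface any exception from the thread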
@@ -2,34 +2,24 @@ import os
import os.path as op
import shutil
import tempfile
from typing import Any, List

import polib

from . import pygettext
from hscommon import pygettext

LC_MESSAGES = "LC_MESSAGES"

# There isn't a 1-on-1 exact fit between .po language codes and cocoa ones
PO2COCOA = {
    "pl_PL": "pl",
    "pt_BR": "pt-BR",
    "zh_CN": "zh-Hans",
}

COCOA2PO = {v: k for k, v in PO2COCOA.items()}

STRING_EXT = ".strings"


def get_langs(folder):
def get_langs(folder: str) -> List[str]:
    return [name for name in os.listdir(folder) if op.isdir(op.join(folder, name))]


def files_with_ext(folder, ext):
def files_with_ext(folder: str, ext: str) -> List[str]:
    return [op.join(folder, fn) for fn in os.listdir(folder) if fn.endswith(ext)]


def generate_pot(folders, outpath, keywords, merge=False):
def generate_pot(folders: List[str], outpath: str, keywords: Any, merge: bool = False) -> None:
    if merge and not op.exists(outpath):
        merge = False
    if merge:
@@ -50,7 +40,7 @@ def generate_pot(folders, outpath, keywords, merge=False):
        print("Exception while removing temporary folder %s\n", genpath)


def compile_all_po(base_folder):
def compile_all_po(base_folder: str) -> None:
    langs = get_langs(base_folder)
    for lang in langs:
        pofolder = op.join(base_folder, lang, LC_MESSAGES)
@@ -60,7 +50,7 @@ def compile_all_po(base_folder):
            p.save_as_mofile(pofile[:-3] + ".mo")


def merge_locale_dir(target, mergeinto):
def merge_locale_dir(target: str, mergeinto: str) -> None:
    langs = get_langs(target)
    for lang in langs:
        if not op.exists(op.join(mergeinto, lang)):
@@ -71,7 +61,7 @@ def merge_locale_dir(target, mergeinto):
            shutil.copy(mofile, op.join(mergeinto, lang, LC_MESSAGES))


def merge_pots_into_pos(folder):
def merge_pots_into_pos(folder: str) -> None:
    # We're going to take all pot files in `folder` and for each lang, merge it with the po file
    # with the same name.
    potfiles = files_with_ext(folder, ".pot")
@@ -84,7 +74,7 @@ def merge_pots_into_pos(folder):
        po.save()


def merge_po_and_preserve(source, dest):
def merge_po_and_preserve(source: str, dest: str) -> None:
    # Merges source entries into dest, but keep old entries intact
    sourcepo = polib.pofile(source)
    destpo = polib.pofile(dest)
@@ -96,7 +86,7 @@ def merge_po_and_preserve(source, dest):
    destpo.save()


def normalize_all_pos(base_folder):
def normalize_all_pos(base_folder: str) -> None:
    """Normalize the format of .po files in base_folder.

    When getting POs from external sources, such as Transifex, we end up with spurious diffs because
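PO2COCOA and its inverse are a plain two-way lookup between gettext locale codes and Cocoa ones; codes without a special-case entry pass through unchanged. A tiny sketch of the usual lookup (the helper is hypothetical):

def po_to_cocoa(lang: str) -> str:
    return PO2COCOA.get(lang, lang)

assert po_to_cocoa("pt_BR") == "pt-BR"  # special-cased
assert po_to_cocoa("fr") == "fr"        # passes through unchanged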
@@ -13,6 +13,7 @@ the method with the same name as the broadcasted message is called on the listen
|
||||
"""
|
||||
|
||||
from collections import defaultdict
|
||||
from typing import Callable, DefaultDict, List
|
||||
|
||||
|
||||
class Broadcaster:
|
||||
@@ -21,10 +22,10 @@ class Broadcaster:
|
||||
def __init__(self):
|
||||
self.listeners = set()
|
||||
|
||||
def add_listener(self, listener):
|
||||
def add_listener(self, listener: "Listener") -> None:
|
||||
self.listeners.add(listener)
|
||||
|
||||
def notify(self, msg):
|
||||
def notify(self, msg: str) -> None:
|
||||
"""Notify all connected listeners of ``msg``.
|
||||
|
||||
That means that each listeners will have their method with the same name as ``msg`` called.
|
||||
@@ -33,18 +34,18 @@ class Broadcaster:
|
||||
if listener in self.listeners: # disconnected during notification
|
||||
listener.dispatch(msg)
|
||||
|
||||
def remove_listener(self, listener):
|
||||
def remove_listener(self, listener: "Listener") -> None:
|
||||
self.listeners.discard(listener)
|
||||
|
||||
|
||||
class Listener:
    """A listener is initialized with the broadcaster it's going to listen to. Initially, it is not connected."""

    def __init__(self, broadcaster):
    def __init__(self, broadcaster: Broadcaster) -> None:
        self.broadcaster = broadcaster
        self._bound_notifications = defaultdict(list)
        self._bound_notifications: DefaultDict[str, List[Callable]] = defaultdict(list)

    def bind_messages(self, messages, func):
    def bind_messages(self, messages: str, func: Callable) -> None:
        """Binds multiple messages to the same function.

        Often, we perform the same thing on multiple messages. Instead of having the same function
@@ -54,15 +55,15 @@ class Listener:
        for message in messages:
            self._bound_notifications[message].append(func)

    def connect(self):
    def connect(self) -> None:
        """Connects the listener to its broadcaster."""
        self.broadcaster.add_listener(self)

    def disconnect(self):
    def disconnect(self) -> None:
        """Disconnects the listener from its broadcaster."""
        self.broadcaster.remove_listener(self)

    def dispatch(self, msg):
    def dispatch(self, msg: str) -> None:
        if msg in self._bound_notifications:
            for func in self._bound_notifications[msg]:
                func()
@@ -74,14 +75,14 @@ class Listener:
class Repeater(Broadcaster, Listener):
    REPEATED_NOTIFICATIONS = None

    def __init__(self, broadcaster):
    def __init__(self, broadcaster: Broadcaster) -> None:
        Broadcaster.__init__(self)
        Listener.__init__(self, broadcaster)

    def _repeat_message(self, msg):
    def _repeat_message(self, msg: str) -> None:
        if not self.REPEATED_NOTIFICATIONS or msg in self.REPEATED_NOTIFICATIONS:
            self.notify(msg)

    def dispatch(self, msg):
    def dispatch(self, msg: str) -> None:
        Listener.dispatch(self, msg)
        self._repeat_message(msg)
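
Based on the docstrings above, typical usage looks roughly like this (a sketch; the Counter listener is made up for illustration):

from hscommon.notify import Broadcaster, Listener

class Counter(Listener):
    def __init__(self, broadcaster):
        Listener.__init__(self, broadcaster)
        self.count = 0

    def document_changed(self):  # called because the method name matches the msg
        self.count += 1

broadcaster = Broadcaster()
listener = Counter(broadcaster)
listener.connect()
broadcaster.notify("document_changed")
assert listener.count == 1
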
@@ -47,7 +47,7 @@ def log_io_error(func):
    def wrapper(path, *args, **kwargs):
        try:
            return func(path, *args, **kwargs)
        except (IOError, OSError) as e:
        except OSError as e:
            msg = 'Error "{0}" during operation "{1}" on "{2}": "{3}"'
            classname = e.__class__.__name__
            funcname = func.__name__
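
The narrowing above is safe: since Python 3.3, IOError is an alias of OSError, so catching both was redundant.

assert IOError is OSError  # aliases since Python 3.3
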
@@ -15,7 +15,8 @@
#

import os
import imp
import importlib.machinery
import importlib.util
import sys
import glob
import token
@@ -110,7 +111,7 @@ def _visit_pyfiles(list, dirname, names):
    # get extension for python source files
    if "_py_ext" not in globals():
        global _py_ext
        _py_ext = [triple[0] for triple in imp.get_suffixes() if triple[2] == imp.PY_SOURCE][0]
        _py_ext = importlib.machinery.SOURCE_SUFFIXES[0]

    # don't recurse into CVS directories
    if "CVS" in names:
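
The importlib replacement above yields the same value the old imp lookup produced:

import importlib.machinery

print(importlib.machinery.SOURCE_SUFFIXES[0])  # ".py"
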
@@ -120,45 +121,6 @@ def _visit_pyfiles(list, dirname, names):
    list.extend([os.path.join(dirname, file) for file in names if os.path.splitext(file)[1] == _py_ext])


def _get_modpkg_path(dotted_name, pathlist=None):
    """Get the filesystem path for a module or a package.

    Return the file system path to a file for a module, and to a directory for
    a package. Return None if the name is not found, or is a builtin or
    extension module.
    """
    # split off top-most name
    parts = dotted_name.split(".", 1)

    if len(parts) > 1:
        # we have a dotted path, import top-level package
        try:
            file, pathname, description = imp.find_module(parts[0], pathlist)
            if file:
                file.close()
        except ImportError:
            return None

        # check if it's indeed a package
        if description[2] == imp.PKG_DIRECTORY:
            # recursively handle the remaining name parts
            pathname = _get_modpkg_path(parts[1], [pathname])
        else:
            pathname = None
    else:
        # plain name
        try:
            file, pathname, description = imp.find_module(dotted_name, pathlist)
            if file:
                file.close()
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
                pathname = None
        except ImportError:
            pathname = None

    return pathname


def getFilesForName(name):
    """Get a list of module files for a filename, a module or package name,
    or a directory.
@@ -173,7 +135,11 @@ def getFilesForName(name):
        return file_list

    # try to find module or package
    name = _get_modpkg_path(name)
    try:
        spec = importlib.util.find_spec(name)
        name = spec.origin
    except ImportError:
        name = None
    if not name:
        return []
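
For reference, a small sketch of the importlib.util.find_spec() call that replaces _get_modpkg_path():

import importlib.util

spec = importlib.util.find_spec("os.path")
print(spec.origin)  # filesystem path of the module's source file

Note that find_spec() returns None (rather than raising) when a top-level module simply isn't found; a missing parent package raises ModuleNotFoundError, an ImportError subclass, which is what the except clause above relies on.
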
@@ -374,7 +340,7 @@ def main(source_files, outpath, keywords=None):
            fp = open(options.excludefilename, encoding="utf-8")
            options.toexclude = fp.readlines()
            fp.close()
        except IOError:
        except OSError:
            print(
                "Can't read --exclude-file: %s" % options.excludefilename,
                file=sys.stderr,
@@ -6,8 +6,9 @@

from pathlib import Path
import re
from typing import Callable, Dict, Union

from .build import read_changelog_file, filereplace
from hscommon.build import read_changelog_file, filereplace
from sphinx.cmd.build import build_main as sphinx_build

CHANGELOG_FORMAT = """
@@ -18,25 +19,25 @@ CHANGELOG_FORMAT = """
"""


def tixgen(tixurl):
def tixgen(tixurl: str) -> Callable[[str], str]:
    """This is a filter *generator*. tixurl is a url pattern for the tix with a {0} placeholder
    for the tix #
    """
    urlpattern = tixurl.format("\\1")  # will be replaced by the content of the first group in re
    R = re.compile(r"#(\d+)")
    repl = "`#\\1 <{}>`__".format(urlpattern)
    repl = f"`#\\1 <{urlpattern}>`__"
    return lambda text: R.sub(repl, text)
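
Usage of the filter generator above (the URL pattern is a made-up example):

link_tix = tixgen("https://example.com/issues/{0}")
print(link_tix("Fixed a crash (#42)"))
# Fixed a crash (`#42 <https://example.com/issues/42>`__)
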
def gen(
    basepath,
    destpath,
    changelogpath,
    tixurl,
    confrepl=None,
    confpath=None,
    changelogtmpl=None,
):
    basepath: Path,
    destpath: Path,
    changelogpath: Path,
    tixurl: str,
    confrepl: Union[Dict[str, str], None] = None,
    confpath: Union[Path, None] = None,
    changelogtmpl: Union[Path, None] = None,
) -> None:
    """Generate sphinx docs with all bells and whistles.

    basepath: The base sphinx source path.

@@ -1,141 +0,0 @@
# Created By: Virgil Dupras
# Created On: 2007/05/19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import os
import os.path as op
import threading
from queue import Queue
import sqlite3 as sqlite

STOP = object()
COMMIT = object()
ROLLBACK = object()


class FakeCursor(list):
    # It's not possible to use sqlite cursors in a thread other than the connection's. Thus,
    # we can't directly return the cursor. We have to fetch all results, and support its interface.
    def fetchall(self):
        return self

    def fetchone(self):
        try:
            return self.pop(0)
        except IndexError:
            return None


class _ActualThread(threading.Thread):
    """We can't use this class directly because thread objects are not automatically freed when
    nothing refers to them, making them hang the application if not explicitly closed.
    """

    def __init__(self, dbname, autocommit):
        threading.Thread.__init__(self)
        self._queries = Queue()
        self._results = Queue()
        self._dbname = dbname
        self._autocommit = autocommit
        self._waiting_list = set()
        self._lock = threading.Lock()
        self._run = True
        self.lastrowid = -1
        self.daemon = True
        self.start()

    def _query(self, query):
        with self._lock:
            wait_token = object()
            self._waiting_list.add(wait_token)
            self._queries.put(query)
            self._waiting_list.remove(wait_token)
            result = self._results.get()
        return result

    def close(self):
        if not self._run:
            return
        self._query(STOP)

    def commit(self):
        if not self._run:
            return None  # Connection closed
        self._query(COMMIT)

    def execute(self, sql, values=()):
        if not self._run:
            return None  # Connection closed
        result = self._query((sql, values))
        if isinstance(result, Exception):
            raise result
        return result

    def rollback(self):
        if not self._run:
            return None  # Connection closed
        self._query(ROLLBACK)

    def run(self):
        # The whole chdir thing is because sqlite doesn't handle directory names with non-ASCII chars in them AT ALL.
        oldpath = os.getcwd()
        dbdir, dbname = op.split(self._dbname)
        if dbdir:
            os.chdir(dbdir)
        if self._autocommit:
            con = sqlite.connect(dbname, isolation_level=None)
        else:
            con = sqlite.connect(dbname)
        os.chdir(oldpath)
        while self._run or self._waiting_list:
            query = self._queries.get()
            result = None
            if query is STOP:
                self._run = False
            elif query is COMMIT:
                con.commit()
            elif query is ROLLBACK:
                con.rollback()
            else:
                sql, values = query
                try:
                    cur = con.execute(sql, values)
                    self.lastrowid = cur.lastrowid
                    result = FakeCursor(cur.fetchall())
                    result.lastrowid = cur.lastrowid
                except Exception as e:
                    result = e
            self._results.put(result)
        con.close()


class ThreadedConn:
    """``sqlite`` connections can't be used across threads. ``ThreadedConn`` opens a sqlite
    connection in its own thread and sends it queries through a queue, making it suitable in
    a multi-threaded environment.
    """

    def __init__(self, dbname, autocommit):
        self._t = _ActualThread(dbname, autocommit)
        self.lastrowid = -1

    def __del__(self):
        self.close()

    def close(self):
        self._t.close()

    def commit(self):
        self._t.commit()

    def execute(self, sql, values=()):
        result = self._t.execute(sql, values)
        self.lastrowid = self._t.lastrowid
        return result

    def rollback(self):
        self._t.rollback()
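
This whole module is removed by the commit. For readers wondering about a replacement: the standard sqlite3 module can share one connection across threads when asked to, as long as access is serialized by hand. A sketch, not dupeGuru code:

import sqlite3
import threading

_lock = threading.Lock()
con = sqlite3.connect(":memory:", check_same_thread=False)

def execute(sql, values=()):
    with _lock:  # serialize access manually, as the queue-based ThreadedConn did
        return con.execute(sql, values).fetchall()
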
@@ -8,7 +8,7 @@

import pytest

from ..conflict import (
from hscommon.conflict import (
    get_conflicted_name,
    get_unconflicted_name,
    is_conflicted,
@@ -16,7 +16,7 @@ from ..conflict import (
    smart_move,
)
from pathlib import Path
from ..testutil import eq_
from hscommon.testutil import eq_


class TestCaseGetConflictedName:

@@ -4,8 +4,8 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ..testutil import eq_
from ..notify import Broadcaster, Listener, Repeater
from hscommon.testutil import eq_
from hscommon.notify import Broadcaster, Listener, Repeater


class HelloListener(Listener):
@@ -113,7 +113,7 @@ def test_repeater_with_repeated_notifications():
    # If REPEATED_NOTIFICATIONS is not empty, only notifs in this set are repeated (but they're
    # still dispatched locally).
    class MyRepeater(HelloRepeater):
        REPEATED_NOTIFICATIONS = set(["hello"])
        REPEATED_NOTIFICATIONS = {"hello"}

        def __init__(self, broadcaster):
            HelloRepeater.__init__(self, broadcaster)

@@ -6,7 +6,7 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ..path import pathify
from hscommon.path import pathify
from pathlib import Path

@@ -6,8 +6,8 @@
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from ..testutil import eq_, callcounter, CallLogger
from ..gui.selectable_list import SelectableList, GUISelectableList
from hscommon.testutil import eq_, callcounter, CallLogger
from hscommon.gui.selectable_list import SelectableList, GUISelectableList


def test_in():

@@ -1,137 +0,0 @@
# Created By: Virgil Dupras
# Created On: 2007/05/19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)

# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import time
import threading
import os
import sqlite3 as sqlite

from pytest import raises

from ..testutil import eq_
from ..sqlite import ThreadedConn

# Threading is hard to test. In a lot of those tests, a failure means that the test run will
# hang forever. Well... I don't know a better alternative.


def test_can_access_from_multiple_threads():
    def run():
        con.execute("insert into foo(bar) values('baz')")

    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    t = threading.Thread(target=run)
    t.start()
    t.join()
    result = con.execute("select * from foo")
    eq_(1, len(result))
    eq_("baz", result[0][0])


def test_exception_during_query():
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    with raises(sqlite.OperationalError):
        con.execute("select * from bleh")


def test_not_autocommit(tmpdir):
    dbpath = str(tmpdir.join("foo.db"))
    con = ThreadedConn(dbpath, False)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz')")
    del con
    # The data shouldn't have been inserted
    con = ThreadedConn(dbpath, False)
    result = con.execute("select * from foo")
    eq_(0, len(result))
    con.execute("insert into foo(bar) values('baz')")
    con.commit()
    del con
    # Now the data should be there
    con = ThreadedConn(dbpath, False)
    result = con.execute("select * from foo")
    eq_(1, len(result))


def test_rollback():
    con = ThreadedConn(":memory:", False)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz')")
    con.rollback()
    result = con.execute("select * from foo")
    eq_(0, len(result))


def test_query_palceholders():
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values(?)", ["baz"])
    result = con.execute("select * from foo")
    eq_(1, len(result))
    eq_("baz", result[0][0])


def test_make_sure_theres_no_messup_between_queries():
    def run(expected_rowid):
        time.sleep(0.1)
        result = con.execute("select rowid from foo where rowid = ?", [expected_rowid])
        assert expected_rowid == result[0][0]

    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    for i in range(100):
        con.execute("insert into foo(bar) values('baz')")
    threads = []
    for i in range(1, 101):
        t = threading.Thread(target=run, args=(i,))
        t.start()
        threads.append(t)
    while threads:
        time.sleep(0.1)
        threads = [t for t in threads if t.is_alive()]


def test_query_after_close():
    con = ThreadedConn(":memory:", True)
    con.close()
    con.execute("select 1")


def test_lastrowid():
    # It's not possible to return a cursor because of the threading, but lastrowid should be
    # fetchable from the connection itself
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz')")
    eq_(1, con.lastrowid)


def test_add_fetchone_fetchall_interface_to_results():
    con = ThreadedConn(":memory:", True)
    con.execute("create table foo(bar TEXT)")
    con.execute("insert into foo(bar) values('baz1')")
    con.execute("insert into foo(bar) values('baz2')")
    result = con.execute("select * from foo")
    ref = result[:]
    eq_(ref, result.fetchall())
    eq_(ref[0], result.fetchone())
    eq_(ref[1], result.fetchone())
    assert result.fetchone() is None


def test_non_ascii_dbname(tmpdir):
    ThreadedConn(str(tmpdir.join("foo\u00e9.db")), True)


def test_non_ascii_dbdir(tmpdir):
    # when this test fails, it doesn't fail gracefully, it brings the whole test suite with it.
    dbdir = tmpdir.join("foo\u00e9")
    os.mkdir(str(dbdir))
    ThreadedConn(str(dbdir.join("foo.db")), True)