From 5a603246d237234ec076566061edbf583e49c6f8 Mon Sep 17 00:00:00 2001 From: Juho Inkinen Date: Fri, 14 Apr 2023 17:41:52 +0300 Subject: [PATCH 01/83] Make column widths dynamic in output of list-projects --- annif/cli.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/annif/cli.py b/annif/cli.py index 6a6d3585b..45b668b18 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -43,16 +43,26 @@ def run_list_projects(): for details. """ - template = "{0: <25}{1: <45}{2: <10}{3: <7}" - header = template.format("Project ID", "Project Name", "Language", "Trained") + entries = [ + (proj.project_id, proj.name, proj.language, str(proj.is_trained)) + for proj in annif.registry.get_projects(min_access=Access.private).values() + ] + header_fields = ("Project ID", "Project Name", "Language", "Trained") + + max_field_lengths = collections.defaultdict(int) + for entry in (*entries, header_fields): + for ind, field in enumerate(entry): + max_field_lengths[ind] = max(max_field_lengths[ind], len(field)) + + template = "{{0: <{0}}} {{1: <{1}}} {{2: <{2}}} {{3: <{3}}}".format( + *max_field_lengths.values() + ) + + header = template.format(*header_fields) click.echo(header) click.echo("-" * len(header)) - for proj in annif.registry.get_projects(min_access=Access.private).values(): - click.echo( - template.format( - proj.project_id, proj.name, proj.language, str(proj.is_trained) - ) - ) + for entry in entries: + click.echo(template.format(*entry)) @cli.command("show-project") From eb15067bc1a048b136a45028bf05416e6b055309 Mon Sep 17 00:00:00 2001 From: Juho Inkinen Date: Fri, 14 Apr 2023 17:54:30 +0300 Subject: [PATCH 02/83] Add vocabulary and modification time in output of list-projects --- annif/cli.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/annif/cli.py b/annif/cli.py index 45b668b18..7d37c26df 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -44,19 +44,34 @@ def run_list_projects(): """ entries = [ - (proj.project_id, proj.name, proj.language, str(proj.is_trained)) + ( + proj.project_id, + proj.name, + proj.vocab.vocab_id, + proj.language, + str(proj.is_trained), + str(proj.modification_time), + ) for proj in annif.registry.get_projects(min_access=Access.private).values() ] - header_fields = ("Project ID", "Project Name", "Language", "Trained") + header_fields = ( + "Project ID", + "Project Name", + "Vocabulary ID", + "Language", + "Trained", + "Modification time", + ) max_field_lengths = collections.defaultdict(int) for entry in (*entries, header_fields): for ind, field in enumerate(entry): max_field_lengths[ind] = max(max_field_lengths[ind], len(field)) - template = "{{0: <{0}}} {{1: <{1}}} {{2: <{2}}} {{3: <{3}}}".format( - *max_field_lengths.values() - ) + template = ( + "{{0: <{0}}} {{1: <{1}}} {{2: <{2}}} {{3: <{3}}} " + "{{4: <{4}}} {{5: <{5}}}" + ).format(*max_field_lengths.values()) header = template.format(*header_fields) click.echo(header) From daded2c0870bf44345a0e51d328c1512b9260453 Mon Sep 17 00:00:00 2001 From: Juho Inkinen Date: Thu, 20 Apr 2023 14:32:35 +0300 Subject: [PATCH 03/83] Add make_list_template helper function --- annif/cli.py | 38 ++++++++++++++------------------------ annif/cli_util.py | 18 ++++++++++++++++++ 2 files changed, 32 insertions(+), 24 deletions(-) diff --git a/annif/cli.py b/annif/cli.py index 7d37c26df..c3e59f838 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -43,41 +43,31 @@ def run_list_projects(): for details. 
""" - entries = [ + column_headings = ( + "Project ID", + "Project Name", + "Vocabulary ID", + "Language", + "Trained", + "Modification time", + ) + table = [ ( proj.project_id, proj.name, - proj.vocab.vocab_id, + proj.vocab.vocab_id if proj.vocab_spec else "-", proj.language, str(proj.is_trained), str(proj.modification_time), ) for proj in annif.registry.get_projects(min_access=Access.private).values() ] - header_fields = ( - "Project ID", - "Project Name", - "Vocabulary ID", - "Language", - "Trained", - "Modification time", - ) - - max_field_lengths = collections.defaultdict(int) - for entry in (*entries, header_fields): - for ind, field in enumerate(entry): - max_field_lengths[ind] = max(max_field_lengths[ind], len(field)) - - template = ( - "{{0: <{0}}} {{1: <{1}}} {{2: <{2}}} {{3: <{3}}} " - "{{4: <{4}}} {{5: <{5}}}" - ).format(*max_field_lengths.values()) - - header = template.format(*header_fields) + template = cli_util.make_list_template(column_headings, *table) + header = template.format(*column_headings) click.echo(header) click.echo("-" * len(header)) - for entry in entries: - click.echo(template.format(*entry)) + for row in table: + click.echo(template.format(*row)) @cli.command("show-project") diff --git a/annif/cli_util.py b/annif/cli_util.py index b2de396ad..e7d4d835e 100644 --- a/annif/cli_util.py +++ b/annif/cli_util.py @@ -82,6 +82,24 @@ def get_vocab(vocab_id): sys.exit(1) +def make_list_template(*rows): + """Helper function to create a template for a list of entries with fields of + variable width. The width of each field is determined by the longest item in the + field in the given rows.""" + + max_field_widths = collections.defaultdict(int) + for row in rows: + for field_ind, item in enumerate(row): + max_field_widths[field_ind] = max(max_field_widths[field_ind], len(item)) + + return " ".join( + [ + f"{{{field_ind}: <{field_width}}}" + for field_ind, field_width in max_field_widths.items() + ] + ) + + def open_documents(paths, subject_index, vocab_lang, docs_limit): """Helper function to open a document corpus from a list of pathnames, each of which is either a TSV file or a directory of TXT files. For From 54b5e85f9a13c8aa651c9564d71653537f016b22 Mon Sep 17 00:00:00 2001 From: Juho Inkinen Date: Thu, 20 Apr 2023 17:10:26 +0300 Subject: [PATCH 04/83] Make column widths dynamic in output of list-vocabs --- annif/cli.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/annif/cli.py b/annif/cli.py index c3e59f838..44ab2e111 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -108,10 +108,8 @@ def run_list_vocabs(): List available vocabularies. 
""" - template = "{0: <20}{1: <20}{2: >10} {3: <6}" - header = template.format("Vocabulary ID", "Languages", "Size", "Loaded") - click.echo(header) - click.echo("-" * len(header)) + column_headings = ("Vocabulary ID", "Languages", "Size", "Loaded") + table = [] for vocab in annif.registry.get_vocabs(min_access=Access.private).values(): try: languages = ",".join(sorted(vocab.languages)) @@ -121,7 +119,15 @@ def run_list_vocabs(): languages = "-" size = "-" loaded = False - click.echo(template.format(vocab.vocab_id, languages, size, str(loaded))) + row = (vocab.vocab_id, languages, str(size), str(loaded)) + table.append(row) + + template = cli_util.make_list_template(column_headings, *table) + header = template.format(*column_headings) + click.echo(header) + click.echo("-" * len(header)) + for row in table: + click.echo(template.format(*row)) @cli.command("load-vocab") From b47e021bc75a3f019993808807d34fcba26bf42f Mon Sep 17 00:00:00 2001 From: Juho Inkinen Date: Fri, 21 Apr 2023 11:34:43 +0300 Subject: [PATCH 05/83] Convert modif. timestamps to current TZ & format to be more readable --- annif/cli.py | 4 ++-- annif/cli_util.py | 7 +++++++ tests/test_cli.py | 2 +- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/annif/cli.py b/annif/cli.py index 44ab2e111..89d4f165e 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -58,7 +58,7 @@ def run_list_projects(): proj.vocab.vocab_id if proj.vocab_spec else "-", proj.language, str(proj.is_trained), - str(proj.modification_time), + cli_util.format_datetime(proj.modification_time), ) for proj in annif.registry.get_projects(min_access=Access.private).values() ] @@ -86,7 +86,7 @@ def run_show_project(project_id): click.echo(f"Vocab language: {proj.vocab_lang}") click.echo(f"Access: {proj.access.name}") click.echo(f"Trained: {proj.is_trained}") - click.echo(f"Modification time: {proj.modification_time}") + click.echo(f"Modification time: {cli_util.format_datetime(proj.modification_time)}") @cli.command("clear") diff --git a/annif/cli_util.py b/annif/cli_util.py index e7d4d835e..d088d8495 100644 --- a/annif/cli_util.py +++ b/annif/cli_util.py @@ -100,6 +100,13 @@ def make_list_template(*rows): ) +def format_datetime(dt): + """Helper function to format a datetime object as a string in the local time.""" + if dt is None: + return "-" + return dt.astimezone().strftime("%Y-%m-%d %H:%M:%S") + + def open_documents(paths, subject_index, vocab_lang, docs_limit): """Helper function to open a document corpus from a list of pathnames, each of which is either a TSV file or a directory of TXT files. 
For diff --git a/tests/test_cli.py b/tests/test_cli.py index 603b1c49b..8b405a74b 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -82,7 +82,7 @@ def test_show_project(): is_trained = re.search(r"Trained:\s+(.+)", result.output) assert is_trained.group(1) == "True" modification_time = re.search(r"Modification time:\s+(.+)", result.output) - assert modification_time.group(1) == "None" + assert modification_time.group(1) == "-" def test_show_project_nonexistent(): From 64375ef1b81053ae61a11d5e8c03e72e304075b1 Mon Sep 17 00:00:00 2001 From: Juho Inkinen Date: Fri, 21 Apr 2023 12:36:06 +0300 Subject: [PATCH 06/83] Test output of formatted modification time --- tests/test_cli.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_cli.py b/tests/test_cli.py index 8b405a74b..a6180ee39 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -7,6 +7,7 @@ import random import re import shutil +from datetime import datetime, timedelta from click.testing import CliRunner @@ -85,6 +86,21 @@ def test_show_project(): assert modification_time.group(1) == "-" +def test_show_project_modification_time(testdatadir): + dirpath = os.path.join(str(testdatadir), "projects", "tfidf-fi") + fpath = os.path.join(str(dirpath), "test_show_project_datafile") + os.makedirs(dirpath) + open(fpath, "a").close() + + result = runner.invoke(annif.cli.cli, ["show-project", "tfidf-fi"]) + assert not result.exception + modification_time = re.search(r"Modification time:\s+(.+)", result.output) + modification_time_obj = datetime.strptime( + modification_time.group(1), "%Y-%m-%d %H:%M:%S" + ) + assert datetime.now() - modification_time_obj < timedelta(1) + + def test_show_project_nonexistent(): assert runner.invoke(annif.cli.cli, ["show-project", TEMP_PROJECT]).exit_code != 0 # Test should not fail even if the user queries for a non-existent project. 
From 267ee5950a66adea85ca42ae70e4dcffefd92da8 Mon Sep 17 00:00:00 2001 From: Juho Inkinen Date: Fri, 21 Apr 2023 13:07:50 +0300 Subject: [PATCH 07/83] Add Backend line in output of show-project --- annif/cli.py | 1 + tests/test_cli.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/annif/cli.py b/annif/cli.py index 89d4f165e..ad0030289 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -85,6 +85,7 @@ def run_show_project(project_id): click.echo(f"Vocabulary: {proj.vocab.vocab_id}") click.echo(f"Vocab language: {proj.vocab_lang}") click.echo(f"Access: {proj.access.name}") + click.echo(f"Backend: {proj.backend.name}") click.echo(f"Trained: {proj.is_trained}") click.echo(f"Modification time: {cli_util.format_datetime(proj.modification_time)}") diff --git a/tests/test_cli.py b/tests/test_cli.py index a6180ee39..ec87cf6e5 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -80,6 +80,8 @@ def test_show_project(): assert project_lang.group(1) == "en" access = re.search(r"Access:\s+(.+)", result.output) assert access.group(1) == "hidden" + access = re.search(r"Backend:\s+(.+)", result.output) + assert access.group(1) == "dummy" is_trained = re.search(r"Trained:\s+(.+)", result.output) assert is_trained.group(1) == "True" modification_time = re.search(r"Modification time:\s+(.+)", result.output) From 8c77a813759b7bd2bf2789dfcd9b557e9249639c Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Fri, 5 May 2023 16:14:03 +0200 Subject: [PATCH 08/83] Make stwfsapy optional --- annif/backend/__init__.py | 7 +++++-- pyproject.toml | 3 ++- tests/test_backend_stwfsa.py | 32 ++++++++++++++++++-------------- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/annif/backend/__init__.py b/annif/backend/__init__.py index a53913ae0..80ede0720 100644 --- a/annif/backend/__init__.py +++ b/annif/backend/__init__.py @@ -62,9 +62,12 @@ def _pav(): def _stwfsa(): - from . import stwfsa + try: + from . 
import stwfsa - return stwfsa.StwfsaBackend + return stwfsa.StwfsaBackend + except ImportError: + raise ValueError("STWFSA not available, cannot use stwfsa backend") def _svc(): diff --git a/pyproject.toml b/pyproject.toml index 1dba9286d..008b388e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,6 @@ rdflib = ">=4.2,<7.0" gunicorn = "20.1.*" numpy = "1.24.*" optuna = "2.10.*" -stwfsapy = "0.3.*" python-dateutil = "2.8.*" tomli = "2.0.*" simplemma = "0.9.*" @@ -57,6 +56,7 @@ lmdb = {version = "1.4.1", optional = true} omikuji = {version = "0.5.*", optional = true} yake = {version = "0.4.5", optional = true} spacy = {version = "3.5.*", optional = true} +stwfsapy = {version="0.3.*", optional = true} [tool.poetry.dev-dependencies] py = "*" @@ -78,6 +78,7 @@ nn = ["tensorflow-cpu", "lmdb"] omikuji = ["omikuji"] yake = ["yake"] spacy = ["spacy"] +stwfsa = ["stwfsapy"] [tool.poetry.scripts] annif = "annif.cli:cli" diff --git a/tests/test_backend_stwfsa.py b/tests/test_backend_stwfsa.py index 455040269..ecac18f38 100644 --- a/tests/test_backend_stwfsa.py +++ b/tests/test_backend_stwfsa.py @@ -2,9 +2,13 @@ import annif.corpus from annif.backend import get_backend -from annif.backend.stwfsa import StwfsaBackend from annif.exception import NotInitializedException, NotSupportedException +stwfsa = pytest.importorskip("annif.backend.stwfsa") + +stwfsa_backend_name = stwfsa.StwfsaBackend.name + + _backend_conf = { "language": "fi", "concept_type_uri": "http://www.w3.org/2004/02/skos/core#Concept", @@ -15,9 +19,9 @@ def test_stwfsa_default_params(project): - stwfsa_type = get_backend(StwfsaBackend.name) + stwfsa_type = get_backend(stwfsa_backend_name) stwfsa = stwfsa_type( - backend_id=StwfsaBackend.name, config_params={}, project=project + backend_id=stwfsa_backend_name, config_params={}, project=project ) expected_default_params = { "concept_type_uri": "http://www.w3.org/2004/02/skos/core#Concept", @@ -38,16 +42,16 @@ def test_stwfsa_default_params(project): def test_stwfsa_not_initialized(project): - stwfsa_type = get_backend(StwfsaBackend.name) + stwfsa_type = get_backend(stwfsa_backend_name) stwfsa = stwfsa_type(backend_id="stwfsa", config_params={}, project=project) with pytest.raises(NotInitializedException): stwfsa.suggest(["example text"])[0] def test_stwfsa_train(document_corpus, project, datadir): - stwfsa_type = get_backend(StwfsaBackend.name) + stwfsa_type = get_backend(stwfsa_backend_name) stwfsa = stwfsa_type( - backend_id=StwfsaBackend.name, config_params=_backend_conf, project=project + backend_id=stwfsa_backend_name, config_params=_backend_conf, project=project ) stwfsa.train(document_corpus) assert stwfsa._model is not None @@ -58,9 +62,9 @@ def test_stwfsa_train(document_corpus, project, datadir): def test_empty_corpus(project): corpus = annif.corpus.DocumentList([]) - stwfsa_type = get_backend(StwfsaBackend.name) + stwfsa_type = get_backend(stwfsa_backend_name) stwfsa = stwfsa_type( - backend_id=StwfsaBackend.name, config_params={"limit": 10}, project=project + backend_id=stwfsa_backend_name, config_params={"limit": 10}, project=project ) with pytest.raises(NotSupportedException): stwfsa.train(corpus) @@ -68,27 +72,27 @@ def test_empty_corpus(project): def test_cached_corpus(project): corpus = "cached" - stwfsa_type = get_backend(StwfsaBackend.name) + stwfsa_type = get_backend(stwfsa_backend_name) stwfsa = stwfsa_type( - backend_id=StwfsaBackend.name, config_params={"limit": 10}, project=project + backend_id=stwfsa_backend_name, config_params={"limit": 10}, 
project=project ) with pytest.raises(NotSupportedException): stwfsa.train(corpus) def test_stwfsa_suggest_unknown(project): - stwfsa_type = get_backend(StwfsaBackend.name) + stwfsa_type = get_backend(stwfsa_backend_name) stwfsa = stwfsa_type( - backend_id=StwfsaBackend.name, config_params={"limit": 10}, project=project + backend_id=stwfsa_backend_name, config_params={"limit": 10}, project=project ) results = stwfsa.suggest(["1234"])[0] assert len(results) == 0 def test_stwfsa_suggest(project, datadir): - stwfsa_type = get_backend(StwfsaBackend.name) + stwfsa_type = get_backend(stwfsa_backend_name) stwfsa = stwfsa_type( - backend_id=StwfsaBackend.name, config_params={"limit": 10}, project=project + backend_id=stwfsa_backend_name, config_params={"limit": 10}, project=project ) # Just some randomly selected words, taken from YSO archaeology group. # And "random" words between them From 1b317b3bd2ef70c3ce56e6dbd769a239fdd27cc5 Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Fri, 5 May 2023 16:39:06 +0200 Subject: [PATCH 09/83] Add stwfsa to CI test selectively --- .github/workflows/cicd.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 89c0a2cbb..f415ebc91 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -61,7 +61,7 @@ jobs: # Selectively install the optional dependencies for some Python versions # For Python 3.8: if [[ ${{ matrix.python-version }} == '3.8' ]]; then - poetry install -E "nn omikuji yake voikko"; + poetry install -E "nn omikuji yake voikko stwfsa"; fi # For Python 3.9: if [[ ${{ matrix.python-version }} == '3.9' ]]; then @@ -71,7 +71,7 @@ jobs: fi # For Python 3.10: if [[ ${{ matrix.python-version }} == '3.10' ]]; then - poetry install -E "nn omikuji yake"; + poetry install -E "nn omikuji yake stwfsa"; fi poetry run python -m nltk.downloader punkt - name: Test with pytest From ede349a6b4d4c91a96eb681fd361eba502003ecb Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Mon, 8 May 2023 10:38:35 +0200 Subject: [PATCH 10/83] Add stwfsa to optional_dependencies in Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index dae45997f..ef8af6fbf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM python:3.10-slim-bullseye LABEL org.opencontainers.image.authors="grp-natlibfi-annif@helsinki.fi" SHELL ["/bin/bash", "-c"] -ARG optional_dependencies="fasttext voikko fasttext nn omikuji yake spacy" +ARG optional_dependencies="fasttext voikko fasttext nn omikuji yake spacy stwfsa" ARG POETRY_VIRTUALENVS_CREATE=false # Install system dependencies needed at runtime: From 84fa56dc61ea358f9bf3a20007980a14278caedb Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Mon, 8 May 2023 10:47:47 +0200 Subject: [PATCH 11/83] Add "stwfsa not installed" test --- tests/test_backend.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/test_backend.py b/tests/test_backend.py index 19ff2028c..378cb78b8 100644 --- a/tests/test_backend.py +++ b/tests/test_backend.py @@ -91,3 +91,13 @@ def test_get_backend_yake_not_installed(): with pytest.raises(ValueError) as excinfo: annif.backend.get_backend("yake") assert "YAKE not available" in str(excinfo.value) + + +@pytest.mark.skipif( + importlib.util.find_spec("stwfsapy") is not None, + reason="test requires that STWFSA is NOT installed", +) +def test_get_backend_stwfsa_not_installed(): + with pytest.raises(ValueError) as excinfo: + 
annif.backend.get_backend("stwfsa") + assert "STWFSA not available" in str(excinfo.value) From 076b85916d00936d40bb2bec1d8b5390151c4644 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 8 May 2023 13:58:20 +0300 Subject: [PATCH 12/83] Remove duplicated fasttext entry in optional dependencies (#701) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ef8af6fbf..d1cea10e7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM python:3.10-slim-bullseye LABEL org.opencontainers.image.authors="grp-natlibfi-annif@helsinki.fi" SHELL ["/bin/bash", "-c"] -ARG optional_dependencies="fasttext voikko fasttext nn omikuji yake spacy stwfsa" +ARG optional_dependencies="voikko fasttext nn omikuji yake spacy stwfsa" ARG POETRY_VIRTUALENVS_CREATE=false # Install system dependencies needed at runtime: From 76e59d513203839409d4fbcb5d03c92159a8b941 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Thu, 11 May 2023 11:27:33 +0300 Subject: [PATCH 13/83] Delete .codecov.yml This is nowdays unnecessary --- .codecov.yml | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 .codecov.yml diff --git a/.codecov.yml b/.codecov.yml deleted file mode 100644 index 9b7c16fc6..000000000 --- a/.codecov.yml +++ /dev/null @@ -1,2 +0,0 @@ -ignore: - - "setup.py" From e6e06b8c9f2bf497eb9c1dbf2f5bda03031d022b Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Thu, 11 May 2023 13:23:27 +0300 Subject: [PATCH 14/83] Add tests for routes cmd when using Flask and Connexion --- tests/test_cli.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/test_cli.py b/tests/test_cli.py index 944764439..387f10e2f 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1016,6 +1016,20 @@ def test_run(): assert "Run a local development server." 
in result.output +def test_routes_with_flask_app(): + # When using plain Flask only the static endpoint exists + result = runner.invoke(annif.cli.cli, ["routes"]) + assert re.search(r"static\s+GET\s+\/static\/\", result.output) + assert not re.search(r"app.home\s+GET\s+\/", result.output) + + +def test_routes_with_connexion_app(): + # When using Connexion all endpoints exist + result = os.popen("python annif/cli.py routes").read() + assert re.search(r"static\s+GET\s+\/static\/", result) + assert re.search(r"app.home\s+GET\s+\/", result) + + def test_completion_script_generation(): result = runner.invoke(annif.cli.cli, ["completion", "--bash"]) assert not result.exception From 1366fefaa50b44adfcdd582ed82317ea3b040a9f Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 12 May 2023 13:49:48 +0300 Subject: [PATCH 15/83] Use 4 decimal places in scores of eval output --- annif/cli.py | 8 ++++++-- tests/test_cli.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/annif/cli.py b/annif/cli.py index 10d632c2c..d24527050 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -427,12 +427,16 @@ def run_eval( ): eval_batch.evaluate_many(hit_sets[project_id], subject_sets) - template = "{0:<30}\t{1}" + template = "{0:<30}\t{1:{fmt_spec}}" metrics = eval_batch.results( metrics=metric, results_file=results_file, language=project.vocab_lang ) for metric, score in metrics.items(): - click.echo(template.format(metric + ":", score)) + if isinstance(score, int): + fmt_spec = "d" + elif isinstance(score, float): + fmt_spec = ".04f" + click.echo(template.format(metric + ":", score, fmt_spec=fmt_spec)) if metrics_file: json.dump( {metric_code(mname): val for mname, val in metrics.items()}, diff --git a/tests/test_cli.py b/tests/test_cli.py index 0ef12b63d..19786ec83 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -827,7 +827,7 @@ def test_eval_resultsfile(tmpdir): denominator += 1 assert precision_numerator / denominator == precision assert recall_numerator / denominator == recall - assert f_measure_numerator / denominator == f_measure + assert round(f_measure_numerator / denominator, 4) == f_measure def test_eval_badresultsfile(tmpdir): From ada1419b6cba5b28c773bbbb7e36972d1faf2e45 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 12 May 2023 14:01:53 +0300 Subject: [PATCH 16/83] Use 4 decimal places in scores of suggest & index output --- annif/cli_util.py | 3 ++- tests/test_cli.py | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/annif/cli_util.py b/annif/cli_util.py index b6e0b4e0d..72da0d46c 100644 --- a/annif/cli_util.py +++ b/annif/cli_util.py @@ -166,9 +166,10 @@ def show_hits(hits, project, lang, file=None): a table, with one row per hit. Each row contains the URI, label, possible notation, and score of the suggestion. The label is given in the specified language. 
""" + template = "<{}>\t{}\t{:.04f}" for hit in hits: subj = project.subjects[hit.subject_id] - line = "<{}>\t{}\t{}".format( + line = template.format( subj.uri, "\t".join(filter(None, (subj.labels[lang], subj.notation))), hit.score, diff --git a/tests/test_cli.py b/tests/test_cli.py index 19786ec83..496cd8b13 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -398,7 +398,7 @@ def test_learn_nonexistent_path(): def test_suggest(): result = runner.invoke(annif.cli.cli, ["suggest", "dummy-fi"], input="kissa") assert not result.exception - assert result.output == "\tdummy-fi\t1.0\n" + assert result.output == "\tdummy-fi\t1.0000\n" assert result.exit_code == 0 @@ -407,7 +407,7 @@ def test_suggest_with_language_override(): annif.cli.cli, ["suggest", "--language", "en", "dummy-fi"], input="kissa" ) assert not result.exception - assert result.output == "\tdummy\t1.0\n" + assert result.output == "\tdummy\t1.0000\n" assert result.exit_code == 0 @@ -427,7 +427,7 @@ def test_suggest_with_different_vocab_language(): annif.cli.cli, ["suggest", "dummy-vocablang"], input="the cat sat on the mat" ) assert not result.exception - assert result.output == "\tdummy-fi\t1.0\n" + assert result.output == "\tdummy-fi\t1.0000\n" assert result.exit_code == 0 @@ -438,7 +438,7 @@ def test_suggest_with_notations(): input="kissa", ) assert not result.exception - assert result.output == "\tnone-fi\t42.42\t1.0\n" + assert result.output == "\tnone-fi\t42.42\t1.0000\n" assert result.exit_code == 0 @@ -481,7 +481,7 @@ def test_suggest_ensemble(): annif.cli.cli, ["suggest", "ensemble"], input="the cat sat on the mat" ) assert not result.exception - assert result.output == "\tdummy\t1.0\n" + assert result.output == "\tdummy\t1.0000\n" assert result.exit_code == 0 @@ -493,7 +493,7 @@ def test_suggest_file(tmpdir): assert not result.exception assert f"Suggestions for {docfile}" in result.output - assert "\tdummy-fi\t1.0\n" in result.output + assert "\tdummy-fi\t1.0000\n" in result.output assert result.exit_code == 0 @@ -510,7 +510,7 @@ def test_suggest_two_files(tmpdir): assert not result.exception assert f"Suggestions for {docfile1}" in result.output assert f"Suggestions for {docfile2}" in result.output - assert result.output.count("\tdummy-fi\t1.0\n") == 2 + assert result.output.count("\tdummy-fi\t1.0000\n") == 2 assert result.exit_code == 0 @@ -528,7 +528,7 @@ def test_suggest_two_files_docs_limit(tmpdir): assert not result.exception assert f"Suggestions for {docfile1}" in result.output assert f"Suggestions for {docfile2}" not in result.output - assert result.output.count("\tdummy-fi\t1.0\n") == 1 + assert result.output.count("\tdummy-fi\t1.0000\n") == 1 assert result.exit_code == 0 @@ -543,7 +543,7 @@ def test_suggest_file_and_stdin(tmpdir): assert not result.exception assert f"Suggestions for {docfile1}" in result.output assert "Suggestions for -" in result.output - assert result.output.count("\tdummy-fi\t1.0\n") == 2 + assert result.output.count("\tdummy-fi\t1.0000\n") == 2 assert result.exit_code == 0 @@ -564,7 +564,7 @@ def test_suggest_dash_path(): annif.cli.cli, ["suggest", "dummy-fi", "-"], input="the cat sat on the mat" ) assert not result.exception - assert result.output == "\tdummy-fi\t1.0\n" + assert result.output == "\tdummy-fi\t1.0000\n" assert result.exit_code == 0 @@ -578,7 +578,7 @@ def test_index(tmpdir): assert tmpdir.join("doc1.annif").exists() assert ( tmpdir.join("doc1.annif").read_text("utf-8") - == "\tdummy\t1.0\n" + == "\tdummy\t1.0000\n" ) # make sure that preexisting subject files are not 
overwritten @@ -593,7 +593,7 @@ def test_index(tmpdir): assert "Not overwriting" not in result.output assert ( tmpdir.join("doc1.annif").read_text("utf-8") - == "\tdummy-fi\t1.0\n" + == "\tdummy-fi\t1.0000\n" ) @@ -609,7 +609,7 @@ def test_index_with_language_override(tmpdir): assert tmpdir.join("doc1.annif").exists() assert ( tmpdir.join("doc1.annif").read_text("utf-8") - == "\tdummy-fi\t1.0\n" + == "\tdummy-fi\t1.0000\n" ) From 626ee926fcdef8e01504d1f790eebd5aad95f228 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 15 May 2023 10:28:40 +0300 Subject: [PATCH 17/83] Make test_index check that subject files in target dir do not crash --- tests/test_cli.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_cli.py b/tests/test_cli.py index 0ef12b63d..2b8dad961 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -570,6 +570,9 @@ def test_suggest_dash_path(): def test_index(tmpdir): tmpdir.join("doc1.txt").write("nothing special") + # Existing subject files should not have an effect + tmpdir.join("doc1.tsv").write("\tdummy") + tmpdir.join("doc1.key").write("\tdummy") result = runner.invoke(annif.cli.cli, ["index", "dummy-en", str(tmpdir)]) assert not result.exception From b56b31e0c49e5b9270f5067af135a44536a2aa5e Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 15 May 2023 10:59:20 +0300 Subject: [PATCH 18/83] Rename vars to better correspond their origin/usage: keyfile(name) -> subjfile(name) --- annif/corpus/document.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/annif/corpus/document.py b/annif/corpus/document.py index c26c75122..258285a26 100644 --- a/annif/corpus/document.py +++ b/annif/corpus/document.py @@ -42,15 +42,15 @@ def __iter__(self): @property def documents(self): - for docfilename, keyfilename in self: + for docfilename, subjfilename in self: with open(docfilename, errors="replace", encoding="utf-8-sig") as docfile: text = docfile.read() - if keyfilename is None: + if subjfilename is None: yield Document(text=text, subject_set=None) continue - with open(keyfilename, encoding="utf-8-sig") as keyfile: + with open(subjfilename, encoding="utf-8-sig") as subjfile: subjects = SubjectSet.from_string( - keyfile.read(), self.subject_index, self.language + subjfile.read(), self.subject_index, self.language ) yield Document(text=text, subject_set=subjects) From 971202817d5187b5ecacf3b5e984a07d374af662 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 15 May 2023 11:15:34 +0300 Subject: [PATCH 19/83] Fix crashing index cmd when subject files are present in target dir --- annif/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/annif/cli.py b/annif/cli.py index 10d632c2c..7ab26998c 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -319,7 +319,7 @@ def run_index( backend_params = cli_util.parse_backend_params(backend_param, project) documents = annif.corpus.DocumentDirectory( - directory, None, None, require_subjects=False + directory, project.subjects, lang, require_subjects=False ) results = project.suggest_corpus(documents, backend_params).filter(limit, threshold) From b602ea246c8de003faf40599cf5849b328ef114e Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 16 May 2023 12:35:59 +0300 Subject: [PATCH 20/83] Parse tsv/key files in DocumentCorpus only if required --- annif/cli.py | 4 +--- annif/corpus/document.py 
| 25 +++++++++++++------------ tests/conftest.py | 4 +++- tests/test_backend.py | 4 +++- tests/test_corpus.py | 20 +++++++++++--------- tests/test_project.py | 4 +++- 6 files changed, 34 insertions(+), 27 deletions(-) diff --git a/annif/cli.py b/annif/cli.py index 7ab26998c..66b723794 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -318,9 +318,7 @@ def run_index( raise click.BadParameter(f'language "{lang}" not supported by vocabulary') backend_params = cli_util.parse_backend_params(backend_param, project) - documents = annif.corpus.DocumentDirectory( - directory, project.subjects, lang, require_subjects=False - ) + documents = annif.corpus.DocumentDirectory(directory, require_subjects=False) results = project.suggest_corpus(documents, backend_params).filter(limit, threshold) for (docfilename, _), suggestions in zip(documents, results): diff --git a/annif/corpus/document.py b/annif/corpus/document.py index 258285a26..54a0a3ba6 100644 --- a/annif/corpus/document.py +++ b/annif/corpus/document.py @@ -17,7 +17,7 @@ class DocumentDirectory(DocumentCorpus): """A directory of files as a full text document corpus""" - def __init__(self, path, subject_index, language, require_subjects=False): + def __init__(self, path, subject_index=None, language=None, require_subjects=False): self.path = path self.subject_index = subject_index self.language = language @@ -25,19 +25,20 @@ def __init__(self, path, subject_index, language, require_subjects=False): def __iter__(self): """Iterate through the directory, yielding tuples of (docfile, - subjectfile) containing file paths. If there is no key file and - require_subjects is False, the subjectfile will be returned as None.""" + subjectfile) containing file paths. If require_subjects is False, the + subjectfile will be returned as None.""" for filename in sorted(glob.glob(os.path.join(self.path, "*.txt"))): - tsvfilename = re.sub(r"\.txt$", ".tsv", filename) - if os.path.exists(tsvfilename): - yield (filename, tsvfilename) - continue - keyfilename = re.sub(r"\.txt$", ".key", filename) - if os.path.exists(keyfilename): - yield (filename, keyfilename) - continue - if not self.require_subjects: + if self.require_subjects: + tsvfilename = re.sub(r"\.txt$", ".tsv", filename) + if os.path.exists(tsvfilename): + yield (filename, tsvfilename) + continue + keyfilename = re.sub(r"\.txt$", ".key", filename) + if os.path.exists(keyfilename): + yield (filename, keyfilename) + continue + else: yield (filename, None) @property diff --git a/tests/conftest.py b/tests/conftest.py index fcccb268f..76378a98d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -106,7 +106,9 @@ def fulltext_corpus(subject_index): ftdir = os.path.join( os.path.dirname(__file__), "corpora", "archaeology", "fulltext" ) - ft_corpus = annif.corpus.DocumentDirectory(ftdir, subject_index, "fi") + ft_corpus = annif.corpus.DocumentDirectory( + ftdir, subject_index, "fi", require_subjects=True + ) return ft_corpus diff --git a/tests/test_backend.py b/tests/test_backend.py index 378cb78b8..b7e583d1c 100644 --- a/tests/test_backend.py +++ b/tests/test_backend.py @@ -32,7 +32,9 @@ def test_learn_dummy(project, tmpdir): tmpdir.join("doc1.tsv").write("\tarchaeologists") tmpdir.join("doc2.txt").write("doc2") tmpdir.join("doc2.tsv").write("\tdummy") - docdir = annif.corpus.DocumentDirectory(str(tmpdir), project.subjects, "en") + docdir = annif.corpus.DocumentDirectory( + str(tmpdir), project.subjects, "en", require_subjects=True + ) dummy.learn(docdir) diff --git a/tests/test_corpus.py b/tests/test_corpus.py 
index 6580477bb..6e7db5158 100644 --- a/tests/test_corpus.py +++ b/tests/test_corpus.py @@ -80,38 +80,38 @@ def test_subjectset_as_vector_destination(subject_index): assert vector is destination -def test_docdir_key(tmpdir, subject_index): +def test_docdir_key(tmpdir): tmpdir.join("doc1.txt").write("doc1") tmpdir.join("doc1.key").write("key1") tmpdir.join("doc2.txt").write("doc2") tmpdir.join("doc2.key").write("key2") tmpdir.join("doc3.txt").write("doc3") - docdir = annif.corpus.DocumentDirectory(str(tmpdir), subject_index, "en") + docdir = annif.corpus.DocumentDirectory(str(tmpdir), require_subjects=False) files = sorted(list(docdir)) assert len(files) == 3 assert files[0][0] == str(tmpdir.join("doc1.txt")) - assert files[0][1] == str(tmpdir.join("doc1.key")) + assert files[0][1] is None assert files[1][0] == str(tmpdir.join("doc2.txt")) - assert files[1][1] == str(tmpdir.join("doc2.key")) + assert files[1][1] is None assert files[2][0] == str(tmpdir.join("doc3.txt")) assert files[2][1] is None -def test_docdir_tsv(tmpdir, subject_index): +def test_docdir_tsv(tmpdir): tmpdir.join("doc1.txt").write("doc1") tmpdir.join("doc1.tsv").write("\tkey1") tmpdir.join("doc2.txt").write("doc2") tmpdir.join("doc2.tsv").write("\tkey2") tmpdir.join("doc3.txt").write("doc3") - docdir = annif.corpus.DocumentDirectory(str(tmpdir), subject_index, "en") + docdir = annif.corpus.DocumentDirectory(str(tmpdir), require_subjects=False) files = sorted(list(docdir)) assert len(files) == 3 assert files[0][0] == str(tmpdir.join("doc1.txt")) - assert files[0][1] == str(tmpdir.join("doc1.tsv")) + assert files[0][1] is None assert files[1][0] == str(tmpdir.join("doc2.txt")) - assert files[1][1] == str(tmpdir.join("doc2.tsv")) + assert files[1][1] is None assert files[2][0] == str(tmpdir.join("doc3.txt")) assert files[2][1] is None @@ -126,7 +126,9 @@ def test_docdir_tsv_bom(tmpdir, subject_index): "\trautakausi".encode("utf-8-sig") ) - docdir = annif.corpus.DocumentDirectory(str(tmpdir), subject_index, "fi") + docdir = annif.corpus.DocumentDirectory( + str(tmpdir), subject_index, "fi", require_subjects=True + ) docs = list(docdir.documents) assert docs[0].text == "doc1" assert ( diff --git a/tests/test_project.py b/tests/test_project.py index 6600d664c..a6294edb6 100644 --- a/tests/test_project.py +++ b/tests/test_project.py @@ -183,7 +183,9 @@ def test_project_learn(registry, tmpdir): tmpdir.join("doc2.tsv").write("\tdummy") project = registry.get_project("dummy-fi") - docdir = annif.corpus.DocumentDirectory(str(tmpdir), project.subjects, "en") + docdir = annif.corpus.DocumentDirectory( + str(tmpdir), project.subjects, "en", require_subjects=True + ) project.learn(docdir) result = project.suggest(["this is some text"])[0] assert len(result) == 1 From 4f6994b6bf9417b33fbfa0c5d01df9aa4fadd429 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 22 May 2023 13:31:40 +0300 Subject: [PATCH 21/83] Add script and CICD job for testing CLI startup time (#706) --- .github/workflows/cicd.yml | 18 ++++++++++++++++++ tests/time-startup.sh | 27 +++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100755 tests/time-startup.sh diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index f415ebc91..fd28a25c0 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -36,6 +36,24 @@ jobs: run: | poetry run flake8 + time-startup: + runs-on: ubuntu-22.04 + name: check CLI startup time + steps: + - uses: actions/checkout@v3 + - name: 
"Prepare: restore caches, install Poetry, set up Python" + id: prepare + uses: ./.github/actions/prepare + with: + python-version: "3.9" + poetry-version: ${{ env.POETRY_VERSION }} + - name: Install Python dependencies + run: | + poetry install + - name: Check startup time + run: | + poetry run tests/time-startup.sh + test: runs-on: ubuntu-22.04 timeout-minutes: 15 diff --git a/tests/time-startup.sh b/tests/time-startup.sh new file mode 100755 index 000000000..dabb56134 --- /dev/null +++ b/tests/time-startup.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Function to measure startup time +measure_startup_time() { + startup_time=$( { time -p annif --help >/dev/null; } 2>&1 | awk '/^user/{u=$2}/^sys/{s=$2} END{print u+s}' ) + echo "$startup_time" +} + +startup_time1=$(measure_startup_time) +startup_time2=$(measure_startup_time) +startup_time3=$(measure_startup_time) +startup_time4=$(measure_startup_time) + +# Calculate the average startup time +average_startup_time=$(echo "scale=3; ($startup_time1 + $startup_time2 + $startup_time3 + $startup_time4) / 4" | bc) + +# Print the average startup time +echo "Average Startup time: $average_startup_time seconds" + +# Set the threshold for acceptable startup time in seconds +threshold=0.300 + +# Compare the average startup time with the threshold +if (( $(echo "$average_startup_time > $threshold" | bc -l) )); then + echo "Startup time (user + sys time) exceeds the threshold of $threshold s. Test failed." + exit 1 +fi From fb512928b455f785f4799e0c6b96db5ecbfb12b3 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 24 May 2023 13:32:48 +0300 Subject: [PATCH 22/83] Run apt-get upgrade to ensure image being up-to-date (#707) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d1cea10e7..2372cc044 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ ARG optional_dependencies="voikko fasttext nn omikuji yake spacy stwfsa" ARG POETRY_VIRTUALENVS_CREATE=false # Install system dependencies needed at runtime: -RUN apt-get update && \ +RUN apt-get update && apt-get upgrade && \ if [[ $optional_dependencies =~ "voikko" ]]; then \ apt-get install -y --no-install-recommends \ libvoikko1 \ From 579701ce7c2b0f360ddabf64874d50f940a45ffb Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 5 Jun 2023 11:47:29 +0300 Subject: [PATCH 23/83] Issue690 use python type hints (#708) * Run "monkeytype apply --pep_563" on all modules * Use dict instead of OrderedDict * Make imports that were too eagerly made conditional default again * Fix flake8 errors * Turn forward references non-strings * Use less specific types Types used in tests do not cover all cases. This also avoids many Union[] sets. * Remove "Union[Any," in hints These would allow all types * Move comment back to its original place * Fix some omissions and errors by monkeytype * Simplify hints using float for Union[int, float] * Simplify hints using Sequence for Union[Tuple, List] * Remove too wide usage of Any (e.g. 
in Unions, Lists, Iterators) * Unify type of params to Dict[str, Any] or DefaultDict[str, Dict * Simplify overly complex types * Fix erronously passing whole Error obj to ClickException instead of just msg * Annotate (manually) annif/backend/hyperopt.py * Manually annotate annif/backend/mixins.py * Manually annotate annif/corpus/document.py * Upgrade to PEP 585 and PEP 604 typing features/syntax - Use standard collection types instead of types from Typing (PEP 585) - Write union types as X | Y (PEP 604) - Write optional values as X | None (PEP 604) * Manually annotate annif/corpus/parallel.py * Manually annotate annif/util.py * Fix easily fixable errors noted by Mypy * Exclude TYPE_CHECKING blocks from test coverage * Narrow down TokenSet tokens type to np.ndarray only * Use int instead of int | np.int32 * Move imports for type typechecking only to TYPE_CHECKING blocks * Restore accidentally removed annif.suggestion import * Make type optional as it should be --- annif/__init__.py | 12 +++- annif/analyzer/__init__.py | 7 ++- annif/analyzer/analyzer.py | 9 +-- annif/analyzer/simple.py | 5 +- annif/analyzer/simplemma.py | 5 +- annif/analyzer/snowball.py | 5 +- annif/analyzer/spacy.py | 5 +- annif/analyzer/voikko.py | 7 ++- annif/backend/__init__.py | 32 +++++----- annif/backend/backend.py | 66 ++++++++++++++++----- annif/backend/dummy.py | 17 ++++-- annif/backend/ensemble.py | 54 ++++++++++++----- annif/backend/fasttext.py | 44 ++++++++++---- annif/backend/http.py | 16 +++-- annif/backend/hyperopt.py | 31 +++++++--- annif/backend/mixins.py | 24 ++++++-- annif/backend/mllm.py | 45 ++++++++++---- annif/backend/nn_ensemble.py | 61 ++++++++++++++----- annif/backend/omikuji.py | 28 ++++++--- annif/backend/pav.py | 34 ++++++++--- annif/backend/stwfsa.py | 19 ++++-- annif/backend/svc.py | 31 +++++++--- annif/backend/tfidf.py | 36 ++++++++---- annif/backend/yake.py | 45 ++++++++------ annif/cli_util.py | 57 +++++++++++++----- annif/config.py | 28 ++++----- annif/corpus/combine.py | 9 ++- annif/corpus/document.py | 25 ++++++-- annif/corpus/skos.py | 31 ++++++---- annif/corpus/subject.py | 77 ++++++++++++++---------- annif/corpus/types.py | 5 +- annif/datadir.py | 5 +- annif/eval.py | 64 ++++++++++++++++---- annif/exception.py | 11 +++- annif/lexical/mllm.py | 89 +++++++++++++++++++++------- annif/lexical/tokenset.py | 26 ++++++--- annif/lexical/util.py | 17 +++++- annif/openapi/validation.py | 9 ++- annif/parallel.py | 44 ++++++++++---- annif/project.py | 100 ++++++++++++++++++++++++-------- annif/registry.py | 36 +++++++----- annif/rest.py | 62 +++++++++++++++----- annif/suggestion.py | 50 +++++++++++----- annif/transform/__init__.py | 12 +++- annif/transform/inputlimiter.py | 12 +++- annif/transform/langfilter.py | 18 ++++-- annif/transform/transform.py | 27 +++++++-- annif/util.py | 20 ++++--- annif/vocab.py | 31 +++++++--- setup.cfg | 8 ++- 50 files changed, 1091 insertions(+), 420 deletions(-) diff --git a/annif/__init__.py b/annif/__init__.py index f4a5831f5..bb196b4ee 100644 --- a/annif/__init__.py +++ b/annif/__init__.py @@ -1,8 +1,11 @@ #!/usr/bin/env python3 +from __future__ import annotations + import logging import os import os.path +from typing import TYPE_CHECKING logging.basicConfig() logger = logging.getLogger("annif") @@ -10,8 +13,11 @@ import annif.backend # noqa +if TYPE_CHECKING: + from flask.app import Flask + -def create_flask_app(config_name=None): +def create_flask_app(config_name: str | None = None) -> Flask: """Create a Flask app to be used by the CLI.""" from flask import 
Flask @@ -23,7 +29,7 @@ def create_flask_app(config_name=None): return app -def create_app(config_name=None): +def create_app(config_name: str | None = None) -> Flask: """Create a Connexion app to be used for the API.""" # 'cxapp' here is the Connexion application that has a normal Flask app # as a property (cxapp.app) @@ -60,7 +66,7 @@ def create_app(config_name=None): return cxapp.app -def _get_config_name(config_name): +def _get_config_name(config_name: str | None) -> str: if config_name is None: config_name = os.environ.get("ANNIF_CONFIG") if config_name is None: diff --git a/annif/analyzer/__init__.py b/annif/analyzer/__init__.py index eacf3d001..a0f93ced3 100644 --- a/annif/analyzer/__init__.py +++ b/annif/analyzer/__init__.py @@ -1,12 +1,17 @@ """Collection of language-specific analyzers and analyzer registry for Annif""" +from __future__ import annotations import re +from typing import TYPE_CHECKING import annif from annif.util import parse_args from . import simple, simplemma, snowball +if TYPE_CHECKING: + from annif.analyzer.analyzer import Analyzer + _analyzers = {} @@ -14,7 +19,7 @@ def register_analyzer(analyzer): _analyzers[analyzer.name] = analyzer -def get_analyzer(analyzerspec): +def get_analyzer(analyzerspec: str) -> Analyzer: match = re.match(r"(\w+)(\((.*)\))?", analyzerspec) if match is None: raise ValueError("Invalid analyzer specification {}".format(analyzerspec)) diff --git a/annif/analyzer/analyzer.py b/annif/analyzer/analyzer.py index 37457069d..5ba876f9d 100644 --- a/annif/analyzer/analyzer.py +++ b/annif/analyzer/analyzer.py @@ -1,4 +1,5 @@ """Common functionality for analyzers.""" +from __future__ import annotations import abc import functools @@ -15,18 +16,18 @@ class Analyzer(metaclass=abc.ABCMeta): name = None token_min_length = 3 # default value, can be overridden in instances - def __init__(self, **kwargs): + def __init__(self, **kwargs) -> None: if _KEY_TOKEN_MIN_LENGTH in kwargs: self.token_min_length = int(kwargs[_KEY_TOKEN_MIN_LENGTH]) - def tokenize_sentences(self, text): + def tokenize_sentences(self, text: str) -> list[str]: """Tokenize a piece of text (e.g. a document) into sentences.""" import nltk.tokenize return nltk.tokenize.sent_tokenize(text) @functools.lru_cache(maxsize=50000) - def is_valid_token(self, word): + def is_valid_token(self, word: str) -> bool: """Return True if the word is an acceptable token.""" if len(word) < self.token_min_length: return False @@ -36,7 +37,7 @@ def is_valid_token(self, word): return True return False - def tokenize_words(self, text, filter=True): + def tokenize_words(self, text: str, filter: bool = True) -> list[str]: """Tokenize a piece of text (e.g. a sentence) into words. If filter=True (default), only return valid tokens (e.g. not punctuation, numbers or very short words)""" diff --git a/annif/analyzer/simple.py b/annif/analyzer/simple.py index 46a8f92f3..4cc35e6f1 100644 --- a/annif/analyzer/simple.py +++ b/annif/analyzer/simple.py @@ -1,4 +1,5 @@ """Simple analyzer for Annif. Only folds words to lower case.""" +from __future__ import annotations from . 
import analyzer @@ -6,9 +7,9 @@ class SimpleAnalyzer(analyzer.Analyzer): name = "simple" - def __init__(self, param, **kwargs): + def __init__(self, param: None, **kwargs) -> None: self.param = param super().__init__(**kwargs) - def _normalize_word(self, word): + def _normalize_word(self, word: str) -> str: return word.lower() diff --git a/annif/analyzer/simplemma.py b/annif/analyzer/simplemma.py index 02976982b..e535b25de 100644 --- a/annif/analyzer/simplemma.py +++ b/annif/analyzer/simplemma.py @@ -1,4 +1,5 @@ """Simplemma analyzer for Annif, based on simplemma lemmatizer.""" +from __future__ import annotations import simplemma @@ -8,9 +9,9 @@ class SimplemmaAnalyzer(analyzer.Analyzer): name = "simplemma" - def __init__(self, param, **kwargs): + def __init__(self, param: str, **kwargs) -> None: self.lang = param super().__init__(**kwargs) - def _normalize_word(self, word): + def _normalize_word(self, word: str) -> str: return simplemma.lemmatize(word, lang=self.lang) diff --git a/annif/analyzer/snowball.py b/annif/analyzer/snowball.py index c13c4e904..57990c2a1 100644 --- a/annif/analyzer/snowball.py +++ b/annif/analyzer/snowball.py @@ -1,4 +1,5 @@ """Snowball analyzer for Annif, based on nltk Snowball stemmer.""" +from __future__ import annotations import functools @@ -8,7 +9,7 @@ class SnowballAnalyzer(analyzer.Analyzer): name = "snowball" - def __init__(self, param, **kwargs): + def __init__(self, param: str, **kwargs) -> None: self.param = param import nltk.stem.snowball @@ -16,5 +17,5 @@ def __init__(self, param, **kwargs): super().__init__(**kwargs) @functools.lru_cache(maxsize=500000) - def _normalize_word(self, word): + def _normalize_word(self, word: str) -> str: return self.stemmer.stem(word.lower()) diff --git a/annif/analyzer/spacy.py b/annif/analyzer/spacy.py index 212a3a5f6..b5e9cbc55 100644 --- a/annif/analyzer/spacy.py +++ b/annif/analyzer/spacy.py @@ -1,4 +1,5 @@ """spaCy analyzer for Annif which uses spaCy for lemmatization""" +from __future__ import annotations import annif.util from annif.exception import OperationFailedException @@ -11,7 +12,7 @@ class SpacyAnalyzer(analyzer.Analyzer): name = "spacy" - def __init__(self, param, **kwargs): + def __init__(self, param: str, **kwargs) -> None: import spacy self.param = param @@ -28,7 +29,7 @@ def __init__(self, param, **kwargs): self.lowercase = False super().__init__(**kwargs) - def tokenize_words(self, text, filter=True): + def tokenize_words(self, text: str, filter: bool = True) -> list[str]: lemmas = [ lemma for lemma in (token.lemma_ for token in self.nlp(text.strip())) diff --git a/annif/analyzer/voikko.py b/annif/analyzer/voikko.py index d111da25e..e6e693d65 100644 --- a/annif/analyzer/voikko.py +++ b/annif/analyzer/voikko.py @@ -1,4 +1,5 @@ """Voikko analyzer for Annif, based on libvoikko library.""" +from __future__ import annotations import functools @@ -10,12 +11,12 @@ class VoikkoAnalyzer(analyzer.Analyzer): name = "voikko" - def __init__(self, param, **kwargs): + def __init__(self, param: str, **kwargs) -> None: self.param = param self.voikko = None super().__init__(**kwargs) - def __getstate__(self): + def __getstate__(self) -> dict[str, str | None]: """Return the state of the object for pickling purposes. 
The Voikko instance is set to None because as a ctypes object it cannot be pickled.""" @@ -23,7 +24,7 @@ def __getstate__(self): return {"param": self.param, "voikko": None} @functools.lru_cache(maxsize=500000) - def _normalize_word(self, word): + def _normalize_word(self, word: str) -> str: if self.voikko is None: self.voikko = voikko.libvoikko.Voikko(self.param) result = self.voikko.analyze(word) diff --git a/annif/backend/__init__.py b/annif/backend/__init__.py index 80ede0720..cbeeb648e 100644 --- a/annif/backend/__init__.py +++ b/annif/backend/__init__.py @@ -1,20 +1,26 @@ """Registry of backend types for Annif""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Type + +if TYPE_CHECKING: + from annif.backend.backend import AnnifBackend # define functions for lazily importing each backend (alphabetical order) -def _dummy(): +def _dummy() -> Type[AnnifBackend]: from . import dummy return dummy.DummyBackend -def _ensemble(): +def _ensemble() -> Type[AnnifBackend]: from . import ensemble return ensemble.EnsembleBackend -def _fasttext(): +def _fasttext() -> Type[AnnifBackend]: try: from . import fasttext @@ -23,19 +29,19 @@ def _fasttext(): raise ValueError("fastText not available, cannot use fasttext backend") -def _http(): +def _http() -> Type[AnnifBackend]: from . import http return http.HTTPBackend -def _mllm(): +def _mllm() -> Type[AnnifBackend]: from . import mllm return mllm.MLLMBackend -def _nn_ensemble(): +def _nn_ensemble() -> Type[AnnifBackend]: try: from . import nn_ensemble @@ -46,7 +52,7 @@ def _nn_ensemble(): ) -def _omikuji(): +def _omikuji() -> Type[AnnifBackend]: try: from . import omikuji @@ -55,13 +61,13 @@ def _omikuji(): raise ValueError("Omikuji not available, cannot use omikuji backend") -def _pav(): +def _pav() -> Type[AnnifBackend]: from . import pav return pav.PAVBackend -def _stwfsa(): +def _stwfsa() -> Type[AnnifBackend]: try: from . import stwfsa @@ -70,19 +76,19 @@ def _stwfsa(): raise ValueError("STWFSA not available, cannot use stwfsa backend") -def _svc(): +def _svc() -> Type[AnnifBackend]: from . import svc return svc.SVCBackend -def _tfidf(): +def _tfidf() -> Type[AnnifBackend]: from . import tfidf return tfidf.TFIDFBackend -def _yake(): +def _yake() -> Type[AnnifBackend]: try: from . import yake @@ -108,7 +114,7 @@ def _yake(): } -def get_backend(backend_id): +def get_backend(backend_id: str) -> Type[AnnifBackend]: if backend_id in _backend_fns: return _backend_fns[backend_id]() else: diff --git a/annif/backend/backend.py b/annif/backend/backend.py index 754d66111..f35b0a312 100644 --- a/annif/backend/backend.py +++ b/annif/backend/backend.py @@ -1,13 +1,21 @@ """Common functionality for backends.""" +from __future__ import annotations import abc import os.path from datetime import datetime, timezone from glob import glob +from typing import TYPE_CHECKING, Any from annif import logger from annif.suggestion import SuggestionBatch +if TYPE_CHECKING: + from configparser import SectionProxy + + from annif.corpus.document import DocumentCorpus + from annif.project import AnnifProject + class AnnifBackend(metaclass=abc.ABCMeta): """Base class for Annif backends that perform analysis. The @@ -17,7 +25,12 @@ class AnnifBackend(metaclass=abc.ABCMeta): DEFAULT_PARAMETERS = {"limit": 100} - def __init__(self, backend_id, config_params, project): + def __init__( + self, + backend_id: str, + config_params: dict[str, Any] | SectionProxy, + project: AnnifProject, + ) -> None: """Initialize backend with specific parameters. 
The parameters are a dict. Keys and values depend on the specific backend type.""" @@ -26,22 +39,22 @@ def __init__(self, backend_id, config_params, project): self.project = project self.datadir = project.datadir - def default_params(self): + def default_params(self) -> dict[str, Any]: return self.DEFAULT_PARAMETERS @property - def params(self): + def params(self) -> dict[str, Any]: params = {} params.update(self.default_params()) params.update(self.config_params) return params @property - def is_trained(self): + def is_trained(self) -> bool: return bool(glob(os.path.join(self.datadir, "*"))) @property - def modification_time(self): + def modification_time(self) -> datetime | None: mtimes = [ datetime.utcfromtimestamp(os.path.getmtime(p)) for p in glob(os.path.join(self.datadir, "*")) @@ -51,23 +64,36 @@ def modification_time(self): return None return most_recent.replace(tzinfo=timezone.utc) - def _get_backend_params(self, params): + def _get_backend_params( + self, + params: dict[str, Any] | None, + ) -> dict[str, Any]: backend_params = dict(self.params) if params is not None: backend_params.update(params) return backend_params - def _train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: """This method can be overridden by backends. It implements the train functionality, with pre-processed parameters.""" pass # default is to do nothing, subclasses may override - def train(self, corpus, params=None, jobs=0): + def train( + self, + corpus: DocumentCorpus, + params: dict[str, Any] | None = None, + jobs: int = 0, + ) -> None: """Train the model on the given document or subject corpus.""" beparams = self._get_backend_params(params) return self._train(corpus, params=beparams, jobs=jobs) - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: """This method can be overridden by backends. It should cause the backend to pre-load all data it needs during operation. If parallel is True, the backend should expect to be used for @@ -80,7 +106,9 @@ def _suggest(self, text, params): document, with pre-processed parameters.""" pass # pragma: no cover - def _suggest_batch(self, texts, params): + def _suggest_batch( + self, texts: list[str], params: dict[str, Any] + ) -> SuggestionBatch: """This method can be implemented by backends to use batching of documents in their operations. 
This default implementation uses the regular suggest functionality.""" @@ -90,22 +118,26 @@ def _suggest_batch(self, texts, params): limit=int(params.get("limit")), ) - def suggest(self, texts, params=None): + def suggest( + self, + texts: list[str], + params: dict[str, Any] | None = None, + ) -> SuggestionBatch: """Suggest subjects for the input documents and return a list of subject sets represented as a list of SubjectSuggestion objects.""" beparams = self._get_backend_params(params) self.initialize() return self._suggest_batch(texts, params=beparams) - def debug(self, message): + def debug(self, message: str) -> None: """Log a debug message from this backend""" logger.debug("Backend {}: {}".format(self.backend_id, message)) - def info(self, message): + def info(self, message: str) -> None: """Log an info message from this backend""" logger.info("Backend {}: {}".format(self.backend_id, message)) - def warning(self, message): + def warning(self, message: str) -> None: """Log a warning message from this backend""" logger.warning("Backend {}: {}".format(self.backend_id, message)) @@ -119,7 +151,11 @@ def _learn(self, corpus, params): functionality, with pre-processed parameters.""" pass # pragma: no cover - def learn(self, corpus, params=None): + def learn( + self, + corpus: DocumentCorpus, + params: dict[str, Any] | None = None, + ) -> None: """Further train the model on the given document or subject corpus.""" beparams = self._get_backend_params(params) return self._learn(corpus, params=beparams) diff --git a/annif/backend/dummy.py b/annif/backend/dummy.py index 9d60b0798..5f62517a5 100644 --- a/annif/backend/dummy.py +++ b/annif/backend/dummy.py @@ -1,10 +1,15 @@ """Dummy backend for testing basic interaction of projects and backends""" +from __future__ import annotations +from typing import TYPE_CHECKING, Any from annif.suggestion import SubjectSuggestion from . import backend +if TYPE_CHECKING: + from annif.corpus.document import DocumentCorpus + class DummyBackend(backend.AnnifLearningBackend): name = "dummy" @@ -13,13 +18,13 @@ class DummyBackend(backend.AnnifLearningBackend): is_trained = True modification_time = None - def default_params(self): + def default_params(self) -> dict[str, int]: return backend.AnnifBackend.DEFAULT_PARAMETERS - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: self.initialized = True - def _suggest(self, text, params): + def _suggest(self, text: str, params: dict[str, Any]) -> list[SubjectSuggestion]: score = float(params.get("score", 1.0)) # Ensure tests fail if "text" with wrong type ends up here @@ -37,7 +42,11 @@ def _suggest(self, text, params): return [SubjectSuggestion(subject_id=subject_id, score=score)] - def _learn(self, corpus, params): + def _learn( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + ) -> None: # in this dummy backend we "learn" by picking up the subject ID # of the first subject of the first document in the learning set # and using that in subsequent analysis results diff --git a/annif/backend/ensemble.py b/annif/backend/ensemble.py index 918a41444..6f7f2eb04 100644 --- a/annif/backend/ensemble.py +++ b/annif/backend/ensemble.py @@ -1,5 +1,7 @@ """Ensemble backend that combines results from multiple projects""" +from __future__ import annotations +from typing import TYPE_CHECKING, Any import annif.eval import annif.parallel @@ -9,11 +11,20 @@ from . 
import backend, hyperopt +if TYPE_CHECKING: + from datetime import datetime + + from optuna.study.study import Study + from optuna.trial import Trial + + from annif.backend.hyperopt import HPRecommendation + from annif.corpus.document import DocumentCorpus + class BaseEnsembleBackend(backend.AnnifBackend): """Base class for ensemble backends""" - def _get_sources_attribute(self, attr): + def _get_sources_attribute(self, attr: str) -> list[bool | None]: params = self._get_backend_params(None) sources = annif.util.parse_sources(params["sources"]) return [ @@ -21,20 +32,27 @@ def _get_sources_attribute(self, attr): for project_id, _ in sources ] - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: # initialize all the source projects params = self._get_backend_params(None) for project_id, _ in annif.util.parse_sources(params["sources"]): project = self.project.registry.get_project(project_id) project.initialize(parallel) - def _suggest_with_sources(self, texts, sources): + def _suggest_with_sources( + self, texts: list[str], sources: list[tuple[str, float]] + ) -> dict[str, SuggestionBatch]: return { project_id: self.project.registry.get_project(project_id).suggest(texts) for project_id, _ in sources } - def _merge_source_batches(self, batch_by_source, sources, params): + def _merge_source_batches( + self, + batch_by_source: dict[str, SuggestionBatch], + sources: list[tuple[str, float]], + params: dict[str, Any], + ) -> SuggestionBatch: """Merge the given SuggestionBatches from each source into a single SuggestionBatch. The default implementation computes a weighted average based on the weights given in the sources tuple. Intended @@ -46,7 +64,9 @@ def _merge_source_batches(self, batch_by_source, sources, params): limit=int(params["limit"]) ) - def _suggest_batch(self, texts, params): + def _suggest_batch( + self, texts: list[str], params: dict[str, Any] + ) -> SuggestionBatch: sources = annif.util.parse_sources(params["sources"]) batch_by_source = self._suggest_with_sources(texts, sources) return self._merge_source_batches(batch_by_source, sources, params) @@ -55,7 +75,9 @@ def _suggest_batch(self, texts, params): class EnsembleOptimizer(hyperopt.HyperparameterOptimizer): """Hyperparameter optimizer for the ensemble backend""" - def __init__(self, backend, corpus, metric): + def __init__( + self, backend: EnsembleBackend, corpus: DocumentCorpus, metric: str + ) -> None: super().__init__(backend, corpus, metric) self._sources = [ project_id @@ -64,7 +86,7 @@ def __init__(self, backend, corpus, metric): ) ] - def _prepare(self, n_jobs=1): + def _prepare(self, n_jobs: int = 1) -> None: self._gold_batches = [] self._source_batches = [] @@ -89,16 +111,16 @@ def _prepare(self, n_jobs=1): self._source_batches.append(suggestions) self._gold_batches.append(gold_batch) - def _normalize(self, hps): + def _normalize(self, hps: dict[str, float]) -> dict[str, float]: total = sum(hps.values()) return {source: hps[source] / total for source in hps} - def _format_cfg_line(self, hps): + def _format_cfg_line(self, hps: dict[str, float]) -> str: return "sources=" + ",".join( [f"{src}:{weight:.4f}" for src, weight in hps.items()] ) - def _objective(self, trial): + def _objective(self, trial: Trial) -> float: eval_batch = annif.eval.EvaluationBatch(self._backend.project.subjects) proj_weights = { project_id: trial.suggest_uniform(project_id, 0.0, 1.0) @@ -114,7 +136,7 @@ def _objective(self, trial): results = eval_batch.results(metrics=[self._metric]) return 
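The _merge_source_batches docstring above states that the default merge is a weighted average of the source projects' suggestions. A rough standalone sketch of that arithmetic with made-up scores and weights; the real implementation works on SuggestionBatch objects rather than raw arrays:

import numpy as np

score_vectors = {
    "tfidf-en": np.array([0.2, 0.7, 0.1]),
    "fasttext-en": np.array([0.6, 0.3, 0.4]),
}
weights = {"tfidf-en": 1.0, "fasttext-en": 2.0}

total = sum(weights.values())
merged = sum(weights[p] / total * vec for p, vec in score_vectors.items())
# merged -> approximately [0.47, 0.43, 0.30]; the top "limit" subjects are kept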
results[self._metric] - def _postprocess(self, study): + def _postprocess(self, study: Study) -> HPRecommendation: line = self._format_cfg_line(self._normalize(study.best_params)) return hyperopt.HPRecommendation(lines=[line], score=study.best_value) @@ -125,17 +147,19 @@ class EnsembleBackend(BaseEnsembleBackend, hyperopt.AnnifHyperoptBackend): name = "ensemble" @property - def is_trained(self): + def is_trained(self) -> bool: sources_trained = self._get_sources_attribute("is_trained") return all(sources_trained) @property - def modification_time(self): + def modification_time(self) -> datetime | None: mtimes = self._get_sources_attribute("modification_time") return max(filter(None, mtimes), default=None) - def get_hp_optimizer(self, corpus, metric): + def get_hp_optimizer( + self, corpus: DocumentCorpus, metric: str + ) -> EnsembleOptimizer: return EnsembleOptimizer(self, corpus, metric) - def _train(self, corpus, params, jobs=0): + def _train(self, corpus: DocumentCorpus, params: dict[str, Any], jobs: int = 0): raise NotSupportedException("Training ensemble backend is not possible.") diff --git a/annif/backend/fasttext.py b/annif/backend/fasttext.py index 7b6e9e842..23c33539a 100644 --- a/annif/backend/fasttext.py +++ b/annif/backend/fasttext.py @@ -1,7 +1,9 @@ """Annif backend using the fastText classifier""" +from __future__ import annotations import collections import os.path +from typing import TYPE_CHECKING, Any import fasttext @@ -11,6 +13,12 @@ from . import backend, mixins +if TYPE_CHECKING: + from fasttext.FastText import _FastText + from numpy import ndarray + + from annif.corpus.document import DocumentCorpus + class FastTextBackend(mixins.ChunkingBackend, backend.AnnifBackend): """fastText backend for Annif""" @@ -48,14 +56,14 @@ class FastTextBackend(mixins.ChunkingBackend, backend.AnnifBackend): # defaults for uninitialized instances _model = None - def default_params(self): + def default_params(self) -> dict[str, Any]: params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() params.update(mixins.ChunkingBackend.DEFAULT_PARAMETERS) params.update(self.DEFAULT_PARAMETERS) return params @staticmethod - def _load_model(path): + def _load_model(path: str) -> _FastText: # monkey patch fasttext.FastText.eprint to avoid spurious warning # see https://github.com/facebookresearch/fastText/issues/1067 orig_eprint = fasttext.FastText.eprint @@ -65,7 +73,7 @@ def _load_model(path): fasttext.FastText.eprint = orig_eprint return model - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: if self._model is None: path = os.path.join(self.datadir, self.MODEL_FILE) self.debug("loading fastText model from {}".format(path)) @@ -79,14 +87,14 @@ def initialize(self, parallel=False): ) @staticmethod - def _id_to_label(subject_id): + def _id_to_label(subject_id: int) -> str: return "__label__{:d}".format(subject_id) - def _label_to_subject_id(self, label): + def _label_to_subject_id(self, label: str) -> int: labelnum = label.replace("__label__", "") return int(labelnum) - def _write_train_file(self, corpus, filename): + def _write_train_file(self, corpus: DocumentCorpus, filename: str) -> None: with open(filename, "w", encoding="utf-8") as trainfile: for doc in corpus.documents: text = self._normalize_text(doc.text) @@ -98,17 +106,20 @@ def _write_train_file(self, corpus, filename): else: self.warning(f'no labels for document "{doc.text}"') - def _normalize_text(self, text): + def _normalize_text(self, text: str) -> str: return " 
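For context on the hyperparameter optimization flow in this hunk: each Optuna trial samples one weight per source project, the weights are normalized, and the resulting ensemble is scored against the gold standard. A toy sketch of that loop; the direction="maximize" study setup and the stand-in objective value are assumptions, only the suggest_uniform call mirrors the backend code:

import optuna

source_projects = ["tfidf-en", "fasttext-en"]

def objective(trial):
    weights = {p: trial.suggest_uniform(p, 0.0, 1.0) for p in source_projects}
    total = sum(weights.values())
    normalized = {p: w / total for p, w in weights.items()}
    # the real objective evaluates the merged suggestions with the chosen
    # metric; a stand-in value is returned here
    return normalized["tfidf-en"]

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10)
print(study.best_params, study.best_value)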
".join(self.project.analyzer.tokenize_words(text)) - def _create_train_file(self, corpus): + def _create_train_file( + self, + corpus: DocumentCorpus, + ) -> None: self.info("creating fastText training file") annif.util.atomic_save( corpus, self.datadir, self.TRAIN_FILE, method=self._write_train_file ) - def _create_model(self, params, jobs): + def _create_model(self, params: dict[str, Any], jobs: int) -> None: self.info("creating fastText model") trainpath = os.path.join(self.datadir, self.TRAIN_FILE) modelpath = os.path.join(self.datadir, self.MODEL_FILE) @@ -123,7 +134,12 @@ def _create_model(self, params, jobs): self._model = fasttext.train_supervised(trainpath, **params) self._model.save_model(modelpath) - def _train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: if corpus != "cached": if corpus.is_empty(): raise NotSupportedException( @@ -134,7 +150,9 @@ def _train(self, corpus, params, jobs=0): self.info("Reusing cached training data from previous run.") self._create_model(params, jobs) - def _predict_chunks(self, chunktexts, limit): + def _predict_chunks( + self, chunktexts: list[str], limit: int + ) -> tuple[list[list[str]], list[ndarray]]: return self._model.predict( list( filter( @@ -144,7 +162,9 @@ def _predict_chunks(self, chunktexts, limit): limit, ) - def _suggest_chunks(self, chunktexts, params): + def _suggest_chunks( + self, chunktexts: list[str], params: dict[str, Any] + ) -> list[SubjectSuggestion]: limit = int(params["limit"]) chunklabels, chunkscores = self._predict_chunks(chunktexts, limit) label_scores = collections.defaultdict(float) diff --git a/annif/backend/http.py b/annif/backend/http.py index a76dbbb6a..0fce7f8e4 100644 --- a/annif/backend/http.py +++ b/annif/backend/http.py @@ -1,8 +1,9 @@ """HTTP/REST client backend that makes calls to a web service and returns the results""" - +from __future__ import annotations import importlib +from typing import TYPE_CHECKING, Any import dateutil.parser import requests @@ -13,13 +14,16 @@ from . 
import backend +if TYPE_CHECKING: + from datetime import datetime + class HTTPBackend(backend.AnnifBackend): name = "http" _headers = None @property - def headers(self): + def headers(self) -> dict[str, str]: if self._headers is None: version = importlib.metadata.version("annif") self._headers = { @@ -28,17 +32,17 @@ def headers(self): return self._headers @property - def is_trained(self): + def is_trained(self) -> bool | None: return self._get_project_info("is_trained") @property - def modification_time(self): + def modification_time(self) -> datetime | None: mtime = self._get_project_info("modification_time") if mtime is None: return None return dateutil.parser.parse(mtime) - def _get_project_info(self, key): + def _get_project_info(self, key: str) -> bool | str | None: params = self._get_backend_params(None) try: req = requests.get( @@ -59,7 +63,7 @@ def _get_project_info(self, key): else: return None - def _suggest(self, text, params): + def _suggest(self, text: str, params: dict[str, Any]) -> list[SubjectSuggestion]: data = {"text": text} if "project" in params: data["project"] = params["project"] diff --git a/annif/backend/hyperopt.py b/annif/backend/hyperopt.py index 1bdce0aa4..2c2e7422c 100644 --- a/annif/backend/hyperopt.py +++ b/annif/backend/hyperopt.py @@ -1,14 +1,23 @@ """Hyperparameter optimization functionality for backends""" +from __future__ import annotations import abc import collections import warnings +from typing import TYPE_CHECKING, Callable import optuna import optuna.exceptions from .backend import AnnifBackend +if TYPE_CHECKING: + from click.utils import LazyFile + from optuna.study.study import Study + from optuna.trial import Trial + + from annif.corpus.document import DocumentCorpus + HPRecommendation = collections.namedtuple("HPRecommendation", "lines score") @@ -16,12 +25,12 @@ class TrialWriter: """Object that writes hyperparameter optimization trial results into a TSV file.""" - def __init__(self, results_file, normalize_func): + def __init__(self, results_file: LazyFile, normalize_func: Callable) -> None: self.results_file = results_file self.normalize_func = normalize_func self.header_written = False - def write(self, study, trial): + def write(self, study: Study, trial: Trial) -> None: """Write the results of one trial into the results file. On the first run, write the header line first.""" @@ -44,12 +53,14 @@ def write(self, study, trial): class HyperparameterOptimizer: """Base class for hyperparameter optimizers""" - def __init__(self, backend, corpus, metric): + def __init__( + self, backend: AnnifBackend, corpus: DocumentCorpus, metric: str + ) -> None: self._backend = backend self._corpus = corpus self._metric = metric - def _prepare(self, n_jobs=1): + def _prepare(self, n_jobs: int = 1): """Prepare the optimizer for hyperparameter evaluation. Up to n_jobs parallel threads or processes may be used during the operation.""" @@ -57,21 +68,23 @@ def _prepare(self, n_jobs=1): pass # pragma: no cover @abc.abstractmethod - def _objective(self, trial): + def _objective(self, trial: Trial) -> float: """Objective function to optimize""" pass # pragma: no cover @abc.abstractmethod - def _postprocess(self, study): + def _postprocess(self, study: Study) -> HPRecommendation: """Convert the study results into hyperparameter recommendations""" pass # pragma: no cover - def _normalize(self, hps): + def _normalize(self, hps: dict[str, float]) -> dict[str, float]: """Normalize the given raw hyperparameters. Intended to be overridden by subclasses when necessary. 
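For orientation, the HTTP backend simply delegates to a remote Annif instance over its REST API. A hedged sketch of the kind of request it issues; the endpoint URL is a placeholder and the response shape shown (a JSON object with a "results" list) is an assumption based on Annif's own suggest API:

import requests

endpoint = "https://annif.example.org/v1/projects/yso-en/suggest"  # placeholder
response = requests.post(endpoint, data={"text": "an example document"})
response.raise_for_status()
for hit in response.json().get("results", []):
    print(hit["uri"], hit["score"])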
The default is to keep them as-is.""" return hps - def optimize(self, n_trials, n_jobs, results_file): + def optimize( + self, n_trials: int, n_jobs: int, results_file: LazyFile | None + ) -> HPRecommendation: """Find the optimal hyperparameters by testing up to the given number of hyperparameter combinations""" @@ -103,7 +116,7 @@ class AnnifHyperoptBackend(AnnifBackend): optimization""" @abc.abstractmethod - def get_hp_optimizer(self, corpus, metric): + def get_hp_optimizer(self, corpus: DocumentCorpus, metric: str): """Get a HyperparameterOptimizer object that can look for optimal hyperparameter combinations for the given corpus, measured using the given metric""" diff --git a/annif/backend/mixins.py b/annif/backend/mixins.py index 5161a947d..066d5d862 100644 --- a/annif/backend/mixins.py +++ b/annif/backend/mixins.py @@ -1,8 +1,9 @@ """Annif backend mixins that can be used to implement features""" - +from __future__ import annotations import abc import os.path +from typing import TYPE_CHECKING, Any import joblib from sklearn.feature_extraction.text import TfidfVectorizer @@ -10,23 +11,32 @@ import annif.util from annif.exception import NotInitializedException +if TYPE_CHECKING: + from collections.abc import Iterable + + from scipy.sparse._csr import csr_matrix + + from annif.suggestion import SubjectSuggestion + class ChunkingBackend(metaclass=abc.ABCMeta): """Annif backend mixin that implements chunking of input""" DEFAULT_PARAMETERS = {"chunksize": 1} - def default_params(self): + def default_params(self) -> dict[str, Any]: return self.DEFAULT_PARAMETERS @abc.abstractmethod - def _suggest_chunks(self, chunktexts, params): + def _suggest_chunks( + self, chunktexts: list[str], params: dict[str, Any] + ) -> list[SubjectSuggestion]: """Suggest subjects for the chunked text; should be implemented by the subclass inheriting this mixin""" pass # pragma: no cover - def _suggest(self, text, params): + def _suggest(self, text: str, params: dict[str, Any]) -> list[SubjectSuggestion]: self.debug( 'Suggesting subjects for text "{}..." (len={})'.format(text[:20], len(text)) ) @@ -49,7 +59,7 @@ class TfidfVectorizerMixin: vectorizer = None - def initialize_vectorizer(self): + def initialize_vectorizer(self) -> None: if self.vectorizer is None: path = os.path.join(self.datadir, self.VECTORIZER_FILE) if os.path.exists(path): @@ -61,7 +71,9 @@ def initialize_vectorizer(self): backend_id=self.backend_id, ) - def create_vectorizer(self, input, params={}): + def create_vectorizer( + self, input: Iterable[str], params: dict[str, Any] = {} + ) -> csr_matrix: self.info("creating vectorizer") self.vectorizer = TfidfVectorizer(**params) veccorpus = self.vectorizer.fit_transform(input) diff --git a/annif/backend/mllm.py b/annif/backend/mllm.py index 6954dadc3..f73bf8324 100644 --- a/annif/backend/mllm.py +++ b/annif/backend/mllm.py @@ -1,6 +1,8 @@ """Maui-like Lexical Matching backend""" +from __future__ import annotations import os.path +from typing import TYPE_CHECKING, Any import joblib import numpy as np @@ -13,11 +15,21 @@ from . 
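The TfidfVectorizerMixin above boils down to fitting one scikit-learn TfidfVectorizer on the training texts, persisting it, and reusing it to project incoming documents into the same term space. A minimal standalone sketch:

from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer()
veccorpus = vectorizer.fit_transform(
    ["a document about forests", "a document about lakes"]
)
query_vec = vectorizer.transform(["forests and lakes"])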
import backend, hyperopt +if TYPE_CHECKING: + from collections.abc import Iterator + + from optuna.study.study import Study + from optuna.trial import Trial + + from annif.backend.hyperopt import HPRecommendation + from annif.corpus.document import DocumentCorpus + from annif.lexical.mllm import Candidate + class MLLMOptimizer(hyperopt.HyperparameterOptimizer): """Hyperparameter optimizer for the MLLM backend""" - def _prepare(self, n_jobs=1): + def _prepare(self, n_jobs: int = 1) -> None: self._backend.initialize() self._train_x, self._train_y = self._backend._load_train_data() self._candidates = [] @@ -29,7 +41,7 @@ def _prepare(self, n_jobs=1): self._candidates.append(candidates) self._gold_subjects.append(doc.subject_set) - def _objective(self, trial): + def _objective(self, trial: Trial) -> float: params = { "min_samples_leaf": trial.suggest_int("min_samples_leaf", 5, 30), "max_leaf_nodes": trial.suggest_int("max_leaf_nodes", 100, 2000), @@ -52,7 +64,7 @@ def _objective(self, trial): results = batch.results(metrics=[self._metric]) return results[self._metric] - def _postprocess(self, study): + def _postprocess(self, study: Study) -> HPRecommendation: bp = study.best_params lines = [ f"min_samples_leaf={bp['min_samples_leaf']}", @@ -80,15 +92,15 @@ class MLLMBackend(hyperopt.AnnifHyperoptBackend): "use_hidden_labels": False, } - def get_hp_optimizer(self, corpus, metric): + def get_hp_optimizer(self, corpus: DocumentCorpus, metric: str) -> MLLMOptimizer: return MLLMOptimizer(self, corpus, metric) - def default_params(self): + def default_params(self) -> dict[str, Any]: params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() params.update(self.DEFAULT_PARAMETERS) return params - def _load_model(self): + def _load_model(self) -> MLLMModel: path = os.path.join(self.datadir, self.MODEL_FILE) self.debug("loading model from {}".format(path)) if os.path.exists(path): @@ -98,7 +110,7 @@ def _load_model(self): "model {} not found".format(path), backend_id=self.backend_id ) - def _load_train_data(self): + def _load_train_data(self) -> tuple[np.ndarray, np.ndarray]: path = os.path.join(self.datadir, self.TRAIN_FILE) if os.path.exists(path): return joblib.load(path) @@ -107,11 +119,16 @@ def _load_train_data(self): "train data file {} not found".format(path), backend_id=self.backend_id ) - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: if self._model is None: self._model = self._load_model() - def _train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: self.info("starting train") if corpus != "cached": if corpus.is_empty(): @@ -137,16 +154,20 @@ def _train(self, corpus, params, jobs=0): self.info("saving model") annif.util.atomic_save(self._model, self.datadir, self.MODEL_FILE) - def _generate_candidates(self, text): + def _generate_candidates(self, text: str) -> list[Candidate]: return self._model.generate_candidates(text, self.project.analyzer) - def _prediction_to_result(self, prediction, params): + def _prediction_to_result( + self, + prediction: list[tuple[np.float64, int]], + params: dict[str, Any], + ) -> Iterator: vector = np.zeros(len(self.project.subjects), dtype=np.float32) for score, subject_id in prediction: vector[subject_id] = score return vector_to_suggestions(vector, int(params["limit"])) - def _suggest(self, text, params): + def _suggest(self, text: str, params: dict[str, Any]) -> Iterator: candidates = self._generate_candidates(text) prediction = 
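The newly annotated _prediction_to_result above turns the MLLM model's (score, subject_id) pairs into a dense score vector over the whole vocabulary before the limit is applied. The same step in isolation, with toy values:

import numpy as np

n_subjects = 5                      # vocabulary size (toy value)
prediction = [(0.8, 2), (0.3, 4)]   # (score, subject_id) pairs

vector = np.zeros(n_subjects, dtype=np.float32)
for score, subject_id in prediction:
    vector[subject_id] = score
# vector -> [0.0, 0.0, 0.8, 0.0, 0.3]; the top "limit" entries become suggestions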
self._model.predict(candidates) return self._prediction_to_result(prediction, params) diff --git a/annif/backend/nn_ensemble.py b/annif/backend/nn_ensemble.py index 2ee5f89c4..658bd79be 100644 --- a/annif/backend/nn_ensemble.py +++ b/annif/backend/nn_ensemble.py @@ -1,10 +1,11 @@ """Neural network based ensemble backend that combines results from multiple projects.""" - +from __future__ import annotations import os.path import shutil from io import BytesIO +from typing import TYPE_CHECKING, Any import joblib import lmdb @@ -23,13 +24,18 @@ from . import backend, ensemble +if TYPE_CHECKING: + from tensorflow.python.framework.ops import EagerTensor + + from annif.corpus.document import DocumentCorpus + -def idx_to_key(idx): +def idx_to_key(idx: int) -> bytes: """convert an integer index to a binary key for use in LMDB""" return b"%08d" % idx -def key_to_idx(key): +def key_to_idx(key: memoryview | bytes) -> int: """convert a binary LMDB key to an integer index""" return int(key) @@ -47,7 +53,7 @@ def __init__(self, txn, batch_size): self._counter = 0 self._batch_size = batch_size - def add_sample(self, inputs, targets): + def add_sample(self, inputs: np.ndarray, targets: np.ndarray) -> None: # use zero-padded 8-digit key key = idx_to_key(self._counter) self._counter += 1 @@ -58,7 +64,7 @@ def add_sample(self, inputs, targets): buf.seek(0) self._txn.put(key, buf.read()) - def __getitem__(self, idx): + def __getitem__(self, idx: int) -> tuple[np.ndarray, np.ndarray]: """get a particular batch of samples""" cursor = self._txn.cursor() first_key = idx * self._batch_size @@ -73,7 +79,7 @@ def __getitem__(self, idx): target_arrays.append(target_csr.toarray().flatten()) return np.array(input_arrays), np.array(target_arrays) - def __len__(self): + def __len__(self) -> int: """return the number of available batches""" return int(np.ceil(self._counter / self._batch_size)) @@ -81,7 +87,7 @@ def __len__(self): class MeanLayer(Layer): """Custom Keras layer that calculates mean values along the 2nd axis.""" - def call(self, inputs): + def call(self, inputs: EagerTensor) -> EagerTensor: return K.mean(inputs, axis=2) @@ -106,12 +112,12 @@ class NNEnsembleBackend(backend.AnnifLearningBackend, ensemble.BaseEnsembleBacke # defaults for uninitialized instances _model = None - def default_params(self): + def default_params(self) -> dict[str, Any]: params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() params.update(self.DEFAULT_PARAMETERS) return params - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: super().initialize(parallel) if self._model is not None: return # already initialized @@ -130,7 +136,12 @@ def initialize(self, parallel=False): model_filename, custom_objects={"MeanLayer": MeanLayer} ) - def _merge_source_batches(self, batch_by_source, sources, params): + def _merge_source_batches( + self, + batch_by_source: dict[str, SuggestionBatch], + sources: list[tuple[str, float]], + params: dict[str, Any], + ) -> SuggestionBatch: src_weight = dict(sources) score_vectors = np.array( [ @@ -153,7 +164,7 @@ def _merge_source_batches(self, batch_by_source, sources, params): self.project.subjects, ) - def _create_model(self, sources): + def _create_model(self, sources: list[tuple[str, float]]) -> None: self.info("creating NN ensemble model") inputs = Input(shape=(len(self.project.subjects), len(sources))) @@ -185,7 +196,12 @@ def _create_model(self, sources): self._model.summary(print_fn=summary.append) self.debug("Created model: \n" + "\n".join(summary)) - def 
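A quick round trip of the LMDB key helpers typed above; the zero padding keeps keys in insertion order under LMDB's lexicographic sorting, which is what lets __getitem__ read a batch with a sequential cursor:

def idx_to_key(idx: int) -> bytes:
    return b"%08d" % idx

def key_to_idx(key: bytes) -> int:
    return int(key)

assert idx_to_key(42) == b"00000042"
assert key_to_idx(idx_to_key(42)) == 42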
_train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: sources = annif.util.parse_sources(self.params["sources"]) self._create_model(sources) self._fit_model( @@ -195,7 +211,12 @@ def _train(self, corpus, params, jobs=0): n_jobs=jobs, ) - def _corpus_to_vectors(self, corpus, seq, n_jobs): + def _corpus_to_vectors( + self, + corpus: DocumentCorpus, + seq: LMDBSequence, + n_jobs: int, + ) -> None: # pass corpus through all source projects sources = dict(annif.util.parse_sources(self.params["sources"])) @@ -236,7 +257,13 @@ def _open_lmdb(self, cached, lmdb_map_size): shutil.rmtree(lmdb_path) return lmdb.open(lmdb_path, map_size=lmdb_map_size, writemap=True) - def _fit_model(self, corpus, epochs, lmdb_map_size, n_jobs=1): + def _fit_model( + self, + corpus: DocumentCorpus, + epochs: int, + lmdb_map_size: int, + n_jobs: int = 1, + ) -> None: env = self._open_lmdb(corpus == "cached", lmdb_map_size) if corpus != "cached": if corpus.is_empty(): @@ -256,7 +283,11 @@ def _fit_model(self, corpus, epochs, lmdb_map_size, n_jobs=1): annif.util.atomic_save(self._model, self.datadir, self.MODEL_FILE) - def _learn(self, corpus, params): + def _learn( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + ) -> None: self.initialize() self._fit_model( corpus, int(params["learn-epochs"]), int(params["lmdb_map_size"]) diff --git a/annif/backend/omikuji.py b/annif/backend/omikuji.py index 99218b951..6c864b89e 100644 --- a/annif/backend/omikuji.py +++ b/annif/backend/omikuji.py @@ -1,7 +1,9 @@ """Annif backend using the Omikuji classifier""" +from __future__ import annotations import os.path import shutil +from typing import TYPE_CHECKING, Any import omikuji @@ -15,6 +17,11 @@ from . 
import backend, mixins +if TYPE_CHECKING: + from scipy.sparse._csr import csr_matrix + + from annif.corpus.document import DocumentCorpus + class OmikujiBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): """Omikuji based backend for Annif""" @@ -36,12 +43,12 @@ class OmikujiBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): "collapse_every_n_layers": 0, } - def default_params(self): + def default_params(self) -> dict[str, Any]: params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() params.update(self.DEFAULT_PARAMETERS) return params - def _initialize_model(self): + def _initialize_model(self) -> None: if self._model is None: path = os.path.join(self.datadir, self.MODEL_FILE) self.debug("loading model from {}".format(path)) @@ -58,11 +65,11 @@ def _initialize_model(self): "model {} not found".format(path), backend_id=self.backend_id ) - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: self.initialize_vectorizer() self._initialize_model() - def _create_train_file(self, veccorpus, corpus): + def _create_train_file(self, veccorpus: csr_matrix, corpus: DocumentCorpus) -> None: self.info("creating train file") path = os.path.join(self.datadir, self.TRAIN_FILE) with open(path, "w", encoding="utf-8") as trainfile: @@ -89,7 +96,7 @@ def _create_train_file(self, veccorpus, corpus): trainfile.seek(0) print("{:08d}".format(n_samples), end="", file=trainfile) - def _create_model(self, params, jobs): + def _create_model(self, params: dict[str, Any], jobs: int) -> None: train_path = os.path.join(self.datadir, self.TRAIN_FILE) model_path = os.path.join(self.datadir, self.MODEL_FILE) hyper_param = omikuji.Model.default_hyper_param() @@ -104,7 +111,12 @@ def _create_model(self, params, jobs): shutil.rmtree(model_path) self._model.save(os.path.join(self.datadir, self.MODEL_FILE)) - def _train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: if corpus != "cached": if corpus.is_empty(): raise NotSupportedException( @@ -122,7 +134,9 @@ def _train(self, corpus, params, jobs=0): self.info("Reusing cached training data from previous run.") self._create_model(params, jobs) - def _suggest_batch(self, texts, params): + def _suggest_batch( + self, texts: list[str], params: dict[str, Any] + ) -> SuggestionBatch: vector = self.vectorizer.transform(texts) limit = int(params["limit"]) diff --git a/annif/backend/pav.py b/annif/backend/pav.py index 5125cb8cd..da8a6e2c1 100644 --- a/annif/backend/pav.py +++ b/annif/backend/pav.py @@ -2,8 +2,10 @@ learns which concept suggestions from each backend are trustworthy using the PAV algorithm, a.k.a. isotonic regression, to turn raw scores returned by individual backends into probabilities.""" +from __future__ import annotations import os.path +from typing import TYPE_CHECKING, Any import joblib import numpy as np @@ -17,6 +19,10 @@ from . 
import backend, ensemble +if TYPE_CHECKING: + from annif.corpus.document import DocumentCorpus + from annif.project import AnnifProject + class PAVBackend(ensemble.BaseEnsembleBackend): """PAV ensemble backend that combines results from multiple projects""" @@ -30,12 +36,12 @@ class PAVBackend(ensemble.BaseEnsembleBackend): DEFAULT_PARAMETERS = {"min-docs": 10} - def default_params(self): + def default_params(self) -> dict[str, Any]: params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() params.update(self.DEFAULT_PARAMETERS) return params - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: super().initialize(parallel) if self._models is not None: return # already initialized @@ -53,11 +59,16 @@ def initialize(self, parallel=False): backend_id=self.backend_id, ) - def _get_model(self, source_project_id): + def _get_model(self, source_project_id: str) -> dict[int, IsotonicRegression]: self.initialize() return self._models[source_project_id] - def _merge_source_batches(self, batch_by_source, sources, params): + def _merge_source_batches( + self, + batch_by_source: dict[str, SuggestionBatch], + sources: list[tuple[str, float]], + params: dict[str, Any], + ) -> SuggestionBatch: reg_batch_by_source = {} for project_id, batch in batch_by_source.items(): reg_models = self._get_model(project_id) @@ -82,7 +93,9 @@ def _merge_source_batches(self, batch_by_source, sources, params): return super()._merge_source_batches(reg_batch_by_source, sources, params) @staticmethod - def _suggest_train_corpus(source_project, corpus): + def _suggest_train_corpus( + source_project: AnnifProject, corpus: DocumentCorpus + ) -> tuple[csc_matrix, csc_matrix]: # lists for constructing score matrix data, row, col = [], [], [] # lists for constructing true label matrix @@ -114,7 +127,9 @@ def _suggest_train_corpus(source_project, corpus): ) return csc_matrix(scores), csc_matrix(true) - def _create_pav_model(self, source_project_id, min_docs, corpus): + def _create_pav_model( + self, source_project_id: str, min_docs: int, corpus: DocumentCorpus + ) -> None: self.info( "creating PAV model for source {}, min_docs={}".format( source_project_id, min_docs @@ -138,7 +153,12 @@ def _create_pav_model(self, source_project_id, min_docs, corpus): pav_regressions, self.datadir, model_filename, method=joblib.dump ) - def _train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: if corpus == "cached": raise NotSupportedException( "Training pav project from cached data not supported." diff --git a/annif/backend/stwfsa.py b/annif/backend/stwfsa.py index d8217ee03..fdc962b11 100644 --- a/annif/backend/stwfsa.py +++ b/annif/backend/stwfsa.py @@ -1,4 +1,7 @@ +from __future__ import annotations + import os +from typing import TYPE_CHECKING, Any from stwfsapy.predictor import StwfsapyPredictor @@ -8,6 +11,9 @@ from . 
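As background for the PAV backend hunks above: for each subject, the raw scores a source project produced on the training documents are calibrated against whether the subject was actually correct, using isotonic regression. A generic sketch with toy data; the constructor arguments used by the real backend are not shown in this hunk:

import numpy as np
from sklearn.isotonic import IsotonicRegression

raw_scores = np.array([0.1, 0.3, 0.5, 0.7, 0.9])   # scores from one source project
was_correct = np.array([0, 0, 1, 1, 1])            # gold-standard outcome per doc

reg = IsotonicRegression(out_of_bounds="clip")
reg.fit(raw_scores, was_correct)
calibrated = reg.predict(np.array([0.2, 0.8]))     # monotone scores in 0..1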
import backend +if TYPE_CHECKING: + from annif.corpus.document import DocumentCorpus + _KEY_CONCEPT_TYPE_URI = "concept_type_uri" _KEY_SUBTHESAURUS_TYPE_URI = "sub_thesaurus_type_uri" _KEY_THESAURUS_RELATION_TYPE_URI = "thesaurus_relation_type_uri" @@ -59,7 +65,7 @@ class StwfsaBackend(backend.AnnifBackend): _model = None - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: if self._model is None: path = os.path.join(self.datadir, self.MODEL_FILE) self.debug(f"Loading STWFSA model from {path}.") @@ -71,7 +77,7 @@ def initialize(self, parallel=False): f"Model not found at {path}", backend_id=self.backend_id ) - def _load_data(self, corpus): + def _load_data(self, corpus: DocumentCorpus) -> tuple[list[str], list[list[str]]]: if corpus == "cached": raise NotSupportedException( "Training stwfsa project from cached data not supported." @@ -93,7 +99,12 @@ def _load_data(self, corpus): ) return X, y - def _train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: X, y = self._load_data(corpus) new_params = { key: self.STWFSA_PARAMETERS[key](val) @@ -114,7 +125,7 @@ def _train(self, corpus, params, jobs=0): lambda model, store_path: model.store(store_path), ) - def _suggest(self, text, params): + def _suggest(self, text: str, params: dict[str, Any]) -> list[SubjectSuggestion]: self.debug(f'Suggesting subjects for text "{text[:20]}..." (len={len(text)})') result = self._model.suggest_proba([text])[0] suggestions = [] diff --git a/annif/backend/svc.py b/annif/backend/svc.py index ad8939f5f..1e7932c3e 100644 --- a/annif/backend/svc.py +++ b/annif/backend/svc.py @@ -1,6 +1,8 @@ """Annif backend using a SVM classifier""" +from __future__ import annotations import os.path +from typing import TYPE_CHECKING, Any import joblib import numpy as np @@ -13,6 +15,11 @@ from . 
import backend, mixins +if TYPE_CHECKING: + from scipy.sparse._csr import csr_matrix + + from annif.corpus.document import DocumentCorpus + class SVCBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): """Support vector classifier backend for Annif""" @@ -26,12 +33,12 @@ class SVCBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): DEFAULT_PARAMETERS = {"min_df": 1, "ngram": 1} - def default_params(self): + def default_params(self) -> dict[str, Any]: params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() params.update(self.DEFAULT_PARAMETERS) return params - def _initialize_model(self): + def _initialize_model(self) -> None: if self._model is None: path = os.path.join(self.datadir, self.MODEL_FILE) self.debug("loading model from {}".format(path)) @@ -42,11 +49,13 @@ def _initialize_model(self): "model {} not found".format(path), backend_id=self.backend_id ) - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: self.initialize_vectorizer() self._initialize_model() - def _corpus_to_texts_and_classes(self, corpus): + def _corpus_to_texts_and_classes( + self, corpus: DocumentCorpus + ) -> tuple[list[str], list[int]]: texts = [] classes = [] for doc in corpus.documents: @@ -61,7 +70,7 @@ def _corpus_to_texts_and_classes(self, corpus): classes.append(doc.subject_set[0]) return texts, classes - def _train_classifier(self, veccorpus, classes): + def _train_classifier(self, veccorpus: csr_matrix, classes: list[int]) -> None: self.info("creating classifier") self._model = LinearSVC() self._model.fit(veccorpus, classes) @@ -69,7 +78,9 @@ def _train_classifier(self, veccorpus, classes): self._model, self.datadir, self.MODEL_FILE, method=joblib.dump ) - def _train(self, corpus, params, jobs=0): + def _train( + self, corpus: DocumentCorpus, params: dict[str, Any], jobs: int = 0 + ) -> None: if corpus == "cached": raise NotSupportedException( "SVC backend does not support reuse of cached training data." @@ -85,7 +96,9 @@ def _train(self, corpus, params, jobs=0): veccorpus = self.create_vectorizer(texts, vecparams) self._train_classifier(veccorpus, classes) - def _scores_to_suggestions(self, scores, params): + def _scores_to_suggestions( + self, scores: np.ndarray, params: dict[str, Any] + ) -> list[SubjectSuggestion]: results = [] limit = int(params["limit"]) for class_id in np.argsort(scores)[::-1][:limit]: @@ -96,7 +109,9 @@ def _scores_to_suggestions(self, scores, params): ) return results - def _suggest_batch(self, texts, params): + def _suggest_batch( + self, texts: list[str], params: dict[str, Any] + ) -> SuggestionBatch: vector = self.vectorizer.transform(texts) confidences = self._model.decision_function(vector) # convert to 0..1 score range using logistic function diff --git a/annif/backend/tfidf.py b/annif/backend/tfidf.py index 335fe53d1..1cca639ca 100644 --- a/annif/backend/tfidf.py +++ b/annif/backend/tfidf.py @@ -1,8 +1,10 @@ """Backend that returns most similar subjects based on similarity in sparse TF-IDF normalized bag-of-words vector space""" +from __future__ import annotations import os.path import tempfile +from typing import TYPE_CHECKING, Any import gensim.similarities from gensim.matutils import Sparse2Corpus @@ -13,19 +15,26 @@ from . 
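The comment in _suggest_batch above says the LinearSVC decision-function margins are squashed into a 0..1 range with the logistic function; spelled out with made-up margins:

import numpy as np

confidences = np.array([-2.0, 0.0, 1.5])        # decision_function output
scores = 1.0 / (1.0 + np.exp(-confidences))     # logistic function
# -> approximately [0.12, 0.50, 0.82]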
import backend, mixins +if TYPE_CHECKING: + from collections.abc import Iterator + + from scipy.sparse._csr import csr_matrix + + from annif.corpus.document import DocumentCorpus + class SubjectBuffer: """A file-backed buffer to store and retrieve subject text.""" BUFFER_SIZE = 100 - def __init__(self, tempdir, subject_id): + def __init__(self, tempdir: str, subject_id: int) -> None: filename = "{:08d}.txt".format(subject_id) self._path = os.path.join(tempdir, filename) self._buffer = [] self._created = False - def flush(self): + def flush(self) -> None: if self._created: mode = "a" else: @@ -38,12 +47,12 @@ def flush(self): self._buffer = [] self._created = True - def write(self, text): + def write(self, text: str) -> None: self._buffer.append(text) if len(self._buffer) >= self.BUFFER_SIZE: self.flush() - def read(self): + def read(self) -> str: if not self._created: # file was never created - we can simply return the buffer content return "\n".join(self._buffer) @@ -62,7 +71,9 @@ class TFIDFBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): INDEX_FILE = "tfidf-index" - def _generate_subjects_from_documents(self, corpus): + def _generate_subjects_from_documents( + self, corpus: DocumentCorpus + ) -> Iterator[str]: with tempfile.TemporaryDirectory() as tempdir: subject_buffer = {} for subject_id in range(len(self.project.subjects)): @@ -76,7 +87,7 @@ def _generate_subjects_from_documents(self, corpus): for sid in range(len(self.project.subjects)): yield subject_buffer[sid].read() - def _initialize_index(self): + def _initialize_index(self) -> None: if self._index is None: path = os.path.join(self.datadir, self.INDEX_FILE) self.debug("loading similarity index from {}".format(path)) @@ -88,11 +99,11 @@ def _initialize_index(self): backend_id=self.backend_id, ) - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: self.initialize_vectorizer() self._initialize_index() - def _create_index(self, veccorpus): + def _create_index(self, veccorpus: csr_matrix) -> None: self.info("creating similarity index") gscorpus = Sparse2Corpus(veccorpus, documents_columns=False) self._index = gensim.similarities.SparseMatrixSimilarity( @@ -100,7 +111,12 @@ def _create_index(self, veccorpus): ) annif.util.atomic_save(self._index, self.datadir, self.INDEX_FILE) - def _train(self, corpus, params, jobs=0): + def _train( + self, + corpus: DocumentCorpus, + params: dict[str, Any], + jobs: int = 0, + ) -> None: if corpus == "cached": raise NotSupportedException( "Training tfidf project from cached data not supported." @@ -112,7 +128,7 @@ def _train(self, corpus, params, jobs=0): veccorpus = self.create_vectorizer(subjects) self._create_index(veccorpus) - def _suggest(self, text, params): + def _suggest(self, text: str, params: dict[str, Any]) -> Iterator: self.debug( 'Suggesting subjects for text "{}..." (len={})'.format(text[:20], len(text)) ) diff --git a/annif/backend/yake.py b/annif/backend/yake.py index bb684aaf5..1e6adfdd5 100644 --- a/annif/backend/yake.py +++ b/annif/backend/yake.py @@ -1,10 +1,12 @@ """Annif backend using Yake keyword extraction""" # For license remarks of this backend see README.md: # https://github.com/NatLibFi/Annif#license. +from __future__ import annotations import os.path import re from collections import defaultdict +from typing import TYPE_CHECKING, Any import joblib import yake @@ -16,6 +18,11 @@ from . 
import backend +if TYPE_CHECKING: + from rdflib.term import URIRef + + from annif.corpus.document import DocumentCorpus + class YakeBackend(backend.AnnifBackend): """Yake based backend for Annif""" @@ -38,7 +45,7 @@ class YakeBackend(backend.AnnifBackend): "remove_parentheses": False, } - def default_params(self): + def default_params(self) -> dict[str, Any]: params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() params.update(self.DEFAULT_PARAMETERS) return params @@ -48,7 +55,7 @@ def is_trained(self): return True @property - def label_types(self): + def label_types(self) -> list[URIRef]: if type(self.params["label_types"]) == str: # Label types set by user label_types = [lt.strip() for lt in self.params["label_types"].split(",")] self._validate_label_types(label_types) @@ -56,17 +63,17 @@ def label_types(self): label_types = self.params["label_types"] # The defaults return [getattr(SKOS, lt) for lt in label_types] - def _validate_label_types(self, label_types): + def _validate_label_types(self, label_types: list[str]) -> None: for lt in label_types: if lt not in ("prefLabel", "altLabel", "hiddenLabel"): raise ConfigurationException( f"invalid label type {lt}", backend_id=self.backend_id ) - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: self._initialize_index() - def _initialize_index(self): + def _initialize_index(self) -> None: if self._index is None: path = os.path.join(self.datadir, self.INDEX_FILE) if os.path.exists(path): @@ -78,12 +85,12 @@ def _initialize_index(self): self._save_index(path) self.info(f"Created index with {len(self._index)} labels") - def _save_index(self, path): + def _save_index(self, path: str) -> None: annif.util.atomic_save( self._index, self.datadir, self.INDEX_FILE, method=joblib.dump ) - def _create_index(self): + def _create_index(self) -> dict[str, set[str]]: index = defaultdict(set) skos_vocab = self.project.vocab.skos for concept in skos_vocab.concepts: @@ -95,21 +102,21 @@ def _create_index(self): index.pop("", None) # Remove possible empty string entry return dict(index) - def _normalize_label(self, label): + def _normalize_label(self, label: str) -> str: label = str(label) if annif.util.boolean(self.params["remove_parentheses"]): label = re.sub(r" \(.*\)", "", label) normalized_label = self._normalize_phrase(label) return self._sort_phrase(normalized_label) - def _normalize_phrase(self, phrase): + def _normalize_phrase(self, phrase: str) -> str: return " ".join(self.project.analyzer.tokenize_words(phrase, filter=False)) - def _sort_phrase(self, phrase): + def _sort_phrase(self, phrase: str) -> str: words = phrase.split() return " ".join(sorted(words)) - def _suggest(self, text, params): + def _suggest(self, text: str, params: dict[str, Any]) -> list[SubjectSuggestion]: self.debug(f'Suggesting subjects for text "{text[:20]}..." 
(len={len(text)})') limit = int(params["limit"]) @@ -132,7 +139,9 @@ def _suggest(self, text, params): ] return subject_suggestions - def _keyphrases2suggestions(self, keyphrases): + def _keyphrases2suggestions( + self, keyphrases: list[tuple[str, float]] + ) -> list[tuple[str, float]]: suggestions = [] not_matched = [] for kp, score in keyphrases: @@ -154,16 +163,18 @@ def _keyphrases2suggestions(self, keyphrases): ) return suggestions - def _keyphrase2uris(self, keyphrase): + def _keyphrase2uris(self, keyphrase: str) -> set[str]: keyphrase = self._normalize_phrase(keyphrase) keyphrase = self._sort_phrase(keyphrase) return self._index.get(keyphrase, []) - def _transform_score(self, score): + def _transform_score(self, score: float) -> float: score = max(score, 0) return 1.0 / (score + 1) - def _combine_suggestions(self, suggestions): + def _combine_suggestions( + self, suggestions: list[tuple[str, float]] + ) -> list[tuple[str, float]]: combined_suggestions = {} for uri, score in suggestions: if uri not in combined_suggestions: @@ -173,12 +184,12 @@ def _combine_suggestions(self, suggestions): combined_suggestions[uri] = self._combine_scores(score, old_score) return list(combined_suggestions.items()) - def _combine_scores(self, score1, score2): + def _combine_scores(self, score1: float, score2: float) -> float: # The result is never smaller than the greater input score1 = score1 / 2 + 0.5 score2 = score2 / 2 + 0.5 confl = score1 * score2 / (score1 * score2 + (1 - score1) * (1 - score2)) return (confl - 0.5) * 2 - def _train(self, corpus, params, jobs=0): + def _train(self, corpus: DocumentCorpus, params: dict[str, Any], jobs: int = 0): raise NotSupportedException("Training yake backend is not possible.") diff --git a/annif/cli_util.py b/annif/cli_util.py index 72da0d46c..bbfa96df4 100644 --- a/annif/cli_util.py +++ b/annif/cli_util.py @@ -1,10 +1,11 @@ """Utility functions for Annif CLI commands""" - +from __future__ import annotations import collections import itertools import os import sys +from typing import TYPE_CHECKING import click import click_log @@ -14,10 +15,24 @@ from annif.exception import ConfigurationException from annif.project import Access +if TYPE_CHECKING: + from datetime import datetime + from io import TextIOWrapper + + from click.core import Argument, Context, Option + + from annif.corpus.document import DocumentCorpus, DocumentList + from annif.corpus.subject import SubjectIndex + from annif.project import AnnifProject + from annif.suggestion import SuggestionResult + from annif.vocab import AnnifVocabulary + logger = annif.logger -def _set_project_config_file_path(ctx, param, value): +def _set_project_config_file_path( + ctx: Context, param: Option, value: str | None +) -> None: """Override the default path or the path given in env by CLI option""" with ctx.obj.load_app().app_context(): if value: @@ -66,7 +81,7 @@ def docs_limit_option(f): )(f) -def get_project(project_id): +def get_project(project_id: str) -> AnnifProject: """ Helper function to get a project by ID and bail out if it doesn't exist""" try: @@ -76,7 +91,7 @@ def get_project(project_id): sys.exit(1) -def get_vocab(vocab_id): +def get_vocab(vocab_id: str) -> AnnifVocabulary: """ Helper function to get a vocabulary by ID and bail out if it doesn't exist""" @@ -87,7 +102,7 @@ def get_vocab(vocab_id): sys.exit(1) -def make_list_template(*rows): +def make_list_template(*rows) -> str: """Helper function to create a template for a list of entries with fields of variable width. 
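The two Yake scoring helpers annotated above are easy to sanity-check in isolation; the arithmetic below mirrors _transform_score and _combine_scores (Yake's raw scores are lower-is-better, and combining two matches never yields less than the larger input):

def transform_score(score: float) -> float:
    return 1.0 / (max(score, 0) + 1)

def combine_scores(score1: float, score2: float) -> float:
    score1 = score1 / 2 + 0.5
    score2 = score2 / 2 + 0.5
    confl = score1 * score2 / (score1 * score2 + (1 - score1) * (1 - score2))
    return (confl - 0.5) * 2

assert transform_score(0.0) == 1.0
assert combine_scores(0.6, 0.4) >= 0.6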
The width of each field is determined by the longest item in the field in the given rows.""" @@ -105,14 +120,19 @@ def make_list_template(*rows): ) -def format_datetime(dt): +def format_datetime(dt: datetime | None) -> str: """Helper function to format a datetime object as a string in the local time.""" if dt is None: return "-" return dt.astimezone().strftime("%Y-%m-%d %H:%M:%S") -def open_documents(paths, subject_index, vocab_lang, docs_limit): +def open_documents( + paths: tuple[str, ...], + subject_index: SubjectIndex, + vocab_lang: str, + docs_limit: int | None, +) -> DocumentCorpus: """Helper function to open a document corpus from a list of pathnames, each of which is either a TSV file or a directory of TXT files. For directories with subjects in TSV files, the given vocabulary language @@ -140,7 +160,7 @@ def open_doc_path(path, subject_index): return docs -def open_text_documents(paths, docs_limit): +def open_text_documents(paths: tuple[str, ...], docs_limit: int | None) -> DocumentList: """ Helper function to read text documents from the given file paths. Returns a DocumentList object with Documents having no subjects. If a path is "-", the @@ -160,7 +180,12 @@ def _docs(paths): return annif.corpus.DocumentList(_docs(paths[:docs_limit])) -def show_hits(hits, project, lang, file=None): +def show_hits( + hits: SuggestionResult, + project: AnnifProject, + lang: str, + file: TextIOWrapper | None = None, +) -> None: """ Print subject suggestions to the console or a file. The suggestions are displayed as a table, with one row per hit. Each row contains the URI, label, possible notation, @@ -177,7 +202,9 @@ def show_hits(hits, project, lang, file=None): click.echo(line, file=file) -def parse_backend_params(backend_param, project): +def parse_backend_params( + backend_param: tuple[str, ...] 
| tuple[()], project: AnnifProject +) -> collections.defaultdict[str, dict[str, str]]: """Parse a list of backend parameters given with the --backend-param option into a nested dict structure""" backend_params = collections.defaultdict(dict) @@ -189,7 +216,7 @@ def parse_backend_params(backend_param, project): return backend_params -def _validate_backend_params(backend, beparam, project): +def _validate_backend_params(backend: str, beparam: str, project: AnnifProject) -> None: if backend != project.config["backend"]: raise ConfigurationException( 'The backend {} in CLI option "-b {}" not matching the project' @@ -197,13 +224,15 @@ def _validate_backend_params(backend, beparam, project): ) -def generate_filter_params(filter_batch_max_limit): +def generate_filter_params(filter_batch_max_limit: int) -> list[tuple[int, float]]: limits = range(1, filter_batch_max_limit + 1) thresholds = [i * 0.05 for i in range(20)] return list(itertools.product(limits, thresholds)) -def _get_completion_choices(param): +def _get_completion_choices( + param: Argument, +) -> dict[str, AnnifVocabulary] | dict[str, AnnifProject] | list: if param.name == "project_id": return annif.registry.get_projects() elif param.name == "vocab_id": @@ -212,7 +241,7 @@ def _get_completion_choices(param): return [] -def complete_param(ctx, param, incomplete): +def complete_param(ctx: Context, param: Argument, incomplete: str) -> list[str]: with ctx.obj.load_app().app_context(): return [ choice diff --git a/annif/config.py b/annif/config.py index 589b337a3..ab8f0d568 100644 --- a/annif/config.py +++ b/annif/config.py @@ -1,5 +1,5 @@ """Configuration file handling""" - +from __future__ import annotations import configparser import os.path @@ -17,7 +17,7 @@ class AnnifConfigCFG: """Class for reading configuration in CFG/INI format""" - def __init__(self, filename): + def __init__(self, filename: str) -> None: self._config = configparser.ConfigParser() self._config.optionxform = annif.util.identity with open(filename, encoding="utf-8-sig") as projf: @@ -28,20 +28,20 @@ def __init__(self, filename): configparser.DuplicateOptionError, configparser.DuplicateSectionError, ) as err: - raise ConfigurationException(err) + raise ConfigurationException(err.message) @property - def project_ids(self): + def project_ids(self) -> list[str]: return self._config.sections() - def __getitem__(self, key): + def __getitem__(self, key: str) -> configparser.SectionProxy: return self._config[key] class AnnifConfigTOML: """Class for reading configuration in TOML format""" - def __init__(self, filename): + def __init__(self, filename: str) -> None: with open(filename, "rb") as projf: try: logger.debug(f"Reading configuration file {filename} in TOML format") @@ -55,14 +55,14 @@ def __init__(self, filename): def project_ids(self): return self._config.keys() - def __getitem__(self, key): + def __getitem__(self, key: str) -> dict[str, str]: return self._config[key] class AnnifConfigDirectory: """Class for reading configuration from directory""" - def __init__(self, directory): + def __init__(self, directory: str) -> None: files = glob(os.path.join(directory, "*.cfg")) files.extend(glob(os.path.join(directory, "*.toml"))) logger.debug(f"Reading configuration files in directory {directory}") @@ -74,7 +74,7 @@ def __init__(self, directory): self._check_duplicate_project_ids(proj_id, file) self._config[proj_id] = source_config[proj_id] - def _check_duplicate_project_ids(self, proj_id, file): + def _check_duplicate_project_ids(self, proj_id: str, file: str) -> None: 
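generate_filter_params above builds the (limit, threshold) grid that the filter optimization sweeps; with a toy maximum limit the output looks like this:

import itertools

filter_batch_max_limit = 2
limits = range(1, filter_batch_max_limit + 1)
thresholds = [i * 0.05 for i in range(20)]            # 0.00, 0.05, ..., 0.95
params = list(itertools.product(limits, thresholds))  # 40 (limit, threshold) pairs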
if proj_id in self._config: # Error message resembles configparser's DuplicateSection message raise ConfigurationException( @@ -86,11 +86,11 @@ def _check_duplicate_project_ids(self, proj_id, file): def project_ids(self): return self._config.keys() - def __getitem__(self, key): + def __getitem__(self, key: str) -> dict[str, str] | configparser.SectionProxy: return self._config[key] -def check_config(projects_config_path): +def check_config(projects_config_path: str) -> str | None: if os.path.exists(projects_config_path): return projects_config_path else: @@ -104,7 +104,7 @@ def check_config(projects_config_path): return None -def find_config(): +def find_config() -> str | None: for path in ("projects.cfg", "projects.toml", "projects.d"): if os.path.exists(path): return path @@ -119,7 +119,9 @@ def find_config(): return None -def parse_config(projects_config_path): +def parse_config( + projects_config_path: str, +) -> AnnifConfigDirectory | AnnifConfigCFG | AnnifConfigTOML | None: if projects_config_path: projects_config_path = check_config(projects_config_path) else: diff --git a/annif/corpus/combine.py b/annif/corpus/combine.py index 48fc83ff5..75fcc7f55 100644 --- a/annif/corpus/combine.py +++ b/annif/corpus/combine.py @@ -1,19 +1,24 @@ """Class for combining multiple corpora so they behave like a single corpus""" +from __future__ import annotations import itertools +from typing import TYPE_CHECKING from .types import DocumentCorpus +if TYPE_CHECKING: + from annif.corpus.document import DocumentFile + class CombinedCorpus(DocumentCorpus): """Class for combining multiple corpora so they behave like a single corpus""" - def __init__(self, corpora): + def __init__(self, corpora: list[DocumentFile]) -> None: self._corpora = corpora @property - def documents(self): + def documents(self) -> itertools.chain: return itertools.chain.from_iterable( [corpus.documents for corpus in self._corpora] ) diff --git a/annif/corpus/document.py b/annif/corpus/document.py index 54a0a3ba6..09a80a309 100644 --- a/annif/corpus/document.py +++ b/annif/corpus/document.py @@ -1,29 +1,42 @@ """Clases for supporting document corpora""" +from __future__ import annotations import glob import gzip import os.path import re from itertools import islice +from typing import TYPE_CHECKING import annif.util from .subject import SubjectSet from .types import Document, DocumentCorpus +if TYPE_CHECKING: + from collections.abc import Iterator + + from annif.corpus.subject import SubjectIndex + logger = annif.logger class DocumentDirectory(DocumentCorpus): """A directory of files as a full text document corpus""" - def __init__(self, path, subject_index=None, language=None, require_subjects=False): + def __init__( + self, + path: str, + subject_index: SubjectIndex | None = None, + language: str | None = None, + require_subjects: bool = False, + ) -> None: self.path = path self.subject_index = subject_index self.language = language self.require_subjects = require_subjects - def __iter__(self): + def __iter__(self) -> Iterator[tuple[str, str] | tuple[str, None]]: """Iterate through the directory, yielding tuples of (docfile, subjectfile) containing file paths. 
If require_subjects is False, the subjectfile will be returned as None.""" @@ -42,7 +55,7 @@ def __iter__(self): yield (filename, None) @property - def documents(self): + def documents(self) -> Iterator[Document]: for docfilename, subjfilename in self: with open(docfilename, errors="replace", encoding="utf-8-sig") as docfile: text = docfile.read() @@ -59,12 +72,12 @@ def documents(self): class DocumentFile(DocumentCorpus): """A TSV file as a corpus of documents with subjects""" - def __init__(self, path, subject_index): + def __init__(self, path: str, subject_index: SubjectIndex) -> None: self.path = path self.subject_index = subject_index @property - def documents(self): + def documents(self) -> Iterator[Document]: if self.path.endswith(".gz"): opener = gzip.open else: @@ -73,7 +86,7 @@ def documents(self): for line in tsvfile: yield from self._parse_tsv_line(line) - def _parse_tsv_line(self, line): + def _parse_tsv_line(self, line: str) -> Iterator[Document]: if "\t" in line: text, uris = line.split("\t", maxsplit=1) subject_ids = { diff --git a/annif/corpus/skos.py b/annif/corpus/skos.py index f29eee32d..462a35241 100644 --- a/annif/corpus/skos.py +++ b/annif/corpus/skos.py @@ -1,8 +1,10 @@ """Support for subjects loaded from a SKOS/RDF file""" +from __future__ import annotations import collections import os.path import shutil +from typing import TYPE_CHECKING import rdflib import rdflib.util @@ -12,8 +14,13 @@ from .types import Subject, SubjectCorpus +if TYPE_CHECKING: + from collections.abc import Iterator, Sequence -def serialize_subjects_to_skos(subjects, path): + from rdflib.term import URIRef + + +def serialize_subjects_to_skos(subjects: Iterator, path: str) -> None: """Create a SKOS representation of the given subjects and serialize it into a SKOS/Turtle file with the given path name.""" import joblib @@ -51,7 +58,7 @@ class SubjectFileSKOS(SubjectCorpus): _languages = None - def __init__(self, path): + def __init__(self, path: str) -> None: self.path = path if path.endswith(".dump.gz"): import joblib @@ -62,7 +69,7 @@ def __init__(self, path): self.graph.parse(self.path, format=rdflib.util.guess_format(self.path)) @property - def languages(self): + def languages(self) -> set[str]: if self._languages is None: self._languages = { label.language @@ -73,7 +80,7 @@ def languages(self): } return self._languages - def _concept_labels(self, concept): + def _concept_labels(self, concept: URIRef) -> dict[str, str]: by_lang = self.get_concept_labels(concept, self.PREF_LABEL_PROPERTIES) return { lang: by_lang[lang][0] @@ -85,7 +92,7 @@ def _concept_labels(self, concept): } @property - def subjects(self): + def subjects(self) -> Iterator[Subject]: for concept in self.concepts: labels = self._concept_labels(concept) @@ -96,13 +103,17 @@ def subjects(self): yield Subject(uri=str(concept), labels=labels, notation=notation) @property - def concepts(self): + def concepts(self) -> Iterator[URIRef]: for concept in self.graph.subjects(RDF.type, SKOS.Concept): if (concept, OWL.deprecated, rdflib.Literal(True)) in self.graph: continue yield concept - def get_concept_labels(self, concept, label_types): + def get_concept_labels( + self, + concept: URIRef, + label_types: Sequence[URIRef], + ) -> collections.defaultdict[str | None, list[str]]: """return all the labels of the given concept with the given label properties as a dict-like object where the keys are language codes and the values are lists of labels in that language""" @@ -115,14 +126,14 @@ def get_concept_labels(self, concept, label_types): 
return labels_by_lang @staticmethod - def is_rdf_file(path): + def is_rdf_file(path: str) -> bool: """return True if the path looks like an RDF file that can be loaded as SKOS""" fmt = rdflib.util.guess_format(path) return fmt is not None - def save_skos(self, path): + def save_skos(self, path: str) -> None: """Save the contents of the subject vocabulary into a SKOS/Turtle file with the given path name.""" @@ -139,5 +150,5 @@ def save_skos(self, path): annif.util.atomic_save( self.graph, *os.path.split(path.replace(".ttl", ".dump.gz")), - method=joblib.dump + method=joblib.dump, ) diff --git a/annif/corpus/subject.py b/annif/corpus/subject.py index 06c33683b..a9ee06397 100644 --- a/annif/corpus/subject.py +++ b/annif/corpus/subject.py @@ -1,7 +1,9 @@ """Classes for supporting subject corpora expressed as directories or files""" +from __future__ import annotations import csv import os.path +from typing import TYPE_CHECKING, Any import annif import annif.util @@ -9,6 +11,11 @@ from .skos import serialize_subjects_to_skos from .types import Subject, SubjectCorpus +if TYPE_CHECKING: + from collections.abc import Generator, Iterator + + import numpy as np + logger = annif.logger.getChild("subject") logger.addFilter(annif.util.DuplicateFilter()) @@ -16,14 +23,14 @@ class SubjectFileTSV(SubjectCorpus): """A monolingual subject vocabulary stored in a TSV file.""" - def __init__(self, path, language): + def __init__(self, path: str, language: str) -> None: """initialize the SubjectFileTSV given a path to a TSV file and the language of the vocabulary""" self.path = path self.language = language - def _parse_line(self, line): + def _parse_line(self, line: str) -> Iterator[Subject]: vals = line.strip().split("\t", 2) clean_uri = annif.util.cleanup_uri(vals[0]) label = vals[1] if len(vals) >= 2 else None @@ -32,16 +39,16 @@ def _parse_line(self, line): yield Subject(uri=clean_uri, labels=labels, notation=notation) @property - def languages(self): + def languages(self) -> list[str]: return [self.language] @property - def subjects(self): + def subjects(self) -> Generator: with open(self.path, encoding="utf-8-sig") as subjfile: for line in subjfile: yield from self._parse_line(line) - def save_skos(self, path): + def save_skos(self, path: str) -> None: """Save the contents of the subject vocabulary into a SKOS/Turtle file with the given path name.""" serialize_subjects_to_skos(self.subjects, path) @@ -50,11 +57,11 @@ def save_skos(self, path): class SubjectFileCSV(SubjectCorpus): """A multilingual subject vocabulary stored in a CSV file.""" - def __init__(self, path): + def __init__(self, path: str) -> None: """initialize the SubjectFileCSV given a path to a CSV file""" self.path = path - def _parse_row(self, row): + def _parse_row(self, row: dict[str, str]) -> Iterator[Subject]: labels = { fname.replace("label_", ""): value or None for fname, value in row.items() @@ -73,7 +80,7 @@ def _parse_row(self, row): ) @property - def languages(self): + def languages(self) -> list[str]: # infer the supported languages from the CSV column names with open(self.path, encoding="utf-8-sig") as csvfile: reader = csv.reader(csvfile) @@ -86,19 +93,19 @@ def languages(self): ] @property - def subjects(self): + def subjects(self) -> Generator: with open(self.path, encoding="utf-8-sig") as csvfile: reader = csv.DictReader(csvfile) for row in reader: yield from self._parse_row(row) - def save_skos(self, path): + def save_skos(self, path: str) -> None: """Save the contents of the subject vocabulary into a SKOS/Turtle file with 
the given path name.""" serialize_subjects_to_skos(self.subjects, path) @staticmethod - def is_csv_file(path): + def is_csv_file(path: str) -> bool: """return True if the path looks like a CSV file""" return os.path.splitext(path)[1].lower() == ".csv" @@ -108,30 +115,30 @@ class SubjectIndex: """An index that remembers the associations between integers subject IDs and their URIs and labels.""" - def __init__(self): + def __init__(self) -> None: self._subjects = [] self._uri_idx = {} self._label_idx = {} self._languages = None - def load_subjects(self, corpus): + def load_subjects(self, corpus: SubjectCorpus) -> None: """Initialize the subject index from a subject corpus""" self._languages = corpus.languages for subject in corpus.subjects: self.append(subject) - def __len__(self): + def __len__(self) -> int: return len(self._subjects) @property - def languages(self): + def languages(self) -> list[str] | None: return self._languages - def __getitem__(self, subject_id): + def __getitem__(self, subject_id: int) -> Subject: return self._subjects[subject_id] - def append(self, subject): + def append(self, subject: Subject) -> None: if self._languages is None and subject.labels is not None: self._languages = list(subject.labels.keys()) @@ -142,10 +149,10 @@ def append(self, subject): self._label_idx[(label, lang)] = subject_id self._subjects.append(subject) - def contains_uri(self, uri): + def contains_uri(self, uri: str) -> bool: return uri in self._uri_idx - def by_uri(self, uri, warnings=True): + def by_uri(self, uri: str, warnings: bool = True) -> int | None: """return the subject ID of a subject by its URI, or None if not found. If warnings=True, log a warning message if the URI cannot be found.""" try: @@ -155,7 +162,7 @@ def by_uri(self, uri, warnings=True): logger.warning("Unknown subject URI <%s>", uri) return None - def by_label(self, label, language): + def by_label(self, label: str | None, language: str) -> int | None: """return the subject ID of a subject by its label in a given language""" try: @@ -164,7 +171,7 @@ def by_label(self, label, language): logger.warning('Unknown subject label "%s"@%s', label, language) return None - def deprecated_ids(self): + def deprecated_ids(self) -> list[int]: """return indices of deprecated subjects""" return [ @@ -174,7 +181,7 @@ def deprecated_ids(self): ] @property - def active(self): + def active(self) -> list[tuple[int, Subject]]: """return a list of (subject_id, subject) tuples of all subjects that are not deprecated""" @@ -184,7 +191,7 @@ def active(self): if subject.labels is not None ] - def save(self, path): + def save(self, path: str) -> None: """Save this subject index into a file with the given path name.""" fieldnames = ["uri", "notation"] + [f"label_{lang}" for lang in self._languages] @@ -200,7 +207,7 @@ def save(self, path): writer.writerow(row) @classmethod - def load(cls, path): + def load(cls, path: str) -> SubjectIndex: """Load a subject index from a CSV file and return it.""" corpus = SubjectFileCSV(path) @@ -212,7 +219,7 @@ def load(cls, path): class SubjectSet: """Represents a set of subjects for a document.""" - def __init__(self, subject_ids=None): + def __init__(self, subject_ids: Any | None = None) -> None: """Create a SubjectSet and optionally initialize it from an iterable of subject IDs""" @@ -224,23 +231,25 @@ def __init__(self, subject_ids=None): else: self._subject_ids = [] - def __len__(self): + def __len__(self) -> int: return len(self._subject_ids) - def __getitem__(self, idx): + def __getitem__(self, idx: int) 
-> int: return self._subject_ids[idx] - def __bool__(self): + def __bool__(self) -> bool: return bool(self._subject_ids) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, SubjectSet): return self._subject_ids == other._subject_ids return False @classmethod - def from_string(cls, subj_data, subject_index, language): + def from_string( + cls, subj_data: str, subject_index: SubjectIndex, language: str + ) -> SubjectSet: subject_ids = set() for line in subj_data.splitlines(): uri, label = cls._parse_line(line) @@ -251,7 +260,9 @@ def from_string(cls, subj_data, subject_index, language): return cls(subject_ids) @staticmethod - def _parse_line(line): + def _parse_line( + line: str, + ) -> tuple[str | None, str | None]: uri = label = None vals = line.split("\t") for val in vals: @@ -265,7 +276,9 @@ def _parse_line(line): break return uri, label - def as_vector(self, size=None, destination=None): + def as_vector( + self, size: int | None = None, destination: np.ndarray | None = None + ) -> np.ndarray: """Return the hits as a one-dimensional NumPy array in sklearn multilabel indicator format. Use destination array if given (not None), otherwise create and return a new one of the given size.""" diff --git a/annif/corpus/types.py b/annif/corpus/types.py index fb607fdc7..e6cd4b252 100644 --- a/annif/corpus/types.py +++ b/annif/corpus/types.py @@ -1,4 +1,5 @@ """Basic types for document and subject corpora""" +from __future__ import annotations import abc import collections @@ -19,7 +20,7 @@ def documents(self): pass # pragma: no cover @property - def doc_batches(self): + def doc_batches(self) -> collections.abc.Iterator[list[Document]]: """Iterate through the document corpus in batches, yielding lists of Document objects.""" it = iter(self.documents) @@ -29,7 +30,7 @@ def doc_batches(self): return yield docs_batch - def is_empty(self): + def is_empty(self) -> bool: """Check if there are no documents to iterate.""" try: next(self.documents) diff --git a/annif/datadir.py b/annif/datadir.py index 314f685b1..752da32dd 100644 --- a/annif/datadir.py +++ b/annif/datadir.py @@ -1,4 +1,5 @@ """Mixin class for types that need a data directory""" +from __future__ import annotations import os import os.path @@ -7,11 +8,11 @@ class DatadirMixin: """Mixin class for types that need a data directory for storing files""" - def __init__(self, datadir, typename, identifier): + def __init__(self, datadir: str, typename: str, identifier: str) -> None: self._datadir_path = os.path.join(datadir, typename, identifier) @property - def datadir(self): + def datadir(self) -> str: if not os.path.exists(self._datadir_path): try: os.makedirs(self._datadir_path) diff --git a/annif/eval.py b/annif/eval.py index 264bcad43..5ec5bd17a 100644 --- a/annif/eval.py +++ b/annif/eval.py @@ -1,6 +1,8 @@ """Evaluation metrics for Annif""" +from __future__ import annotations import warnings +from typing import TYPE_CHECKING import numpy as np import scipy.sparse @@ -9,26 +11,38 @@ from annif.exception import NotSupportedException from annif.suggestion import SuggestionBatch, filter_suggestion +if TYPE_CHECKING: + from collections.abc import Iterable, Iterator, Sequence + from io import TextIOWrapper -def true_positives(y_true, y_pred): + from click.utils import LazyFile + from scipy.sparse._arrays import csr_array + + from annif.corpus.subject import SubjectIndex, SubjectSet + from annif.suggestion import SubjectSuggestion + + +def true_positives(y_true: csr_array, y_pred: csr_array) -> int: """calculate the 
number of true positives using bitwise operations, emulating the way sklearn evaluation metric functions work""" return int((y_true.multiply(y_pred)).sum()) -def false_positives(y_true, y_pred): +def false_positives(y_true: csr_array, y_pred: csr_array) -> int: """calculate the number of false positives using bitwise operations, emulating the way sklearn evaluation metric functions work""" return int((y_true < y_pred).sum()) -def false_negatives(y_true, y_pred): +def false_negatives(y_true: csr_array, y_pred: csr_array) -> int: """calculate the number of false negatives using bitwise operations, emulating the way sklearn evaluation metric functions work""" return int((y_true > y_pred).sum()) -def dcg_score(y_true, y_pred, limit=None): +def dcg_score( + y_true: csr_array, y_pred: csr_array, limit: int | None = None +) -> np.float64: """return the discounted cumulative gain (DCG) score for the selected labels vs. relevant labels""" @@ -43,7 +57,7 @@ def dcg_score(y_true, y_pred, limit=None): return (gain / discount).sum() -def ndcg_score(y_true, y_pred, limit=None): +def ndcg_score(y_true: csr_array, y_pred: csr_array, limit: int | None = None) -> float: """return the normalized discounted cumulative gain (nDCG) score for the selected labels vs. relevant labels""" @@ -65,12 +79,18 @@ class EvaluationBatch: for a list of documents of the batch. Final results can be queried using the results() method.""" - def __init__(self, subject_index): + def __init__(self, subject_index: SubjectIndex) -> None: self._subject_index = subject_index self._suggestion_arrays = [] self._gold_subject_arrays = [] - def evaluate_many(self, suggestion_batch, gold_subject_batch): + def evaluate_many( + self, + suggestion_batch: list[list[SubjectSuggestion]] + | SuggestionBatch + | list[Iterator], + gold_subject_batch: Sequence[SubjectSet], + ) -> None: if not isinstance(suggestion_batch, SuggestionBatch): suggestion_batch = SuggestionBatch.from_sequence( suggestion_batch, self._subject_index @@ -86,7 +106,12 @@ def evaluate_many(self, suggestion_batch, gold_subject_batch): ar[idx, subject_id] = True self._gold_subject_arrays.append(ar.tocsr()) - def _evaluate_samples(self, y_true, y_pred, metrics=[]): + def _evaluate_samples( + self, + y_true: csr_array, + y_pred: csr_array, + metrics: Iterable[str] = [], + ) -> dict[str, float]: y_pred_binary = y_pred > 0.0 # define the available metrics as lazy lambda functions @@ -156,7 +181,9 @@ def _evaluate_samples(self, y_true, y_pred, metrics=[]): return {metric: all_metrics[metric]() for metric in metrics} - def _result_per_subject_header(self, results_file): + def _result_per_subject_header( + self, results_file: LazyFile | TextIOWrapper + ) -> None: print( "\t".join( [ @@ -174,11 +201,19 @@ def _result_per_subject_header(self, results_file): file=results_file, ) - def _result_per_subject_body(self, zipped_results, results_file): + def _result_per_subject_body( + self, zipped_results: zip, results_file: LazyFile | TextIOWrapper + ) -> None: for row in zipped_results: print("\t".join((str(e) for e in row)), file=results_file) - def output_result_per_subject(self, y_true, y_pred, results_file, language): + def output_result_per_subject( + self, + y_true: csr_array, + y_pred: csr_array, + results_file: TextIOWrapper | LazyFile, + language: str, + ) -> None: """Write results per subject (non-aggregated) to outputfile results_file, using labels in the given language""" @@ -208,7 +243,12 @@ def output_result_per_subject(self, y_true, y_pred, results_file, language): 
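# A small self-contained sketch of the sparse-array counting helpers typed
# above; the toy prediction arrays below are assumed example data.
import numpy as np
from scipy.sparse import csr_array

from annif.eval import false_negatives, false_positives, true_positives

y_true = csr_array(np.array([[1, 0, 1, 0]], dtype=bool))
y_pred = csr_array(np.array([[1, 1, 0, 0]], dtype=bool))
print(true_positives(y_true, y_pred))   # 1
print(false_positives(y_true, y_pred))  # 1
print(false_negatives(y_true, y_pred))  # 1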
self._result_per_subject_header(results_file) self._result_per_subject_body(zipped, results_file) - def results(self, metrics=[], results_file=None, language=None): + def results( + self, + metrics: Iterable[str] = [], + results_file: LazyFile | TextIOWrapper | None = None, + language: str | None = None, + ) -> dict[str, float]: """evaluate a set of selected subjects against a gold standard using different metrics. If metrics is empty, use all available metrics. If results_file (file object) given, write results per subject to it diff --git a/annif/exception.py b/annif/exception.py index efc2d4a3e..b4b9c6552 100644 --- a/annif/exception.py +++ b/annif/exception.py @@ -1,5 +1,5 @@ """Custom exceptions used by Annif""" - +from __future__ import annotations from click import ClickException @@ -9,7 +9,12 @@ class AnnifException(ClickException): that the CLI can automatically handle exceptions. This exception cannot be instantiated directly - subclasses should be used instead.""" - def __init__(self, message, project_id=None, backend_id=None): + def __init__( + self, + message: str, + project_id: str | None = None, + backend_id: str | None = None, + ) -> None: super().__init__(message) self.project_id = project_id self.backend_id = backend_id @@ -20,7 +25,7 @@ def __init__(self, message, project_id=None, backend_id=None): # subclasses should set this to a descriptive prefix prefix = None - def format_message(self): + def format_message(self) -> str: if self.project_id is not None: return "{} project '{}': {}".format( self.prefix, self.project_id, self.message diff --git a/annif/lexical/mllm.py b/annif/lexical/mllm.py index 8c9b59f79..37564a76d 100644 --- a/annif/lexical/mllm.py +++ b/annif/lexical/mllm.py @@ -1,9 +1,11 @@ """MLLM (Maui-like Lexical Matchin) model for Annif""" +from __future__ import annotations import collections import math from enum import IntEnum from statistics import mean +from typing import TYPE_CHECKING, Any import joblib import numpy as np @@ -22,6 +24,16 @@ make_relation_matrix, ) +if TYPE_CHECKING: + from collections import defaultdict + + from rdflib.graph import Graph + from rdflib.term import URIRef + + from annif.analyzer import Analyzer + from annif.corpus.document import DocumentCorpus + from annif.vocab import AnnifVocabulary + Term = collections.namedtuple("Term", "subject_id label is_pref") Match = collections.namedtuple("Match", "subject_id is_pref n_tokens pos ambiguity") @@ -45,7 +57,7 @@ ) -def conflate_matches(matches, doc_length): +def conflate_matches(matches: list[Match], doc_length: int) -> list[Candidate]: subj_matches = collections.defaultdict(list) for match in matches: subj_matches[match.subject_id].append(match) @@ -65,7 +77,12 @@ def conflate_matches(matches, doc_length): ] -def generate_candidates(text, analyzer, vectorizer, index): +def generate_candidates( + text: str, + analyzer: Analyzer, + vectorizer: CountVectorizer, + index: TokenSetIndex, +) -> list[Candidate]: sentences = analyzer.tokenize_sentences(text) sent_tokens = vectorizer.transform(sentences) matches = [] @@ -86,7 +103,9 @@ def generate_candidates(text, analyzer, vectorizer, index): return conflate_matches(matches, len(sentences)) -def candidates_to_features(candidates, mdata): +def candidates_to_features( + candidates: list[Candidate], mdata: "ModelData" +) -> np.ndarray: """Convert a list of Candidates to a NumPy feature matrix""" matrix = np.zeros((len(candidates), len(Feature)), dtype=np.float32) @@ -133,11 +152,11 @@ def candidates_to_features(cls, candidates): class 
MLLMModel: """Maui-like Lexical Matching model""" - def generate_candidates(self, text, analyzer): + def generate_candidates(self, text: str, analyzer: Analyzer) -> list[Candidate]: return generate_candidates(text, analyzer, self._vectorizer, self._index) @property - def _model_data(self): + def _model_data(self) -> ModelData: return ModelData( broader=self._broader_matrix, narrower=self._narrower_matrix, @@ -148,11 +167,11 @@ def _model_data(self): idf=self._idf, ) - def _candidates_to_features(self, candidates): + def _candidates_to_features(self, candidates: list[Candidate]) -> np.ndarray: return candidates_to_features(candidates, self._model_data) @staticmethod - def _get_label_props(params): + def _get_label_props(params: dict[str, Any]) -> tuple[list[URIRef], list[URIRef]]: pref_label_props = [SKOS.prefLabel] if annif.util.boolean(params["use_hidden_labels"]): @@ -162,7 +181,12 @@ def _get_label_props(params): return (pref_label_props, nonpref_label_props) - def _prepare_terms(self, graph, vocab, params): + def _prepare_terms( + self, + graph: Graph, + vocab: AnnifVocabulary, + params: dict[str, Any], + ) -> tuple[list[Term], list[int]]: pref_label_props, nonpref_label_props = self._get_label_props(params) terms = [] @@ -182,13 +206,18 @@ def _prepare_terms(self, graph, vocab, params): return (terms, subject_ids) - def _prepare_relations(self, graph, vocab): + def _prepare_relations(self, graph: Graph, vocab: AnnifVocabulary) -> None: self._broader_matrix = make_relation_matrix(graph, vocab, SKOS.broader) self._narrower_matrix = make_relation_matrix(graph, vocab, SKOS.narrower) self._related_matrix = make_relation_matrix(graph, vocab, SKOS.related) self._collection_matrix = make_collection_matrix(graph, vocab) - def _prepare_train_index(self, vocab, analyzer, params): + def _prepare_train_index( + self, + vocab: AnnifVocabulary, + analyzer: Analyzer, + params: dict[str, Any], + ) -> list[int]: graph = vocab.as_graph() terms, subject_ids = self._prepare_terms(graph, vocab, params) self._prepare_relations(graph, vocab) @@ -211,7 +240,9 @@ def _prepare_train_index(self, vocab, analyzer, params): return subject_ids - def _prepare_train_data(self, corpus, analyzer, n_jobs): + def _prepare_train_data( + self, corpus: DocumentCorpus, analyzer: Analyzer, n_jobs: int + ) -> tuple[list[list[Candidate]], list[bool]]: # frequency of subjects (by id) in the generated candidates self._doc_freq = collections.Counter() # frequency of manually assigned subjects ("domain keyphraseness") @@ -241,14 +272,18 @@ def _prepare_train_data(self, corpus, analyzer, n_jobs): return (train_x, train_y) - def _calculate_idf(self, subject_ids, doc_count): + def _calculate_idf( + self, subject_ids: list[int], doc_count: int + ) -> defaultdict[int, float]: idf = collections.defaultdict(float) for subj_id in subject_ids: idf[subj_id] = math.log((doc_count + 1) / (self._doc_freq[subj_id] + 1)) + 1 return idf - def _prepare_features(self, train_x, n_jobs): + def _prepare_features( + self, train_x: list[list[Candidate]], n_jobs: int + ) -> list[np.ndarray]: fc_args = {"mdata": self._model_data} jobs, pool_class = annif.parallel.get_pool(n_jobs) @@ -261,7 +296,14 @@ def _prepare_features(self, train_x, n_jobs): return features - def prepare_train(self, corpus, vocab, analyzer, params, n_jobs): + def prepare_train( + self, + corpus: DocumentCorpus, + vocab: AnnifVocabulary, + analyzer: Analyzer, + params: dict[str, Any], + n_jobs: int, + ) -> tuple[np.ndarray, np.ndarray]: # create an index from the vocabulary terms 
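# A hypothetical end-to-end sketch of the MLLMModel workflow annotated in this
# file; here `corpus`, `vocab`, `analyzer` and `params` are assumed to come
# from an already configured Annif project.
from annif.lexical.mllm import MLLMModel

model = MLLMModel()
train_x, train_y = model.prepare_train(corpus, vocab, analyzer, params, n_jobs=1)
model.train(train_x, train_y, params)
candidates = model.generate_candidates("An example document text", analyzer)
print(model.predict(candidates))  # [(score, subject_id), ...], best first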
subject_ids = self._prepare_train_index(vocab, analyzer, params) @@ -276,7 +318,7 @@ def prepare_train(self, corpus, vocab, analyzer, params, n_jobs): return (np.vstack(features), np.array(train_y)) - def _create_classifier(self, params): + def _create_classifier(self, params: dict[str, Any]) -> BaggingClassifier: return BaggingClassifier( DecisionTreeClassifier( min_samples_leaf=int(params["min_samples_leaf"]), @@ -285,7 +327,12 @@ def _create_classifier(self, params): max_samples=float(params["max_samples"]), ) - def train(self, train_x, train_y, params): + def train( + self, + train_x: np.ndarray | list[tuple[int, int]], + train_y: list[bool] | np.ndarray, + params: dict[str, Any], + ) -> None: # fit the model on the training corpus self._classifier = self._create_classifier(params) self._classifier.fit(train_x, train_y) @@ -298,20 +345,22 @@ def train(self, train_x, train_y, params): + "data matches your vocabulary." ) - def _prediction_to_list(self, scores, candidates): + def _prediction_to_list( + self, scores: np.ndarray, candidates: list[Candidate] + ) -> list[tuple[np.float64, int]]: subj_scores = [(score[1], c.subject_id) for score, c in zip(scores, candidates)] return sorted(subj_scores, reverse=True) - def predict(self, candidates): + def predict(self, candidates: list[Candidate]) -> list[tuple[np.float64, int]]: if not candidates: return [] features = self._candidates_to_features(candidates) scores = self._classifier.predict_proba(features) return self._prediction_to_list(scores, candidates) - def save(self, filename): + def save(self, filename: str) -> list[str]: return joblib.dump(self, filename) @staticmethod - def load(filename): + def load(filename: str) -> MLLMModel: return joblib.load(filename) diff --git a/annif/lexical/tokenset.py b/annif/lexical/tokenset.py index ebd23e33f..07c15705d 100644 --- a/annif/lexical/tokenset.py +++ b/annif/lexical/tokenset.py @@ -1,6 +1,11 @@ """Index for fast matching of token sets.""" +from __future__ import annotations import collections +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from numpy import ndarray class TokenSet: @@ -8,19 +13,24 @@ class TokenSet: be matched with another set of tokens. 
A TokenSet can optionally be associated with a subject from the vocabulary.""" - def __init__(self, tokens, subject_id=None, is_pref=False): + def __init__( + self, + tokens: ndarray, + subject_id: int | None = None, + is_pref: bool = False, + ) -> None: self._tokens = set(tokens) self.key = tokens[0] if len(tokens) else None self.subject_id = subject_id self.is_pref = is_pref - def __len__(self): + def __len__(self) -> int: return len(self._tokens) def __iter__(self): return iter(self._tokens) - def contains(self, other): + def contains(self, other: TokenSet) -> bool: """Returns True iff the tokens in the other TokenSet are all included within this TokenSet.""" @@ -30,18 +40,18 @@ def contains(self, other): class TokenSetIndex: """A searchable index of TokenSets (representing vocabulary terms)""" - def __init__(self): + def __init__(self) -> None: self._index = collections.defaultdict(set) - def __len__(self): + def __len__(self) -> int: return len(self._index) - def add(self, tset): + def add(self, tset: TokenSet) -> None: """Add a TokenSet into this index""" if tset.key is not None: self._index[tset.key].add(tset) - def _find_subj_tsets(self, tset): + def _find_subj_tsets(self, tset: TokenSet) -> dict[int | None, TokenSet]: """return a dict (subject_id : TokenSet) of matches contained in the given TokenSet""" @@ -75,7 +85,7 @@ def _find_subj_ambiguity(self, tsets): return subj_ambiguity - def search(self, tset): + def search(self, tset: TokenSet) -> list[tuple[TokenSet, int]]: """Return the TokenSets that are contained in the given TokenSet. The matches are returned as a list of (TokenSet, ambiguity) pairs where ambiguity is an integer indicating the number of other TokenSets diff --git a/annif/lexical/util.py b/annif/lexical/util.py index a6d9931c7..28d21a141 100644 --- a/annif/lexical/util.py +++ b/annif/lexical/util.py @@ -1,13 +1,22 @@ """Utility methods for lexical algorithms""" +from __future__ import annotations import collections +from typing import TYPE_CHECKING from rdflib import URIRef from rdflib.namespace import SKOS from scipy.sparse import csc_matrix, lil_matrix +if TYPE_CHECKING: + from rdflib.graph import Graph -def get_subject_labels(graph, uri, properties, language): + from annif.vocab import AnnifVocabulary + + +def get_subject_labels( + graph: Graph, uri: str, properties: list[URIRef], language: str +) -> list[str]: return [ str(label) for prop in properties @@ -16,7 +25,9 @@ def get_subject_labels(graph, uri, properties, language): ] -def make_relation_matrix(graph, vocab, property): +def make_relation_matrix( + graph: Graph, vocab: AnnifVocabulary, property: URIRef +) -> csc_matrix: n_subj = len(vocab.subjects) matrix = lil_matrix((n_subj, n_subj), dtype=bool) @@ -29,7 +40,7 @@ def make_relation_matrix(graph, vocab, property): return csc_matrix(matrix) -def make_collection_matrix(graph, vocab): +def make_collection_matrix(graph: Graph, vocab: AnnifVocabulary) -> csc_matrix: # make an index with all collection members c_members = collections.defaultdict(list) for coll, member in graph.subject_objects(SKOS.member): diff --git a/annif/openapi/validation.py b/annif/openapi/validation.py index 3799a6126..7f920b35d 100644 --- a/annif/openapi/validation.py +++ b/annif/openapi/validation.py @@ -1,4 +1,5 @@ """Custom validator for the Annif API.""" +from __future__ import annotations import logging @@ -14,10 +15,14 @@ class CustomRequestBodyValidator(decorators.validation.RequestBodyValidator): """Custom request body validator that overrides the default error message for 
the 'maxItems' validator for the 'documents' property.""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) - def validate_schema(self, data, url): + def validate_schema( + self, + data: list | dict, + url: str, + ) -> None: """Validate the request body against the schema.""" if self.is_null_value_valid and is_null(data): diff --git a/annif/parallel.py b/annif/parallel.py index 3162a47c5..c6b293f8e 100644 --- a/annif/parallel.py +++ b/annif/parallel.py @@ -1,8 +1,19 @@ """Parallel processing functionality for Annif""" - +from __future__ import annotations import multiprocessing import multiprocessing.dummy +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from collections import defaultdict + from collections.abc import Iterator + from typing import Callable + + from annif.corpus import Document, SubjectSet + from annif.registry import AnnifRegistry + from annif.suggestion import SuggestionBatch, SuggestionResult + # Start method for processes created by the multiprocessing module. # A value of None means using the platform-specific default. @@ -22,7 +33,7 @@ class BaseWorker: args = None @classmethod - def init(cls, args): + def init(cls, args) -> None: cls.args = args # pragma: no cover @@ -31,14 +42,21 @@ class ProjectSuggestMap: provide a mapping method that converts Document objects to suggestions. Intended to be used with the multiprocessing module.""" - def __init__(self, registry, project_ids, backend_params, limit, threshold): + def __init__( + self, + registry: AnnifRegistry, + project_ids: list[str], + backend_params: defaultdict[str, Any] | None, + limit: int | None, + threshold: float, + ) -> None: self.registry = registry self.project_ids = project_ids self.backend_params = backend_params self.limit = limit self.threshold = threshold - def suggest(self, doc): + def suggest(self, doc: Document) -> tuple[dict[str, SuggestionResult], SubjectSet]: filtered_hits = {} for project_id in self.project_ids: project = self.registry.get_project(project_id) @@ -46,7 +64,9 @@ def suggest(self, doc): filtered_hits[project_id] = batch.filter(self.limit, self.threshold)[0] return (filtered_hits, doc.subject_set) - def suggest_batch(self, batch): + def suggest_batch( + self, batch + ) -> tuple[dict[str, SuggestionBatch], Iterator[SubjectSet]]: filtered_hit_sets = {} texts, subject_sets = zip(*[(doc.text, doc.subject_set) for doc in batch]) @@ -57,19 +77,19 @@ def suggest_batch(self, batch): return (filtered_hit_sets, subject_sets) -def get_pool(n_jobs): - """return a suitable multiprocessing pool class, and the correct jobs - argument for its constructor, for the given amount of parallel jobs""" +def get_pool(n_jobs: int) -> tuple[int | None, Callable]: + """return a suitable constructor for multiprocessing pool class, and the correct + jobs argument for it, for the given amount of parallel jobs""" ctx = multiprocessing.get_context(MP_START_METHOD) if n_jobs < 1: n_jobs = None - pool_class = ctx.Pool + pool_constructor: Callable = ctx.Pool elif n_jobs == 1: # use the dummy wrapper around threading to avoid subprocess overhead - pool_class = multiprocessing.dummy.Pool + pool_constructor = multiprocessing.dummy.Pool else: - pool_class = ctx.Pool + pool_constructor = ctx.Pool - return n_jobs, pool_class + return n_jobs, pool_constructor diff --git a/annif/project.py b/annif/project.py index b94eaf58e..83f7eda7c 100644 --- a/annif/project.py +++ b/annif/project.py @@ -1,8 +1,10 @@ """Project management functionality 
for Annif""" +from __future__ import annotations import enum import os.path from shutil import rmtree +from typing import TYPE_CHECKING import annif import annif.analyzer @@ -17,6 +19,22 @@ NotSupportedException, ) +if TYPE_CHECKING: + from collections import defaultdict + from configparser import SectionProxy + from datetime import datetime + + from click.utils import LazyFile + + from annif.analyzer import Analyzer + from annif.backend import AnnifBackend + from annif.backend.hyperopt import HPRecommendation + from annif.corpus.document import DocumentCorpus + from annif.corpus.subject import SubjectIndex + from annif.registry import AnnifRegistry + from annif.transform.transform import TransformChain + from annif.vocab import AnnifVocabulary + logger = annif.logger @@ -42,7 +60,13 @@ class AnnifProject(DatadirMixin): # default values for configuration settings DEFAULT_ACCESS = "public" - def __init__(self, project_id, config, datadir, registry): + def __init__( + self, + project_id: str, + config: dict[str, str] | SectionProxy, + datadir: str, + registry: AnnifRegistry, + ) -> None: DatadirMixin.__init__(self, datadir, "projects", project_id) self.project_id = project_id self.name = config.get("name", project_id) @@ -55,7 +79,7 @@ def __init__(self, project_id, config, datadir, registry): self.registry = registry self._init_access() - def _init_access(self): + def _init_access(self) -> None: access = self.config.get("access", self.DEFAULT_ACCESS) try: self.access = getattr(Access, access) @@ -65,7 +89,7 @@ def _init_access(self): project_id=self.project_id, ) - def _initialize_analyzer(self): + def _initialize_analyzer(self) -> None: if not self.analyzer_spec: return # not configured, so assume it's not needed analyzer = self.analyzer @@ -73,7 +97,7 @@ def _initialize_analyzer(self): "Project '%s': initialized analyzer: %s", self.project_id, str(analyzer) ) - def _initialize_subjects(self): + def _initialize_subjects(self) -> None: try: subjects = self.subjects logger.debug( @@ -82,7 +106,7 @@ def _initialize_subjects(self): except AnnifException as err: logger.warning(err.format_message()) - def _initialize_backend(self, parallel): + def _initialize_backend(self, parallel: bool) -> None: logger.debug("Project '%s': initializing backend", self.project_id) try: if not self.backend: @@ -92,7 +116,7 @@ def _initialize_backend(self, parallel): except AnnifException as err: logger.warning(err.format_message()) - def initialize(self, parallel=False): + def initialize(self, parallel: bool = False) -> None: """Initialize this project and its backend so that they are ready to be used. 
If parallel is True, expect that the project will be used for parallel processing.""" @@ -108,14 +132,18 @@ def initialize(self, parallel=False): self.initialized = True - def _suggest_with_backend(self, texts, backend_params): + def _suggest_with_backend( + self, + texts: list[str], + backend_params: defaultdict[str, dict] | None, + ) -> annif.suggestion.SuggestionBatch: if backend_params is None: backend_params = {} beparams = backend_params.get(self.backend.backend_id, {}) return self.backend.suggest(texts, beparams) @property - def analyzer(self): + def analyzer(self) -> Analyzer: if self._analyzer is None: if self.analyzer_spec: self._analyzer = annif.analyzer.get_analyzer(self.analyzer_spec) @@ -126,7 +154,7 @@ def analyzer(self): return self._analyzer @property - def transform(self): + def transform(self) -> TransformChain: if self._transform is None: self._transform = annif.transform.get_transform( self.transform_spec, project=self @@ -134,7 +162,7 @@ def transform(self): return self._transform @property - def backend(self): + def backend(self) -> AnnifBackend | None: if self._backend is None: if "backend" not in self.config: raise ConfigurationException( @@ -154,7 +182,7 @@ def backend(self): ) return self._backend - def _initialize_vocab(self): + def _initialize_vocab(self) -> None: if self.vocab_spec is None: raise ConfigurationException( "vocab setting is missing", project_id=self.project_id @@ -164,22 +192,22 @@ def _initialize_vocab(self): ) @property - def vocab(self): + def vocab(self) -> AnnifVocabulary: if self._vocab is None: self._initialize_vocab() return self._vocab @property - def vocab_lang(self): + def vocab_lang(self) -> str: if self._vocab_lang is None: self._initialize_vocab() return self._vocab_lang @property - def subjects(self): + def subjects(self) -> SubjectIndex: return self.vocab.subjects - def _get_info(self, key): + def _get_info(self, key: str) -> bool | datetime | None: try: be = self.backend if be is not None: @@ -189,14 +217,18 @@ def _get_info(self, key): return None @property - def is_trained(self): + def is_trained(self) -> bool | None: return self._get_info("is_trained") @property - def modification_time(self): + def modification_time(self) -> datetime | None: return self._get_info("modification_time") - def suggest_corpus(self, corpus, backend_params=None): + def suggest_corpus( + self, + corpus: DocumentCorpus, + backend_params: defaultdict[str, dict] | None = None, + ) -> annif.suggestion.SuggestionResults: """Suggest subjects for the given documents corpus in batches of documents.""" suggestions = ( self.suggest([doc.text for doc in doc_batch], backend_params) @@ -206,7 +238,11 @@ def suggest_corpus(self, corpus, backend_params=None): return annif.suggestion.SuggestionResults(suggestions) - def suggest(self, texts, backend_params=None): + def suggest( + self, + texts: list[str], + backend_params: defaultdict[str, dict] | None = None, + ) -> annif.suggestion.SuggestionBatch: """Suggest subjects for the given documents batch.""" if not self.is_trained: if self.is_trained is None: @@ -216,7 +252,12 @@ def suggest(self, texts, backend_params=None): texts = [self.transform.transform_text(text) for text in texts] return self._suggest_with_backend(texts, backend_params) - def train(self, corpus, backend_params=None, jobs=0): + def train( + self, + corpus: DocumentCorpus, + backend_params: defaultdict[str, dict] | None = None, + jobs: int = 0, + ) -> None: """train the project using documents from a metadata source""" if corpus != "cached": corpus = 
self.transform.transform_corpus(corpus) @@ -225,7 +266,11 @@ def train(self, corpus, backend_params=None, jobs=0): beparams = backend_params.get(self.backend.backend_id, {}) self.backend.train(corpus, beparams, jobs) - def learn(self, corpus, backend_params=None): + def learn( + self, + corpus: DocumentCorpus, + backend_params: defaultdict[str, dict] | None = None, + ) -> None: """further train the project using documents from a metadata source""" if backend_params is None: backend_params = {} @@ -238,7 +283,14 @@ def learn(self, corpus, backend_params=None): "Learning not supported by backend", project_id=self.project_id ) - def hyperopt(self, corpus, trials, jobs, metric, results_file): + def hyperopt( + self, + corpus: DocumentCorpus, + trials: int, + jobs: int, + metric: str, + results_file: LazyFile | None, + ) -> HPRecommendation: """optimize the hyperparameters of the project using a validation corpus against a given metric""" if isinstance(self.backend, annif.backend.hyperopt.AnnifHyperoptBackend): @@ -250,7 +302,7 @@ def hyperopt(self, corpus, trials, jobs, metric, results_file): project_id=self.project_id, ) - def dump(self): + def dump(self) -> dict[str, str | dict | bool | datetime | None]: """return this project as a dict""" return { "project_id": self.project_id, @@ -261,7 +313,7 @@ def dump(self): "modification_time": self.modification_time, } - def remove_model_data(self): + def remove_model_data(self) -> None: """remove the data of this project""" datadir_path = self._datadir_path if os.path.isdir(datadir_path): diff --git a/annif/registry.py b/annif/registry.py index e0368b1e3..81bd541ef 100644 --- a/annif/registry.py +++ b/annif/registry.py @@ -1,9 +1,9 @@ """Registry that keeps track of Annif projects""" +from __future__ import annotations -import collections import re -from flask import current_app +from flask import Flask, current_app import annif from annif.config import parse_config @@ -28,7 +28,9 @@ class AnnifRegistry: _projects = {} _vocabs = {} - def __init__(self, projects_config_path, datadir, init_projects): + def __init__( + self, projects_config_path: str, datadir: str, init_projects: bool + ) -> None: self._rid = id(self) self._projects_config_path = projects_config_path self._datadir = datadir @@ -37,13 +39,13 @@ def __init__(self, projects_config_path, datadir, init_projects): for project in self._projects[self._rid].values(): project.initialize() - def _init_vars(self): + def _init_vars(self) -> None: # initialize the static variables, if necessary if self._rid not in self._projects: self._projects[self._rid] = self._create_projects() self._vocabs[self._rid] = {} - def _create_projects(self): + def _create_projects(self) -> dict: # parse the configuration config = parse_config(self._projects_config_path) @@ -52,14 +54,16 @@ def _create_projects(self): return {} # create AnnifProject objects from the configuration file - projects = collections.OrderedDict() + projects = dict() for project_id in config.project_ids: projects[project_id] = AnnifProject( project_id, config[project_id], self._datadir, self ) return projects - def get_projects(self, min_access=Access.private): + def get_projects( + self, min_access: Access = Access.private + ) -> dict[str, AnnifProject]: """Return the available projects as a dict of project_id -> AnnifProject. 
The min_access parameter may be used to set the minimum access level required for the returned projects.""" @@ -71,7 +75,9 @@ def get_projects(self, min_access=Access.private): if project.access >= min_access } - def get_project(self, project_id, min_access=Access.private): + def get_project( + self, project_id: str, min_access: Access = Access.private + ) -> AnnifProject: """return the definition of a single Project by project_id""" projects = self.get_projects(min_access) @@ -80,7 +86,9 @@ def get_project(self, project_id, min_access=Access.private): except KeyError: raise ValueError("No such project {}".format(project_id)) - def get_vocab(self, vocab_spec, default_language): + def get_vocab( + self, vocab_spec: str, default_language: str | None + ) -> tuple[AnnifVocabulary, None] | tuple[AnnifVocabulary, str]: """Return an (AnnifVocabulary, language) pair corresponding to the vocab_spec. If no language information is specified, use the given default language.""" @@ -101,14 +109,14 @@ def get_vocab(self, vocab_spec, default_language): return self._vocabs[self._rid][vocab_key], language -def initialize_projects(app): +def initialize_projects(app: Flask) -> None: projects_config_path = app.config["PROJECTS_CONFIG_PATH"] datadir = app.config["DATADIR"] init_projects = app.config["INITIALIZE_PROJECTS"] app.annif_registry = AnnifRegistry(projects_config_path, datadir, init_projects) -def get_projects(min_access=Access.private): +def get_projects(min_access: Access = Access.private) -> dict[str, AnnifProject]: """Return the available projects as a dict of project_id -> AnnifProject. The min_access parameter may be used to set the minimum access level required for the returned projects.""" @@ -118,7 +126,7 @@ def get_projects(min_access=Access.private): return current_app.annif_registry.get_projects(min_access) -def get_project(project_id, min_access=Access.private): +def get_project(project_id: str, min_access: Access = Access.private) -> AnnifProject: """return the definition of a single Project by project_id""" projects = get_projects(min_access) @@ -128,7 +136,7 @@ def get_project(project_id, min_access=Access.private): raise ValueError(f"No such project '{project_id}'") -def get_vocabs(min_access=Access.private): +def get_vocabs(min_access: Access = Access.private) -> dict[str, AnnifVocabulary]: """Return the available vocabularies as a dict of vocab_id -> AnnifVocabulary. The min_access parameter may be used to set the minimum access level required for the returned vocabularies.""" @@ -143,7 +151,7 @@ def get_vocabs(min_access=Access.private): return vocabs -def get_vocab(vocab_id, min_access=Access.private): +def get_vocab(vocab_id: str, min_access: Access = Access.private) -> AnnifVocabulary: """return a single AnnifVocabulary by vocabulary id""" vocabs = get_vocabs(min_access) diff --git a/annif/rest.py b/annif/rest.py index 0b3b87efe..f848117c8 100644 --- a/annif/rest.py +++ b/annif/rest.py @@ -1,7 +1,9 @@ """Definitions for REST API operations. 
These are wired via Connexion to methods defined in the OpenAPI specification.""" +from __future__ import annotations import importlib +from typing import TYPE_CHECKING, Any import connexion @@ -10,8 +12,16 @@ from annif.exception import AnnifException from annif.project import Access +if TYPE_CHECKING: + from datetime import datetime -def project_not_found_error(project_id): + from connexion.lifecycle import ConnexionResponse + + from annif.corpus.subject import SubjectIndex + from annif.suggestion import SubjectSuggestion, SuggestionResults + + +def project_not_found_error(project_id: str) -> ConnexionResponse: """return a Connexion error object when a project is not found""" return connexion.problem( @@ -21,7 +31,9 @@ def project_not_found_error(project_id): ) -def server_error(err): +def server_error( + err: AnnifException, +) -> ConnexionResponse: """return a Connexion error object when there is a server error (project or backend problem)""" @@ -30,13 +42,13 @@ def server_error(err): ) -def show_info(): +def show_info() -> dict[str, str]: """return version of annif and a title for the api according to OpenAPI spec""" return {"title": "Annif REST API", "version": importlib.metadata.version("annif")} -def language_not_supported_error(lang): +def language_not_supported_error(lang: str) -> ConnexionResponse: """return a Connexion error object when attempting to use unsupported language""" return connexion.problem( @@ -46,7 +58,7 @@ def language_not_supported_error(lang): ) -def list_projects(): +def list_projects() -> dict[str, list[dict[str, str | dict | bool | datetime | None]]]: """return a dict with projects formatted according to OpenAPI spec""" return { @@ -57,7 +69,9 @@ def list_projects(): } -def show_project(project_id): +def show_project( + project_id: str, +) -> dict | ConnexionResponse: """return a single project formatted according to OpenAPI spec""" try: @@ -67,7 +81,9 @@ def show_project(project_id): return project.dump() -def _suggestion_to_dict(suggestion, subject_index, language): +def _suggestion_to_dict( + suggestion: SubjectSuggestion, subject_index: SubjectIndex, language: str +) -> dict[str, str | float | None]: subject = subject_index[suggestion.subject_id] return { "uri": subject.uri, @@ -77,21 +93,25 @@ def _suggestion_to_dict(suggestion, subject_index, language): } -def _hit_sets_to_list(hit_sets, subjects, lang): +def _hit_sets_to_list( + hit_sets: SuggestionResults, subjects: SubjectIndex, lang: str +) -> list[dict[str, list]]: return [ {"results": [_suggestion_to_dict(hit, subjects, lang) for hit in hits]} for hits in hit_sets ] -def _is_error(result): +def _is_error(result: list[dict[str, list]] | ConnexionResponse) -> bool: return ( isinstance(result, connexion.lifecycle.ConnexionResponse) and result.status_code >= 400 ) -def suggest(project_id, body): +def suggest( + project_id: str, body: dict[str, Any] +) -> dict[str, list] | ConnexionResponse: """suggest subjects for the given text and return a dict with results formatted according to OpenAPI spec""" @@ -106,7 +126,11 @@ def suggest(project_id, body): return result[0] -def suggest_batch(project_id, body, **query_parameters): +def suggest_batch( + project_id: str, + body: dict[str, list], + **query_parameters, +) -> list[dict[str, Any]] | ConnexionResponse: """suggest subjects for the given documents and return a list of dicts with results formatted according to OpenAPI spec""" @@ -120,7 +144,11 @@ def suggest_batch(project_id, body, **query_parameters): return result -def _suggest(project_id, documents, 
parameters): +def _suggest( + project_id: str, + documents: list[dict[str, str]], + parameters: dict[str, Any], +) -> list[dict[str, list]] | ConnexionResponse: corpus = _documents_to_corpus(documents, subject_index=None) try: project = annif.registry.get_project(project_id, min_access=Access.hidden) @@ -146,7 +174,10 @@ def _suggest(project_id, documents, parameters): return _hit_sets_to_list(hit_sets, project.subjects, lang) -def _documents_to_corpus(documents, subject_index): +def _documents_to_corpus( + documents: list[dict[str, Any]], + subject_index: SubjectIndex | None, +) -> annif.corpus.document.DocumentList: if subject_index is not None: corpus = [ Document( @@ -165,7 +196,10 @@ def _documents_to_corpus(documents, subject_index): return DocumentList(corpus) -def learn(project_id, body): +def learn( + project_id: str, + body: list[dict[str, Any]], +) -> ConnexionResponse | tuple[None, int]: """learn from documents and return an empty 204 response if succesful""" try: diff --git a/annif/suggestion.py b/annif/suggestion.py index 9e967d4bf..ddf3ec2e5 100644 --- a/annif/suggestion.py +++ b/annif/suggestion.py @@ -1,15 +1,22 @@ """Representing suggested subjects.""" +from __future__ import annotations import collections import itertools +from typing import TYPE_CHECKING import numpy as np from scipy.sparse import csr_array +if TYPE_CHECKING: + from collections.abc import Iterable, Iterator, Sequence + + from annif.corpus.subject import SubjectIndex + SubjectSuggestion = collections.namedtuple("SubjectSuggestion", "subject_id score") -def vector_to_suggestions(vector, limit): +def vector_to_suggestions(vector: np.ndarray, limit: int) -> Iterator: limit = min(len(vector), limit) topk_idx = np.argpartition(vector, -limit)[-limit:] return ( @@ -17,7 +24,11 @@ def vector_to_suggestions(vector, limit): ) -def filter_suggestion(preds, limit=None, threshold=0.0): +def filter_suggestion( + preds: csr_array, + limit: int | None = None, + threshold: float = 0.0, +) -> csr_array: """filter a 2D sparse suggestion array (csr_array), retaining only the top K suggestions with a score above or equal to the threshold for each individual prediction; the rest will be left as zeros""" @@ -43,7 +54,7 @@ def filter_suggestion(preds, limit=None, threshold=0.0): class SuggestionResult: """Suggestions for a single document, backed by a row of a sparse array.""" - def __init__(self, array, idx): + def __init__(self, array: csr_array, idx: int) -> None: self._array = array self._idx = idx @@ -57,10 +68,10 @@ def __iter__(self): sorted(suggestions, key=lambda suggestion: suggestion.score, reverse=True) ) - def as_vector(self): + def as_vector(self) -> np.ndarray: return self._array[[self._idx], :].toarray()[0] - def __len__(self): + def __len__(self) -> int: _, cols = self._array[[self._idx], :].nonzero() return len(cols) @@ -68,13 +79,18 @@ def __len__(self): class SuggestionBatch: """Subject suggestions for a batch of documents.""" - def __init__(self, array): + def __init__(self, array: csr_array) -> None: """Create a new SuggestionBatch from a csr_array""" assert isinstance(array, csr_array) self.array = array @classmethod - def from_sequence(cls, suggestion_results, subject_index, limit=None): + def from_sequence( + cls, + suggestion_results: Sequence[Iterable[SubjectSuggestion]], + subject_index: SubjectIndex, + limit: int | None = None, + ) -> SuggestionBatch: """Create a new SuggestionBatch from a sequence where each item is a sequence of SubjectSuggestion objects.""" @@ -96,7 +112,9 @@ def 
from_sequence(cls, suggestion_results, subject_index, limit=None): ) @classmethod - def from_averaged(cls, batches, weights): + def from_averaged( + cls, batches: list[SuggestionBatch], weights: list[float] + ) -> SuggestionBatch: """Create a new SuggestionBatch where the subject scores are the weighted average of scores in several SuggestionBatches""" @@ -105,31 +123,35 @@ def from_averaged(cls, batches, weights): ) / sum(weights) return SuggestionBatch(avg_array) - def filter(self, limit=None, threshold=0.0): + def filter( + self, limit: int | None = None, threshold: float = 0.0 + ) -> SuggestionBatch: """Return a subset of the hits, filtered by the given limit and score threshold, as another SuggestionBatch object.""" return SuggestionBatch(filter_suggestion(self.array, limit, threshold)) - def __getitem__(self, idx): + def __getitem__(self, idx: int) -> SuggestionResult: if idx < 0 or idx >= len(self): raise IndexError return SuggestionResult(self.array, idx) - def __len__(self): + def __len__(self) -> int: return self.array.shape[0] class SuggestionResults: """Subject suggestions for a potentially very large number of documents.""" - def __init__(self, batches): + def __init__(self, batches: Iterable[SuggestionBatch]) -> None: """Initialize a new SuggestionResults from an iterable that provides SuggestionBatch objects.""" self.batches = batches - def filter(self, limit=None, threshold=0.0): + def filter( + self, limit: int | None = None, threshold: float = 0.0 + ) -> SuggestionResults: """Return a view of these suggestions, filtered by the given limit and/or threshold, as another SuggestionResults object.""" @@ -137,5 +159,5 @@ def filter(self, limit=None, threshold=0.0): (batch.filter(limit, threshold) for batch in self.batches) ) - def __iter__(self): + def __iter__(self) -> itertools.chain: return iter(itertools.chain.from_iterable(self.batches)) diff --git a/annif/transform/__init__.py b/annif/transform/__init__.py index 59317f3f6..716d874a2 100644 --- a/annif/transform/__init__.py +++ b/annif/transform/__init__.py @@ -1,6 +1,8 @@ """Functionality for obtaining text transformation from string specification""" +from __future__ import annotations import re +from typing import TYPE_CHECKING import annif from annif.exception import ConfigurationException @@ -8,8 +10,14 @@ from . import inputlimiter, transform +if TYPE_CHECKING: + from annif.project import AnnifProject + from annif.transform.transform import TransformChain -def parse_specs(transform_specs): + +def parse_specs( + transform_specs: str, +) -> list[tuple[str, list, dict]]: """Parse a transformation specification into a list of tuples, e.g. 'transf_1(x),transf_2(y=42),transf_3' is parsed to [(transf_1, [x], {}), (transf_2, [], {y: 42}), (transf_3, [], {})].""" @@ -27,7 +35,7 @@ def parse_specs(transform_specs): return parsed -def get_transform(transform_specs, project): +def get_transform(transform_specs: str, project: AnnifProject | None) -> TransformChain: transform_defs = parse_specs(transform_specs) transform_classes = [] args = [] diff --git a/annif/transform/inputlimiter.py b/annif/transform/inputlimiter.py index 6883c4c9b..229766864 100644 --- a/annif/transform/inputlimiter.py +++ b/annif/transform/inputlimiter.py @@ -1,23 +1,29 @@ """A simple transformation that truncates the text of input documents to a given character length.""" +from __future__ import annotations + +from typing import TYPE_CHECKING from annif.exception import ConfigurationException from . 
import transform +if TYPE_CHECKING: + from annif.project import AnnifProject + class InputLimiter(transform.BaseTransform): name = "limit" - def __init__(self, project, input_limit): + def __init__(self, project: AnnifProject | None, input_limit: str) -> None: super().__init__(project) self.input_limit = int(input_limit) self._validate_value(self.input_limit) - def transform_fn(self, text): + def transform_fn(self, text: str) -> str: return text[: self.input_limit] - def _validate_value(self, input_limit): + def _validate_value(self, input_limit: int) -> None: if input_limit < 0: raise ConfigurationException( "input_limit in limit_input transform cannot be negative", diff --git a/annif/transform/langfilter.py b/annif/transform/langfilter.py index 8ee6285a7..6794eb370 100644 --- a/annif/transform/langfilter.py +++ b/annif/transform/langfilter.py @@ -1,5 +1,8 @@ """Transformation filtering out parts of a text that are in a language different from the language of the project.""" +from __future__ import annotations + +from typing import TYPE_CHECKING from simplemma.langdetect import in_target_language @@ -7,6 +10,9 @@ from . import transform +if TYPE_CHECKING: + from annif.project import AnnifProject + logger = annif.logger @@ -14,14 +20,18 @@ class LangFilter(transform.BaseTransform): name = "filter_lang" def __init__( - self, project, text_min_length=500, sentence_min_length=50, min_ratio=0.5 - ): + self, + project: AnnifProject, + text_min_length: int | str = 500, + sentence_min_length: int | str = 50, + min_ratio: float = 0.5, + ) -> None: super().__init__(project) self.text_min_length = int(text_min_length) self.sentence_min_length = int(sentence_min_length) self.min_ratio = float(min_ratio) - def transform_fn(self, text): + def transform_fn(self, text: str) -> str: if len(text) < self.text_min_length: return text @@ -30,7 +40,7 @@ def transform_fn(self, text): if len(sent) < self.sentence_min_length: retained_sentences.append(sent) continue - proportion = in_target_language(sent, lang=self.project.language) + proportion = in_target_language(sent, lang=(self.project.language,)) if proportion >= self.min_ratio: retained_sentences.append(sent) return " ".join(retained_sentences) diff --git a/annif/transform/transform.py b/annif/transform/transform.py index 42123ab56..db71fef37 100644 --- a/annif/transform/transform.py +++ b/annif/transform/transform.py @@ -1,10 +1,16 @@ """Common functionality for transforming text of input documents.""" +from __future__ import annotations import abc +from typing import TYPE_CHECKING, Type from annif.corpus import TransformingDocumentCorpus from annif.exception import ConfigurationException +if TYPE_CHECKING: + from annif.corpus.types import DocumentCorpus + from annif.project import AnnifProject + class BaseTransform(metaclass=abc.ABCMeta): """Base class for text transformations, which need to implement the @@ -12,7 +18,7 @@ class BaseTransform(metaclass=abc.ABCMeta): name = None - def __init__(self, project): + def __init__(self, project: AnnifProject | None) -> None: self.project = project @abc.abstractmethod @@ -26,7 +32,7 @@ class IdentityTransform(BaseTransform): name = "pass" - def transform_fn(self, text): + def transform_fn(self, text: str) -> str: return text @@ -34,11 +40,20 @@ class TransformChain: """Class instantiating and holding the transformation objects performing the actual text transformation.""" - def __init__(self, transform_classes, args, project): + def __init__( + self, + transform_classes: list[Type[BaseTransform]], + args: 
list[tuple[list, dict]], + project: AnnifProject | None, + ) -> None: self.project = project self.transforms = self._init_transforms(transform_classes, args) - def _init_transforms(self, transform_classes, args): + def _init_transforms( + self, + transform_classes: list[Type[BaseTransform]], + args: list[tuple[list, dict]], + ) -> list[BaseTransform]: transforms = [] for trans, (posargs, kwargs) in zip(transform_classes, args): try: @@ -51,10 +66,10 @@ def _init_transforms(self, transform_classes, args): ) return transforms - def transform_text(self, text): + def transform_text(self, text: str) -> str: for trans in self.transforms: text = trans.transform_fn(text) return text - def transform_corpus(self, corpus): + def transform_corpus(self, corpus: DocumentCorpus) -> TransformingDocumentCorpus: return TransformingDocumentCorpus(corpus, self.transform_text) diff --git a/annif/util.py b/annif/util.py index a664027f5..803aa8aea 100644 --- a/annif/util.py +++ b/annif/util.py @@ -1,10 +1,12 @@ """Utility functions for Annif""" +from __future__ import annotations import glob import logging import os import os.path import tempfile +from typing import Any, Callable from annif import logger @@ -12,11 +14,11 @@ class DuplicateFilter(logging.Filter): """Filter out log messages that have already been displayed.""" - def __init__(self): + def __init__(self) -> None: super().__init__() self.logged = set() - def filter(self, record): + def filter(self, record: logging.LogRecord) -> bool: current_log = hash((record.module, record.levelno, record.msg, record.args)) if current_log not in self.logged: self.logged.add(current_log) @@ -24,7 +26,9 @@ def filter(self, record): return False -def atomic_save(obj, dirname, filename, method=None): +def atomic_save( + obj: Any, dirname: str, filename: str, method: Callable | None = None +) -> None: """Save the given object (which must have a .save() method, unless the method parameter is given) into the given directory with the given filename, using a temporary file and renaming the temporary file to the @@ -44,14 +48,14 @@ def atomic_save(obj, dirname, filename, method=None): os.rename(fn, newname) -def cleanup_uri(uri): +def cleanup_uri(uri: str) -> str: """remove angle brackets from a URI, if any""" if uri.startswith("<") and uri.endswith(">"): return uri[1:-1] return uri -def parse_sources(sourcedef): +def parse_sources(sourcedef: str) -> list[tuple[str, float]]: """parse a source definition such as 'src1:1.0,src2' into a sequence of tuples (src_id, weight)""" @@ -69,7 +73,7 @@ def parse_sources(sourcedef): return [(srcid, weight / totalweight) for srcid, weight in sources] -def parse_args(param_string): +def parse_args(param_string: str) -> tuple[list, dict]: """Parse a string of comma separated arguments such as '42,43,key=abc' into a list of positional args [42, 43] and a dict of keyword args {key: abc}""" @@ -87,7 +91,7 @@ def parse_args(param_string): return posargs, kwargs -def boolean(val): +def boolean(val: Any) -> bool: """Convert the given value to a boolean True/False value, if it isn't already. 
True values are '1', 'yes', 'true', and 'on' (case insensitive), everything else is False.""" @@ -95,7 +99,7 @@ def boolean(val): return str(val).lower() in ("1", "yes", "true", "on") -def identity(x): +def identity(x: Any) -> Any: """Identity function: return the given argument unchanged""" return x diff --git a/annif/vocab.py b/annif/vocab.py index 14f6209ba..333fa0d69 100644 --- a/annif/vocab.py +++ b/annif/vocab.py @@ -1,6 +1,8 @@ """Vocabulary management functionality for Annif""" +from __future__ import annotations import os.path +from typing import TYPE_CHECKING import annif import annif.corpus @@ -8,6 +10,13 @@ from annif.datadir import DatadirMixin from annif.exception import NotInitializedException +if TYPE_CHECKING: + from rdflib.graph import Graph + + from annif.corpus.skos import SubjectFileSKOS + from annif.corpus.subject import SubjectCorpus, SubjectIndex + + logger = annif.logger @@ -23,18 +32,18 @@ class AnnifVocabulary(DatadirMixin): INDEX_FILENAME_TTL = "subjects.ttl" INDEX_FILENAME_CSV = "subjects.csv" - def __init__(self, vocab_id, datadir): + def __init__(self, vocab_id: str, datadir: str) -> None: DatadirMixin.__init__(self, datadir, "vocabs", vocab_id) self.vocab_id = vocab_id self._skos_vocab = None - def _create_subject_index(self, subject_corpus): + def _create_subject_index(self, subject_corpus: SubjectCorpus) -> SubjectIndex: subjects = annif.corpus.SubjectIndex() subjects.load_subjects(subject_corpus) annif.util.atomic_save(subjects, self.datadir, self.INDEX_FILENAME_CSV) return subjects - def _update_subject_index(self, subject_corpus): + def _update_subject_index(self, subject_corpus: SubjectCorpus) -> SubjectIndex: old_subjects = self.subjects new_subjects = annif.corpus.SubjectIndex() new_subjects.load_subjects(subject_corpus) @@ -55,7 +64,7 @@ def _update_subject_index(self, subject_corpus): return updated_subjects @property - def subjects(self): + def subjects(self) -> SubjectIndex: if self._subjects is None: path = os.path.join(self.datadir, self.INDEX_FILENAME_CSV) if os.path.exists(path): @@ -66,7 +75,7 @@ def subjects(self): return self._subjects @property - def skos(self): + def skos(self) -> SubjectFileSKOS: """return the subject vocabulary from SKOS file""" if self._skos_vocab is not None: return self._skos_vocab @@ -94,14 +103,18 @@ def skos(self): raise NotInitializedException(f"graph file {path} not found") - def __len__(self): + def __len__(self) -> int: return len(self.subjects) @property - def languages(self): + def languages(self) -> list[str]: return self.subjects.languages - def load_vocabulary(self, subject_corpus, force=False): + def load_vocabulary( + self, + subject_corpus: SubjectCorpus, + force: bool = False, + ) -> None: """Load subjects from a subject corpus and save them into one or more subject index files as well as a SKOS/Turtle file for later use. If force=True, replace the existing subject index completely.""" @@ -119,6 +132,6 @@ def load_vocabulary(self, subject_corpus, force=False): logger.info(f"saving vocabulary into SKOS file {skosfile}") subject_corpus.save_skos(skosfile) - def as_graph(self): + def as_graph(self) -> Graph: """return the vocabulary as an rdflib graph""" return self.skos.graph diff --git a/setup.cfg b/setup.cfg index cffe59417..bf3f116d6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,7 +3,7 @@ current_version = 1.0.0-dev commit = True tag = True parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+))? 
-serialize = +serialize = {major}.{minor}.{patch}-{release} {major}.{minor}.{patch} @@ -13,7 +13,7 @@ serialize = [bumpversion:part:release] optional_value = prod -values = +values = dev prod @@ -23,3 +23,7 @@ test = pytest [flake8] max-line-length = 88 ignore = E203 W503 + +[coverage:report] +exclude_also = + if TYPE_CHECKING: From 31ef59de9f3c03e5bbaabb9621eb2a7979b74502 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 7 Jun 2023 11:28:55 +0300 Subject: [PATCH 24/83] Fix missing confirmation in apt-upgrade (#711) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2372cc044..dbd98cb07 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ ARG optional_dependencies="voikko fasttext nn omikuji yake spacy stwfsa" ARG POETRY_VIRTUALENVS_CREATE=false # Install system dependencies needed at runtime: -RUN apt-get update && apt-get upgrade && \ +RUN apt-get update && apt-get upgrade -y && \ if [[ $optional_dependencies =~ "voikko" ]]; then \ apt-get install -y --no-install-recommends \ libvoikko1 \ From 961a472bef99a395706f24baa9dd04035a040818 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 7 Jun 2023 13:05:47 +0300 Subject: [PATCH 25/83] Add GH Actions job for testing built Docker image --- .github/workflows/cicd.yml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index fd28a25c0..2c51733cd 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -111,9 +111,23 @@ jobs: # A new key is created to update the cache if some dependency has been updated key: poetry-installation-and-cache-${{ matrix.python-version }}-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} + test-docker-image: + name: "test Docker image" + runs-on: ubuntu-22.04 + timeout-minutes: 15 + steps: + - name: "Build image for testing" + uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v3.2.0 + with: + push: false + tags: test-image + - name: "Test with pytest" + run: | + docker run --rm --workdir /Annif test-image pytest + publish-docker-latest: name: publish latest Docker image - needs: [lint, test] + needs: [lint, test, test-docker-image] runs-on: ubuntu-22.04 timeout-minutes: 15 if: github.event_name == 'push' && github.ref == 'refs/heads/main' From a72b4c8919d76f776b2484ff1140adbadd91b6b5 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 7 Jun 2023 13:18:15 +0300 Subject: [PATCH 26/83] Disable pytest cache dir to prevent warnings as it cannot be created --- .github/workflows/cicd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 2c51733cd..8a344c392 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -123,7 +123,7 @@ jobs: tags: test-image - name: "Test with pytest" run: | - docker run --rm --workdir /Annif test-image pytest + docker run --rm --workdir /Annif test-image pytest -p no:cacheprovider publish-docker-latest: name: publish latest Docker image From 1145a4b8191213105c6872e17f050cca917fa2a6 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 7 Jun 2023 13:22:02 +0300 Subject: [PATCH 27/83] Reorder workflow: require "publish Docker latest" before "publish release" --- .github/workflows/cicd.yml | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 8a344c392..c06c376d3 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -154,7 +154,7 @@ jobs: publish-release: name: publish release - needs: [lint, test] + needs: [publish-docker-latest] runs-on: ubuntu-22.04 if: github.event_name == 'push' && contains(github.ref, 'refs/tags/') steps: From 51db308b855cf98892ca85b2385c0e72de832f5b Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 9 Jun 2023 15:30:56 +0300 Subject: [PATCH 28/83] Fix job order: Do not require "publish Docker latest" before "publish release" (#714) Reverts 1145a4b8191213105c6872e17f050cca917fa2a6 --- .github/workflows/cicd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index c06c376d3..70a779166 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -154,7 +154,7 @@ jobs: publish-release: name: publish release - needs: [publish-docker-latest] + needs: [lint, test, test-docker-image] runs-on: ubuntu-22.04 if: github.event_name == 'push' && contains(github.ref, 'refs/tags/') steps: From a7bd0ee78c00a7139064ea66062803ec3310e559 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 19 Jun 2023 11:33:51 +0300 Subject: [PATCH 29/83] GH Actions workflow for manual runs for rebuilding Docker image --- .github/workflows/docker-rebuild.yml | 50 ++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 .github/workflows/docker-rebuild.yml diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml new file mode 100644 index 000000000..a8e456746 --- /dev/null +++ b/.github/workflows/docker-rebuild.yml @@ -0,0 +1,50 @@ +name: "Docker rebuild" +on: + push: # TODO Remove + branches: + - rebuild-docker-image + workflow_dispatch: +jobs: + rebuild-docker-images: + name: "Docker rebuild" + runs-on: ubuntu-22.04 + timeout-minutes: 15 + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: "Checkout most recent tag" + run: | + git fetch --tags origin + git describe --abbrev=0 | xargs git checkout + - name: "Build for testing" + uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v3.2.0 + with: + push: false + tags: test-image + - name: "Test with pytest" + run: | + docker run --rm --workdir /Annif test-image pytest -p no:cacheprovider + - name: Login to Quay.io + uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + with: + registry: quay.io + username: ${{ secrets.YHTEENTOIMIVUUSPALVELUT_QUAY_IO_USERNAME }} + password: ${{ secrets.YHTEENTOIMIVUUSPALVELUT_QUAY_IO_PASSWORD }} + - name: Docker meta + id: meta + uses: docker/metadata-action@2c0bd771b40637d97bf205cbccdd294a32112176 # v4.5.0 + with: + context: git + images: quay.io/natlibfi/annif + flavor: | + latest=false + tags: | + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + - name: Build and push to Quay.io + uses: docker/build-push-action@44ea916f6c540f9302d50c2b1e5a8dc071f15cdf # v4.1.0 + with: + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} From bea17d6f6fb5c442234e3a42030894287a0f4938 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 19 Jun 2023 13:14:03 +0300 Subject: [PATCH 30/83] Add date of build as suffix 
to semver tags --- .github/workflows/cicd.yml | 1 + .github/workflows/docker-rebuild.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 70a779166..5a1c512fd 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -184,6 +184,7 @@ jobs: images: quay.io/natlibfi/annif tags: | type=semver,pattern={{version}} + type=semver,pattern={{version}},suffix=-{{date 'YYYYMMDD'}} type=semver,pattern={{major}}.{{minor}} - name: Build and push to Quay.io uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v3.2.0 diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml index a8e456746..f5f2e1546 100644 --- a/.github/workflows/docker-rebuild.yml +++ b/.github/workflows/docker-rebuild.yml @@ -41,6 +41,7 @@ jobs: latest=false tags: | type=semver,pattern={{version}} + type=semver,pattern={{version}},suffix=-{{date 'YYYYMMDD'}} type=semver,pattern={{major}}.{{minor}} - name: Build and push to Quay.io uses: docker/build-push-action@44ea916f6c540f9302d50c2b1e5a8dc071f15cdf # v4.1.0 From 2e42403f3ef3f7ef419bbb6080cd3474d84b18e2 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 20 Jun 2023 12:00:20 +0300 Subject: [PATCH 31/83] Fix Docker image to be build on git tag instead of current main branch --- .github/workflows/docker-rebuild.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml index f5f2e1546..207d50c49 100644 --- a/.github/workflows/docker-rebuild.yml +++ b/.github/workflows/docker-rebuild.yml @@ -20,6 +20,7 @@ jobs: - name: "Build for testing" uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v3.2.0 with: + context: . push: false tags: test-image - name: "Test with pytest" @@ -46,6 +47,7 @@ jobs: - name: Build and push to Quay.io uses: docker/build-push-action@44ea916f6c540f9302d50c2b1e5a8dc071f15cdf # v4.1.0 with: + context: . 
push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} From bc664cf6b3db3da40a2f48d13507e5eb4e21fa0f Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 20 Jun 2023 14:04:54 +0300 Subject: [PATCH 32/83] Reorder tags in repo listing --- .github/workflows/cicd.yml | 2 +- .github/workflows/docker-rebuild.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 5a1c512fd..ad01154af 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -183,8 +183,8 @@ jobs: with: images: quay.io/natlibfi/annif tags: | - type=semver,pattern={{version}} type=semver,pattern={{version}},suffix=-{{date 'YYYYMMDD'}} + type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} - name: Build and push to Quay.io uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v3.2.0 diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml index 207d50c49..5a100b499 100644 --- a/.github/workflows/docker-rebuild.yml +++ b/.github/workflows/docker-rebuild.yml @@ -41,8 +41,8 @@ jobs: flavor: | latest=false tags: | - type=semver,pattern={{version}} type=semver,pattern={{version}},suffix=-{{date 'YYYYMMDD'}} + type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} - name: Build and push to Quay.io uses: docker/build-push-action@44ea916f6c540f9302d50c2b1e5a8dc071f15cdf # v4.1.0 From 29e9b0785be4345f621d8cc42840f0ad2d8328e0 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 20 Jun 2023 14:41:17 +0300 Subject: [PATCH 33/83] Remove push trigger --- .github/workflows/docker-rebuild.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml index 5a100b499..89aafdc45 100644 --- a/.github/workflows/docker-rebuild.yml +++ b/.github/workflows/docker-rebuild.yml @@ -1,8 +1,5 @@ name: "Docker rebuild" on: - push: # TODO Remove - branches: - - rebuild-docker-image workflow_dispatch: jobs: rebuild-docker-images: From 0af952e69cf09014247222a993b154e9f2baf54d Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Thu, 22 Jun 2023 10:48:37 +0300 Subject: [PATCH 34/83] Drop unnecessary colon from workflow trigger setting --- .github/workflows/docker-rebuild.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml index 89aafdc45..bfadd7299 100644 --- a/.github/workflows/docker-rebuild.yml +++ b/.github/workflows/docker-rebuild.yml @@ -1,6 +1,5 @@ name: "Docker rebuild" -on: - workflow_dispatch: +on: workflow_dispatch jobs: rebuild-docker-images: name: "Docker rebuild" From da3c5eb181a51de6940977aa1bcdc0be6ebe23d7 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Thu, 22 Jun 2023 13:22:46 +0300 Subject: [PATCH 35/83] Drop unnecessary checkout step, use git context (default) in build steps --- .github/workflows/docker-rebuild.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml index bfadd7299..aaf3baf2b 100644 --- a/.github/workflows/docker-rebuild.yml +++ b/.github/workflows/docker-rebuild.yml @@ -9,14 +9,9 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - name: "Checkout most recent tag" - run: | 
- git fetch --tags origin - git describe --abbrev=0 | xargs git checkout - name: "Build for testing" uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v3.2.0 with: - context: . push: false tags: test-image - name: "Test with pytest" @@ -43,7 +38,6 @@ jobs: - name: Build and push to Quay.io uses: docker/build-push-action@44ea916f6c540f9302d50c2b1e5a8dc071f15cdf # v4.1.0 with: - context: . push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} From 6dbebbbf894204a4c3f2c51f26f31d1a21c1dbc8 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 26 Jun 2023 13:20:42 +0300 Subject: [PATCH 36/83] Drop the initial checkout step too; use default workflow context in metadata step --- .github/workflows/docker-rebuild.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/docker-rebuild.yml b/.github/workflows/docker-rebuild.yml index aaf3baf2b..2a3f53b79 100644 --- a/.github/workflows/docker-rebuild.yml +++ b/.github/workflows/docker-rebuild.yml @@ -6,9 +6,6 @@ jobs: runs-on: ubuntu-22.04 timeout-minutes: 15 steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - name: "Build for testing" uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v3.2.0 with: @@ -27,7 +24,6 @@ jobs: id: meta uses: docker/metadata-action@2c0bd771b40637d97bf205cbccdd294a32112176 # v4.5.0 with: - context: git images: quay.io/natlibfi/annif flavor: | latest=false From 235ce35fede260d17a4eddadb850343a39c8ad1e Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:36:07 +0300 Subject: [PATCH 37/83] Install Poetry with pipx using Python version defined in matrix --- .github/actions/prepare/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/prepare/action.yml b/.github/actions/prepare/action.yml index a811812b4..f271a6f07 100644 --- a/.github/actions/prepare/action.yml +++ b/.github/actions/prepare/action.yml @@ -25,7 +25,7 @@ runs: - name: Install Poetry shell: bash run: | - pipx install poetry==${{ inputs.poetry-version }} + pipx install poetry==${{ inputs.poetry-version }} --python $(which python${{ inputs.python-version }}) - name: Set up Python ${{ inputs.python-version }} uses: actions/setup-python@5ccb29d8773c3f3f653e1705f474dfaa8a06a912 # v4.4.0 with: From 676f5cabfb70106303273f12f33107273fcc87df Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 27 Jun 2023 12:43:58 +0300 Subject: [PATCH 38/83] Upgrade to Poetry v1.5.1 --- .github/workflows/cicd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index ad01154af..77a16e4a1 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -10,7 +10,7 @@ on: env: PIPX_HOME: "/home/runner/.cache/pipx" PIPX_BIN_DIR: "/home/runner/.local/bin" - POETRY_VERSION: "1.4.1" + POETRY_VERSION: "1.5.1" jobs: lint: From 74b78429e86d25a9d52cd8bcfa045228dafe41f0 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:45:12 +0300 Subject: [PATCH 39/83] Set up Python step before Install Poetry step --- .github/actions/prepare/action.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/actions/prepare/action.yml b/.github/actions/prepare/action.yml index f271a6f07..9c6d996ca 100644 --- 
a/.github/actions/prepare/action.yml +++ b/.github/actions/prepare/action.yml @@ -22,11 +22,11 @@ runs: key: ignore-me restore-keys: | poetry-installation-and-cache-${{ inputs.python-version }}-${{ inputs.poetry-version }}- - - name: Install Poetry - shell: bash - run: | - pipx install poetry==${{ inputs.poetry-version }} --python $(which python${{ inputs.python-version }}) - name: Set up Python ${{ inputs.python-version }} uses: actions/setup-python@5ccb29d8773c3f3f653e1705f474dfaa8a06a912 # v4.4.0 with: python-version: ${{ inputs.python-version }} + - name: Install Poetry + shell: bash + run: | + pipx install poetry==${{ inputs.poetry-version }} --python $(which python${{ inputs.python-version }}) From e67aa7332d4220f59a17ddf8d7f8a775478674f6 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 30 Jun 2023 09:28:03 +0300 Subject: [PATCH 40/83] Revert "Install Poetry with pipx using Python version defined in matrix" This reverts commit 235ce35fede260d17a4eddadb850343a39c8ad1e. --- .github/actions/prepare/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/prepare/action.yml b/.github/actions/prepare/action.yml index 9c6d996ca..c9e849e2f 100644 --- a/.github/actions/prepare/action.yml +++ b/.github/actions/prepare/action.yml @@ -29,4 +29,4 @@ runs: - name: Install Poetry shell: bash run: | - pipx install poetry==${{ inputs.poetry-version }} --python $(which python${{ inputs.python-version }}) + pipx install poetry==${{ inputs.poetry-version }} From 1442f031ffd9f4b818c22dae5027c3c3fdb7dcfb Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 30 Jun 2023 09:29:53 +0300 Subject: [PATCH 41/83] Set Python version used in Poetry venv to version defined in matrix --- .github/actions/prepare/action.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/actions/prepare/action.yml b/.github/actions/prepare/action.yml index c9e849e2f..2ca643937 100644 --- a/.github/actions/prepare/action.yml +++ b/.github/actions/prepare/action.yml @@ -30,3 +30,4 @@ runs: shell: bash run: | pipx install poetry==${{ inputs.poetry-version }} + poetry env use ${{ inputs.python-version }} From 0b4157dba2ebfea6045228c72106986676f7d336 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 27 Jun 2023 10:39:12 +0300 Subject: [PATCH 42/83] Add unit-testing on Python 3.11 to CI/CD pipeline allowing tests to fail --- .github/workflows/cicd.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 77a16e4a1..57ada68c7 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -57,9 +57,14 @@ jobs: test: runs-on: ubuntu-22.04 timeout-minutes: 15 + continue-on-error: ${{ matrix.experimental }} strategy: matrix: python-version: ["3.8", "3.9", "3.10"] + experimental: [false] + include: + - python-version: "3.11" + experimental: true name: test on Python ${{ matrix.python-version }} steps: - uses: actions/checkout@v3 @@ -91,6 +96,10 @@ jobs: if [[ ${{ matrix.python-version }} == '3.10' ]]; then poetry install -E "nn omikuji yake stwfsa"; fi + # For Python 3.11: + if [[ ${{ matrix.python-version }} == '3.11' ]]; then + poetry install --all-extras; + fi poetry run python -m nltk.downloader punkt - name: Test with pytest run: | From dd88eef2723ebe0c8e45d214e8d8cd4f59ce1a8f Mon Sep 17 00:00:00 2001 From: Juho Inkinen 
<34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 27 Jun 2023 15:03:00 +0300 Subject: [PATCH 43/83] Allow installing Annif on Python 3.11 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 008b388e8..6cf258e3e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ classifiers=[ ] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.8,<3.12" connexion = {version = "2.14.*", extras = ["swagger-ui"]} flask = ">=1.0.4,<3" From f595278fc7dfc62c6c9733b8d9067788e0f0e4fb Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 27 Jun 2023 16:33:56 +0300 Subject: [PATCH 44/83] Install & import tomli only on Python <3.11, on 3.11 use tomllib --- annif/config.py | 9 ++++++--- pyproject.toml | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/annif/config.py b/annif/config.py index ab8f0d568..810249bd6 100644 --- a/annif/config.py +++ b/annif/config.py @@ -5,7 +5,10 @@ import os.path from glob import glob -import tomli +try: + import tomllib +except ImportError: + import tomli as tomllib import annif import annif.util @@ -45,8 +48,8 @@ def __init__(self, filename: str) -> None: with open(filename, "rb") as projf: try: logger.debug(f"Reading configuration file {filename} in TOML format") - self._config = tomli.load(projf) - except tomli.TOMLDecodeError as err: + self._config = tomllib.load(projf) + except tomllib.TOMLDecodeError as err: raise ConfigurationException( f"Parsing TOML file '{filename}' failed: {err}" ) diff --git a/pyproject.toml b/pyproject.toml index 6cf258e3e..100e0558e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,7 +45,7 @@ gunicorn = "20.1.*" numpy = "1.24.*" optuna = "2.10.*" python-dateutil = "2.8.*" -tomli = "2.0.*" +tomli = { version = "2.0.*", python = "<3.11" } simplemma = "0.9.*" jsonschema = "4.17.*" From c5f1617ac8afbda70c0f4137dbf49c59ae73f45e Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 5 Jul 2023 11:07:39 +0300 Subject: [PATCH 45/83] Use AnnifBackend.default_params() method via inheriting in most backends --- annif/backend/backend.py | 4 +++- annif/backend/dummy.py | 3 --- annif/backend/mllm.py | 7 +------ annif/backend/nn_ensemble.py | 5 ----- annif/backend/omikuji.py | 5 ----- annif/backend/pav.py | 7 +------ annif/backend/svc.py | 5 ----- annif/backend/yake.py | 5 ----- 8 files changed, 5 insertions(+), 36 deletions(-) diff --git a/annif/backend/backend.py b/annif/backend/backend.py index f35b0a312..5d722e7b2 100644 --- a/annif/backend/backend.py +++ b/annif/backend/backend.py @@ -40,7 +40,9 @@ def __init__( self.datadir = project.datadir def default_params(self) -> dict[str, Any]: - return self.DEFAULT_PARAMETERS + params = AnnifBackend.DEFAULT_PARAMETERS.copy() + params.update(self.DEFAULT_PARAMETERS) # Optional backend specific parameters + return params @property def params(self) -> dict[str, Any]: diff --git a/annif/backend/dummy.py b/annif/backend/dummy.py index 5f62517a5..bb52e22e6 100644 --- a/annif/backend/dummy.py +++ b/annif/backend/dummy.py @@ -18,9 +18,6 @@ class DummyBackend(backend.AnnifLearningBackend): is_trained = True modification_time = None - def default_params(self) -> dict[str, int]: - return backend.AnnifBackend.DEFAULT_PARAMETERS - def initialize(self, parallel: bool = False) -> None: self.initialized = True diff --git a/annif/backend/mllm.py b/annif/backend/mllm.py index f73bf8324..cbcef11b1 100644 --- 
a/annif/backend/mllm.py +++ b/annif/backend/mllm.py @@ -13,7 +13,7 @@ from annif.lexical.mllm import MLLMModel from annif.suggestion import vector_to_suggestions -from . import backend, hyperopt +from . import hyperopt if TYPE_CHECKING: from collections.abc import Iterator @@ -95,11 +95,6 @@ class MLLMBackend(hyperopt.AnnifHyperoptBackend): def get_hp_optimizer(self, corpus: DocumentCorpus, metric: str) -> MLLMOptimizer: return MLLMOptimizer(self, corpus, metric) - def default_params(self) -> dict[str, Any]: - params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() - params.update(self.DEFAULT_PARAMETERS) - return params - def _load_model(self) -> MLLMModel: path = os.path.join(self.datadir, self.MODEL_FILE) self.debug("loading model from {}".format(path)) diff --git a/annif/backend/nn_ensemble.py b/annif/backend/nn_ensemble.py index 658bd79be..169eb8234 100644 --- a/annif/backend/nn_ensemble.py +++ b/annif/backend/nn_ensemble.py @@ -112,11 +112,6 @@ class NNEnsembleBackend(backend.AnnifLearningBackend, ensemble.BaseEnsembleBacke # defaults for uninitialized instances _model = None - def default_params(self) -> dict[str, Any]: - params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() - params.update(self.DEFAULT_PARAMETERS) - return params - def initialize(self, parallel: bool = False) -> None: super().initialize(parallel) if self._model is not None: diff --git a/annif/backend/omikuji.py b/annif/backend/omikuji.py index 6c864b89e..7a2e6a1bb 100644 --- a/annif/backend/omikuji.py +++ b/annif/backend/omikuji.py @@ -43,11 +43,6 @@ class OmikujiBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): "collapse_every_n_layers": 0, } - def default_params(self) -> dict[str, Any]: - params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() - params.update(self.DEFAULT_PARAMETERS) - return params - def _initialize_model(self) -> None: if self._model is None: path = os.path.join(self.datadir, self.MODEL_FILE) diff --git a/annif/backend/pav.py b/annif/backend/pav.py index da8a6e2c1..61f4362d1 100644 --- a/annif/backend/pav.py +++ b/annif/backend/pav.py @@ -17,7 +17,7 @@ from annif.exception import NotInitializedException, NotSupportedException from annif.suggestion import SubjectSuggestion, SuggestionBatch -from . import backend, ensemble +from . 
import ensemble if TYPE_CHECKING: from annif.corpus.document import DocumentCorpus @@ -36,11 +36,6 @@ class PAVBackend(ensemble.BaseEnsembleBackend): DEFAULT_PARAMETERS = {"min-docs": 10} - def default_params(self) -> dict[str, Any]: - params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() - params.update(self.DEFAULT_PARAMETERS) - return params - def initialize(self, parallel: bool = False) -> None: super().initialize(parallel) if self._models is not None: diff --git a/annif/backend/svc.py b/annif/backend/svc.py index 1e7932c3e..e2f6c33a8 100644 --- a/annif/backend/svc.py +++ b/annif/backend/svc.py @@ -33,11 +33,6 @@ class SVCBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): DEFAULT_PARAMETERS = {"min_df": 1, "ngram": 1} - def default_params(self) -> dict[str, Any]: - params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() - params.update(self.DEFAULT_PARAMETERS) - return params - def _initialize_model(self) -> None: if self._model is None: path = os.path.join(self.datadir, self.MODEL_FILE) diff --git a/annif/backend/yake.py b/annif/backend/yake.py index 1e6adfdd5..8f7d38c50 100644 --- a/annif/backend/yake.py +++ b/annif/backend/yake.py @@ -45,11 +45,6 @@ class YakeBackend(backend.AnnifBackend): "remove_parentheses": False, } - def default_params(self) -> dict[str, Any]: - params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy() - params.update(self.DEFAULT_PARAMETERS) - return params - @property def is_trained(self): return True From bf5f8445ccd7b09db646b581ef24ed0849138160 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 5 Jul 2023 11:08:45 +0300 Subject: [PATCH 46/83] Adapt stwfsa default_params test to include limit parameter --- tests/test_backend_stwfsa.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_backend_stwfsa.py b/tests/test_backend_stwfsa.py index ecac18f38..14b5bcf79 100644 --- a/tests/test_backend_stwfsa.py +++ b/tests/test_backend_stwfsa.py @@ -24,6 +24,7 @@ def test_stwfsa_default_params(project): backend_id=stwfsa_backend_name, config_params={}, project=project ) expected_default_params = { + "limit": 100, "concept_type_uri": "http://www.w3.org/2004/02/skos/core#Concept", "sub_thesaurus_type_uri": "http://www.w3.org/2004/02/skos/core#Collection", "thesaurus_relation_type_uri": "http://www.w3.org/2004/02/skos/core#member", From 02f15332a07ce832d52da6f8543775cdba67bf0a Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Thu, 3 Aug 2023 12:02:38 +0300 Subject: [PATCH 47/83] Suppress TensorFlow info messages to debug level (#721) * Suppress TF info messages during import * Suppress all TF info messages by default * Suppress all TF info messages by default, really * Set TF log level by mapping Annif log level * Make tests pass If this test is run after other tests with @mock.patch.dict(os.environ,...), the tests with caplog fixture in test_config.py fail * Alter level mapping Tone down only TensorFlow INFO level: INFO messages by TFare shown only when running Annif at DEBUG level * Fix comment on TF level * Add docstring --- annif/__init__.py | 19 +++++++++++++++++++ tests/test_cli.py | 21 +++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/annif/__init__.py b/annif/__init__.py index bb196b4ee..a71b9f379 100644 --- a/annif/__init__.py +++ b/annif/__init__.py @@ -21,6 +21,8 @@ def create_flask_app(config_name: str | None = None) -> Flask: """Create a Flask app to be used by the CLI.""" from flask import Flask + 
_set_tensorflow_loglevel() + app = Flask(__name__) config_name = _get_config_name(config_name) logger.debug(f"creating flask app with configuration {config_name}") @@ -75,3 +77,20 @@ def _get_config_name(config_name: str | None) -> str: else: config_name = "annif.default_config.ProductionConfig" # pragma: no cover return config_name + + +def _set_tensorflow_loglevel(): + """Set TensorFlow log level based on Annif log level (--verbosity/-v + option) using an environment variable. INFO messages by TF are shown only on + DEBUG (or NOTSET) level of Annif.""" + annif_loglevel = logger.getEffectiveLevel() + tf_loglevel_mapping = { + 0: "0", # NOTSET + 10: "0", # DEBUG + 20: "1", # INFO + 30: "1", # WARNING + 40: "2", # ERROR + 50: "3", # CRITICAL + } + tf_loglevel = tf_loglevel_mapping[annif_loglevel] + os.environ.setdefault("TF_CPP_MIN_LOG_LEVEL", tf_loglevel) diff --git a/tests/test_cli.py b/tests/test_cli.py index ef44df8a3..77adeab0f 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -23,6 +23,27 @@ PROJECTS_CONFIG_PATH = "tests/projects_for_config_path_option.cfg" +@mock.patch.dict(os.environ, clear=True) +def test_tensorflow_loglevel(): + tf_env = "TF_CPP_MIN_LOG_LEVEL" + + runner.invoke(annif.cli.cli, ["list-projects", "-v", "DEBUG"]) + assert os.environ[tf_env] == "0" # Show INFO, WARNING and ERROR messages by TF + os.environ.pop(tf_env) + runner.invoke(annif.cli.cli, ["list-projects"]) # INFO level by default + assert os.environ[tf_env] == "1" # Show WARNING and ERROR messages by TF + os.environ.pop(tf_env) + runner.invoke(annif.cli.cli, ["list-projects", "-v", "WARN"]) + assert os.environ[tf_env] == "1" # Show WARNING and ERROR messages by TF + os.environ.pop(tf_env) + runner.invoke(annif.cli.cli, ["list-projects", "-v", "ERROR"]) + assert os.environ[tf_env] == "2" # Show ERROR messages by TF + os.environ.pop(tf_env) + runner.invoke(annif.cli.cli, ["list-projects", "-v", "CRITICAL"]) + assert os.environ[tf_env] == "3" # Show no messages by TF + os.environ.pop(tf_env) + + def test_list_projects(): result = runner.invoke(annif.cli.cli, ["list-projects"]) assert not result.exception From 417afa1e184303ed4507fcea91455f4b96b8d2a9 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Thu, 3 Aug 2023 15:23:29 +0300 Subject: [PATCH 48/83] Consider only model files for train state and mtime --- annif/backend/backend.py | 15 +++++++++++++-- tests/test_project.py | 14 ++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/annif/backend/backend.py b/annif/backend/backend.py index 5d722e7b2..8c22c54ef 100644 --- a/annif/backend/backend.py +++ b/annif/backend/backend.py @@ -51,15 +51,26 @@ def params(self) -> dict[str, Any]: params.update(self.config_params) return params + @property + def _model_file_paths(self) -> list: + all_paths = glob(os.path.join(self.datadir, "*")) + ignore_patterns = ("*-train*", "vectorizer") + ignore_paths = [ + path + for igp in ignore_patterns + for path in glob(os.path.join(self.datadir, igp)) + ] + return list(set(all_paths) - set(ignore_paths)) + @property def is_trained(self) -> bool: - return bool(glob(os.path.join(self.datadir, "*"))) + return bool(self._model_file_paths) @property def modification_time(self) -> datetime | None: mtimes = [ datetime.utcfromtimestamp(os.path.getmtime(p)) - for p in glob(os.path.join(self.datadir, "*")) + for p in self._model_file_paths ] most_recent = max(mtimes, default=None) if most_recent is None: diff --git a/tests/test_project.py b/tests/test_project.py 
index a6294edb6..e626e180c 100644 --- a/tests/test_project.py +++ b/tests/test_project.py @@ -152,6 +152,20 @@ def test_project_tfidf_is_not_trained(registry): assert not project.is_trained +def test_project_tfidf_is_not_trained_prepared_only(registry, testdatadir): + testdatadir.join("projects/tfidf-fi").ensure("vectorizer") + testdatadir.join("projects/tfidf-fi").ensure("dummy-tfidf-train.txt") + project = registry.get_project("tfidf-fi") + assert not project.is_trained + + +def test_project_tfidf_modification_time_prepared_only(registry, testdatadir): + testdatadir.join("projects/tfidf-fi").ensure("vectorizer") + testdatadir.join("projects/tfidf-fi").ensure("dummy-tfidf-train.txt") + project = registry.get_project("tfidf-fi") + assert project.modification_time is None + + def test_project_train_tfidf(registry, document_corpus, testdatadir): project = registry.get_project("tfidf-fi") project.train(document_corpus) From 54f4136242209bf999ae97f3ea70e2a1e8aa9fdf Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 4 Aug 2023 12:40:42 +0300 Subject: [PATCH 49/83] Add "tmp-" prefix to tempfile names and ignore them as model files --- annif/backend/backend.py | 2 +- annif/util.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/annif/backend/backend.py b/annif/backend/backend.py index 8c22c54ef..6a63c86b2 100644 --- a/annif/backend/backend.py +++ b/annif/backend/backend.py @@ -54,7 +54,7 @@ def params(self) -> dict[str, Any]: @property def _model_file_paths(self) -> list: all_paths = glob(os.path.join(self.datadir, "*")) - ignore_patterns = ("*-train*", "vectorizer") + ignore_patterns = ("*-train*", "tmp-*", "vectorizer") ignore_paths = [ path for igp in ignore_patterns diff --git a/annif/util.py b/annif/util.py index 803aa8aea..1702c2e4b 100644 --- a/annif/util.py +++ b/annif/util.py @@ -35,6 +35,7 @@ def atomic_save( final name.""" prefix, suffix = os.path.splitext(filename) + prefix = "tmp-" + prefix tempfd, tempfilename = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dirname) os.close(tempfd) logger.debug("saving %s to temporary file %s", str(obj)[:90], tempfilename) From 7f707e323efc33f3e871ce65b5a8578942b3e434 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 15:01:06 +0300 Subject: [PATCH 50/83] Stop using to-be-deprected global site-packages directory --- .readthedocs.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 24c88d8ab..ab9b0e4ff 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -33,4 +33,3 @@ python: - requirements: docs/requirements.txt - method: pip path: . 
- system_packages: true From 4cde80091912d6e1e31a3982ea3c9359ff153e52 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 15:10:13 +0300 Subject: [PATCH 51/83] Add stwfsa as dependency to install --- .readthedocs.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.readthedocs.yml b/.readthedocs.yml index ab9b0e4ff..97e561f04 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -27,6 +27,7 @@ python: - nn - omikuji - fasttext + - stwfsa - yake - pycld3 - spacy From 7bab5fcc4f7e636557a9fb4e3eb08d188b4b21ae Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 15:11:21 +0300 Subject: [PATCH 52/83] Remove pycld3 dependency --- .readthedocs.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 97e561f04..a23b05577 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -29,7 +29,6 @@ python: - fasttext - stwfsa - yake - - pycld3 - spacy - requirements: docs/requirements.txt - method: pip From 6f37052e0220862d283c8a580baa3f6f985328df Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 15:22:59 +0300 Subject: [PATCH 53/83] Upgrade to ubuntu-22.04 image --- .readthedocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index a23b05577..c07869569 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -6,7 +6,7 @@ version: 2 build: - os: "ubuntu-20.04" + os: "ubuntu-22.04" tools: python: "3.9" From 838774759d6b15358020e8b5468d54de97c64d07 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 15:24:10 +0300 Subject: [PATCH 54/83] Upgrade to Python 3.10 --- .readthedocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index c07869569..fc039f256 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,7 +8,7 @@ version: 2 build: os: "ubuntu-22.04" tools: - python: "3.9" + python: "3.10" # Build documentation in the docs/ directory with Sphinx sphinx: From 0698126c07b4c4591117e894274ca765624f85ab Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 15:54:36 +0300 Subject: [PATCH 55/83] Make copyright years span to be updated automatically --- docs/conf.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 17183ac12..bdf8bdeb2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,6 +13,7 @@ import os import re import sys +from datetime import date sys.path.insert(0, os.path.abspath("..")) @@ -20,7 +21,11 @@ # -- Project information ----------------------------------------------------- project = "annif" -copyright = "2017, University Of Helsinki (The National Library Of Finland)" +copyright = ( + f"2017-{date.today().year}, University Of Helsinki " + + "(The National Library Of Finland)" +) + author = "Osma Suominen" # Get version number from GitHub tag From 6b3f8d0809b2e06cf80e855366df4b6ab70818d6 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 16:04:45 +0300 Subject: [PATCH 56/83] Drop 'documentation' part from the html page title --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index bdf8bdeb2..5d6d31936 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -32,7 +32,7 @@ 
release = re.sub("^v", "", os.popen("git describe --tags").read().strip()) # The short X.Y version. version = release - +html_title = project + " " + release # -- General configuration --------------------------------------------------- From 0d7ecd9fb57477538e5b10fd82c1f33aa5bd0e2d Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 7 Aug 2023 16:27:54 +0300 Subject: [PATCH 57/83] Change author to NatLibFi, to align with other metadata --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 5d6d31936..ca5480876 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -26,7 +26,7 @@ + "(The National Library Of Finland)" ) -author = "Osma Suominen" +author = "National Library Of Finland" # Get version number from GitHub tag release = re.sub("^v", "", os.popen("git describe --tags").read().strip()) From 2709b55b0e5de517fa502c91caebb370d8ec3d94 Mon Sep 17 00:00:00 2001 From: Osma Suominen Date: Thu, 10 Aug 2023 16:33:09 +0300 Subject: [PATCH 58/83] Increase maximum startup time threshold (0.3 -> 0.4 s) --- tests/time-startup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/time-startup.sh b/tests/time-startup.sh index dabb56134..9204c4cf2 100755 --- a/tests/time-startup.sh +++ b/tests/time-startup.sh @@ -18,7 +18,7 @@ average_startup_time=$(echo "scale=3; ($startup_time1 + $startup_time2 + $startu echo "Average Startup time: $average_startup_time seconds" # Set the threshold for acceptable startup time in seconds -threshold=0.300 +threshold=0.400 # Compare the average startup time with the threshold if (( $(echo "$average_startup_time > $threshold" | bc -l) )); then From 82240e4ebac9172111d633d69324b11674368193 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:26:52 +0300 Subject: [PATCH 59/83] Pin to Flask 2.2.* --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 008b388e8..2c03b2e9f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,8 +30,8 @@ classifiers=[ [tool.poetry.dependencies] python = ">=3.8,<3.11" -connexion = {version = "2.14.*", extras = ["swagger-ui"]} -flask = ">=1.0.4,<3" +connexion = {version = "2.14.2", extras = ["swagger-ui"]} +flask = "2.2.*" flask-cors = "3.0.*" click = "8.1.*" click-log = "0.4.*" From f0ac9187b5e6857dc9cb3e31faacde32d9d5e743 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:49:59 +0300 Subject: [PATCH 60/83] Upgrade to Flask-cors 4.0.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2c03b2e9f..118fe8015 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ python = ">=3.8,<3.11" connexion = {version = "2.14.2", extras = ["swagger-ui"]} flask = "2.2.*" -flask-cors = "3.0.*" +flask-cors = "4.0.*" click = "8.1.*" click-log = "0.4.*" joblib = "1.2.*" From b7aeac42f144012dd1e90e0fd4b8f64a0096b670 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:50:57 +0300 Subject: [PATCH 61/83] Upgrade to gunicorn 21.2.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 118fe8015..d9fdc254f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,7 +41,7 @@ gensim = "4.3.*" scikit-learn = "1.2.2" scipy = "1.10.*" 
rdflib = ">=4.2,<7.0" -gunicorn = "20.1.*" +gunicorn = "21.2.*" numpy = "1.24.*" optuna = "2.10.*" python-dateutil = "2.8.*" From c13b11c04508cd06cca80a2a1edc73947f678600 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:53:33 +0300 Subject: [PATCH 62/83] Upgrade to joblib 1.3.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d9fdc254f..fa39d7457 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ flask = "2.2.*" flask-cors = "4.0.*" click = "8.1.*" click-log = "0.4.*" -joblib = "1.2.*" +joblib = "1.3.*" nltk = "3.8.*" gensim = "4.3.*" scikit-learn = "1.2.2" From 5cecc47edc00bd9bbc52f5b7e72920538056645e Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 12:02:36 +0300 Subject: [PATCH 63/83] Upgrade to scikit-learn 1.3.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fa39d7457..fd7ed7cfc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,7 +38,7 @@ click-log = "0.4.*" joblib = "1.3.*" nltk = "3.8.*" gensim = "4.3.*" -scikit-learn = "1.2.2" +scikit-learn = "1.3.*" scipy = "1.10.*" rdflib = ">=4.2,<7.0" gunicorn = "21.2.*" From 4b5d65935356fd1ce4bd2a3c18b748a1d28e9cc5 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 12:08:44 +0300 Subject: [PATCH 64/83] Upgrade to spacy 3.6.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fd7ed7cfc..fb3b4bee5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ tensorflow-cpu = {version = "2.11.*", optional = true} lmdb = {version = "1.4.1", optional = true} omikuji = {version = "0.5.*", optional = true} yake = {version = "0.4.5", optional = true} -spacy = {version = "3.5.*", optional = true} +spacy = {version = "3.6.*", optional = true} stwfsapy = {version="0.3.*", optional = true} [tool.poetry.dev-dependencies] From 0f76e466ecaca606bfc22837249e0cf7456989e2 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 12:12:03 +0300 Subject: [PATCH 65/83] Upgrade to tensorflow-cpu 2.13.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fb3b4bee5..5f3085631 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ jsonschema = "4.17.*" fasttext-wheel = {version = "0.9.2", optional = true} voikko = {version = "0.5.*", optional = true} -tensorflow-cpu = {version = "2.11.*", optional = true} +tensorflow-cpu = {version = "2.13.*", optional = true} lmdb = {version = "1.4.1", optional = true} omikuji = {version = "0.5.*", optional = true} yake = {version = "0.4.5", optional = true} From 39e783a89eb46f15eeebca60b5f6f02940d3edd7 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Mon, 14 Aug 2023 14:39:40 +0300 Subject: [PATCH 66/83] Pin to rdflib 6.3.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5f3085631..f9ac5b8b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ nltk = "3.8.*" gensim = "4.3.*" scikit-learn = "1.3.*" scipy = "1.10.*" -rdflib = ">=4.2,<7.0" +rdflib = "6.3.*" gunicorn = "21.2.*" numpy = "1.24.*" optuna = "2.10.*" From 
8d03a9173c5426b2577a7f3272ed943a9781594c Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 15 Aug 2023 09:44:57 +0300 Subject: [PATCH 67/83] Upgrade to optuna 3.3.* --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f9ac5b8b6..76f2bdf69 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,7 +43,7 @@ scipy = "1.10.*" rdflib = "6.3.*" gunicorn = "21.2.*" numpy = "1.24.*" -optuna = "2.10.*" +optuna = "3.3.*" python-dateutil = "2.8.*" tomli = "2.0.*" simplemma = "0.9.*" From cc0bcd82e575430055d85b9ed72d1dccbc72135c Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 15 Aug 2023 09:47:58 +0300 Subject: [PATCH 68/83] Switch to use optuna method trial.suggest_float() Old method trial.suggest_uniform() is deprecated --- annif/backend/ensemble.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/annif/backend/ensemble.py b/annif/backend/ensemble.py index 6f7f2eb04..9953c9e6c 100644 --- a/annif/backend/ensemble.py +++ b/annif/backend/ensemble.py @@ -123,7 +123,7 @@ def _format_cfg_line(self, hps: dict[str, float]) -> str: def _objective(self, trial: Trial) -> float: eval_batch = annif.eval.EvaluationBatch(self._backend.project.subjects) proj_weights = { - project_id: trial.suggest_uniform(project_id, 0.0, 1.0) + project_id: trial.suggest_float(project_id, 0.0, 1.0) for project_id in self._sources } for gold_batch, src_batches in zip(self._gold_batches, self._source_batches): From dd7166a33ef3744bc875987b3cbce2b97bf1f348 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 15 Aug 2023 15:14:33 +0300 Subject: [PATCH 69/83] Install all optional dependencies except Omikuji on Python 3.11 --- .github/workflows/cicd.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 57ada68c7..530ee0d88 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -98,7 +98,9 @@ jobs: fi # For Python 3.11: if [[ ${{ matrix.python-version }} == '3.11' ]]; then - poetry install --all-extras; + poetry install -E "nn fasttext yake stwfsa voikko spacy"; + # download the small English pretrained spaCy model needed by spacy analyzer + poetry run python -m spacy download en_core_web_sm --upgrade-strategy only-if-needed fi poetry run python -m nltk.downloader punkt - name: Test with pytest From f928844bb53fdd7b25a6a6431d3cc033f3d1f282 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 16 Aug 2023 09:20:56 +0300 Subject: [PATCH 70/83] Make CI/CD job on Python 3.11 not-experimental --- .github/workflows/cicd.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml index 530ee0d88..188a1c8cd 100644 --- a/.github/workflows/cicd.yml +++ b/.github/workflows/cicd.yml @@ -57,14 +57,9 @@ jobs: test: runs-on: ubuntu-22.04 timeout-minutes: 15 - continue-on-error: ${{ matrix.experimental }} strategy: matrix: - python-version: ["3.8", "3.9", "3.10"] - experimental: [false] - include: - - python-version: "3.11" - experimental: true + python-version: ["3.8", "3.9", "3.10", "3.11"] name: test on Python ${{ matrix.python-version }} steps: - uses: actions/checkout@v3 From e037b78d351f650b3b224159c9e58b22b7c2e8c1 Mon Sep 17 00:00:00 2001 From: Osma Suominen Date: Wed, 16 Aug 2023 10:00:14 
+0300 Subject: [PATCH 71/83] fix scikit-learn FutureWarning about LinearSVC dual parameter --- annif/backend/svc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/annif/backend/svc.py b/annif/backend/svc.py index e2f6c33a8..30fb23eb4 100644 --- a/annif/backend/svc.py +++ b/annif/backend/svc.py @@ -67,7 +67,7 @@ def _corpus_to_texts_and_classes( def _train_classifier(self, veccorpus: csr_matrix, classes: list[int]) -> None: self.info("creating classifier") - self._model = LinearSVC() + self._model = LinearSVC(dual="auto") self._model.fit(veccorpus, classes) annif.util.atomic_save( self._model, self.datadir, self.MODEL_FILE, method=joblib.dump From f6434d72305b1e87de6a7925f532557ced3b3c26 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 16 Aug 2023 10:15:35 +0300 Subject: [PATCH 72/83] Register "slow" test marker to avoid PytestUnknownMarkWarning (#728) --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 008b388e8..9b8c37414 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,4 +93,7 @@ line_length = "88" skip_gitignore = true [tool.pytest.ini_options] +markers = [ + "slow: marks tests as slow (deselect with '-m \"not slow\"')", +] addopts = "-m 'not slow'" From 183dc782a2ce3574a54f364d132405530b611603 Mon Sep 17 00:00:00 2001 From: Osma Suominen Date: Wed, 16 Aug 2023 10:16:48 +0300 Subject: [PATCH 73/83] fix scikit-learn UserWarning for vectorizer parameter token_pattern --- annif/backend/mixins.py | 3 +++ annif/lexical/mllm.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/annif/backend/mixins.py b/annif/backend/mixins.py index 066d5d862..348a58c1c 100644 --- a/annif/backend/mixins.py +++ b/annif/backend/mixins.py @@ -75,6 +75,9 @@ def create_vectorizer( self, input: Iterable[str], params: dict[str, Any] = {} ) -> csr_matrix: self.info("creating vectorizer") + # avoid UserWarning when overriding tokenizer + if "tokenizer" in params: + params["token_pattern"] = None self.vectorizer = TfidfVectorizer(**params) veccorpus = self.vectorizer.fit_transform(input) annif.util.atomic_save( diff --git a/annif/lexical/mllm.py b/annif/lexical/mllm.py index 37564a76d..ff8bc5894 100644 --- a/annif/lexical/mllm.py +++ b/annif/lexical/mllm.py @@ -223,7 +223,7 @@ def _prepare_train_index( self._prepare_relations(graph, vocab) self._vectorizer = CountVectorizer( - binary=True, tokenizer=analyzer.tokenize_words + binary=True, tokenizer=analyzer.tokenize_words, token_pattern=None ) label_corpus = self._vectorizer.fit_transform((t.label for t in terms)) From a84e46629a7ebfc6a0d5de14e7c50e8c8d0c2af2 Mon Sep 17 00:00:00 2001 From: Osma Suominen Date: Wed, 16 Aug 2023 10:23:52 +0300 Subject: [PATCH 74/83] fix code smell --- annif/backend/mixins.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/annif/backend/mixins.py b/annif/backend/mixins.py index 348a58c1c..2bb094641 100644 --- a/annif/backend/mixins.py +++ b/annif/backend/mixins.py @@ -72,9 +72,11 @@ def initialize_vectorizer(self) -> None: ) def create_vectorizer( - self, input: Iterable[str], params: dict[str, Any] = {} + self, input: Iterable[str], params: dict[str, Any] = None ) -> csr_matrix: self.info("creating vectorizer") + if params is None: + params = {} # avoid UserWarning when overriding tokenizer if "tokenizer" in params: params["token_pattern"] = None From 147d97a552bccdb71ab8aa2c0e921970c7c71ea8 Mon Sep 17 00:00:00 2001 From: Osma Suominen Date: Wed, 16 Aug 2023 11:46:49 
+0300 Subject: [PATCH 75/83] modernize Keras imports --- annif/backend/nn_ensemble.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/annif/backend/nn_ensemble.py b/annif/backend/nn_ensemble.py index 169eb8234..7a20beeb8 100644 --- a/annif/backend/nn_ensemble.py +++ b/annif/backend/nn_ensemble.py @@ -8,13 +8,14 @@ from typing import TYPE_CHECKING, Any import joblib +import keras.backend as K import lmdb import numpy as np -import tensorflow.keras.backend as K +from keras.layers import Add, Dense, Dropout, Flatten, Input, Layer +from keras.models import Model +from keras.saving import load_model +from keras.utils import Sequence from scipy.sparse import csc_matrix, csr_matrix -from tensorflow.keras.layers import Add, Dense, Dropout, Flatten, Input, Layer -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.utils import Sequence import annif.corpus import annif.parallel From 1c8bc48b90d88dfaa5f74908ddefae9c4c16615b Mon Sep 17 00:00:00 2001 From: Osma Suominen Date: Wed, 16 Aug 2023 11:15:05 +0300 Subject: [PATCH 76/83] Switch to Keras v3 format for nn_ensemble --- annif/backend/nn_ensemble.py | 2 +- tests/test_backend_nn_ensemble.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/annif/backend/nn_ensemble.py b/annif/backend/nn_ensemble.py index 7a20beeb8..d4bc09058 100644 --- a/annif/backend/nn_ensemble.py +++ b/annif/backend/nn_ensemble.py @@ -98,7 +98,7 @@ class NNEnsembleBackend(backend.AnnifLearningBackend, ensemble.BaseEnsembleBacke name = "nn_ensemble" - MODEL_FILE = "nn-model.h5" + MODEL_FILE = "nn-model.keras" LMDB_FILE = "nn-train.mdb" DEFAULT_PARAMETERS = { diff --git a/tests/test_backend_nn_ensemble.py b/tests/test_backend_nn_ensemble.py index 1941e8665..b6e9c1ea7 100644 --- a/tests/test_backend_nn_ensemble.py +++ b/tests/test_backend_nn_ensemble.py @@ -105,11 +105,11 @@ def test_nn_ensemble_train_and_learn(registry, tmpdir): assert nn_ensemble._model.optimizer.learning_rate.value() == 0.001 datadir = py.path.local(project.datadir) - assert datadir.join("nn-model.h5").exists() - assert datadir.join("nn-model.h5").size() > 0 + assert datadir.join("nn-model.keras").exists() + assert datadir.join("nn-model.keras").size() > 0 # test online learning - modelfile = datadir.join("nn-model.h5") + modelfile = datadir.join("nn-model.keras") old_size = modelfile.size() old_mtime = modelfile.mtime() @@ -129,7 +129,7 @@ def test_nn_ensemble_train_cached(registry): datadir = py.path.local(project.datadir) assert datadir.join("nn-train.mdb").exists() - datadir.join("nn-model.h5").remove() + datadir.join("nn-model.keras").remove() nn_ensemble_type = annif.backend.get_backend("nn_ensemble") nn_ensemble = nn_ensemble_type( @@ -140,8 +140,8 @@ def test_nn_ensemble_train_cached(registry): nn_ensemble.train("cached") - assert datadir.join("nn-model.h5").exists() - assert datadir.join("nn-model.h5").size() > 0 + assert datadir.join("nn-model.keras").exists() + assert datadir.join("nn-model.keras").size() > 0 def test_nn_ensemble_train_and_learn_params(registry, tmpdir, capfd): From a5463073746343db84079cf90269894a62b44448 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Wed, 16 Aug 2023 13:25:49 +0300 Subject: [PATCH 77/83] Upgrade Docker baseimage to Debian Bookworm (#731) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index dbd98cb07..52198a69f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM 
python:3.10-slim-bullseye +FROM python:3.10-slim-bookworm LABEL org.opencontainers.image.authors="grp-natlibfi-annif@helsinki.fi" SHELL ["/bin/bash", "-c"] From ce63051bae6629ca37b5a96a0095401db15a6402 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 18 Aug 2023 12:37:33 +0300 Subject: [PATCH 78/83] Update release-date --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index 71a2bd3fd..7b00d3db8 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -37,7 +37,7 @@ version: 1.0.0-dev license: - Apache-2.0 - GPL-3.0 -date-released: 2023-04-18 +date-released: 2023-08-18 doi: 10.5281/zenodo.2578948 repository-code: "https://github.com/NatLibFi/Annif" contact: From 915d5db805163d9b8dffe01bee48bd835bb87f79 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 18 Aug 2023 12:38:44 +0300 Subject: [PATCH 79/83] =?UTF-8?q?Bump=20version:=201.0.0-dev=20=E2=86=92?= =?UTF-8?q?=201.0.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CITATION.cff | 2 +- pyproject.toml | 2 +- setup.cfg | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index 7b00d3db8..da6508d0d 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -33,7 +33,7 @@ authors: affiliation: "National Library of Finland" title: "Annif" abstract: "Annif is an automatic indexing software." -version: 1.0.0-dev +version: 1.0.0 license: - Apache-2.0 - GPL-3.0 diff --git a/pyproject.toml b/pyproject.toml index f712f7986..121c47eaf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "annif" -version = "1.0.0-dev" +version = "1.0.0" description = "Automated subject indexing and classification tool" authors = ["National Library of Finland "] maintainers = [ diff --git a/setup.cfg b/setup.cfg index bf3f116d6..d440b931c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,9 +1,9 @@ [bumpversion] -current_version = 1.0.0-dev +current_version = 1.0.0 commit = True tag = True parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+))? -serialize = +serialize = {major}.{minor}.{patch}-{release} {major}.{minor}.{patch} @@ -13,7 +13,7 @@ serialize = [bumpversion:part:release] optional_value = prod -values = +values = dev prod @@ -25,5 +25,5 @@ max-line-length = 88 ignore = E203 W503 [coverage:report] -exclude_also = - if TYPE_CHECKING: +exclude_also = + if TYPE_CHECKING: From fd8561ddf938baa71f892d5732791e55abbc307f Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Fri, 18 Aug 2023 13:02:32 +0300 Subject: [PATCH 80/83] =?UTF-8?q?Bump=20version:=201.0.0=20=E2=86=92=201.1?= =?UTF-8?q?.0-dev?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CITATION.cff | 2 +- pyproject.toml | 2 +- setup.cfg | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index da6508d0d..4fa99d211 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -33,7 +33,7 @@ authors: affiliation: "National Library of Finland" title: "Annif" abstract: "Annif is an automatic indexing software." 
-version: 1.0.0 +version: 1.1.0-dev license: - Apache-2.0 - GPL-3.0 diff --git a/pyproject.toml b/pyproject.toml index 121c47eaf..059af591a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "annif" -version = "1.0.0" +version = "1.1.0-dev" description = "Automated subject indexing and classification tool" authors = ["National Library of Finland "] maintainers = [ diff --git a/setup.cfg b/setup.cfg index d440b931c..edb7c0ada 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.0.0 +current_version = 1.1.0-dev commit = True tag = True parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+))? From 3ea90300a7849c83f540304cd780a187b702b635 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 22 Aug 2023 13:10:11 +0300 Subject: [PATCH 81/83] Rename function to follow convention --- annif/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/annif/cli.py b/annif/cli.py index ebed088ba..73f18f02e 100644 --- a/annif/cli.py +++ b/annif/cli.py @@ -587,7 +587,7 @@ def run_hyperopt(project_id, paths, docs_limit, trials, jobs, metric, results_fi @click.option("--bash", "shell", flag_value="bash") @click.option("--zsh", "shell", flag_value="zsh") @click.option("--fish", "shell", flag_value="fish") -def completion(shell): +def run_completion(shell): """Generate the script for tab-key autocompletion for the given shell. To enable the completion support in your current bash terminal session run\n source <(annif completion --bash) From 4bcbf288523cd9e4cc00a588834265c70b7076a2 Mon Sep 17 00:00:00 2001 From: Juho Inkinen <34240031+juhoinkinen@users.noreply.github.com> Date: Tue, 22 Aug 2023 13:11:39 +0300 Subject: [PATCH 82/83] Add completion command to commands list in RTD --- docs/source/commands.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/source/commands.rst b/docs/source/commands.rst index 65cca5e9d..849f6aadf 100644 --- a/docs/source/commands.rst +++ b/docs/source/commands.rst @@ -127,3 +127,14 @@ Subject index administration **REST equivalent** N/A + +***** +Other +***** + +.. click:: annif.cli:run_completion + :prog: annif completion + +**REST equivalent** + + N/A From a5182d1203f730b1118dd2cb83ac9091a3b1a32a Mon Sep 17 00:00:00 2001 From: Osma Suominen Date: Fri, 22 Sep 2023 15:55:46 +0300 Subject: [PATCH 83/83] optimization: load a vocabulary only once even if used in different languages --- annif/registry.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/annif/registry.py b/annif/registry.py index 81bd541ef..a97dc366e 100644 --- a/annif/registry.py +++ b/annif/registry.py @@ -99,14 +99,11 @@ def get_vocab( vocab_id = match.group(1) posargs, kwargs = parse_args(match.group(3)) language = posargs[0] if posargs else default_language - vocab_key = (vocab_id, language) self._init_vars() - if vocab_key not in self._vocabs[self._rid]: - self._vocabs[self._rid][vocab_key] = AnnifVocabulary( - vocab_id, self._datadir - ) - return self._vocabs[self._rid][vocab_key], language + if vocab_id not in self._vocabs[self._rid]: + self._vocabs[self._rid][vocab_id] = AnnifVocabulary(vocab_id, self._datadir) + return self._vocabs[self._rid][vocab_id], language def initialize_projects(app: Flask) -> None:
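
For reference, a minimal sketch of the Optuna API change applied in PATCH 68/83: trial.suggest_float() is the optuna 3.x replacement for the deprecated trial.suggest_uniform(), taking the same (name, low, high) arguments. The snippet below is illustrative only and not part of the patch series; it assumes optuna 3.3.* as pinned in PATCH 67/83 and uses a toy objective in place of Annif's real evaluation batches.

import optuna

def objective(trial):
    # suggest_float() is the drop-in replacement for the deprecated suggest_uniform()
    weight = trial.suggest_float("weight", 0.0, 1.0)
    # toy objective: pretend the optimal weight is 0.7
    return (weight - 0.7) ** 2

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=20)
print(study.best_params)

In the actual ensemble hyperparameter optimization (annif/backend/ensemble.py), one such value is suggested per source project, keyed by project_id, and the objective is an evaluation metric computed over the gold-standard batches; the suggest_float() call itself is used exactly as shown above.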