diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml
index 50c79d6f..4aaa2324 100644
--- a/.github/workflows/downstream.yml
+++ b/.github/workflows/downstream.yml
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
         with:
@@ -24,7 +24,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
         with:
@@ -35,7 +35,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
         with:
@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
         with:
@@ -56,7 +56,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
         with:
@@ -67,7 +67,7 @@ jobs:
     timeout-minutes: 10
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Base Setup
         uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
@@ -94,7 +94,7 @@ jobs:
     timeout-minutes: 20
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Setup Python
         uses: actions/setup-python@v4
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index d23d622c..9fe57ae8 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -11,15 +11,13 @@ concurrency:

 defaults:
   run:
-    shell:
-      bash -eux {0}
-
+    shell: bash -eux {0}

 jobs:
   check_release:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyter-server/jupyter_releaser/.github/actions/check-release@v2
         with:
@@ -28,7 +26,7 @@ jobs:
   check_links:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyterlab/maintainer-tools/.github/actions/check-links@v1
@@ -40,18 +38,18 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest, macos-latest]
-        python-version: ["3.8", "3.11"]
+        python-version: ["3.8", "3.12"]
        include:
          - os: windows-latest
-            python-version: "3.9"
+            python-version: "3.11"
          - os: ubuntu-latest
-            python-version: "pypy-3.8"
+            python-version: "pypy-3.9"
          - os: ubuntu-latest
            python-version: "3.10"
          - os: macos-latest
-            python-version: "3.8"
+            python-version: "3.9"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - name: Run the tests
         if: ${{ !startsWith(matrix.python-version, 'pypy') && !startsWith(matrix.os, 'windows') }}
@@ -72,7 +70,7 @@ jobs:
     needs:
       - test
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/report-coverage@v1
        with:
          fail_under: 78
@@ -80,7 +78,7 @@ jobs:
   docs:
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Build API docs
        run: |
@@ -97,12 +95,12 @@ jobs:
     name: Test Lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
-      - name: Run Linters
-        run: |
+      - name: Run Linters
+        run: |
          hatch run typing:test
-          hatch run lint:style
+          hatch run lint:build
          pipx run interrogate -v .
          pipx run doc8 --max-line-length=200 --ignore-path=docs/source/other/full-config.rst
@@ -111,7 +109,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 10
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
         with:
           dependency_type: minimum
@@ -124,7 +122,7 @@ jobs:
     timeout-minutes: 10
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
         with:
           dependency_type: pre
@@ -137,7 +135,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 10
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
       - uses: jupyterlab/maintainer-tools/.github/actions/make-sdist@v1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 77e547ad..9d3580ed 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,9 +1,10 @@
 ci:
   autoupdate_schedule: monthly
+  autoupdate_commit_msg: "chore: update pre-commit hooks"

 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.5.0
     hooks:
       - id: check-case-conflict
       - id: check-ast
@@ -20,22 +21,61 @@ repos:
       - id: trailing-whitespace

   - repo: https://github.com/python-jsonschema/check-jsonschema
-    rev: 0.23.3
+    rev: 0.27.1
     hooks:
       - id: check-github-workflows

   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.16
+    rev: 0.7.17
     hooks:
       - id: mdformat

-  - repo: https://github.com/psf/black
-    rev: 23.7.0
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: "v3.0.3"
     hooks:
-      - id: black
+      - id: prettier
+        types_or: [yaml, html, json]
+
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: "v1.6.1"
+    hooks:
+      - id: mypy
+        files: jupyter_client
+        stages: [manual]
+        args: ["--install-types", "--non-interactive"]
+        additional_dependencies:
+          ["traitlets>=5.13", "ipykernel>=6.26", "jupyter_core>=5.3.2"]
+
+  - repo: https://github.com/adamchainz/blacken-docs
+    rev: "1.16.0"
+    hooks:
+      - id: blacken-docs
+        additional_dependencies: [black==23.7.0]
+
+  - repo: https://github.com/codespell-project/codespell
+    rev: "v2.2.6"
+    hooks:
+      - id: codespell
+        args: ["-L", "sur,nd"]
+
+  - repo: https://github.com/pre-commit/pygrep-hooks
+    rev: "v1.10.0"
+    hooks:
+      - id: rst-backticks
+      - id: rst-directive-colons
+      - id: rst-inline-touching-normal

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.0.281
+    rev: v0.1.5
     hooks:
       - id: ruff
-        args: ["--fix"]
+        types_or: [python, jupyter]
+        args: ["--fix", "--show-fixes"]
+      - id: ruff-format
+        types_or: [python, jupyter]
+
+  - repo: https://github.com/scientific-python/cookie
+    rev: "2023.10.27"
+    hooks:
+      - id: sp-repo-review
+        additional_dependencies: ["repo-review[cli]"]
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 80c6aa44..284ae364 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -9,4 +9,4 @@ python:
 build:
   os: ubuntu-22.04
   tools:
-    python: "3.11"
+    python: "3.11"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 678bd9c0..58920aad 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,68 @@

 <!-- <START NEW CHANGELOG ENTRY> -->

+## 8.6.0
+
+([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.5.0...2d6f52bdf5266688c51f3270cd7e47bbd17c708c))
+
+### Bugs fixed
+
+- Fix possibly not defined tracker [#991](https://github.com/jupyter/jupyter_client/pull/991) ([@davidbrochart](https://github.com/davidbrochart))
+- BUG: Fix Kwarg only in update_env [#989](https://github.com/jupyter/jupyter_client/pull/989) ([@Carreau](https://github.com/Carreau))
+
+### Maintenance and upkeep improvements
+
+- Update typing for traitlets 5.13 [#995](https://github.com/jupyter/jupyter_client/pull/995) ([@blink1073](https://github.com/blink1073))
+- Use ruff format [#992](https://github.com/jupyter/jupyter_client/pull/992) ([@blink1073](https://github.com/blink1073))
+- Unused `*args` in `KernelManager`'s `__init__` [#986](https://github.com/jupyter/jupyter_client/pull/986) ([@Carreau](https://github.com/Carreau))
+
+### Contributors to this release
+
+([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-10-25&to=2023-11-06&type=c))
+
+[@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-10-25..2023-11-06&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ACarreau+updated%3A2023-10-25..2023-11-06&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2023-10-25..2023-11-06&type=Issues)
+
+<!-- <END NEW CHANGELOG ENTRY> -->
+
+## 8.5.0
+
+([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.4.0...ff94e310c2af3546f2f9384e6b35fd11c3d09a71))
+
+### Enhancements made
+
+- Allow to update kernels env in between restart. [#987](https://github.com/jupyter/jupyter_client/pull/987) ([@Carreau](https://github.com/Carreau))
+
+### Maintenance and upkeep improvements
+
+- Enable strict typing [#984](https://github.com/jupyter/jupyter_client/pull/984) ([@blink1073](https://github.com/blink1073))
+- Update typings for mypy 1.6 [#983](https://github.com/jupyter/jupyter_client/pull/983) ([@blink1073](https://github.com/blink1073))
+
+### Contributors to this release
+
+([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-10-11&to=2023-10-25&type=c))
+
+[@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-10-11..2023-10-25&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ACarreau+updated%3A2023-10-11..2023-10-25&type=Issues)
+
+## 8.4.0
+
+([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.3.1...74044393230e70134f68e664f2ef19fab92b7774))
+
+### Maintenance and upkeep improvements
+
+- Test on python 3.12 [#978](https://github.com/jupyter/jupyter_client/pull/978) ([@blink1073](https://github.com/blink1073))
+- Update typing for traitlets 5.11 [#977](https://github.com/jupyter/jupyter_client/pull/977) ([@blink1073](https://github.com/blink1073))
+- chore: update pre-commit hooks [#975](https://github.com/jupyter/jupyter_client/pull/975) ([@pre-commit-ci](https://github.com/pre-commit-ci))
+- Update typings for traitlets 5.10.1 [#974](https://github.com/jupyter/jupyter_client/pull/974) ([@blink1073](https://github.com/blink1073))
+- Do not use datetime.utcnow() that is deprecated in Python 3.12 [#972](https://github.com/jupyter/jupyter_client/pull/972) ([@ellert](https://github.com/ellert))
+- Use sp-repo-review [#971](https://github.com/jupyter/jupyter_client/pull/971) ([@blink1073](https://github.com/blink1073))
+- Bump actions/checkout from 3 to 4 [#968](https://github.com/jupyter/jupyter_client/pull/968) ([@dependabot](https://github.com/dependabot))
+
+### Contributors to this release
+
+([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-08-29&to=2023-10-11&type=c))
+
+[@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-08-29..2023-10-11&type=Issues) | [@dependabot](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adependabot+updated%3A2023-08-29..2023-10-11&type=Issues) | [@ellert](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aellert+updated%3A2023-08-29..2023-10-11&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-08-29..2023-10-11&type=Issues)
+
 ## 8.3.1

 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.3.0...b4f7d947fae55a4fe59a27df0830a9a78dcd4e12))
@@ -22,15 +84,13 @@

 [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-06-23..2023-08-29&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2023-06-23..2023-08-29&type=Issues) | [@jkitchin](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ajkitchin+updated%3A2023-06-23..2023-08-29&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2023-06-23..2023-08-29&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-06-23..2023-08-29&type=Issues) | [@tmaxwell-anthropic](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Atmaxwell-anthropic+updated%3A2023-06-23..2023-08-29&type=Issues)

-<!-- <END NEW CHANGELOG ENTRY> -->
-
 ## 8.3.0

 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.2.0...bddb8854a4aa3324e128e0497539e17246fbf630))

 ### Enhancements made

-- Allow kwargs when writting connection_file [#953](https://github.com/jupyter/jupyter_client/pull/953) ([@fecet](https://github.com/fecet))
+- Allow kwargs when writing connection_file [#953](https://github.com/jupyter/jupyter_client/pull/953) ([@fecet](https://github.com/fecet))

 ### Maintenance and upkeep improvements
@@ -574,7 +634,7 @@ No merged PRs

 ### Enhancements made

-- Further improvements to pending kernels managment [#732](https://github.com/jupyter/jupyter_client/pull/732) ([@Zsailer](https://github.com/Zsailer))
+- Further improvements to pending kernels management [#732](https://github.com/jupyter/jupyter_client/pull/732) ([@Zsailer](https://github.com/Zsailer))

 ### Maintenance and upkeep improvements
diff --git a/docs/conf.py b/docs/conf.py
index 2b6d823e..198a7921 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -11,10 +11,13 @@
 #
 # All configuration values have a default; values that are commented out
 # serve to show the default.
+import logging as pylogging
 import os
 import os.path as osp
 import shutil
+from sphinx.util import logging  # type:ignore[import-not-found]
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -29,16 +32,26 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'myst_parser',
-    'sphinx.ext.autodoc',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.napoleon',
-    'sphinxcontrib_github_alt',
+    "myst_parser",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.napoleon",
+    "sphinxcontrib_github_alt",
     "sphinx_autodoc_typehints",
 ]
+
+
+# Workaround for https://github.com/agronholm/sphinx-autodoc-typehints/issues/123
+class FilterForIssue123(pylogging.Filter):
+    def filter(self, record: pylogging.LogRecord) -> bool:
+        return not record.getMessage().startswith("Cannot handle as a local function")
+
+
+logging.getLogger("sphinx_autodoc_typehints").logger.addFilter(FilterForIssue123())
+# End of a workaround
+
 try:
-    import enchant  # type:ignore  # noqa
+    import enchant  # type:ignore[import-not-found]  # noqa

     extensions += ["sphinxcontrib.spelling"]
 except ImportError:
@@ -47,23 +60,23 @@
 myst_enable_extensions = ["html_image"]

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"

 # The encoding of source files.
 # source_encoding = 'utf-8-sig'

 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"

 # General information about the project.
-project = 'jupyter_client'
-copyright = '2015, Jupyter Development Team'
-author = 'Jupyter Development Team'
+project = "jupyter_client"
+copyright = "2015, Jupyter Development Team"
+author = "Jupyter Development Team"

 github_project_url = "https://github.com/jupyter/jupyter_client"
@@ -73,14 +86,14 @@
 #
 version_ns: dict = {}
 here = os.path.dirname(__file__)
-version_py = os.path.join(here, os.pardir, 'jupyter_client', '_version.py')
+version_py = os.path.join(here, os.pardir, "jupyter_client", "_version.py")
 with open(version_py) as f:
-    exec(compile(f.read(), version_py, 'exec'), version_ns)  # noqa
+    exec(compile(f.read(), version_py, "exec"), version_ns)  # noqa

 # The short X.Y version.
-version = '%i.%i' % version_ns['version_info'][:2]
+version = "%i.%i" % version_ns["version_info"][:2]
 # The full version, including alpha/beta/rc tags.
-release = version_ns['__version__']
+release = version_ns["__version__"]

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -97,7 +110,7 @@
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]

 # The reST default role (used for this markup: `text`) to use for all
 # documents.
@@ -115,7 +128,7 @@
 # show_authors = False

 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"

 # A list of ignored prefixes for module index sorting.
 # modindex_common_prefix = []
@@ -131,12 +144,12 @@
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'pydata_sphinx_theme'
+html_theme = "pydata_sphinx_theme"

 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-# html_theme_options = {}
+html_theme_options = {"navigation_with_keys": False}

 # Add any paths that contain custom themes here, relative to this directory.
 # html_theme_path = []
@@ -223,7 +236,7 @@
 # html_search_scorer = 'scorer.js'

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'jupyter_clientdoc'
+htmlhelp_basename = "jupyter_clientdoc"

 # -- Options for LaTeX output ---------------------------------------------
@@ -244,10 +257,10 @@
 latex_documents = [
     (
         master_doc,
-        'jupyter_client.tex',
-        'jupyter\\_client Documentation',
-        'Jupyter Development Team',
-        'manual',
+        "jupyter_client.tex",
+        "jupyter\\_client Documentation",
+        "Jupyter Development Team",
+        "manual",
     ),
 ]
@@ -276,7 +289,7 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, 'jupyter_client', 'jupyter_client Documentation', [author], 1)]
+man_pages = [(master_doc, "jupyter_client", "jupyter_client Documentation", [author], 1)]

 # If true, show URL addresses after external links.
 # man_show_urls = False
@@ -290,12 +303,12 @@
 texinfo_documents = [
     (
         master_doc,
-        'jupyter_client',
-        'jupyter_client Documentation',
+        "jupyter_client",
+        "jupyter_client Documentation",
         author,
-        'jupyter_client',
-        'One line description of project.',
-        'Miscellaneous',
+        "jupyter_client",
+        "One line description of project.",
+        "Miscellaneous",
     ),
 ]
@@ -313,10 +326,10 @@
 # Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'ipython': ('http://ipython.readthedocs.io/en/stable/', None)}
+intersphinx_mapping = {"ipython": ("http://ipython.readthedocs.io/en/stable/", None)}


-def setup(app):
+def setup(app: object) -> None:
     HERE = osp.abspath(osp.dirname(__file__))
-    dest = osp.join(HERE, 'changelog.md')
-    shutil.copy(osp.join(HERE, '..', 'CHANGELOG.md'), dest)
+    dest = osp.join(HERE, "changelog.md")
+    shutil.copy(osp.join(HERE, "..", "CHANGELOG.md"), dest)
diff --git a/docs/kernels.rst b/docs/kernels.rst
index c8b7060e..60b1edd0 100644
--- a/docs/kernels.rst
+++ b/docs/kernels.rst
@@ -143,14 +143,14 @@ JSON serialised dictionary containing the following keys and values:
   characters.
 - **language**: The name of the language of the kernel. When loading notebooks,
   if no matching kernelspec key (may differ across machines)
-  is found, a kernel with a matching `language` will be used.
+  is found, a kernel with a matching ``language`` will be used.
   This allows a notebook written on any Python or Julia kernel to be
   properly associated with the user's Python or Julia kernel, even if they
   aren't listed under the same name as the author's.
 - **interrupt_mode** (optional): May be either ``signal`` or ``message`` and
   specifies how a client is supposed to interrupt cell execution on this kernel,
   either by sending an interrupt ``signal`` via the operating system's
-  signalling facilities (e.g. `SIGINT` on POSIX systems), or by sending an
+  signalling facilities (e.g. ``SIGINT`` on POSIX systems), or by sending an
   ``interrupt_request`` message on the control channel (see
   :ref:`msging_interrupt`). If this is not specified
   the client will default to ``signal`` mode.
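As a companion to the ``docs/kernels.rst`` hunk above — and not part of the patch itself — here is a minimal sketch of a ``kernel.json`` using the keys it discusses. The ``argv`` and ``display_name`` entries are assumed from the wider kernelspec format rather than shown in this diff, and every value is a placeholder.

.. code:: python

    import json

    # Hypothetical kernelspec illustrating the keys described in kernels.rst.
    kernel_spec = {
        "argv": ["python3", "-m", "ipykernel_launcher", "-f", "{connection_file}"],
        "display_name": "Python 3 (example)",
        "language": "python",
        # "signal" (the default) interrupts via e.g. SIGINT; "message" sends an
        # interrupt_request on the control channel instead.
        "interrupt_mode": "signal",
    }

    print(json.dumps(kernel_spec, indent=2))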
diff --git a/docs/messaging.rst b/docs/messaging.rst
index b0b9c5d7..12ae051f 100644
--- a/docs/messaging.rst
+++ b/docs/messaging.rst
@@ -96,7 +96,7 @@ A message is composed of five dictionaries.
 Message Header
 --------------

-The message `header` contains information about the message,
+The message ``header`` contains information about the message,
 such as unique identifiers for the originating session and the actual message id,
 the type of message, the version of the Jupyter protocol,
 and the date the message was created.
@@ -109,15 +109,15 @@ so that frontends can label the various messages in a meaningful way.
 .. sourcecode:: python

     {
-        'msg_id' : str, # typically UUID, must be unique per message
-        'session' : str, # typically UUID, should be unique per session
-        'username' : str,
+        "msg_id": str,  # typically UUID, must be unique per message
+        "session": str,  # typically UUID, should be unique per session
+        "username": str,
         # ISO 8601 timestamp for when the message is created
-        'date': str,
+        "date": str,
         # All recognized message type strings are listed below.
-        'msg_type' : str,
+        "msg_type": str,
         # the message protocol version
-        'version' : '5.0',
+        "version": "5.0",
     }

 .. note::
@@ -174,7 +174,7 @@ such as outputs to a cell.
 Metadata
 --------

-The `metadata` dict contains information about the message that is not part of the content.
+The ``metadata`` dict contains information about the message that is not part of the content.
 This is not often used, but can be an extra location to store information about requests and
 replies, such as extensions adding information about request or execution context.
@@ -271,15 +271,15 @@ Every message is serialized to a sequence of at least six blobs of bytes:
 .. sourcecode:: python

     [
-        b'u-u-i-d',         # zmq identity(ies)
-        b'<IDS|MSG>',       # delimiter
-        b'baddad42',        # HMAC signature
-        b'{header}',        # serialized header dict
-        b'{parent_header}', # serialized parent header dict
-        b'{metadata}',      # serialized metadata dict
-        b'{content}',       # serialized content dict
-        b'\xf0\x9f\x90\xb1' # extra raw data buffer(s)
-        ...
+        b"u-u-i-d",  # zmq identity(ies)
+        b"<IDS|MSG>",  # delimiter
+        b"baddad42",  # HMAC signature
+        b"{header}",  # serialized header dict
+        b"{parent_header}",  # serialized parent header dict
+        b"{metadata}",  # serialized metadata dict
+        b"{content}",  # serialized content dict
+        b"\xf0\x9f\x90\xb1"  # extra raw data buffer(s)
+        # ...
     ]

 The front of the message is the ZeroMQ routing prefix,
@@ -304,7 +304,7 @@ By default, the hashing function used for computing these signatures is sha256.
 .. note::

     To disable authentication and signature checking,
-    set the `key` field of a connection file to an empty string.
+    set the ``key`` field of a connection file to an empty string.

 The signature is the HMAC hex digest of the concatenation of:
@@ -403,7 +403,7 @@ All reply messages have a ``'status'`` field, which will have one of the following values:
 - ``status='abort'``: This is the same as ``status='error'``
   but with no information about the error.
-  No fields should be present other that `status`.
+  No fields should be present other than ``status``.

 As a special case, ``execute_reply`` messages (see :ref:`execution_results`)
 have an ``execution_count`` field regardless of their status.
@@ -565,12 +565,12 @@ and are not included in notebook documents.
 .. sourcecode:: python

     {
-      "source": "page",
-      # mime-bundle of data to display in the pager.
-      # Must include text/plain.
-      "data": mimebundle,
-      # line offset to start from
-      "start": int,
+        "source": "page",
+        # mime-bundle of data to display in the pager.
+        # Must include text/plain.
+        "data": mimebundle,
+        # line offset to start from
+        "start": int,
     }

 **set_next_input**: create a new output
@@ -582,24 +582,24 @@
 The main example being ``%load``.

 .. sourcecode:: python

     {
-      "source": "set_next_input",
-      # the text contents of the cell to create
-      "text": "some cell content",
-      # If true, replace the current cell in document UIs instead of inserting
-      # a cell. Ignored in console UIs.
-      "replace": bool,
+        "source": "set_next_input",
+        # the text contents of the cell to create
+        "text": "some cell content",
+        # If true, replace the current cell in document UIs instead of inserting
+        # a cell. Ignored in console UIs.
+        "replace": bool,
     }

 **edit_magic**: open a file for editing.

-Triggered by `%edit`. Only the QtConsole currently supports edit payloads.
+Triggered by ``%edit``. Only the QtConsole currently supports edit payloads.

 .. sourcecode:: python

     {
-      "source": "edit_magic",
-      "filename": "/path/to/file.py", # the file to edit
-      "line_number": int, # the line number to start with
+        "source": "edit_magic",
+        "filename": "/path/to/file.py",  # the file to edit
+        "line_number": int,  # the line number to start with
     }

 **ask_exit**: instruct the frontend to prompt the user for exit
@@ -610,9 +610,9 @@ Only for console frontends.

 .. sourcecode:: python

     {
-      "source": "ask_exit",
-      # whether the kernel should be left running, only closing the client
-      "keepkernel": bool,
+        "source": "ask_exit",
+        # whether the kernel should be left running, only closing the client
+        "keepkernel": bool,
     }

@@ -993,7 +993,7 @@ Message type: ``kernel_info_reply``::
     },

     # A banner of information about the kernel,
-    # which may be desplayed in console environments.
+    # which may be displayed in console environments.
     'banner': str,

     # A boolean flag which tells if the kernel supports debugging in the notebook.
@@ -1060,7 +1060,7 @@ running and a new kernel process within it would be started.

 The client sends a shutdown request to the kernel, and once it receives the
 reply message (which is otherwise empty), it can assume that the kernel has
-completed shutdown safely. The request is sent on the `control` channel.
+completed shutdown safely. The request is sent on the ``control`` channel.

 Upon their own shutdown, client applications will typically execute a last
 minute sanity check and forcefully terminate any kernel that is still alive, to
@@ -1099,8 +1099,8 @@ Kernel interrupt

 In case a kernel can not catch operating system interrupt signals (e.g. the used
 runtime handles signals and does not allow a user program to define a callback),
 a kernel can choose to be notified using a message instead. For this to work,
-the kernels kernelspec must set `interrupt_mode` to ``message``. An interruption
-will then result in the following message on the `control` channel:
+the kernel's kernelspec must set ``interrupt_mode`` to ``message``. An interruption
+will then result in the following message on the ``control`` channel:

 Message type: ``interrupt_request``::
@@ -1130,9 +1130,11 @@ Message type: ``debug_reply``::

     content = {}

-The ``content`` dicts of the `debug_request` and `debug_reply` messages respectively follow the specification of the `Request` and `Response` messages from the `Debug Adapter Protocol (DAP) <https://microsoft.github.io/debug-adapter-protocol/>`_ as of version 1.39 or later.
+The ``content`` dicts of the ``debug_request`` and ``debug_reply`` messages respectively follow the
+specification of the ``Request`` and ``Response`` messages from the
+`Debug Adapter Protocol (DAP) <https://microsoft.github.io/debug-adapter-protocol/>`_ as of version 1.39 or later.

-Debug requests and replies are sent over the `control` channel to prevent
+Debug requests and replies are sent over the ``control`` channel to prevent
 queuing behind execution requests.

 Additions to the DAP
@@ -1153,7 +1155,7 @@ In order to support the debugging of notebook cells and of Jupyter consoles,
 which are not based on source files, we need a message to submit code to the
 debugger to which breakpoints can be added.

-  Content of the `dumpCell` request::
+  Content of the ``dumpCell`` request::

     {
      'type' : 'request',
@@ -1163,7 +1165,7 @@ debugger to which breakpoints can be added.
      }
    }

-  Content of the `dumpCell` response::
+  Content of the ``dumpCell`` response::

    {
      'type' : 'response',
@@ -1178,17 +1180,17 @@ debugInfo

 In order to support page reloading, or a client connecting at a later stage,
 Jupyter kernels must store the state of the debugger (such as breakpoints,
-whether the debugger is currently stopped). The `debugInfo` request is a DAP
-`Request` with no extra argument.
+whether the debugger is currently stopped). The ``debugInfo`` request is a DAP
+``Request`` with no extra argument.

-  Content of the `debugInfo` request::
+  Content of the ``debugInfo`` request::

    {
      'type' : 'request',
      'command' : 'debugInfo'
    }

-  Content of the `debugInfo` response::
+  Content of the ``debugInfo`` response::

    {
      'type' : 'response',
      'success' : bool,
      'body' : {
@@ -1211,23 +1213,23 @@ whether the debugger is currently stopped). The `debugInfo` request is a DAP
      }
    }

-  The `source_breakpoint` schema is specified by the Debug Adapter Protocol.
+  The ``source_breakpoint`` schema is specified by the Debug Adapter Protocol.

 inspectVariables
 ################

-The `inspectVariables` is meant to retrieve the values of all the variables
-that have been defined in the kernel. It is a DAP `Request` with no extra
+The ``inspectVariables`` request is meant to retrieve the values of all the variables
+that have been defined in the kernel. It is a DAP ``Request`` with no extra
 argument.

-  Content of the `inspectVariables` request::
+  Content of the ``inspectVariables`` request::

    {
      'type' : 'request',
      'command' : 'inspectVariables'
    }

-  Content of the `inspectVariables` response::
+  Content of the ``inspectVariables`` response::

    {
      'type' : 'response',
@@ -1247,10 +1249,10 @@ argument.
 richInspectVariables
 ####################

-The `richInspectVariables` request allows to get the rich representation of a
+The ``richInspectVariables`` request allows getting the rich representation of a
 variable that has been defined in the kernel.

-  Content of the `richInspectVariables` request::
+  Content of the ``richInspectVariables`` request::

    {
      'type' : 'request',
@@ -1262,13 +1264,13 @@ variable that has been defined in the kernel.
      }
    }

-  Content of the `richInspectVariables` response::
+  Content of the ``richInspectVariables`` response::

    {
      'type' : 'response',
      'success' : bool,
      'body' : {
-          # Dictionary of rich reprensentations of the variable
+          # Dictionary of rich representations of the variable
          'data' : dict,
          'metadata' : dict
      }
@@ -1277,10 +1279,10 @@ variable that has been defined in the kernel.
 copyToGlobals
 #############

-The `copyToGlobals` request allows to copy a variable from the local variable panel
-of the debugger to the `global`` scope to inspect it after debug session.
+The ``copyToGlobals`` request allows copying a variable from the local variable panel
+of the debugger to the ``global`` scope to inspect it after the debug session.

-  Content of the `copyToGlobals` request::
+  Content of the ``copyToGlobals`` request::

    {
      'type': 'request',
@@ -1294,7 +1296,7 @@ of the debugger to the `global`` scope to inspect it after debug session.
      }
    }

-  Content of the `copyToGlobals` response::
+  Content of the ``copyToGlobals`` response::

    {
      'type': 'response',
@@ -1593,7 +1595,7 @@ Message type: ``debug_event``::

     content = {}

-The ``content`` dict follows the specification of the `Event` message from the `Debug Adapter Protocol (DAP) <https://microsoft.github.io/debug-adapter-protocol/>`_.
+The ``content`` dict follows the specification of the ``Event`` message from the `Debug Adapter Protocol (DAP) <https://microsoft.github.io/debug-adapter-protocol/>`_.

 .. versionadded:: 5.5
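For context on the wire-format and signing sections of the messaging diff above, here is a sketch — not part of the patch — of the HMAC-SHA256 scheme the spec describes: the signature is the hex digest of the four serialized dicts fed into one HMAC in order. The key and frames below are dummy placeholders.

.. code:: python

    import hashlib
    import hmac

    key = b"dummy-connection-file-key"
    header = b'{"msg_id": "...", "msg_type": "kernel_info_request"}'
    parent_header = b"{}"
    metadata = b"{}"
    content = b"{}"

    # Concatenate the four serialized dicts into a single HMAC, per the spec.
    h = hmac.new(key, digestmod=hashlib.sha256)
    for frame in (header, parent_header, metadata, content):
        h.update(frame)

    # This hex digest occupies the frame right after the b"<IDS|MSG>" delimiter.
    print(h.hexdigest())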
diff --git a/docs/provisioning.rst b/docs/provisioning.rst
index de1eb450..ecf94b49 100644
--- a/docs/provisioning.rst
+++ b/docs/provisioning.rst
@@ -84,7 +84,7 @@ Kernel provisioner authors implement their provisioners by deriving from
 :class:`KernelProvisionerBase` and expose their provisioner for consumption via
 entry-points:

-.. code:: python
+.. code::

     'jupyter_client.kernel_provisioners': [
         'k8s-provisioner = my_package:K8sProvisioner',
@@ -145,14 +145,13 @@ provisioner.  If the user is not in the role, an exception will be thrown.

 .. code:: python

     class RBACProvisioner(LocalProvisioner):
-
         role: str = Unicode(config=True)

         async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]:
-
             if not self.user_in_role(self.role):
-                raise PermissionError(f"User is not in role {self.role} and "
-                                      f"cannot launch this kernel.")
+                raise PermissionError(
+                    f"User is not in role {self.role} and " f"cannot launch this kernel."
+                )

             return await super().pre_launch(**kwargs)
@@ -197,7 +196,7 @@ implementation of :class:`LocalProvisioner` can also be used as a reference.

 Notice the internal method ``_get_application_id()``.
 This method is what the provisioner uses to determine if the YARN application (i.e.,
-the kernel) is still running within te cluster.  Although the provisioner
+the kernel) is still running within the cluster.  Although the provisioner
 doesn't dictate the application id, the application id is
 discovered via the application *name* which is a function of ``kernel_id``.
@@ -236,8 +235,8 @@ discovered via the application *name* which is a function of ``kernel_id``.

 Notice how in some cases we can compose provisioner methods to implement others.  For
 example, since sending a signal number of 0 is tantamount to polling the process, we
-go ahead and call :meth:`poll` to handle `signum` of 0 and :meth:`kill` to handle
-`SIGKILL` requests.
+go ahead and call :meth:`poll` to handle ``signum`` of 0 and :meth:`kill` to handle
+``SIGKILL`` requests.

 Here we see how ``_get_application_id`` uses the ``kernel_id`` to acquire the application
 id - which is the *primary id* for controlling YARN application lifecycles.  Since startup
@@ -249,33 +248,37 @@ This answer is implemented in the provisioner via the :meth:`get_shutdown_wait_time`

 .. code:: python

     def _get_application_id(self, ignore_final_states: bool = False) -> str:
-
         if not self.application_id:
             app = self._query_app_by_name(self.kernel_id)
             state_condition = True
             if type(app) is dict:
-                state = app.get('state')
+                state = app.get("state")
                 self.last_known_state = state

                 if ignore_final_states:
                     state_condition = state not in YarnProvisioner.final_states

-                if len(app.get('id', '')) > 0 and state_condition:
-                    self.application_id = app['id']
-                    self.log.info(f"ApplicationID: '{app['id']}' assigned for "
-                                  f"KernelID: '{self.kernel_id}', state: {state}.")
+                if len(app.get("id", "")) > 0 and state_condition:
+                    self.application_id = app["id"]
+                    self.log.info(
+                        f"ApplicationID: '{app['id']}' assigned for "
+                        f"KernelID: '{self.kernel_id}', state: {state}."
+                    )
             if not self.application_id:
-                self.log.debug(f"ApplicationID not yet assigned for KernelID: "
-                               f"'{self.kernel_id}' - retrying...")
+                self.log.debug(
+                    f"ApplicationID not yet assigned for KernelID: "
+                    f"'{self.kernel_id}' - retrying..."
+                )
         return self.application_id

     def get_shutdown_wait_time(self, recommended: Optional[float] = 5.0) -> float:
-
         if recommended < yarn_shutdown_wait_time:
             recommended = yarn_shutdown_wait_time
-            self.log.debug(f"{type(self).__name__} shutdown wait time adjusted to "
-                           f"{recommended} seconds.")
+            self.log.debug(
+                f"{type(self).__name__} shutdown wait time adjusted to "
+                f"{recommended} seconds."
+            )

         return recommended
@@ -343,7 +346,7 @@ Listing available kernel provisioners
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 To confirm that your custom provisioner is available for use,
 the ``jupyter kernelspec`` command has been extended to include
-a `provisioners` sub-command.  As a result, running ``jupyter kernelspec provisioners``
+a ``provisioners`` sub-command.  As a result, running ``jupyter kernelspec provisioners``
 will list the available provisioners by name followed by their module and
 object names (colon-separated):
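Relatedly — as a sketch, not part of the patch — registrations under the ``jupyter_client.kernel_provisioners`` entry-point group named in the provisioning docs above can also be enumerated programmatically (assumes Python >= 3.10 for the ``group=`` keyword):

.. code:: python

    from importlib.metadata import entry_points

    # List every provisioner registration visible in the current environment.
    for ep in entry_points(group="jupyter_client.kernel_provisioners"):
        print(f"{ep.name}: {ep.value}")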
diff --git a/docs/wrapperkernels.rst b/docs/wrapperkernels.rst
index 00717f44..1dd9fd41 100644
--- a/docs/wrapperkernels.rst
+++ b/docs/wrapperkernels.rst
@@ -1,7 +1,7 @@
 Making simple Python wrapper kernels
 ====================================

-You can re-use IPython's kernel machinery to easily make new kernels.
+You can reuse IPython's kernel machinery to easily make new kernels.
 This is useful for languages that have Python bindings, such as `Hy
 <https://hylang.org>`_ (see `Calysto Hy <https://github.com/Calysto/calysto_hy>`_), or languages
diff --git a/jupyter_client/_version.py b/jupyter_client/_version.py
index 88f6713f..a329a5c8 100644
--- a/jupyter_client/_version.py
+++ b/jupyter_client/_version.py
@@ -2,15 +2,15 @@
 import re
 from typing import List, Union

-__version__ = "8.3.1"
+__version__ = "8.6.0"

 # Build up version_info tuple for backwards compatibility
-pattern = r'(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)'
+pattern = r"(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)"
 match = re.match(pattern, __version__)
 if match:
-    parts: List[Union[int, str]] = [int(match[part]) for part in ['major', 'minor', 'patch']]
-    if match['rest']:
-        parts.append(match['rest'])
+    parts: List[Union[int, str]] = [int(match[part]) for part in ["major", "minor", "patch"]]
+    if match["rest"]:
+        parts.append(match["rest"])
 else:
     parts = []
 version_info = tuple(parts)
diff --git a/jupyter_client/asynchronous/client.py b/jupyter_client/asynchronous/client.py
index 8f7b082b..11873416 100644
--- a/jupyter_client/asynchronous/client.py
+++ b/jupyter_client/asynchronous/client.py
@@ -1,6 +1,9 @@
 """Implements an async kernel client"""
 # Copyright (c) Jupyter Development Team.
 # Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
+import typing as t

 import zmq.asyncio
 from traitlets import Instance, Type
@@ -9,10 +12,10 @@
 from ..client import KernelClient, reqrep


-def wrapped(meth, channel):
+def wrapped(meth: t.Callable, channel: str) -> t.Callable:
     """Wrap a method on a channel and handle replies."""

-    def _(self, *args, **kwargs):
+    def _(self: AsyncKernelClient, *args: t.Any, **kwargs: t.Any) -> t.Any:
         reply = kwargs.pop("reply", False)
         timeout = kwargs.pop("timeout", None)
         msg_id = meth(self, *args, **kwargs)
@@ -48,11 +51,11 @@
     wait_for_ready = KernelClient._async_wait_for_ready

     # The classes to use for the various channels
-    shell_channel_class = Type(AsyncZMQSocketChannel)
-    iopub_channel_class = Type(AsyncZMQSocketChannel)
-    stdin_channel_class = Type(AsyncZMQSocketChannel)
-    hb_channel_class = Type(HBChannel)
-    control_channel_class = Type(AsyncZMQSocketChannel)
+    shell_channel_class = Type(AsyncZMQSocketChannel)  # type:ignore[arg-type]
+    iopub_channel_class = Type(AsyncZMQSocketChannel)  # type:ignore[arg-type]
+    stdin_channel_class = Type(AsyncZMQSocketChannel)  # type:ignore[arg-type]
+    hb_channel_class = Type(HBChannel)  # type:ignore[arg-type]
+    control_channel_class = Type(AsyncZMQSocketChannel)  # type:ignore[arg-type]

     _recv_reply = KernelClient._async_recv_reply
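A minimal usage sketch of the ``AsyncKernelClient`` typed above — it assumes an already-running kernel and uses a placeholder connection-file path; it is illustrative only, not part of the patch:

.. code:: python

    import asyncio

    from jupyter_client.asynchronous import AsyncKernelClient


    async def main() -> None:
        client = AsyncKernelClient()
        # "kernel-12345.json" is a placeholder for a real connection file.
        client.load_connection_file("kernel-12345.json")
        client.start_channels()
        try:
            await client.wait_for_ready(timeout=10)
            msg_id = client.execute("1 + 1")  # returns the request's msg_id
            print("sent execute_request", msg_id)
        finally:
            client.stop_channels()


    asyncio.run(main())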
diff --git a/jupyter_client/blocking/client.py b/jupyter_client/blocking/client.py
index a664db06..5c815eb8 100644
--- a/jupyter_client/blocking/client.py
+++ b/jupyter_client/blocking/client.py
@@ -4,6 +4,10 @@
 """
 # Copyright (c) Jupyter Development Team.
 # Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
+import typing as t
+
 from traitlets import Type

 from ..channels import HBChannel, ZMQSocketChannel
@@ -11,10 +15,10 @@
 from ..utils import run_sync


-def wrapped(meth, channel):
+def wrapped(meth: t.Callable, channel: str) -> t.Callable:
     """Wrap a method on a channel and handle replies."""

-    def _(self, *args, **kwargs):
+    def _(self: BlockingKernelClient, *args: t.Any, **kwargs: t.Any) -> t.Any:
         reply = kwargs.pop("reply", False)
         timeout = kwargs.pop("timeout", None)
         msg_id = meth(self, *args, **kwargs)
@@ -44,11 +48,11 @@ class BlockingKernelClient(KernelClient):
     wait_for_ready = run_sync(KernelClient._async_wait_for_ready)

     # The classes to use for the various channels
-    shell_channel_class = Type(ZMQSocketChannel)
-    iopub_channel_class = Type(ZMQSocketChannel)
-    stdin_channel_class = Type(ZMQSocketChannel)
-    hb_channel_class = Type(HBChannel)
-    control_channel_class = Type(ZMQSocketChannel)
+    shell_channel_class = Type(ZMQSocketChannel)  # type:ignore[arg-type]
+    iopub_channel_class = Type(ZMQSocketChannel)  # type:ignore[arg-type]
+    stdin_channel_class = Type(ZMQSocketChannel)  # type:ignore[arg-type]
+    hb_channel_class = Type(HBChannel)  # type:ignore[arg-type]
+    control_channel_class = Type(ZMQSocketChannel)  # type:ignore[arg-type]

     _recv_reply = run_sync(KernelClient._async_recv_reply)
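The ``wrapped`` helper above is what lets request methods on the blocking client take ``reply`` and ``timeout`` keyword arguments. A sketch under the same assumptions as before (placeholder connection file; not part of the patch):

.. code:: python

    from jupyter_client.blocking import BlockingKernelClient

    client = BlockingKernelClient()
    client.load_connection_file("kernel-12345.json")  # placeholder path
    client.start_channels()
    try:
        # reply=True blocks until the execute_reply arrives (or raises on
        # timeout) -- exactly the behavior added by wrapped() above.
        reply = client.execute("print('hello')", reply=True, timeout=10)
        print(reply["content"]["status"])
    finally:
        client.stop_channels()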
diff --git a/jupyter_client/channels.py b/jupyter_client/channels.py
index 123055b0..c645b134 100644
--- a/jupyter_client/channels.py
+++ b/jupyter_client/channels.py
@@ -54,7 +54,7 @@ def __init__(
         context: t.Optional[zmq.Context] = None,
         session: t.Optional[Session] = None,
         address: t.Union[t.Tuple[str, int], str] = "",
-    ):
+    ) -> None:
         """Create the heartbeat monitor thread.

         Parameters
@@ -98,7 +98,7 @@ def _notice_exit() -> None:
     def _create_socket(self) -> None:
         if self.socket is not None:
             # close previous socket, before opening a new one
-            self.poller.unregister(self.socket)
+            self.poller.unregister(self.socket)  # type:ignore[unreachable]
             self.socket.close()
         assert self.context is not None
         self.socket = self.context.socket(zmq.REQ)
@@ -290,7 +290,7 @@ def __init__(self, socket: zmq.asyncio.Socket, session: Session, loop: t.Any = None
         Unused here, for other implementations
         """
         if not isinstance(socket, zmq.asyncio.Socket):
-            msg = 'Socket must be asyncio'
+            msg = "Socket must be asyncio"  # type:ignore[unreachable]
             raise ValueError(msg)
         super().__init__(socket, session)
diff --git a/jupyter_client/channelsabc.py b/jupyter_client/channelsabc.py
index 1bfe7922..af053dfa 100644
--- a/jupyter_client/channelsabc.py
+++ b/jupyter_client/channelsabc.py
@@ -8,17 +8,17 @@ class ChannelABC(metaclass=abc.ABCMeta):
     """A base class for all channel ABCs."""

     @abc.abstractmethod
-    def start(self):
+    def start(self) -> None:
         """Start the channel."""
         pass

     @abc.abstractmethod
-    def stop(self):
+    def stop(self) -> None:
         """Stop the channel."""
         pass

     @abc.abstractmethod
-    def is_alive(self):
+    def is_alive(self) -> bool:
         """Test whether the channel is alive."""
         pass
@@ -32,20 +32,20 @@ class HBChannelABC(ChannelABC):
     """

     @abc.abstractproperty
-    def time_to_dead(self):
+    def time_to_dead(self) -> float:
         pass

     @abc.abstractmethod
-    def pause(self):
+    def pause(self) -> None:
         """Pause the heartbeat channel."""
         pass

     @abc.abstractmethod
-    def unpause(self):
+    def unpause(self) -> None:
         """Unpause the heartbeat channel."""
         pass

     @abc.abstractmethod
-    def is_beating(self):
+    def is_beating(self) -> bool:
         """Test whether the channel is beating."""
         pass
diff --git a/jupyter_client/client.py b/jupyter_client/client.py
index 20115fd2..aa353ac2 100644
--- a/jupyter_client/client.py
+++ b/jupyter_client/client.py
@@ -113,9 +113,13 @@ def _context_default(self) -> zmq.Context:
     # flag for whether execute requests should be allowed to call raw_input:
     allow_stdin: bool = True

-    def __del__(self):
+    def __del__(self) -> None:
         """Handle garbage collection.  Destroy context if applicable."""
-        if self._created_context and self.context and not self.context.closed:
+        if (
+            self._created_context
+            and self.context is not None  # type:ignore[redundant-expr]
+            and not self.context.closed
+        ):
             if self.channels_running:
                 if self.log:
                     self.log.warning("Could not destroy zmq context for %s", self)
@@ -349,7 +353,9 @@ def shell_channel(self) -> t.Any:
             url = self._make_url("shell")
             self.log.debug("connecting shell channel to %s", url)
             socket = self.connect_shell(identity=self.session.bsession)
-            self._shell_channel = self.shell_channel_class(socket, self.session, self.ioloop)
+            self._shell_channel = self.shell_channel_class(  # type:ignore[call-arg,abstract]
+                socket, self.session, self.ioloop
+            )
         return self._shell_channel

     @property
@@ -359,7 +365,9 @@ def iopub_channel(self) -> t.Any:
             url = self._make_url("iopub")
             self.log.debug("connecting iopub channel to %s", url)
             socket = self.connect_iopub()
-            self._iopub_channel = self.iopub_channel_class(socket, self.session, self.ioloop)
+            self._iopub_channel = self.iopub_channel_class(  # type:ignore[call-arg,abstract]
+                socket, self.session, self.ioloop
+            )
         return self._iopub_channel

     @property
@@ -369,7 +377,9 @@ def stdin_channel(self) -> t.Any:
             url = self._make_url("stdin")
             self.log.debug("connecting stdin channel to %s", url)
             socket = self.connect_stdin(identity=self.session.bsession)
-            self._stdin_channel = self.stdin_channel_class(socket, self.session, self.ioloop)
+            self._stdin_channel = self.stdin_channel_class(  # type:ignore[call-arg,abstract]
+                socket, self.session, self.ioloop
+            )
         return self._stdin_channel

     @property
@@ -378,7 +388,9 @@ def hb_channel(self) -> t.Any:
         if self._hb_channel is None:
             url = self._make_url("hb")
             self.log.debug("connecting heartbeat channel to %s", url)
-            self._hb_channel = self.hb_channel_class(self.context, self.session, url)
+            self._hb_channel = self.hb_channel_class(  # type:ignore[call-arg,abstract]
+                self.context, self.session, url
+            )
         return self._hb_channel

     @property
@@ -388,7 +400,9 @@ def control_channel(self) -> t.Any:
             url = self._make_url("control")
             self.log.debug("connecting control channel to %s", url)
             socket = self.connect_control(identity=self.session.bsession)
-            self._control_channel = self.control_channel_class(socket, self.session, self.ioloop)
+            self._control_channel = self.control_channel_class(  # type:ignore[call-arg,abstract]
+                socket, self.session, self.ioloop
+            )
         return self._control_channel

     async def _async_is_alive(self) -> bool:
@@ -497,7 +511,7 @@ async def _async_execute_interactive(
         if output_hook is None and "IPython" in sys.modules:
             from IPython import get_ipython

-            ip = get_ipython()
+            ip = get_ipython()  # type:ignore[no-untyped-call]
             in_kernel = getattr(ip, "kernel", False)
             if in_kernel:
                 output_hook = partial(
diff --git a/jupyter_client/clientabc.py b/jupyter_client/clientabc.py
index 3623b833..d003fe17 100644
--- a/jupyter_client/clientabc.py
+++ b/jupyter_client/clientabc.py
@@ -8,7 +8,13 @@
 # -----------------------------------------------------------------------------
 # Imports
 # -----------------------------------------------------------------------------
+from __future__ import annotations
+
 import abc
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from .channelsabc import ChannelABC

 # -----------------------------------------------------------------------------
 # Main kernel client class
@@ -24,27 +30,27 @@ class KernelClientABC(metaclass=abc.ABCMeta):
     """

     @abc.abstractproperty
-    def kernel(self):
+    def kernel(self) -> Any:
         pass

     @abc.abstractproperty
-    def shell_channel_class(self):
+    def shell_channel_class(self) -> type[ChannelABC]:
         pass

     @abc.abstractproperty
-    def iopub_channel_class(self):
+    def iopub_channel_class(self) -> type[ChannelABC]:
         pass

     @abc.abstractproperty
-    def hb_channel_class(self):
+    def hb_channel_class(self) -> type[ChannelABC]:
         pass

     @abc.abstractproperty
-    def stdin_channel_class(self):
+    def stdin_channel_class(self) -> type[ChannelABC]:
         pass

     @abc.abstractproperty
-    def control_channel_class(self):
+    def control_channel_class(self) -> type[ChannelABC]:
         pass

     # --------------------------------------------------------------------------
@@ -52,36 +58,43 @@ def control_channel_class(self):
     # --------------------------------------------------------------------------

     @abc.abstractmethod
-    def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True):
+    def start_channels(
+        self,
+        shell: bool = True,
+        iopub: bool = True,
+        stdin: bool = True,
+        hb: bool = True,
+        control: bool = True,
+    ) -> None:
         """Start the channels for the client."""
         pass

     @abc.abstractmethod
-    def stop_channels(self):
+    def stop_channels(self) -> None:
         """Stop the channels for the client."""
         pass

     @abc.abstractproperty
-    def channels_running(self):
+    def channels_running(self) -> bool:
         """Get whether the channels are running."""
         pass

     @abc.abstractproperty
-    def shell_channel(self):
+    def shell_channel(self) -> ChannelABC:
         pass

     @abc.abstractproperty
-    def iopub_channel(self):
+    def iopub_channel(self) -> ChannelABC:
         pass

     @abc.abstractproperty
-    def stdin_channel(self):
+    def stdin_channel(self) -> ChannelABC:
         pass

     @abc.abstractproperty
-    def hb_channel(self):
+    def hb_channel(self) -> ChannelABC:
         pass

     @abc.abstractproperty
-    def control_channel(self):
+    def control_channel(self) -> ChannelABC:
         pass
diff --git a/jupyter_client/connect.py b/jupyter_client/connect.py
index 6ebec00c..a634be3d 100644
--- a/jupyter_client/connect.py
+++ b/jupyter_client/connect.py
@@ -5,6 +5,8 @@
 """
 # Copyright (c) Jupyter Development Team.
 # Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
 import errno
 import glob
 import json
@@ -14,7 +16,7 @@
 import tempfile
 import warnings
 from getpass import getpass
-from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast
+from typing import TYPE_CHECKING, Any, Dict, Union, cast

 import zmq
 from jupyter_core.paths import jupyter_data_dir, jupyter_runtime_dir, secure_write
@@ -24,12 +26,17 @@
 from .localinterfaces import localhost
 from .utils import _filefind

+if TYPE_CHECKING:
+    from jupyter_client import BlockingKernelClient
+
+    from .session import Session
+
 # Define custom type for kernel connection info
 KernelConnectionInfo = Dict[str, Union[int, str, bytes]]


 def write_connection_file(
-    fname: Optional[str] = None,
+    fname: str | None = None,
     shell_port: int = 0,
     iopub_port: int = 0,
     stdin_port: int = 0,
@@ -41,7 +48,7 @@
     signature_scheme: str = "hmac-sha256",
     kernel_name: str = "",
     **kwargs: Any,
-) -> Tuple[str, KernelConnectionInfo]:
+) -> tuple[str, KernelConnectionInfo]:
     """Generates a JSON config file, including the selection of random ports.

     Parameters
@@ -91,8 +98,8 @@

     # Find open ports as necessary.

-    ports: List[int] = []
-    sockets: List[socket.socket] = []
+    ports: list[int] = []
+    sockets: list[socket.socket] = []
     ports_needed = (
         int(shell_port <= 0)
         + int(iopub_port <= 0)
@@ -169,8 +176,8 @@

 def find_connection_file(
     filename: str = "kernel-*.json",
-    path: Optional[Union[str, List[str]]] = None,
-    profile: Optional[str] = None,
+    path: str | list[str] | None = None,
+    profile: str | None = None,
 ) -> str:
     """find a connection file, and return its absolute path.
@@ -232,10 +239,10 @@


 def tunnel_to_kernel(
-    connection_info: Union[str, KernelConnectionInfo],
+    connection_info: str | KernelConnectionInfo,
     sshserver: str,
-    sshkey: Optional[str] = None,
-) -> Tuple[Any, ...]:
+    sshkey: str | None = None,
+) -> tuple[Any, ...]:
     """tunnel connections to a kernel via ssh

     This will open five SSH tunnels from localhost on this machine to the
@@ -282,7 +289,7 @@
     remote_ip = cf["ip"]

     if tunnel.try_passwordless_ssh(sshserver, sshkey):
-        password: Union[bool, str] = False
+        password: bool | str = False
     else:
         password = getpass("SSH Password for %s: " % sshserver)
@@ -310,9 +317,9 @@
 class ConnectionFileMixin(LoggingConfigurable):
     """Mixin for configurable classes that work with connection files"""

-    data_dir: Union[str, Unicode] = Unicode()
+    data_dir: str | Unicode = Unicode()

-    def _data_dir_default(self):
+    def _data_dir_default(self) -> str:
         return jupyter_data_dir()

     # The addresses for the communication channels
@@ -329,7 +336,7 @@
     _connection_file_written = Bool(False)

     transport = CaselessStrEnum(["tcp", "ipc"], default_value="tcp", config=True)
-    kernel_name: Union[str, Unicode] = Unicode()
+    kernel_name: str | Unicode = Unicode()

     context = Instance(zmq.Context)
@@ -341,7 +348,7 @@
         to the Kernel, so be careful!""",
     )

-    def _ip_default(self):
+    def _ip_default(self) -> str:
         if self.transport == "ipc":
             if self.connection_file:
                 return os.path.splitext(self.connection_file)[0] + "-ipc"
@@ -351,7 +358,7 @@
         return localhost()

     @observe("ip")
-    def _ip_changed(self, change):
+    def _ip_changed(self, change: Any) -> None:
         if change["new"] == "*":
             self.ip = "0.0.0.0"  # noqa
@@ -364,16 +371,16 @@
     control_port = Integer(0, config=True, help="set the control (ROUTER) port [default: random]")

     # names of the ports with random assignment
-    _random_port_names: Optional[List[str]] = None
+    _random_port_names: list[str] | None = None

     @property
-    def ports(self) -> List[int]:
+    def ports(self) -> list[int]:
         return [getattr(self, name) for name in port_names]

     # The Session to use for communication with the kernel.
     session = Instance("jupyter_client.session.Session")

-    def _session_default(self):
+    def _session_default(self) -> Session:
         from .session import Session

         return Session(parent=self)
@@ -423,10 +430,10 @@ def get_connection_info(self, session: bool = False) -> KernelConnectionInfo:
     # factory for blocking clients
     blocking_class = Type(klass=object, default_value="jupyter_client.BlockingKernelClient")

-    def blocking_client(self):
+    def blocking_client(self) -> BlockingKernelClient:
         """Make a blocking client connected to my kernel"""
         info = self.get_connection_info()
-        bc = self.blocking_class(parent=self)
+        bc = self.blocking_class(parent=self)  # type:ignore[operator]
         bc.load_connection_info(info)
         return bc
@@ -511,7 +518,7 @@ def write_connection_file(self, **kwargs: Any) -> None:

         self._connection_file_written = True

-    def load_connection_file(self, connection_file: Optional[str] = None) -> None:
+    def load_connection_file(self, connection_file: str | None = None) -> None:
         """Load connection info from JSON dict in self.connection_file.

         Parameters
@@ -540,7 +547,7 @@ def load_connection_info(self, info: KernelConnectionInfo) -> None:
         See the connection_file spec for details.
         """
         self.transport = info.get("transport", self.transport)
-        self.ip = info.get("ip", self._ip_default())
+        self.ip = info.get("ip", self._ip_default())  # type:ignore[assignment]

         self._record_random_port_names()
         for name in port_names:
@@ -638,7 +645,7 @@ def _make_url(self, channel: str) -> str:
         return f"{transport}://{ip}-{port}"

     def _create_connected_socket(
-        self, channel: str, identity: Optional[bytes] = None
+        self, channel: str, identity: bytes | None = None
     ) -> zmq.sugar.socket.Socket:
         """Create a zmq Socket and connect it to the kernel."""
         url = self._make_url(channel)
@@ -652,25 +659,25 @@
         sock.connect(url)
         return sock

-    def connect_iopub(self, identity: Optional[bytes] = None) -> zmq.sugar.socket.Socket:
+    def connect_iopub(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket:
         """return zmq Socket connected to the IOPub channel"""
         sock = self._create_connected_socket("iopub", identity=identity)
         sock.setsockopt(zmq.SUBSCRIBE, b"")
         return sock

-    def connect_shell(self, identity: Optional[bytes] = None) -> zmq.sugar.socket.Socket:
+    def connect_shell(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket:
         """return zmq Socket connected to the Shell channel"""
         return self._create_connected_socket("shell", identity=identity)

-    def connect_stdin(self, identity: Optional[bytes] = None) -> zmq.sugar.socket.Socket:
+    def connect_stdin(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket:
         """return zmq Socket connected to the StdIn channel"""
         return self._create_connected_socket("stdin", identity=identity)

-    def connect_hb(self, identity: Optional[bytes] = None) -> zmq.sugar.socket.Socket:
+    def connect_hb(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket:
         """return zmq Socket connected to the Heartbeat channel"""
         return self._create_connected_socket("hb", identity=identity)

-    def connect_control(self, identity: Optional[bytes] = None) -> zmq.sugar.socket.Socket:
+    def connect_control(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket:
         """return zmq Socket connected to the Control channel"""
         return self._create_connected_socket("control", identity=identity)
@@ -688,7 +695,7 @@ class is attempting to resolve (minimize).

     def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
-        self.currently_used_ports: Set[int] = set()
+        self.currently_used_ports: set[int] = set()

     def find_available_port(self, ip: str) -> int:
         while True:
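A short usage sketch of ``write_connection_file``, whose signature is modernized in the hunks above — with no explicit ports, free ports are chosen at random and a JSON connection file is written; the ``ip`` and ``key`` values here are illustrative only:

.. code:: python

    from jupyter_client.connect import write_connection_file

    fname, info = write_connection_file(ip="127.0.0.1", key=b"dummy-key")
    print(fname)  # path of the freshly written connection file
    print(sorted(info))  # ports, transport, key, signature_scheme, ...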
Use JupyterConsoleApp", stacklevel=2) super().__init__(*args, **kwargs) diff --git a/jupyter_client/ioloop/manager.py b/jupyter_client/ioloop/manager.py index a3f07211..5a6c8aec 100644 --- a/jupyter_client/ioloop/manager.py +++ b/jupyter_client/ioloop/manager.py @@ -12,10 +12,10 @@ from .restarter import AsyncIOLoopKernelRestarter, IOLoopKernelRestarter -def as_zmqstream(f): +def as_zmqstream(f: t.Any) -> t.Callable: """Convert a socket to a zmq stream.""" - def wrapped(self, *args, **kwargs): + def wrapped(self: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any: save_socket_class = None # zmqstreams only support sync sockets if self.context._socket_class is not zmq.Socket: @@ -37,7 +37,7 @@ class IOLoopKernelManager(KernelManager): loop = Instance("tornado.ioloop.IOLoop") - def _loop_default(self): + def _loop_default(self) -> ioloop.IOLoop: return ioloop.IOLoop.current() restarter_class = Type( @@ -52,7 +52,7 @@ def _loop_default(self): ) _restarter: t.Any = Instance("jupyter_client.ioloop.IOLoopKernelRestarter", allow_none=True) - def start_restarter(self): + def start_restarter(self) -> None: """Start the restarter.""" if self.autorestart and self.has_kernel: if self._restarter is None: @@ -61,7 +61,7 @@ def start_restarter(self): ) self._restarter.start() - def stop_restarter(self): + def stop_restarter(self) -> None: """Stop the restarter.""" if self.autorestart and self._restarter is not None: self._restarter.stop() @@ -78,7 +78,7 @@ class AsyncIOLoopKernelManager(AsyncKernelManager): loop = Instance("tornado.ioloop.IOLoop") - def _loop_default(self): + def _loop_default(self) -> ioloop.IOLoop: return ioloop.IOLoop.current() restarter_class = Type( @@ -95,7 +95,7 @@ def _loop_default(self): "jupyter_client.ioloop.AsyncIOLoopKernelRestarter", allow_none=True ) - def start_restarter(self): + def start_restarter(self) -> None: """Start the restarter.""" if self.autorestart and self.has_kernel: if self._restarter is None: @@ -104,7 +104,7 @@ def start_restarter(self): ) self._restarter.start() - def stop_restarter(self): + def stop_restarter(self) -> None: """Stop the restarter.""" if self.autorestart and self._restarter is not None: self._restarter.stop() diff --git a/jupyter_client/ioloop/restarter.py b/jupyter_client/ioloop/restarter.py index d0c70396..64b50840 100644 --- a/jupyter_client/ioloop/restarter.py +++ b/jupyter_client/ioloop/restarter.py @@ -7,6 +7,7 @@ # Distributed under the terms of the Modified BSD License. 
import time import warnings +from typing import Any from traitlets import Instance @@ -18,7 +19,7 @@ class IOLoopKernelRestarter(KernelRestarter): loop = Instance("tornado.ioloop.IOLoop") - def _loop_default(self): + def _loop_default(self) -> Any: warnings.warn( "IOLoopKernelRestarter.loop is deprecated in jupyter-client 5.2", DeprecationWarning, @@ -30,7 +31,7 @@ def _loop_default(self): _pcallback = None - def start(self): + def start(self) -> None: """Start the polling of the kernel.""" if self._pcallback is None: from tornado.ioloop import PeriodicCallback @@ -41,7 +42,7 @@ def start(self): ) self._pcallback.start() - def stop(self): + def stop(self) -> None: """Stop the kernel polling.""" if self._pcallback is not None: self._pcallback.stop() @@ -51,7 +52,7 @@ def stop(self): class AsyncIOLoopKernelRestarter(IOLoopKernelRestarter): """An async io loop kernel restarter.""" - async def poll(self): + async def poll(self) -> None: # type:ignore[override] """Poll the kernel.""" if self.debug: self.log.debug("Polling kernel...") diff --git a/jupyter_client/jsonutil.py b/jupyter_client/jsonutil.py index db46d1b1..2ba640fe 100644 --- a/jupyter_client/jsonutil.py +++ b/jupyter_client/jsonutil.py @@ -9,7 +9,7 @@ from binascii import b2a_base64 from collections.abc import Iterable from datetime import datetime -from typing import Optional, Union +from typing import Any, Optional, Union from dateutil.parser import parse as _dateutil_parse from dateutil.tz import tzlocal @@ -67,7 +67,7 @@ def parse_date(s: Optional[str]) -> Optional[Union[str, datetime]]: return s -def extract_dates(obj): +def extract_dates(obj: Any) -> Any: """extract ISO8601 dates from unpacked JSON""" if isinstance(obj, dict): new_obj = {} # don't clobber @@ -81,7 +81,7 @@ def extract_dates(obj): return obj -def squash_dates(obj): +def squash_dates(obj: Any) -> Any: """squash datetime objects into ISO8601 strings""" if isinstance(obj, dict): obj = dict(obj) # don't clobber @@ -94,7 +94,7 @@ def squash_dates(obj): return obj -def date_default(obj): +def date_default(obj: Any) -> Any: """DEPRECATED: Use jupyter_client.jsonutil.json_default""" warnings.warn( "date_default is deprecated since jupyter_client 7.0.0." @@ -104,14 +104,14 @@ def date_default(obj): return json_default(obj) -def json_default(obj): +def json_default(obj: Any) -> Any: """default function for packing objects in JSON.""" if isinstance(obj, datetime): obj = _ensure_tzinfo(obj) - return obj.isoformat().replace('+00:00', 'Z') + return obj.isoformat().replace("+00:00", "Z") if isinstance(obj, bytes): - return b2a_base64(obj, newline=False).decode('ascii') + return b2a_base64(obj, newline=False).decode("ascii") if isinstance(obj, Iterable): return list(obj) @@ -128,7 +128,7 @@ def json_default(obj): # Copy of the old ipykernel's json_clean # This is temporary, it should be removed when we deprecate support for # non-valid JSON messages -def json_clean(obj): +def json_clean(obj: Any) -> Any: # types that are 'atomic' and ok in json as-is. 
atomic_ok = (str, type(None)) @@ -157,10 +157,10 @@ def json_clean(obj): if isinstance(obj, bytes): # unambiguous binary data is base64-encoded # (this probably should have happened upstream) - return b2a_base64(obj, newline=False).decode('ascii') + return b2a_base64(obj, newline=False).decode("ascii") if isinstance(obj, container_to_list) or ( - hasattr(obj, '__iter__') and hasattr(obj, next_attr_name) + hasattr(obj, "__iter__") and hasattr(obj, next_attr_name) ): obj = list(obj) @@ -175,8 +175,8 @@ def json_clean(obj): nkeys_collapsed = len(set(map(str, obj))) if nkeys != nkeys_collapsed: msg = ( - 'dict cannot be safely converted to JSON: ' - 'key collision would lead to dropped values' + "dict cannot be safely converted to JSON: " + "key collision would lead to dropped values" ) raise ValueError(msg) # If all OK, proceed by making the new dict that will be json-safe diff --git a/jupyter_client/kernelapp.py b/jupyter_client/kernelapp.py index b66e1542..5d43c64e 100644 --- a/jupyter_client/kernelapp.py +++ b/jupyter_client/kernelapp.py @@ -1,6 +1,7 @@ """An application to launch a kernel by name in a local subprocess.""" import os import signal +import typing as t import uuid from jupyter_core.application import JupyterApp, base_flags @@ -30,7 +31,7 @@ class KernelApp(JupyterApp): config=True ) - def initialize(self, argv=None): + def initialize(self, argv: t.Union[str, t.Sequence[str], None] = None) -> None: """Initialize the application.""" super().initialize(argv) @@ -48,7 +49,7 @@ def setup_signals(self) -> None: if os.name == "nt": return - def shutdown_handler(signo, frame): + def shutdown_handler(signo: int, frame: t.Any) -> None: self.loop.add_callback_from_signal(self.shutdown, signo) for sig in [signal.SIGTERM, signal.SIGINT]: diff --git a/jupyter_client/kernelspec.py b/jupyter_client/kernelspec.py index 46c365d4..41ed2bad 100644 --- a/jupyter_client/kernelspec.py +++ b/jupyter_client/kernelspec.py @@ -1,10 +1,13 @@ """Tools for managing kernel specs""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. +from __future__ import annotations + import json import os import re import shutil +import typing as t import warnings from jupyter_core.paths import SYSTEM_JUPYTER_PATH, jupyter_data_dir, jupyter_path @@ -21,7 +24,7 @@ class KernelSpec(HasTraits): """A kernel spec model object.""" - argv = List() + argv: List[str] = List() name = Unicode() mimetype = Unicode() display_name = Unicode() @@ -32,7 +35,7 @@ class KernelSpec(HasTraits): metadata = Dict() @classmethod - def from_resource_dir(cls, resource_dir): + def from_resource_dir(cls: type[KernelSpec], resource_dir: str) -> KernelSpec: """Create a KernelSpec object by reading kernel.json Pass the path to the *directory* containing kernel.json. @@ -42,7 +45,7 @@ def from_resource_dir(cls, resource_dir): kernel_dict = json.load(f) return cls(resource_dir=resource_dir, **kernel_dict) - def to_dict(self): + def to_dict(self) -> dict[str, t.Any]: """Convert the kernel spec to a dict.""" d = { "argv": self.argv, @@ -55,7 +58,7 @@ def to_dict(self): return d - def to_json(self): + def to_json(self) -> str: """Serialise this kernelspec to a JSON object. Returns a string.
@@ -66,7 +69,7 @@ def to_json(self): _kernel_name_pat = re.compile(r"^[a-z0-9._\-]+$", re.IGNORECASE) -def _is_valid_kernel_name(name): +def _is_valid_kernel_name(name: str) -> t.Any: """Check that a kernel name is valid.""" # quote is not unicode-safe on Python 2 return _kernel_name_pat.match(name) @@ -78,12 +81,12 @@ def _is_valid_kernel_name(name): ) -def _is_kernel_dir(path): +def _is_kernel_dir(path: str) -> bool: """Is ``path`` a kernel directory?""" return os.path.isdir(path) and os.path.isfile(pjoin(path, "kernel.json")) -def _list_kernels_in(dir): +def _list_kernels_in(dir: str | None) -> dict[str, str]: """Return a mapping of kernel names to resource directories from dir. If dir is None or does not exist, returns an empty dict. @@ -108,11 +111,11 @@ def _list_kernels_in(dir): class NoSuchKernel(KeyError): # noqa """An error raised when there is no kernel of a given name.""" - def __init__(self, name): + def __init__(self, name: str) -> None: """Initialize the error.""" self.name = name - def __str__(self): + def __str__(self) -> str: return f"No such kernel named {self.name}" @@ -137,12 +140,12 @@ class KernelSpecManager(LoggingConfigurable): data_dir = Unicode() - def _data_dir_default(self): + def _data_dir_default(self) -> str: return jupyter_data_dir() user_kernel_dir = Unicode() - def _user_kernel_dir_default(self): + def _user_kernel_dir_default(self) -> str: return pjoin(self.data_dir, "kernels") whitelist = Set( @@ -157,7 +160,7 @@ def _user_kernel_dir_default(self): By default, all installed kernels are allowed. """, ) - kernel_dirs = List( + kernel_dirs: List[str] = List( help="List of kernel directories to search. Later ones take priority over earlier." ) @@ -168,7 +171,7 @@ def _user_kernel_dir_default(self): # Method copied from # https://github.com/jupyterhub/jupyterhub/blob/d1a85e53dccfc7b1dd81b0c1985d158cc6b61820/jupyterhub/auth.py#L143-L161 @observe(*list(_deprecated_aliases)) - def _deprecated_trait(self, change): + def _deprecated_trait(self, change: t.Any) -> None: """observer for deprecated traits""" old_attr = change.name new_attr, version = self._deprecated_aliases[old_attr] @@ -178,19 +181,12 @@ def _deprecated_trait(self, change): # protects backward-compatible config from warnings # if they set the same value under both names self.log.warning( - ( - "{cls}.{old} is deprecated in jupyter_client " - "{version}, use {cls}.{new} instead" - ).format( - cls=self.__class__.__name__, - old=old_attr, - new=new_attr, - version=version, - ) + f"{self.__class__.__name__}.{old_attr} is deprecated in jupyter_client " + f"{version}, use {self.__class__.__name__}.{new_attr} instead" ) setattr(self, new_attr, change.new) - def _kernel_dirs_default(self): + def _kernel_dirs_default(self) -> list[str]: dirs = jupyter_path("kernels") # At some point, we should stop adding .ipython/kernels to the path, # but the cost to keeping it is very small. @@ -203,7 +199,7 @@ def _kernel_dirs_default(self): pass return dirs - def find_kernel_specs(self): + def find_kernel_specs(self) -> dict[str, str]: """Returns a dict mapping kernel names to resource directories.""" d = {} for kernel_dir in self.kernel_dirs: @@ -232,7 +228,7 @@ def find_kernel_specs(self): return d # TODO: Caching? - def _get_kernel_spec_by_name(self, kernel_name, resource_dir): + def _get_kernel_spec_by_name(self, kernel_name: str, resource_dir: str) -> KernelSpec: """Returns a :class:`KernelSpec` instance for a given kernel_name and resource_dir.
""" @@ -245,7 +241,8 @@ def _get_kernel_spec_by_name(self, kernel_name, resource_dir): pass else: if resource_dir == RESOURCES: - kspec = self.kernel_spec_class(resource_dir=resource_dir, **get_kernel_dict()) + kdict = get_kernel_dict() + kspec = self.kernel_spec_class(resource_dir=resource_dir, **kdict) if not kspec: kspec = self.kernel_spec_class.from_resource_dir(resource_dir) @@ -254,7 +251,7 @@ def _get_kernel_spec_by_name(self, kernel_name, resource_dir): return kspec - def _find_spec_directory(self, kernel_name): + def _find_spec_directory(self, kernel_name: str) -> str | None: """Find the resource directory of a named kernel spec""" for kernel_dir in [kd for kd in self.kernel_dirs if os.path.isdir(kd)]: files = os.listdir(kernel_dir) @@ -270,8 +267,9 @@ def _find_spec_directory(self, kernel_name): pass else: return RESOURCES + return None - def get_kernel_spec(self, kernel_name): + def get_kernel_spec(self, kernel_name: str) -> KernelSpec: """Returns a :class:`KernelSpec` instance for the given kernel_name. Raises :exc:`NoSuchKernel` if the given kernel name is not found. @@ -288,7 +286,7 @@ def get_kernel_spec(self, kernel_name): return self._get_kernel_spec_by_name(kernel_name, resource_dir) - def get_all_specs(self): + def get_all_specs(self) -> dict[str, t.Any]: """Returns a dict mapping kernel names to kernelspecs. Returns a dict of the form:: @@ -320,7 +318,7 @@ def get_all_specs(self): self.log.warning("Error loading kernelspec %r", kname, exc_info=True) return res - def remove_kernel_spec(self, name): + def remove_kernel_spec(self, name: str) -> str: """Remove a kernel spec directory by name. Returns the path that was deleted. @@ -339,7 +337,9 @@ def remove_kernel_spec(self, name): shutil.rmtree(spec_dir) return spec_dir - def _get_destination_dir(self, kernel_name, user=False, prefix=None): + def _get_destination_dir( + self, kernel_name: str, user: bool = False, prefix: str | None = None + ) -> str: if user: return os.path.join(self.user_kernel_dir, kernel_name) elif prefix: @@ -348,8 +348,13 @@ def _get_destination_dir(self, kernel_name, user=False, prefix=None): return os.path.join(SYSTEM_JUPYTER_PATH[0], "kernels", kernel_name) def install_kernel_spec( - self, source_dir, kernel_name=None, user=False, replace=None, prefix=None - ): + self, + source_dir: str, + kernel_name: str | None = None, + user: bool = False, + replace: bool | None = None, + prefix: str | None = None, + ) -> str: """Install a kernel spec by copying its directory. If ``kernel_name`` is not given, the basename of ``source_dir`` will @@ -402,7 +407,7 @@ def install_kernel_spec( self.log.info("Installed kernelspec %s in %s", kernel_name, destination) return destination - def install_native_kernel_spec(self, user=False): + def install_native_kernel_spec(self, user: bool = False) -> None: """DEPRECATED: Use ipykernel.kernelspec.install""" warnings.warn( "install_native_kernel_spec is deprecated. Use ipykernel.kernelspec import install.", @@ -413,12 +418,12 @@ def install_native_kernel_spec(self, user=False): install(self, user=user) -def find_kernel_specs(): +def find_kernel_specs() -> dict[str, str]: """Returns a dict mapping kernel names to resource directories.""" return KernelSpecManager().find_kernel_specs() -def get_kernel_spec(kernel_name): +def get_kernel_spec(kernel_name: str) -> KernelSpec: """Returns a :class:`KernelSpec` instance for the given kernel_name. Raises KeyError if the given kernel name is not found. 
@@ -426,7 +431,13 @@ def get_kernel_spec(kernel_name): return KernelSpecManager().get_kernel_spec(kernel_name) -def install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False, prefix=None): +def install_kernel_spec( + source_dir: str, + kernel_name: str | None = None, + user: bool = False, + replace: bool | None = False, + prefix: str | None = None, +) -> str: """Install a kernel spec in a given directory.""" return KernelSpecManager().install_kernel_spec(source_dir, kernel_name, user, replace, prefix) @@ -434,9 +445,9 @@ def install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False, install_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__ -def install_native_kernel_spec(user=False): +def install_native_kernel_spec(user: bool = False) -> None: """Install the native kernel spec.""" - return KernelSpecManager().install_native_kernel_spec(user=user) + KernelSpecManager().install_native_kernel_spec(user=user) install_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__ diff --git a/jupyter_client/kernelspecapp.py b/jupyter_client/kernelspecapp.py index e9b14703..186aa9cf 100644 --- a/jupyter_client/kernelspecapp.py +++ b/jupyter_client/kernelspecapp.py @@ -1,6 +1,8 @@ """Apps for managing kernel specs.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. +from __future__ import annotations + import errno import json import os.path @@ -10,7 +12,6 @@ from jupyter_core.application import JupyterApp, base_aliases, base_flags from traitlets import Bool, Dict, Instance, List, Unicode from traitlets.config.application import Application -from traitlets.config.loader import Config from . import __version__ from .kernelspec import KernelSpecManager @@ -37,21 +38,21 @@ class ListKernelSpecs(JupyterApp): "debug": base_flags["debug"], } - def _kernel_spec_manager_default(self): + def _kernel_spec_manager_default(self) -> KernelSpecManager: return KernelSpecManager(parent=self, data_dir=self.data_dir) - def start(self): + def start(self) -> dict[str, t.Any] | None: # type:ignore[override] """Start the application.""" paths = self.kernel_spec_manager.find_kernel_specs() specs = self.kernel_spec_manager.get_all_specs() if not self.json_output: if not specs: print("No kernels available") - return + return None # pad to width of longest kernel name name_len = len(sorted(paths, key=lambda name: len(name))[-1]) - def path_key(item): + def path_key(item: t.Any) -> t.Any: """sort key function for Jupyter path priority""" path = item[1] for idx, prefix in enumerate(self.jupyter_path): @@ -85,13 +86,13 @@ class InstallKernelSpec(JupyterApp): usage = "jupyter kernelspec install SOURCE_DIR [--options]" kernel_spec_manager = Instance(KernelSpecManager) - def _kernel_spec_manager_default(self): + def _kernel_spec_manager_default(self) -> KernelSpecManager: return KernelSpecManager(data_dir=self.data_dir) sourcedir = Unicode() kernel_name = Unicode("", config=True, help="Install the kernel spec with this name") - def _kernel_name_default(self): + def _kernel_name_default(self) -> str: return os.path.basename(self.sourcedir) user = Bool( @@ -133,7 +134,7 @@ def _kernel_name_default(self): "debug": base_flags["debug"], } - def parse_command_line(self, argv): + def parse_command_line(self, argv: None | list[str]) -> None: # type:ignore[override] """Parse the command line args.""" super().parse_command_line(argv) # accept positional arg as profile name @@ -143,7 +144,7 @@ def 
parse_command_line(self, argv): print("No source directory specified.", file=sys.stderr) self.exit(1) - def start(self): + def start(self) -> None: """Start the application.""" if self.user and self.prefix: self.exit("Can't specify both user and prefix. Please choose one or the other.") @@ -179,7 +180,7 @@ class RemoveKernelSpec(JupyterApp): kernel_spec_manager = Instance(KernelSpecManager) - def _kernel_spec_manager_default(self): + def _kernel_spec_manager_default(self) -> KernelSpecManager: return KernelSpecManager(data_dir=self.data_dir, parent=self) flags = { @@ -187,7 +188,7 @@ def _kernel_spec_manager_default(self): } flags.update(JupyterApp.flags) - def parse_command_line(self, argv): + def parse_command_line(self, argv: list[str] | None) -> None: # type:ignore[override] """Parse the command line args.""" super().parse_command_line(argv) # accept positional arg as profile name @@ -196,7 +197,7 @@ def parse_command_line(self, argv): else: self.exit("No kernelspec specified.") - def start(self): + def start(self) -> None: """Start the application.""" self.kernel_spec_manager.ensure_native_kernel = False spec_paths = self.kernel_spec_manager.find_kernel_specs() @@ -233,7 +234,7 @@ class InstallNativeKernelSpec(JupyterApp): description = """[DEPRECATED] Install the IPython kernel spec directory for this Python.""" kernel_spec_manager = Instance(KernelSpecManager) - def _kernel_spec_manager_default(self): # pragma: no cover + def _kernel_spec_manager_default(self) -> KernelSpecManager: # pragma: no cover return KernelSpecManager(data_dir=self.data_dir) user = Bool( @@ -253,7 +254,7 @@ def _kernel_spec_manager_default(self): # pragma: no cover "debug": base_flags["debug"], } - def start(self): # pragma: no cover + def start(self) -> None: # pragma: no cover """Start the application.""" self.log.warning( "`jupyter kernelspec install-self` is DEPRECATED as of 4.0." @@ -275,7 +276,7 @@ def start(self): # pragma: no cover file=sys.stderr, ) self.exit(1) - self.exit(e) + self.exit(e) # type:ignore[arg-type] class ListProvisioners(JupyterApp): @@ -284,7 +285,7 @@ class ListProvisioners(JupyterApp): version = __version__ description = """List available provisioners for use in kernel specifications.""" - def start(self): + def start(self) -> None: """Start the application.""" kfp = KernelProvisionerFactory.instance(parent=self) print("Available kernel provisioners:") @@ -321,12 +322,10 @@ class KernelSpecApp(Application): } ) - aliases: t.Dict[t.Union[str, t.Tuple[str, ...]], t.Union[str, t.Tuple[str, str]]] = {} - flags: t.Dict[ - t.Union[str, t.Tuple[str, ...]], t.Tuple[t.Union[t.Dict[str, t.Any], Config], str] - ] = {} + aliases = {} + flags = {} - def start(self): + def start(self) -> None: """Start the application.""" if self.subapp is None: print("No subcommand specified. Must specify one of: %s" % list(self.subcommands)) diff --git a/jupyter_client/launcher.py b/jupyter_client/launcher.py index 6172916f..f0d07ad1 100644 --- a/jupyter_client/launcher.py +++ b/jupyter_client/launcher.py @@ -170,7 +170,7 @@ def launch_kernel( raise ex if sys.platform == "win32": - # Attach the interrupt event to the Popen objet so it can be used later. + # Attach the interrupt event to the Popen object so it can be used later. proc.win32_interrupt_event = interrupt_event # Clean up pipes created to work around Popen bug. 
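As a usage reference for the KernelSpecManager API annotated above (find_kernel_specs now advertises dict[str, str], get_kernel_spec returns a KernelSpec), here is a minimal sketch; it is not part of the diff and assumes jupyter_client is installed with at least one kernelspec, e.g. "python3":

    from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel

    ksm = KernelSpecManager()
    # find_kernel_specs() maps kernel names to resource directories
    for name, resource_dir in ksm.find_kernel_specs().items():
        print(name, "->", resource_dir)
    try:
        # get_kernel_spec() returns a KernelSpec; unknown names raise NoSuchKernel
        spec = ksm.get_kernel_spec("python3")
        print(spec.display_name, spec.argv)
    except NoSuchKernel as err:
        print(err)  # "No such kernel named python3"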
diff --git a/jupyter_client/localinterfaces.py b/jupyter_client/localinterfaces.py index afc5f712..ca684a6b 100644 --- a/jupyter_client/localinterfaces.py +++ b/jupyter_client/localinterfaces.py @@ -1,21 +1,23 @@ """Utilities for identifying local IP addresses.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. +from __future__ import annotations + import os import re import socket import subprocess from subprocess import PIPE, Popen -from typing import Iterable, List +from typing import Any, Callable, Iterable, Sequence from warnings import warn -LOCAL_IPS: List = [] -PUBLIC_IPS: List = [] +LOCAL_IPS: list = [] +PUBLIC_IPS: list = [] -LOCALHOST = "" +LOCALHOST: str = "" -def _uniq_stable(elems: Iterable) -> List: +def _uniq_stable(elems: Iterable) -> list: """uniq_stable(elems) -> list Return from an iterable, a list of all the unique elements in the input, @@ -30,7 +32,7 @@ def _uniq_stable(elems: Iterable) -> List: return value -def _get_output(cmd): +def _get_output(cmd: str | Sequence[str]) -> str: """Get output of a command, raising IOError if it fails""" startupinfo = None if os.name == "nt": @@ -44,24 +46,24 @@ def _get_output(cmd): return stdout.decode("utf8", "replace") -def _only_once(f): +def _only_once(f: Callable) -> Callable: """decorator to only run a function once""" - f.called = False + f.called = False # type:ignore[attr-defined] - def wrapped(**kwargs): - if f.called: + def wrapped(**kwargs: Any) -> Any: + if f.called: # type:ignore[attr-defined] return ret = f(**kwargs) - f.called = True + f.called = True # type:ignore[attr-defined] return ret return wrapped -def _requires_ips(f): +def _requires_ips(f: Callable) -> Callable: """decorator to ensure load_ips has been run before f""" - def ips_loaded(*args, **kwargs): + def ips_loaded(*args: Any, **kwargs: Any) -> Any: _load_ips() return f(*args, **kwargs) @@ -73,7 +75,7 @@ class NoIPAddresses(Exception): # noqa pass -def _populate_from_list(addrs): +def _populate_from_list(addrs: Sequence[str] | None) -> None: """populate local and public IPs from flat list of all IPs""" if not addrs: raise NoIPAddresses @@ -102,7 +104,7 @@ def _populate_from_list(addrs): _ifconfig_ipv4_pat = re.compile(r"inet\b.*?(\d+\.\d+\.\d+\.\d+)", re.IGNORECASE) -def _load_ips_ifconfig(): +def _load_ips_ifconfig() -> None: """load ip addresses from `ifconfig` output (posix)""" try: @@ -120,7 +122,7 @@ def _load_ips_ifconfig(): _populate_from_list(addrs) -def _load_ips_ip(): +def _load_ips_ip() -> None: """load ip addresses from `ip addr` output (Linux)""" out = _get_output(["ip", "-f", "inet", "addr"]) @@ -136,7 +138,7 @@ def _load_ips_ip(): _ipconfig_ipv4_pat = re.compile(r"ipv4.*?(\d+\.\d+\.\d+\.\d+)$", re.IGNORECASE) -def _load_ips_ipconfig(): +def _load_ips_ipconfig() -> None: """load ip addresses from `ipconfig` output (Windows)""" out = _get_output("ipconfig") @@ -149,9 +151,9 @@ def _load_ips_ipconfig(): _populate_from_list(addrs) -def _load_ips_netifaces(): +def _load_ips_netifaces() -> None: """load ip addresses with netifaces""" - import netifaces # type: ignore + import netifaces # type: ignore[import-not-found] global LOCALHOST local_ips = [] @@ -179,7 +181,7 @@ def _load_ips_netifaces(): PUBLIC_IPS[:] = _uniq_stable(public_ips) -def _load_ips_gethostbyname(): +def _load_ips_gethostbyname() -> None: """load ip addresses with socket.gethostbyname_ex This can be slow. 
@@ -211,7 +213,7 @@ def _load_ips_gethostbyname(): LOCALHOST = LOCAL_IPS[0] -def _load_ips_dumb(): +def _load_ips_dumb() -> None: """Fallback in case of unexpected failure""" global LOCALHOST LOCALHOST = "127.0.0.1" @@ -220,7 +222,7 @@ def _load_ips_dumb(): @_only_once -def _load_ips(suppress_exceptions=True): +def _load_ips(suppress_exceptions: bool = True) -> None: """load the IPs that point to this machine This function will only ever be called once. @@ -266,30 +268,30 @@ def _load_ips(suppress_exceptions=True): @_requires_ips -def local_ips(): +def local_ips() -> list[str]: """return the IP addresses that point to this machine""" return LOCAL_IPS @_requires_ips -def public_ips(): +def public_ips() -> list[str]: """return the IP addresses for this machine that are visible to other machines""" return PUBLIC_IPS @_requires_ips -def localhost(): +def localhost() -> str: """return ip for localhost (almost always 127.0.0.1)""" return LOCALHOST @_requires_ips -def is_local_ip(ip): +def is_local_ip(ip: str) -> bool: """does `ip` point to this machine?""" return ip in LOCAL_IPS @_requires_ips -def is_public_ip(ip): +def is_public_ip(ip: str) -> bool: """is `ip` a publicly visible address?""" return ip in PUBLIC_IPS diff --git a/jupyter_client/manager.py b/jupyter_client/manager.py index 564966ed..088acd6c 100644 --- a/jupyter_client/manager.py +++ b/jupyter_client/manager.py @@ -9,6 +9,7 @@ import sys import typing as t import uuid +import warnings from asyncio.futures import Future from concurrent.futures import Future as CFuture from contextlib import contextmanager @@ -19,6 +20,7 @@ from traitlets import ( Any, Bool, + Dict, DottedObjectName, Float, Instance, @@ -55,7 +57,7 @@ class _ShutdownStatus(Enum): SigkillRequest = "SigkillRequest" -F = t.TypeVar('F', bound=t.Callable[..., t.Any]) +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) def _get_future() -> t.Union[Future, CFuture]: @@ -76,7 +78,7 @@ def in_pending_state(method: F) -> F: @t.no_type_check @functools.wraps(method) - async def wrapper(self, *args, **kwargs): + async def wrapper(self: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any: """Create a future for the decorated method.""" if self._attempted_start or not self._ready: self._ready = _get_future() @@ -104,8 +106,17 @@ class KernelManager(ConnectionFileMixin): _ready: t.Optional[t.Union[Future, CFuture]] - def __init__(self, *args, **kwargs): + def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: """Initialize a kernel manager.""" + if args: + warnings.warn( + "Passing positional arguments to " + "`KernelManager.__init__` is deprecated since jupyter_client" + " 8.6, and will become an error in future versions. Positional " + "arguments have been ignored since jupyter_client 7.0", + DeprecationWarning, + stacklevel=2, + ) self._owns_kernel = kwargs.pop("owns_kernel", True) super().__init__(**kwargs) self._shutdown_status = _ShutdownStatus.Unset @@ -117,7 +128,7 @@ def __init__(self, *args, **kwargs): # The PyZMQ Context to use for communication with the kernel.
context: Instance = Instance(zmq.Context) - @default("context") # type:ignore[misc] + @default("context") def _context_default(self) -> zmq.Context: self._created_context = True return zmq.Context() @@ -128,11 +139,11 @@ def _context_default(self) -> zmq.Context: ) client_factory: Type = Type(klass=KernelClient) - @default("client_factory") # type:ignore[misc] + @default("client_factory") def _client_factory_default(self) -> Type: return import_item(self.client_class) - @observe("client_class") # type:ignore[misc] + @observe("client_class") def _client_class_changed(self, change: t.Dict[str, DottedObjectName]) -> None: self.client_factory = import_item(str(change["new"])) @@ -145,12 +156,12 @@ def _client_class_changed(self, change: t.Dict[str, DottedObjectName]) -> None: kernel_spec_manager: Instance = Instance(kernelspec.KernelSpecManager) - @default("kernel_spec_manager") # type:ignore[misc] + @default("kernel_spec_manager") def _kernel_spec_manager_default(self) -> kernelspec.KernelSpecManager: return kernelspec.KernelSpecManager(data_dir=self.data_dir) - @observe("kernel_spec_manager") # type:ignore[misc] - @observe_compat # type:ignore[misc] + @observe("kernel_spec_manager") + @observe_compat def _kernel_spec_manager_changed(self, change: t.Dict[str, Instance]) -> None: self._kernel_spec = None @@ -170,7 +181,7 @@ def _kernel_spec_manager_changed(self, change: t.Dict[str, Instance]) -> None: kernel_name: t.Union[str, Unicode] = Unicode(kernelspec.NATIVE_KERNEL_NAME) - @observe("kernel_name") # type:ignore[misc] + @observe("kernel_name") def _kernel_name_changed(self, change: t.Dict[str, str]) -> None: self._kernel_spec = None if change["new"] == "python": @@ -190,7 +201,7 @@ def kernel_spec(self) -> t.Optional[kernelspec.KernelSpec]: help="True if the MultiKernelManager should cache ports for this KernelManager instance", ) - @default("cache_ports") # type:ignore[misc] + @default("cache_ports") def _default_cache_ports(self) -> bool: return self.transport == "tcp" @@ -206,7 +217,7 @@ def ipykernel(self) -> bool: return self.kernel_name in {"python", "python2", "python3"} # Protected traits - _launch_args: Any = Any() + _launch_args: t.Optional["Dict[str, Any]"] = Dict(allow_none=True) _control_socket: Any = Any() _restarter: Any = Any() @@ -268,6 +279,27 @@ def client(self, **kwargs: t.Any) -> BlockingKernelClient: # Kernel management # -------------------------------------------------------------------------- + def update_env(self, *, env: t.Dict[str, str]) -> None: + """ + Update the environment of a kernel manager. + + This will take effect only after a kernel restart, when the new env is + passed to the new kernel. + + This is useful because some of the information in the current kernel + reflects the state of the session that started it, and that session + information (such as the attached file path or name) is mutable. + + .. versionadded:: 8.5 + """ + # Mypy thinks this is unreachable as it sees _launch_args as Dict, not t.Dict + if ( + isinstance(self._launch_args, dict) + and "env" in self._launch_args + and isinstance(self._launch_args["env"], dict) # type: ignore [unreachable] + ): + self._launch_args["env"].update(env) # type: ignore [unreachable] + def format_kernel_cmd(self, extra_arguments: t.Optional[t.List[str]] = None) -> t.List[str]: """Replace templated args (e.g.
{connection_file})""" extra_arguments = extra_arguments or [] @@ -292,19 +324,20 @@ def format_kernel_cmd(self, extra_arguments: t.Optional[t.List[str]] = None) -> # is not usable by non python kernels because the path is being rerouted when # inside of a store app. # See this bug here: https://bugs.python.org/issue41196 - ns = { + ns: t.Dict[str, t.Any] = { "connection_file": os.path.realpath(self.connection_file), "prefix": sys.prefix, } - if self.kernel_spec: + if self.kernel_spec: # type:ignore[truthy-bool] ns["resource_dir"] = self.kernel_spec.resource_dir + assert isinstance(self._launch_args, dict) ns.update(self._launch_args) pat = re.compile(r"\{([A-Za-z0-9_]+)\}") - def from_ns(match): + def from_ns(match: t.Any) -> t.Any: """Get the key out of ns if it's there, otherwise no change.""" return ns.get(match.group(1), match.group()) @@ -354,9 +387,10 @@ async def _async_pre_start_kernel( and launching the kernel (e.g. Popen kwargs). """ self.shutting_down = False - self.kernel_id = self.kernel_id or kw.pop('kernel_id', str(uuid.uuid4())) + self.kernel_id = self.kernel_id or kw.pop("kernel_id", str(uuid.uuid4())) # save kwargs for use in restart - self._launch_args = kw.copy() + # assigning Traitlets Dicts to Dict make mypy unhappy but is ok + self._launch_args = kw.copy() # type:ignore [assignment] if self.provisioner is None: # will not be None on restarts self.provisioner = KPF.instance(parent=self.parent).create_provisioner_instance( self.kernel_id, @@ -364,7 +398,7 @@ async def _async_pre_start_kernel( parent=self, ) kw = await self.provisioner.pre_launch(**kw) - kernel_cmd = kw.pop('cmd') + kernel_cmd = kw.pop("cmd") return kernel_cmd, kw pre_start_kernel = run_sync(_async_pre_start_kernel) @@ -688,51 +722,31 @@ class AsyncKernelManager(KernelManager): # The PyZMQ Context to use for communication with the kernel. 
context: Instance = Instance(zmq.asyncio.Context) - @default("context") # type:ignore[misc] + @default("context") def _context_default(self) -> zmq.asyncio.Context: self._created_context = True return zmq.asyncio.Context() - def client(self, **kwargs: t.Any) -> AsyncKernelClient: # type:ignore + def client( # type:ignore[override] + self, **kwargs: t.Any + ) -> AsyncKernelClient: """Get a client for the manager.""" - return super().client(**kwargs) # type:ignore + return super().client(**kwargs) # type:ignore[return-value] _launch_kernel = KernelManager._async_launch_kernel # type:ignore[assignment] - start_kernel: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_start_kernel # type:ignore[assignment] - pre_start_kernel: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_pre_start_kernel # type:ignore[assignment] - post_start_kernel: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_post_start_kernel # type:ignore[assignment] - request_shutdown: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_request_shutdown # type:ignore[assignment] - finish_shutdown: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_finish_shutdown # type:ignore[assignment] - cleanup_resources: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_cleanup_resources # type:ignore[assignment] - shutdown_kernel: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_shutdown_kernel # type:ignore[assignment] - restart_kernel: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_restart_kernel # type:ignore[assignment] + start_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_start_kernel # type:ignore[assignment] + pre_start_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_pre_start_kernel # type:ignore[assignment] + post_start_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_post_start_kernel # type:ignore[assignment] + request_shutdown: t.Callable[..., t.Awaitable] = KernelManager._async_request_shutdown # type:ignore[assignment] + finish_shutdown: t.Callable[..., t.Awaitable] = KernelManager._async_finish_shutdown # type:ignore[assignment] + cleanup_resources: t.Callable[..., t.Awaitable] = KernelManager._async_cleanup_resources # type:ignore[assignment] + shutdown_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_shutdown_kernel # type:ignore[assignment] + restart_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_restart_kernel # type:ignore[assignment] _send_kernel_sigterm = KernelManager._async_send_kernel_sigterm # type:ignore[assignment] _kill_kernel = KernelManager._async_kill_kernel # type:ignore[assignment] - interrupt_kernel: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_interrupt_kernel # type:ignore[assignment] - signal_kernel: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_signal_kernel # type:ignore[assignment] - is_alive: t.Callable[ - ..., t.Awaitable - ] = KernelManager._async_is_alive # type:ignore[assignment] + interrupt_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_interrupt_kernel # type:ignore[assignment] + signal_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_signal_kernel # type:ignore[assignment] + is_alive: t.Callable[..., t.Awaitable] = KernelManager._async_is_alive # type:ignore[assignment] KernelManagerABC.register(KernelManager) diff --git a/jupyter_client/managerabc.py b/jupyter_client/managerabc.py index 8e33069c..c74ea1dc 100644 --- a/jupyter_client/managerabc.py +++ b/jupyter_client/managerabc.py @@ -2,6 +2,7 @@ # Copyright (c) Jupyter 
Development Team. # Distributed under the terms of the Modified BSD License. import abc +from typing import Any class KernelManagerABC(metaclass=abc.ABCMeta): @@ -9,11 +10,11 @@ class KernelManagerABC(metaclass=abc.ABCMeta): The docstrings for this class can be found in the base implementation: - `jupyter_client.kernelmanager.KernelManager` + `jupyter_client.manager.KernelManager` """ @abc.abstractproperty - def kernel(self): + def kernel(self) -> Any: pass # -------------------------------------------------------------------------- @@ -21,35 +22,35 @@ def kernel(self): # -------------------------------------------------------------------------- @abc.abstractmethod - def start_kernel(self, **kw): + def start_kernel(self, **kw: Any) -> None: """Start the kernel.""" pass @abc.abstractmethod - def shutdown_kernel(self, now=False, restart=False): + def shutdown_kernel(self, now: bool = False, restart: bool = False) -> None: """Shut down the kernel.""" pass @abc.abstractmethod - def restart_kernel(self, now=False, **kw): + def restart_kernel(self, now: bool = False, **kw: Any) -> None: """Restart the kernel.""" pass @abc.abstractproperty - def has_kernel(self): + def has_kernel(self) -> bool: pass @abc.abstractmethod - def interrupt_kernel(self): + def interrupt_kernel(self) -> None: """Interrupt the kernel.""" pass @abc.abstractmethod - def signal_kernel(self, signum): + def signal_kernel(self, signum: int) -> None: """Send a signal to the kernel.""" pass @abc.abstractmethod - def is_alive(self): + def is_alive(self) -> bool: """Test whether the kernel is alive.""" pass diff --git a/jupyter_client/multikernelmanager.py b/jupyter_client/multikernelmanager.py index 16dbe410..d14a3f84 100644 --- a/jupyter_client/multikernelmanager.py +++ b/jupyter_client/multikernelmanager.py @@ -1,6 +1,8 @@ """A kernel manager for multiple kernels""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
+from __future__ import annotations + import asyncio import json import os @@ -31,7 +33,7 @@ def kernel_method(f: t.Callable) -> t.Callable: @wraps(f) def wrapped( self: t.Any, kernel_id: str, *args: t.Any, **kwargs: t.Any - ) -> t.Union[t.Callable, t.Awaitable]: + ) -> t.Callable | t.Awaitable: # get the kernel km = self.get_kernel(kernel_id) method = getattr(km, f.__name__) @@ -63,13 +65,13 @@ class MultiKernelManager(LoggingConfigurable): ).tag(config=True) @observe("kernel_manager_class") - def _kernel_manager_class_changed(self, change): + def _kernel_manager_class_changed(self, change: t.Any) -> None: self.kernel_manager_factory = self._create_kernel_manager_factory() kernel_manager_factory = Any(help="this is kernel_manager_class after import") @default("kernel_manager_factory") - def _kernel_manager_factory_default(self): + def _kernel_manager_factory_default(self) -> t.Callable: return self._create_kernel_manager_factory() def _create_kernel_manager_factory(self) -> t.Callable: @@ -98,11 +100,11 @@ def create_kernel_manager(*args: t.Any, **kwargs: t.Any) -> KernelManager: _pending_kernels = Dict() @property - def _starting_kernels(self): + def _starting_kernels(self) -> dict: """A shim for backwards compatibility.""" return self._pending_kernels - @default("context") # type:ignore[misc] + @default("context") def _context_default(self) -> zmq.Context: self._created_context = True return zmq.Context() @@ -112,11 +114,11 @@ def _context_default(self) -> zmq.Context: _kernels = Dict() - def __init__(self, *args, **kwargs): + def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: super().__init__(*args, **kwargs) - self.kernel_id_to_connection_file = {} + self.kernel_id_to_connection_file: dict[str, Path] = {} - def __del__(self): + def __del__(self) -> None: """Handle garbage collection. Destroy context if applicable.""" if self._created_context and self.context and not self.context.closed: if self.log: @@ -129,7 +131,7 @@ def __del__(self): else: super_del() - def list_kernel_ids(self) -> t.List[str]: + def list_kernel_ids(self) -> list[str]: """Return a list of the kernel ids of the active kernels.""" if self.external_connection_dir is not None: external_connection_dir = Path(self.external_connection_dir) @@ -188,8 +190,8 @@ def __contains__(self, kernel_id: str) -> bool: return kernel_id in self._kernels def pre_start_kernel( - self, kernel_name: t.Optional[str], kwargs: t.Any - ) -> t.Tuple[KernelManager, str, str]: + self, kernel_name: str | None, kwargs: t.Any + ) -> tuple[KernelManager, str, str]: # kwargs should be mutable, passing it as a dict argument. kernel_id = kwargs.pop("kernel_id", self.new_kernel_id(**kwargs)) if kernel_id in self: @@ -212,6 +214,17 @@ def pre_start_kernel( ) return km, kernel_name, kernel_id + def update_env(self, *, kernel_id: str, env: t.Dict[str, str]) -> None: + """ + Update the environment of the given kernel. + + Forward the env update request to the corresponding kernel. + + ..
versionadded:: 8.5 + """ + if kernel_id in self: + self._kernels[kernel_id].update_env(env=env) + async def _add_kernel_when_ready( self, kernel_id: str, km: KernelManager, kernel_awaitable: t.Awaitable ) -> None: @@ -232,15 +245,13 @@ async def _remove_kernel_when_ready( except Exception as e: self.log.exception(e) - def _using_pending_kernels(self): + def _using_pending_kernels(self) -> bool: """Returns a boolean; a clearer method for determining if this multikernelmanager is using pending kernels or not """ - return getattr(self, 'use_pending_kernels', False) + return getattr(self, "use_pending_kernels", False) - async def _async_start_kernel( - self, *, kernel_name: t.Optional[str] = None, **kwargs: t.Any - ) -> str: + async def _async_start_kernel(self, *, kernel_name: str | None = None, **kwargs: t.Any) -> str: """Start a new kernel. The caller can pick a kernel_id by passing one in as a keyword arg, @@ -250,12 +261,12 @@ async def _async_start_kernel( """ km, kernel_name, kernel_id = self.pre_start_kernel(kernel_name, kwargs) if not isinstance(km, KernelManager): - self.log.warning( + self.log.warning( # type:ignore[unreachable] "Kernel manager class ({km_class}) is not an instance of 'KernelManager'!".format( km_class=self.kernel_manager_class.__class__ ) ) - kwargs['kernel_id'] = kernel_id # Make kernel_id available to manager and provisioner + kwargs["kernel_id"] = kernel_id # Make kernel_id available to manager and provisioner starter = ensure_async(km.start_kernel(**kwargs)) task = asyncio.create_task(self._add_kernel_when_ready(kernel_id, km, starter)) @@ -269,7 +280,7 @@ async def _async_start_kernel( await task # raise an exception if one occurred during kernel startup. if km.ready.exception(): - raise km.ready.exception() # type: ignore + raise km.ready.exception() # type: ignore[misc] return kernel_id @@ -278,8 +289,8 @@ async def _async_start_kernel( async def _async_shutdown_kernel( self, kernel_id: str, - now: t.Optional[bool] = False, - restart: t.Optional[bool] = False, + now: bool | None = False, + restart: bool | None = False, ) -> None: """Shutdown a kernel by its kernel uuid. @@ -318,20 +329,20 @@ async def _async_shutdown_kernel( await fut # raise an exception if one occurred during kernel shutdown. if km.ready.exception(): - raise km.ready.exception() # type: ignore + raise km.ready.exception() # type: ignore[misc] shutdown_kernel = run_sync(_async_shutdown_kernel) @kernel_method - def request_shutdown(self, kernel_id: str, restart: t.Optional[bool] = False) -> None: + def request_shutdown(self, kernel_id: str, restart: bool | None = False) -> None: """Ask a kernel to shut down by its kernel uuid""" @kernel_method def finish_shutdown( self, kernel_id: str, - waittime: t.Optional[float] = None, - pollinterval: t.Optional[float] = 0.1, + waittime: float | None = None, + pollinterval: float | None = 0.1, ) -> None: """Wait for a kernel to finish shutting down, and kill it if it doesn't""" self.log.info("Kernel shutdown: %s", kernel_id) @@ -468,7 +479,7 @@ def remove_restart_callback( """remove a callback for the KernelRestarter""" @kernel_method - def get_connection_info(self, kernel_id: str) -> t.Dict[str, t.Any]: # type:ignore[empty-body] + def get_connection_info(self, kernel_id: str) -> dict[str, t.Any]: # type:ignore[empty-body] """Return a dictionary of connection data for a kernel.
Parameters @@ -487,7 +498,7 @@ def get_connection_info(self, kernel_id: str) -> t.Dict[str, t.Any]: # type:ign @kernel_method def connect_iopub( # type:ignore[empty-body] - self, kernel_id: str, identity: t.Optional[bytes] = None + self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the iopub channel. @@ -505,7 +516,7 @@ def connect_iopub( # type:ignore[empty-body] @kernel_method def connect_shell( # type:ignore[empty-body] - self, kernel_id: str, identity: t.Optional[bytes] = None + self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the shell channel. @@ -523,7 +534,7 @@ def connect_shell( # type:ignore[empty-body] @kernel_method def connect_control( # type:ignore[empty-body] - self, kernel_id: str, identity: t.Optional[bytes] = None + self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the control channel. @@ -541,7 +552,7 @@ def connect_control( # type:ignore[empty-body] @kernel_method def connect_stdin( # type:ignore[empty-body] - self, kernel_id: str, identity: t.Optional[bytes] = None + self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the stdin channel. @@ -559,7 +570,7 @@ def connect_stdin( # type:ignore[empty-body] @kernel_method def connect_hb( # type:ignore[empty-body] - self, kernel_id: str, identity: t.Optional[bytes] = None + self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the hb channel. @@ -602,20 +613,12 @@ class AsyncMultiKernelManager(MultiKernelManager): context = Instance("zmq.asyncio.Context") - @default("context") # type:ignore[misc] + @default("context") def _context_default(self) -> zmq.asyncio.Context: self._created_context = True return zmq.asyncio.Context() - start_kernel: t.Callable[ - ..., t.Awaitable - ] = MultiKernelManager._async_start_kernel # type:ignore[assignment] - restart_kernel: t.Callable[ - ..., t.Awaitable - ] = MultiKernelManager._async_restart_kernel # type:ignore[assignment] - shutdown_kernel: t.Callable[ - ..., t.Awaitable - ] = MultiKernelManager._async_shutdown_kernel # type:ignore[assignment] - shutdown_all: t.Callable[ - ..., t.Awaitable - ] = MultiKernelManager._async_shutdown_all # type:ignore[assignment] + start_kernel: t.Callable[..., t.Awaitable] = MultiKernelManager._async_start_kernel # type:ignore[assignment] + restart_kernel: t.Callable[..., t.Awaitable] = MultiKernelManager._async_restart_kernel # type:ignore[assignment] + shutdown_kernel: t.Callable[..., t.Awaitable] = MultiKernelManager._async_shutdown_kernel # type:ignore[assignment] + shutdown_all: t.Callable[..., t.Awaitable] = MultiKernelManager._async_shutdown_all # type:ignore[assignment] diff --git a/jupyter_client/provisioning/factory.py b/jupyter_client/provisioning/factory.py index de2b6a2d..bad7c15c 100644 --- a/jupyter_client/provisioning/factory.py +++ b/jupyter_client/provisioning/factory.py @@ -8,7 +8,7 @@ # See compatibility note on `group` keyword in https://docs.python.org/3/library/importlib.metadata.html#entry-points if sys.version_info < (3, 10): # pragma: no cover - from importlib_metadata import EntryPoint, entry_points + from importlib_metadata import EntryPoint, entry_points # type:ignore[import-not-found] else: # pragma: no cover from importlib.metadata import EntryPoint, entry_points @@ -32,7 +32,7 @@ class KernelProvisionerFactory(SingletonConfigurable): 
:class:`LocalProvisioner`. """ - GROUP_NAME = 'jupyter_client.kernel_provisioners' + GROUP_NAME = "jupyter_client.kernel_provisioners" provisioners: Dict[str, EntryPoint] = {} default_provisioner_name_env = "JUPYTER_DEFAULT_PROVISIONER_NAME" @@ -42,8 +42,8 @@ class KernelProvisionerFactory(SingletonConfigurable): entry is present in the kernelspec.""", ) - @default('default_provisioner_name') - def _default_provisioner_name_default(self): + @default("default_provisioner_name") + def _default_provisioner_name_default(self) -> str: """The default provisioner name.""" return getenv(self.default_provisioner_name_env, "local-provisioner") @@ -63,7 +63,7 @@ def is_provisioner_available(self, kernel_spec: Any) -> bool: """ is_available: bool = True provisioner_cfg = self._get_provisioner_config(kernel_spec) - provisioner_name = str(provisioner_cfg.get('provisioner_name')) + provisioner_name = str(provisioner_cfg.get("provisioner_name")) if not self._check_availability(provisioner_name): is_available = False self.log.warning( @@ -87,7 +87,7 @@ def create_provisioner_instance( `ModuleNotFoundError` is raised. """ provisioner_cfg = self._get_provisioner_config(kernel_spec) - provisioner_name = str(provisioner_cfg.get('provisioner_name')) + provisioner_name = str(provisioner_cfg.get("provisioner_name")) if not self._check_availability(provisioner_name): msg = f"Kernel provisioner '{provisioner_name}' has not been registered." raise ModuleNotFoundError(msg) @@ -97,7 +97,7 @@ def create_provisioner_instance( f"kernel provisioner: {provisioner_name}" ) provisioner_class = self.provisioners[provisioner_name].load() - provisioner_config = provisioner_cfg.get('config') + provisioner_config = provisioner_cfg.get("config") provisioner: KernelProvisionerBase = provisioner_class( kernel_id=kernel_id, kernel_spec=kernel_spec, parent=parent, **provisioner_config ) @@ -142,10 +142,10 @@ def _get_provisioner_config(self, kernel_spec: Any) -> Dict[str, Any]: the default information. If no `config` sub-dictionary exists, an empty `config` dictionary will be added. """ - env_provisioner = kernel_spec.metadata.get('kernel_provisioner', {}) - if 'provisioner_name' in env_provisioner: # If no provisioner_name, return default + env_provisioner = kernel_spec.metadata.get("kernel_provisioner", {}) + if "provisioner_name" in env_provisioner: # If no provisioner_name, return default if ( - 'config' not in env_provisioner + "config" not in env_provisioner ): # if provisioner_name, but no config stanza, add one env_provisioner.update({"config": {}}) return env_provisioner # Return what we found (plus config stanza if necessary) @@ -182,7 +182,7 @@ def _get_provisioner(self, name: str) -> EntryPoint: # resulting in a violation of a supposed invariant condition. To address this scenario, # we will log a warning message indicating this situation, then build the entrypoint # instance ourselves - since we have that information. - if name == 'local-provisioner': + if name == "local-provisioner": distros = glob.glob(f"{path.dirname(path.dirname(__file__))}-*") self.log.warning( f"Kernel Provisioning: The 'local-provisioner' is not found. 
This is likely " @@ -194,7 +194,7 @@ def _get_provisioner(self, name: str) -> EntryPoint: f"and used.\nThe candidate distribution locations are: {distros}" ) return EntryPoint( - 'local-provisioner', 'jupyter_client.provisioning', 'LocalProvisioner' + "local-provisioner", "jupyter_client.provisioning", "LocalProvisioner" ) raise diff --git a/jupyter_client/provisioning/local_provisioner.py b/jupyter_client/provisioning/local_provisioner.py index be79eedf..42d8d32d 100644 --- a/jupyter_client/provisioning/local_provisioner.py +++ b/jupyter_client/provisioning/local_provisioner.py @@ -5,7 +5,7 @@ import os import signal import sys -from typing import Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional from ..connect import KernelConnectionInfo, LocalPortCache from ..launcher import launch_kernel @@ -40,7 +40,7 @@ async def poll(self) -> Optional[int]: """Poll the provisioner.""" ret = 0 if self.process: - ret = self.process.poll() + ret = self.process.poll() # type:ignore[unreachable] return ret async def wait(self) -> Optional[int]: @@ -51,13 +51,13 @@ async def wait(self) -> Optional[int]: # not alive. If we find the process is no longer alive, complete # its cleanup via the blocking wait(). Callers are responsible for # issuing calls to wait() using a timeout (see kill()). - while await self.poll() is None: + while await self.poll() is None: # type:ignore[unreachable] await asyncio.sleep(0.1) # Process is no longer alive, wait and clear ret = self.process.wait() # Make sure all the fds get closed. - for attr in ['stdout', 'stderr', 'stdin']: + for attr in ["stdout", "stderr", "stdin"]: fid = getattr(self.process, attr) if fid: fid.close() @@ -74,7 +74,7 @@ async def send_signal(self, signum: int) -> None: applicable code on Windows in that case. """ if self.process: - if signum == signal.SIGINT and sys.platform == 'win32': + if signum == signal.SIGINT and sys.platform == "win32": # type:ignore[unreachable] from ..win_interrupt import send_interrupt send_interrupt(self.process.win32_interrupt_event) @@ -95,7 +95,7 @@ async def send_signal(self, signum: int) -> None: async def kill(self, restart: bool = False) -> None: """Kill the provisioner and optionally restart.""" if self.process: - if hasattr(signal, "SIGKILL"): + if hasattr(signal, "SIGKILL"): # type:ignore[unreachable] # If available, give preference to signalling the process-group over `kill()`. try: await self.send_signal(signal.SIGKILL) @@ -110,7 +110,7 @@ async def kill(self, restart: bool = False) -> None: async def terminate(self, restart: bool = False) -> None: """Terminate the provisioner and optionally restart.""" if self.process: - if hasattr(signal, "SIGTERM"): + if hasattr(signal, "SIGTERM"): # type:ignore[unreachable] # If available, give preference to signalling the process group over `terminate()`. try: await self.send_signal(signal.SIGTERM) @@ -126,7 +126,7 @@ async def terminate(self, restart: bool = False) -> None: def _tolerate_no_process(os_error: OSError) -> None: # In Windows, we will get an Access Denied error if the process # has already terminated. Ignore it. 
- if sys.platform == 'win32': + if sys.platform == "win32": if os_error.winerror != 5: raise # On Unix, we may get an ESRCH error (or ProcessLookupError instance) if @@ -143,13 +143,15 @@ async def cleanup(self, restart: bool = False) -> None: # provisioner is about to be destroyed, return cached ports lpc = LocalPortCache.instance() ports = ( - self.connection_info['shell_port'], - self.connection_info['iopub_port'], - self.connection_info['stdin_port'], - self.connection_info['hb_port'], - self.connection_info['control_port'], + self.connection_info["shell_port"], + self.connection_info["iopub_port"], + self.connection_info["stdin_port"], + self.connection_info["hb_port"], + self.connection_info["control_port"], ) for port in ports: + if TYPE_CHECKING: + assert isinstance(port, int) lpc.return_port(port) async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: @@ -164,17 +166,17 @@ async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: # This should be considered temporary until a better division of labor can be defined. km = self.parent if km: - if km.transport == 'tcp' and not is_local_ip(km.ip): + if km.transport == "tcp" and not is_local_ip(km.ip): msg = ( "Can only launch a kernel on a local interface. " - "This one is not: {}." + f"This one is not: {km.ip}. " "Make sure that the '*_address' attributes are " "configured properly. " - "Currently valid addresses are: {}".format(km.ip, local_ips()) + f"Currently valid addresses are: {local_ips()}" ) raise RuntimeError(msg) # build the Popen cmd - extra_arguments = kwargs.pop('extra_arguments', []) + extra_arguments = kwargs.pop("extra_arguments", []) # write connection file / get default ports # TODO - change when handshake pattern is adopted @@ -186,8 +188,8 @@ async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: km.hb_port = lpc.find_available_port(km.ip) km.control_port = lpc.find_available_port(km.ip) self.ports_cached = True - if 'env' in kwargs: - jupyter_session = kwargs['env'].get("JPY_SESSION_NAME", "") + if "env" in kwargs: + jupyter_session = kwargs["env"].get("JPY_SESSION_NAME", "") km.write_connection_file(jupyter_session=jupyter_session) else: km.write_connection_file() @@ -197,7 +199,7 @@ async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: extra_arguments=extra_arguments ) # This needs to remain here for b/c else: - extra_arguments = kwargs.pop('extra_arguments', []) + extra_arguments = kwargs.pop("extra_arguments", []) kernel_cmd = self.kernel_spec.argv + extra_arguments return await super().pre_launch(cmd=kernel_cmd, **kwargs) @@ -220,7 +222,7 @@ async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnection @staticmethod def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]: """Remove any keyword arguments that Popen does not tolerate.""" - keywords_to_scrub: List[str] = ['extra_arguments', 'kernel_id'] + keywords_to_scrub: List[str] = ["extra_arguments", "kernel_id"] scrubbed_kwargs = kwargs.copy() for kw in keywords_to_scrub: scrubbed_kwargs.pop(kw, None) @@ -229,12 +231,12 @@ def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]: async def get_provisioner_info(self) -> Dict: """Captures the base information necessary for persistence relative to this instance.""" provisioner_info = await super().get_provisioner_info() - provisioner_info.update({'pid': self.pid, 'pgid': self.pgid, 'ip': self.ip}) + provisioner_info.update({"pid": self.pid, "pgid": self.pgid, "ip": self.ip}) return provisioner_info async def load_provisioner_info(self, provisioner_info: Dict) ->
None: """Loads the base information necessary for persistence relative to this instance.""" await super().load_provisioner_info(provisioner_info) - self.pid = provisioner_info['pid'] - self.pgid = provisioner_info['pgid'] - self.ip = provisioner_info['ip'] + self.pid = provisioner_info["pid"] + self.pgid = provisioner_info["pgid"] + self.ip = provisioner_info["ip"] diff --git a/jupyter_client/provisioning/provisioner_base.py b/jupyter_client/provisioning/provisioner_base.py index 7ab2a9b9..eff89432 100644 --- a/jupyter_client/provisioning/provisioner_base.py +++ b/jupyter_client/provisioning/provisioner_base.py @@ -10,7 +10,7 @@ from ..connect import KernelConnectionInfo -class KernelProvisionerMeta(ABCMeta, type(LoggingConfigurable)): # type: ignore +class KernelProvisionerMeta(ABCMeta, type(LoggingConfigurable)): # type: ignore[misc] pass @@ -29,7 +29,7 @@ class KernelProvisionerBase( # type:ignore[misc] """ # The kernel specification associated with this provisioner - kernel_spec: Any = Instance('jupyter_client.kernelspec.KernelSpec', allow_none=True) + kernel_spec: Any = Instance("jupyter_client.kernelspec.KernelSpec", allow_none=True) kernel_id: Union[str, Unicode] = Unicode(None, allow_none=True) connection_info: KernelConnectionInfo = {} @@ -154,10 +154,10 @@ async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: Returns the (potentially updated) keyword arguments that are passed to :meth:`launch_kernel()`. """ - env = kwargs.pop('env', os.environ).copy() + env = kwargs.pop("env", os.environ).copy() env.update(self.__apply_env_substitutions(env)) self._finalize_env(env) - kwargs['env'] = env + kwargs["env"] = env return kwargs @@ -181,8 +181,8 @@ async def get_provisioner_info(self) -> Dict[str, Any]: NOTE: The superclass method must always be called first to ensure proper serialization. """ provisioner_info: Dict[str, Any] = {} - provisioner_info['kernel_id'] = self.kernel_id - provisioner_info['connection_info'] = self.connection_info + provisioner_info["kernel_id"] = self.kernel_id + provisioner_info["connection_info"] = self.connection_info return provisioner_info async def load_provisioner_info(self, provisioner_info: Dict) -> None: @@ -196,8 +196,8 @@ async def load_provisioner_info(self, provisioner_info: Dict) -> None: NOTE: The superclass method must always be called first to ensure proper deserialization. """ - self.kernel_id = provisioner_info['kernel_id'] - self.connection_info = provisioner_info['connection_info'] + self.kernel_id = provisioner_info["kernel_id"] + self.connection_info = provisioner_info["connection_info"] def get_shutdown_wait_time(self, recommended: float = 5.0) -> float: """ @@ -231,7 +231,7 @@ def _finalize_env(self, env: Dict[str, str]) -> None: if self.kernel_spec.language and self.kernel_spec.language.lower().startswith("python"): # Don't allow PYTHONEXECUTABLE to be passed to kernel process. # If set, it can bork all the things. - env.pop('PYTHONEXECUTABLE', None) + env.pop("PYTHONEXECUTABLE", None) def __apply_env_substitutions(self, substitution_values: Dict[str, str]) -> Dict[str, str]: """ diff --git a/jupyter_client/restarter.py b/jupyter_client/restarter.py index 194ba907..d41890f6 100644 --- a/jupyter_client/restarter.py +++ b/jupyter_client/restarter.py @@ -7,7 +7,10 @@ """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
+from __future__ import annotations + import time +import typing as t from traitlets import Bool, Dict, Float, Instance, Integer, default from traitlets.config.configurable import LoggingConfigurable @@ -52,25 +55,25 @@ class KernelRestarter(LoggingConfigurable): _last_dead = Float() @default("_last_dead") - def _default_last_dead(self): + def _default_last_dead(self) -> float: return time.time() callbacks = Dict() - def _callbacks_default(self): + def _callbacks_default(self) -> dict[str, list]: return {"restart": [], "dead": []} - def start(self): + def start(self) -> None: """Start the polling of the kernel.""" msg = "Must be implemented in a subclass" raise NotImplementedError(msg) - def stop(self): + def stop(self) -> None: """Stop the kernel polling.""" msg = "Must be implemented in a subclass" raise NotImplementedError(msg) - def add_callback(self, f, event="restart"): + def add_callback(self, f: t.Callable[..., t.Any], event: str = "restart") -> None: """register a callback to fire on a particular event Possible values for event: @@ -81,7 +84,7 @@ def add_callback(self, f, event="restart"): """ self.callbacks[event].append(f) - def remove_callback(self, f, event="restart"): + def remove_callback(self, f: t.Callable[..., t.Any], event: str = "restart") -> None: """unregister a callback to fire on a particular event Possible values for event: @@ -95,7 +98,7 @@ def remove_callback(self, f, event="restart"): except ValueError: pass - def _fire_callbacks(self, event): + def _fire_callbacks(self, event: t.Any) -> None: """fire our callbacks for a particular event""" for callback in self.callbacks[event]: try: @@ -108,7 +111,7 @@ def _fire_callbacks(self, event): exc_info=True, ) - def poll(self): + def poll(self) -> None: if self.debug: self.log.debug("Polling kernel...") if self.kernel_manager.shutting_down: diff --git a/jupyter_client/runapp.py b/jupyter_client/runapp.py index fb115852..9ed4b154 100644 --- a/jupyter_client/runapp.py +++ b/jupyter_client/runapp.py @@ -1,10 +1,13 @@ """A Jupyter console app to run files.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
+from __future__ import annotations + import queue import signal import sys import time +import typing as t from jupyter_core.application import JupyterApp, base_aliases, base_flags from traitlets import Any, Dict, Float @@ -35,7 +38,7 @@ frontend_flags = set(frontend_flags_dict.keys()) -class RunApp(JupyterApp, JupyterConsoleApp): +class RunApp(JupyterApp, JupyterConsoleApp): # type:ignore[misc] """A Jupyter Console app to run files.""" version = __version__ @@ -57,14 +60,14 @@ class RunApp(JupyterApp, JupyterConsoleApp): """, ) - def parse_command_line(self, argv=None): + def parse_command_line(self, argv: list[str] | None = None) -> None: """Parse the command line arguments.""" super().parse_command_line(argv) self.build_kernel_argv(self.extra_args) self.filenames_to_run = self.extra_args[:] @catch_config_error - def initialize(self, argv=None): + def initialize(self, argv: list[str] | None = None) -> None: # type:ignore[override] """Initialize the app.""" self.log.debug("jupyter run: initialize...") super().initialize(argv) @@ -72,14 +75,14 @@ def initialize(self, argv=None): signal.signal(signal.SIGINT, self.handle_sigint) self.init_kernel_info() - def handle_sigint(self, *args): + def handle_sigint(self, *args: t.Any) -> None: """Handle SIGINT.""" if self.kernel_manager: self.kernel_manager.interrupt_kernel() else: self.log.error("Cannot interrupt kernels we didn't start.\n") - def init_kernel_info(self): + def init_kernel_info(self) -> None: """Wait for a kernel to be ready, and store kernel info""" timeout = self.kernel_timeout tic = time.time() @@ -97,7 +100,7 @@ def init_kernel_info(self): self.kernel_info = reply["content"] return - def start(self): + def start(self) -> None: """Start the application.""" self.log.debug("jupyter run: starting...") super().start() diff --git a/jupyter_client/session.py b/jupyter_client/session.py index ca9d9bbe..c387cd06 100644 --- a/jupyter_client/session.py +++ b/jupyter_client/session.py @@ -10,6 +10,8 @@ """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License.
+from __future__ import annotations + import hashlib import hmac import json @@ -25,8 +27,6 @@ from hmac import compare_digest # We are using compare_digest to limit the surface of timing attacks -from typing import Optional, Union - import zmq.asyncio from tornado.ioloop import IOLoop from traitlets import ( @@ -61,7 +61,7 @@ # ----------------------------------------------------------------------------- -def squash_unicode(obj): +def squash_unicode(obj: t.Any) -> t.Any: """coerce unicode back to bytestrings.""" if isinstance(obj, dict): for key in list(obj.keys()): @@ -89,7 +89,7 @@ def squash_unicode(obj): # disallow nan, because it's not actually valid JSON -def json_packer(obj): +def json_packer(obj: t.Any) -> bytes: """Convert a json object to a bytes.""" try: return json.dumps( @@ -117,14 +117,14 @@ def json_packer(obj): return packed -def json_unpacker(s): +def json_unpacker(s: str | bytes) -> t.Any: """Convert a json bytes or string to an object.""" if isinstance(s, bytes): s = s.decode("utf8", "replace") return json.loads(s) -def pickle_packer(o): +def pickle_packer(o: t.Any) -> bytes: """Pack an object using the pickle module.""" return pickle.dumps(squash_dates(o), PICKLE_PROTOCOL) @@ -197,7 +197,7 @@ def default_secure(cfg: t.Any) -> None: # pragma: no cover def utcnow() -> datetime: """Return timezone-aware UTC timestamp""" - return datetime.utcnow().replace(tzinfo=utc) # noqa + return datetime.now(utc) # ----------------------------------------------------------------------------- @@ -212,7 +212,7 @@ class SessionFactory(LoggingConfigurable): logname = Unicode("") - @observe("logname") # type:ignore[misc] + @observe("logname") def _logname_changed(self, change: t.Any) -> None: self.log = logging.getLogger(change["new"]) @@ -226,10 +226,10 @@ def _context_default(self) -> zmq.Context: loop = Instance("tornado.ioloop.IOLoop") - def _loop_default(self): + def _loop_default(self) -> IOLoop: return IOLoop.current() - def __init__(self, **kwargs): + def __init__(self, **kwargs: t.Any) -> None: """Initialize a session factory.""" super().__init__(**kwargs) @@ -244,7 +244,7 @@ class Message: A Message can be created from a dict and a dict from a Message instance simply by calling dict(msg_obj).""" - def __init__(self, msg_dict: t.Dict[str, t.Any]) -> None: + def __init__(self, msg_dict: dict[str, t.Any]) -> None: """Initialize a message.""" dct = self.__dict__ for k, v in dict(msg_dict).items(): @@ -269,14 +269,16 @@ def __getitem__(self, k: str) -> t.Any: return self.__dict__[k] -def msg_header(msg_id: str, msg_type: str, username: str, session: "Session") -> t.Dict[str, t.Any]: +def msg_header( + msg_id: str, msg_type: str, username: str, session: Session | str +) -> dict[str, t.Any]: """Create a new message header""" date = utcnow() version = protocol_version return locals() -def extract_header(msg_or_header: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: +def extract_header(msg_or_header: dict[str, t.Any]) -> dict[str, t.Any]: """Given a message or header, return the header.""" if not msg_or_header: return {} @@ -357,7 +359,7 @@ class Session(Configurable): ) @observe("packer") - def _packer_changed(self, change): + def _packer_changed(self, change: t.Any) -> None: new = change["new"] if new.lower() == "json": self.pack = json_packer @@ -378,7 +380,7 @@ def _packer_changed(self, change): ) @observe("unpacker") - def _unpacker_changed(self, change): + def _unpacker_changed(self, change: t.Any) -> None: new = change["new"] if new.lower() == "json": self.pack = json_packer @@ -399,7 
+401,7 @@ def _session_default(self) -> str: return u @observe("session") - def _session_changed(self, change): + def _session_changed(self, change: t.Any) -> None: self.bsession = self.session.encode("ascii") # bsession is the session as bytes @@ -429,7 +431,7 @@ def _key_default(self) -> bytes: return new_id_bytes() @observe("key") - def _key_changed(self, change): + def _key_changed(self, change: t.Any) -> None: self._new_auth() signature_scheme = Unicode( @@ -440,7 +442,7 @@ def _key_changed(self, change): ) @observe("signature_scheme") - def _signature_scheme_changed(self, change): + def _signature_scheme_changed(self, change: t.Any) -> None: new = change["new"] if not new.startswith("hmac-"): raise TraitError("signature_scheme must start with 'hmac-', got %r" % new) @@ -477,7 +479,7 @@ def _new_auth(self) -> None: keyfile = Unicode("", config=True, help="""path to file containing execution key.""") @observe("keyfile") - def _keyfile_changed(self, change): + def _keyfile_changed(self, change: t.Any) -> None: with open(change["new"], "rb") as f: self.key = f.read().strip() @@ -489,7 +491,7 @@ def _keyfile_changed(self, change): pack = Any(default_packer) # the actual packer function @observe("pack") - def _pack_changed(self, change): + def _pack_changed(self, change: t.Any) -> None: new = change["new"] if not callable(new): raise TypeError("packer must be callable, not %s" % type(new)) @@ -497,7 +499,7 @@ def _pack_changed(self, change): unpack = Any(default_unpacker) # the actual packer function @observe("unpack") - def _unpack_changed(self, change): + def _unpack_changed(self, change: t.Any) -> None: # unpacker is not checked - it is assumed to be new = change["new"] if not callable(new): @@ -523,7 +525,7 @@ def _unpack_changed(self, change): """, ) - def __init__(self, **kwargs): + def __init__(self, **kwargs: t.Any) -> None: """create a Session object Parameters @@ -575,7 +577,7 @@ def __init__(self, **kwargs): "Message signing is disabled. This is insecure and not recommended!" ) - def clone(self) -> "Session": + def clone(self) -> Session: """Create a copy of this Session Useful when connecting multiple times to a given kernel. @@ -640,18 +642,18 @@ def _check_packers(self) -> None: self.pack = lambda o: pack(squash_dates(o)) self.unpack = lambda s: unpack(s) - def msg_header(self, msg_type: str) -> t.Dict[str, t.Any]: + def msg_header(self, msg_type: str) -> dict[str, t.Any]: """Create a header for a message type.""" return msg_header(self.msg_id, msg_type, self.username, self.session) def msg( self, msg_type: str, - content: t.Optional[t.Dict] = None, - parent: t.Optional[t.Dict[str, t.Any]] = None, - header: t.Optional[t.Dict[str, t.Any]] = None, - metadata: t.Optional[t.Dict[str, t.Any]] = None, - ) -> t.Dict[str, t.Any]: + content: dict | None = None, + parent: dict[str, t.Any] | None = None, + header: dict[str, t.Any] | None = None, + metadata: dict[str, t.Any] | None = None, + ) -> dict[str, t.Any]: """Return the nested message dict. This format is different from what is sent over the wire. The @@ -670,7 +672,7 @@ def msg( msg["metadata"].update(metadata) return msg - def sign(self, msg_list: t.List) -> bytes: + def sign(self, msg_list: list) -> bytes: """Sign a message with HMAC digest. If no auth, return b''. 
Parameters @@ -687,9 +689,9 @@ def sign(self, msg_list: t.List) -> bytes: def serialize( self, - msg: t.Dict[str, t.Any], - ident: t.Optional[t.Union[t.List[bytes], bytes]] = None, - ) -> t.List[bytes]: + msg: dict[str, t.Any], + ident: list[bytes] | bytes | None = None, + ) -> list[bytes]: """Serialize the message components to bytes. This is roughly the inverse of deserialize. The serialize/deserialize @@ -751,16 +753,16 @@ def serialize( def send( self, - stream: Optional[Union[zmq.sugar.socket.Socket, ZMQStream]], - msg_or_type: t.Union[t.Dict[str, t.Any], str], - content: t.Optional[t.Dict[str, t.Any]] = None, - parent: t.Optional[t.Dict[str, t.Any]] = None, - ident: t.Optional[t.Union[bytes, t.List[bytes]]] = None, - buffers: t.Optional[t.List[bytes]] = None, + stream: zmq.sugar.socket.Socket | ZMQStream | None, + msg_or_type: dict[str, t.Any] | str, + content: dict[str, t.Any] | None = None, + parent: dict[str, t.Any] | None = None, + ident: bytes | list[bytes] | None = None, + buffers: list[bytes] | None = None, track: bool = False, - header: t.Optional[t.Dict[str, t.Any]] = None, - metadata: t.Optional[t.Dict[str, t.Any]] = None, - ) -> t.Optional[t.Dict[str, t.Any]]: + header: dict[str, t.Any] | None = None, + metadata: dict[str, t.Any] | None = None, + ) -> dict[str, t.Any] | None: """Build and send a message via stream or socket. The message format used by this function internally is as follows: @@ -809,7 +811,7 @@ def send( track = False if isinstance(stream, zmq.asyncio.Socket): - assert stream is not None + assert stream is not None # type:ignore[unreachable] stream = zmq.Socket.shadow(stream.underlying) if isinstance(msg_or_type, (Message, dict)): @@ -859,6 +861,8 @@ def send( # use dummy tracker, which will be done immediately tracker = DONE stream.send_multipart(to_send, copy=copy) + else: + tracker = DONE if self.debug: pprint.pprint(msg) # noqa @@ -872,10 +876,10 @@ def send( def send_raw( self, stream: zmq.sugar.socket.Socket, - msg_list: t.List, + msg_list: list, flags: int = 0, copy: bool = True, - ident: t.Optional[t.Union[bytes, t.List[bytes]]] = None, + ident: bytes | list[bytes] | None = None, ) -> None: """Send a raw message via ident path. @@ -912,7 +916,7 @@ def recv( mode: int = zmq.NOBLOCK, content: bool = True, copy: bool = True, - ) -> t.Tuple[t.Optional[t.List[bytes]], t.Optional[t.Dict[str, t.Any]]]: + ) -> tuple[list[bytes] | None, dict[str, t.Any] | None]: """Receive and unpack a message. Parameters @@ -926,8 +930,8 @@ def recv( [idents] is a list of idents and msg is a nested message dict of same format as self.msg returns. """ - if isinstance(socket, ZMQStream): - socket = socket.socket + if isinstance(socket, ZMQStream): # type:ignore[unreachable] + socket = socket.socket # type:ignore[unreachable] if isinstance(socket, zmq.asyncio.Socket): socket = zmq.Socket.shadow(socket.underlying) @@ -950,8 +954,8 @@ def recv( raise e def feed_identities( - self, msg_list: t.Union[t.List[bytes], t.List[zmq.Message]], copy: bool = True - ) -> t.Tuple[t.List[bytes], t.Union[t.List[bytes], t.List[zmq.Message]]]: + self, msg_list: list[bytes] | list[zmq.Message], copy: bool = True + ) -> tuple[list[bytes], list[bytes] | list[zmq.Message]]: """Split the identities from the rest of the message. 
Feed until DELIM is reached, then return the prefix as idents and @@ -1017,10 +1021,10 @@ def _cull_digest_history(self) -> None: def deserialize( self, - msg_list: t.Union[t.List[bytes], t.List[zmq.Message]], + msg_list: list[bytes] | list[zmq.Message], content: bool = True, copy: bool = True, - ) -> t.Dict[str, t.Any]: + ) -> dict[str, t.Any]: """Unserialize a msg_list to a nested message dict. This is roughly the inverse of serialize. The serialize/deserialize @@ -1092,7 +1096,7 @@ def deserialize( # adapt to the current version return adapt(message) - def unserialize(self, *args: t.Any, **kwargs: t.Any) -> t.Dict[str, t.Any]: + def unserialize(self, *args: t.Any, **kwargs: t.Any) -> dict[str, t.Any]: """**DEPRECATED** Use deserialize instead.""" # pragma: no cover warnings.warn( diff --git a/jupyter_client/ssh/forward.py b/jupyter_client/ssh/forward.py index 9a6f2870..e2f28d21 100644 --- a/jupyter_client/ssh/forward.py +++ b/jupyter_client/ssh/forward.py @@ -14,7 +14,7 @@ # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # -# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY +# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. @@ -85,7 +85,7 @@ def handle(self): logger.debug("Tunnel closed ") -def forward_tunnel(local_port, remote_host, remote_port, transport): +def forward_tunnel(local_port: int, remote_host: str, remote_port: int, transport: t.Any) -> None: """Forward an ssh tunnel.""" # this is a little convoluted, but lets me configure things for the Handler diff --git a/jupyter_client/ssh/tunnel.py b/jupyter_client/ssh/tunnel.py index 3400458b..3b1b533c 100644 --- a/jupyter_client/ssh/tunnel.py +++ b/jupyter_client/ssh/tunnel.py @@ -5,6 +5,8 @@ # Copyright (C) 2011- PyZMQ Developers # # Redistributed from IPython under the terms of the BSD License. +from __future__ import annotations + import atexit import os import re @@ -14,6 +16,7 @@ import warnings from getpass import getpass, getuser from multiprocessing import Process +from typing import Any, cast try: with warnings.catch_warnings(): @@ -24,19 +27,19 @@ except ImportError: paramiko = None # type:ignore[assignment] - class SSHException(Exception): # type: ignore # noqa + class SSHException(Exception): # type:ignore[no-redef] # noqa pass else: from .forward import forward_tunnel try: - import pexpect # type: ignore + import pexpect # type: ignore[import-untyped] except ImportError: pexpect = None -def select_random_ports(n): +def select_random_ports(n: int) -> list[int]: """Select and return n random ports that are available.""" ports = [] sockets = [] @@ -53,10 +56,10 @@ def select_random_ports(n): # ----------------------------------------------------------------------------- # Check for passwordless login # ----------------------------------------------------------------------------- -_password_pat = re.compile((br"pass(word|phrase):"), re.IGNORECASE) +_password_pat = re.compile((rb"pass(word|phrase):"), re.IGNORECASE) -def try_passwordless_ssh(server, keyfile, paramiko=None): +def try_passwordless_ssh(server: str, keyfile: str | None, paramiko: Any = None) -> Any: """Attempt to make an ssh connection without a password. This is mainly used for requiring password input only once when many tunnels may be connected to the same server. 
@@ -69,7 +72,7 @@ def try_passwordless_ssh(server, keyfile, paramiko=None): return f(server, keyfile) -def _try_passwordless_openssh(server, keyfile): +def _try_passwordless_openssh(server: str, keyfile: str | None) -> bool: """Try passwordless login with shell ssh command.""" if pexpect is None: msg = "pexpect unavailable, use paramiko" @@ -99,10 +102,10 @@ def _try_passwordless_openssh(server, keyfile): return False -def _try_passwordless_paramiko(server, keyfile): +def _try_passwordless_paramiko(server: str, keyfile: str | None) -> bool: """Try passwordless login with paramiko.""" if paramiko is None: - msg = "Paramiko unavailable, " + msg = "Paramiko unavailable, " # type:ignore[unreachable] if sys.platform == "win32": msg += "Paramiko is required for ssh tunneled connections on Windows." else: @@ -121,7 +124,15 @@ def _try_passwordless_paramiko(server, keyfile): return True -def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60): +def tunnel_connection( + socket: socket.socket, + addr: str, + server: str, + keyfile: str | None = None, + password: str | None = None, + paramiko: Any = None, + timeout: int = 60, +) -> int: """Connect a socket to an address via an ssh tunnel. This is a wrapper for socket.connect(addr), when addr is not accessible @@ -142,7 +153,14 @@ def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramik return tunnel -def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60): +def open_tunnel( + addr: str, + server: str, + keyfile: str | None = None, + password: str | None = None, + paramiko: Any = None, + timeout: int = 60, +) -> tuple[str, int]: """Open a tunneled connection from a 0MQ url. For use inside tunnel_connection. @@ -157,25 +175,31 @@ def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeou lport = select_random_ports(1)[0] _, addr = addr.split("://") ip, rport = addr.split(":") - rport = int(rport) + rport_int = int(rport) paramiko = sys.platform == "win32" if paramiko is None else paramiko_tunnel tunnelf = paramiko_tunnel if paramiko else openssh_tunnel tunnel = tunnelf( lport, - rport, + rport_int, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout, ) - return "tcp://127.0.0.1:%i" % lport, tunnel + return "tcp://127.0.0.1:%i" % lport, cast(int, tunnel) def openssh_tunnel( - lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60 -): + lport: int, + rport: int, + server: str, + remoteip: str = "127.0.0.1", + keyfile: str | None = None, + password: str | None | bool = None, + timeout: int = 60, +) -> int: """Create an ssh tunnel using command-line ssh that connects port lport on this machine to localhost:rport on server. 
The tunnel will automatically close when not in use, remaining open @@ -277,26 +301,32 @@ def openssh_tunnel( failed = True -def _stop_tunnel(cmd): +def _stop_tunnel(cmd: Any) -> None: pexpect.run(cmd) -def _split_server(server): +def _split_server(server: str) -> tuple[str, str, int]: if "@" in server: username, server = server.split("@", 1) else: username = getuser() if ":" in server: - server, port = server.split(":") - port = int(port) + server, port_str = server.split(":") + port = int(port_str) else: port = 22 return username, server, port def paramiko_tunnel( - lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60 -): + lport: int, + rport: int, + server: str, + remoteip: str = "127.0.0.1", + keyfile: str | None = None, + password: str | None = None, + timeout: float = 60, +) -> Process: """launch a tunnel with paramiko in a subprocess. This should only be used when shell ssh is unavailable (e.g. Windows). @@ -337,7 +367,7 @@ def paramiko_tunnel( """ if paramiko is None: - msg = "Paramiko not available" + msg = "Paramiko not available" # type:ignore[unreachable] raise ImportError(msg) if password is None and not _try_passwordless_paramiko(server, keyfile): @@ -353,7 +383,14 @@ def paramiko_tunnel( return p -def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None): +def _paramiko_tunnel( + lport: int, + rport: int, + server: str, + remoteip: str, + keyfile: str | None = None, + password: str | None = None, +) -> None: """Function for actually starting a paramiko tunnel, to be passed to multiprocessing.Process(target=this), and not called directly. """ diff --git a/jupyter_client/threaded.py b/jupyter_client/threaded.py index b0f28d92..48e0ae77 100644 --- a/jupyter_client/threaded.py +++ b/jupyter_client/threaded.py @@ -57,7 +57,7 @@ def __init__( self.ioloop = loop f: Future = Future() - def setup_stream(): + def setup_stream() -> None: try: assert self.socket is not None self.stream = zmqstream.ZMQStream(self.socket, self.ioloop) @@ -92,7 +92,7 @@ def close(self) -> None: # c.f.Future for threadsafe results f: Future = Future() - def close_stream(): + def close_stream() -> None: try: if self.stream is not None: self.stream.close(linger=0) @@ -129,7 +129,7 @@ def send(self, msg: Dict[str, Any]) -> None: thread control of the action.
""" - def thread_send(): + def thread_send() -> None: assert self.session is not None self.session.send(self.stream, msg) @@ -147,7 +147,7 @@ def _handle_recv(self, msg_list: List) -> None: msg = self.session.deserialize(smsg) # let client inspect messages if self._inspect: - self._inspect(msg) + self._inspect(msg) # type:ignore[unreachable] self.call_handlers(msg) def call_handlers(self, msg: Dict[str, Any]) -> None: @@ -192,7 +192,7 @@ def flush(self, timeout: float = 1.0) -> None: _msg = "Attempt to flush closed stream" raise OSError(_msg) - def flush(f): + def flush(f: Any) -> None: try: self._flush() except Exception as e: @@ -224,7 +224,7 @@ class IOLoopThread(Thread): _exiting = False ioloop = None - def __init__(self): + def __init__(self) -> None: """Initialize an io loop thread.""" super().__init__() self.daemon = True @@ -254,7 +254,7 @@ def run(self) -> None: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) - async def assign_ioloop(): + async def assign_ioloop() -> None: self.ioloop = IOLoop.current() loop.run_until_complete(assign_ioloop()) @@ -265,7 +265,7 @@ async def assign_ioloop(): loop.run_until_complete(self._async_run()) - async def _async_run(self): + async def _async_run(self) -> None: """Run forever (until self._exiting is set)""" while not self._exiting: await asyncio.sleep(1) @@ -282,7 +282,7 @@ def stop(self) -> None: self.close() self.ioloop = None - def __del__(self): + def __del__(self) -> None: self.close() def close(self) -> None: @@ -298,8 +298,10 @@ class ThreadedKernelClient(KernelClient): """A KernelClient that provides thread-safe sockets with async callbacks on message replies.""" @property - def ioloop(self): - return self.ioloop_thread.ioloop + def ioloop(self) -> Optional[IOLoop]: # type:ignore[override] + if self.ioloop_thread: + return self.ioloop_thread.ioloop + return None ioloop_thread = Instance(IOLoopThread, allow_none=True) @@ -329,14 +331,14 @@ def _check_kernel_info_reply(self, msg: Dict[str, Any]) -> None: def stop_channels(self) -> None: """Stop the channels on the client.""" super().stop_channels() - if self.ioloop_thread.is_alive(): + if self.ioloop_thread and self.ioloop_thread.is_alive(): self.ioloop_thread.stop() - iopub_channel_class = Type(ThreadedZMQSocketChannel) - shell_channel_class = Type(ThreadedZMQSocketChannel) - stdin_channel_class = Type(ThreadedZMQSocketChannel) - hb_channel_class = Type(HBChannel) - control_channel_class = Type(ThreadedZMQSocketChannel) + iopub_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] + shell_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] + stdin_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] + hb_channel_class = Type(HBChannel) # type:ignore[arg-type] + control_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] def is_alive(self) -> bool: """Is the kernel process still running?""" diff --git a/jupyter_client/utils.py b/jupyter_client/utils.py index ab1cbcaa..55577747 100644 --- a/jupyter_client/utils.py +++ b/jupyter_client/utils.py @@ -3,17 +3,21 @@ - provides utility wrappers to run asynchronous functions in a blocking environment. - vendor functions from ipython_genutils that should be retired at some point. 
""" +from __future__ import annotations + import os -from datetime import datetime, timedelta, tzinfo +from typing import Sequence from jupyter_core.utils import ensure_async, run_sync # noqa: F401 # noqa: F401 +from .session import utcnow # noqa + -def _filefind(filename, path_dirs=None): +def _filefind(filename: str, path_dirs: str | Sequence[str] | None = None) -> str: """Find a file by looking through a sequence of paths. This iterates through a sequence of paths looking for a file and returns - the full, absolute path of the first occurence of the file. If no set of + the full, absolute path of the first occurrence of the file. If no set of path dirs is given, the filename is tested as is, after running through :func:`expandvars` and :func:`expanduser`. Thus a simple call:: @@ -63,7 +67,7 @@ def _filefind(filename, path_dirs=None): raise OSError(msg) -def _expand_path(s): +def _expand_path(s: str) -> str: """Expand $VARS and ~names in a string, like a shell :Examples: @@ -84,35 +88,3 @@ def _expand_path(s): if os.name == "nt": s = s.replace("IPYTHON_TEMP", "$\\") return s - - -# constant for zero offset -ZERO = timedelta(0) - - -class tzUTC(tzinfo): # noqa - """tzinfo object for UTC (zero offset)""" - - def utcoffset(self, d): - """Compute utcoffset.""" - return ZERO - - def dst(self, d): - """Compute dst.""" - return ZERO - - -UTC = tzUTC() # type:ignore - - -def utc_aware(unaware): - """decorator for adding UTC tzinfo to datetime's utcfoo methods""" - - def utc_method(*args, **kwargs): - dt = unaware(*args, **kwargs) - return dt.replace(tzinfo=UTC) - - return utc_method - - -utcnow = utc_aware(datetime.utcnow) diff --git a/jupyter_client/win_interrupt.py b/jupyter_client/win_interrupt.py index 20a3a7f6..1ba2c3af 100644 --- a/jupyter_client/win_interrupt.py +++ b/jupyter_client/win_interrupt.py @@ -4,11 +4,10 @@ ipykernel.parentpoller.ParentPollerWindows for a Python implementation. """ import ctypes -from typing import no_type_check +from typing import Any -@no_type_check -def create_interrupt_event(): +def create_interrupt_event() -> Any: """Create an interrupt event handle. 
The parent process should call this to create the @@ -33,12 +32,14 @@ class SECURITY_ATTRIBUTES(ctypes.Structure): # noqa sa.lpSecurityDescriptor = 0 sa.bInheritHandle = 1 - return ctypes.windll.kernel32.CreateEventA( - sa_p, False, False, "" # lpEventAttributes # bManualReset # bInitialState + return ctypes.windll.kernel32.CreateEventA( # type:ignore[attr-defined] + sa_p, + False, + False, + "", # lpEventAttributes # bManualReset # bInitialState ) # lpName -@no_type_check -def send_interrupt(interrupt_handle): +def send_interrupt(interrupt_handle: Any) -> None: """Sends an interrupt event using the specified handle.""" - ctypes.windll.kernel32.SetEvent(interrupt_handle) + ctypes.windll.kernel32.SetEvent(interrupt_handle) # type:ignore[attr-defined] diff --git a/pyproject.toml b/pyproject.toml index 962a4c25..38ea8b70 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,11 +16,7 @@ classifiers = [ "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3" ] requires-python = ">=3.8" dependencies = [ @@ -102,31 +98,28 @@ test = "python -m pytest -vv --cov jupyter_client --cov-branch --cov-report term nowarn = "test -W default {args}" [tool.hatch.envs.typing] -features = ["test"] -dependencies = ["mypy>=0.990"] +dependencies = ["pre-commit"] +detached = true [tool.hatch.envs.typing.scripts] -test = "mypy --install-types --non-interactive {args:.}" +test = "pre-commit run --all-files --hook-stage manual mypy" [tool.hatch.envs.lint] -dependencies = [ - "black[jupyter]==23.3.0", - "mdformat>0.7", - "ruff==0.0.276", -] +dependencies = ["pre-commit"] +detached = true [tool.hatch.envs.lint.scripts] -style = [ - "ruff {args:.}", - "black --check --diff {args:.}", - "mdformat --check {args:docs *.md}" -] -fmt = [ - "black {args:.}", - "ruff --fix {args:.}", - "mdformat {args:docs *.md}" +build = [ + "pre-commit run --all-files ruff", + "pre-commit run --all-files ruff-format" ] [tool.pytest.ini_options] -addopts = "-raXs --durations 10 --color=yes --doctest-modules" +minversion = "6.0" +xfail_strict = true +log_cli_level = "info" +addopts = [ + "-raXs", "--durations=10", "--color=yes", "--doctest-modules", + "--showlocals", "--strict-markers", "--strict-config" +] testpaths = [ "jupyter_client", "tests/" @@ -137,6 +130,9 @@ timeout_method = "thread" filterwarnings= [ # Fail on warnings "error", + # from python-dateutil + "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning", + "ignore:datetime.datetime.utcnow:DeprecationWarning", ] [tool.coverage.report] @@ -161,31 +157,22 @@ relative_files = true source = ["jupyter_client"] [tool.mypy] -check_untyped_defs = true +files = "jupyter_client" +python_version = "3.8" +strict = true disallow_any_generics = false -disallow_incomplete_defs = true -disallow_untyped_decorators = true -no_implicit_optional = true +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] no_implicit_reexport = false pretty = true show_error_context = true show_error_codes = true -strict_equality = true -strict_optional = true -warn_unused_configs = true -warn_redundant_casts = true warn_return_any = false -warn_unused_ignores = true - - -[tool.black] -line-length = 100 -skip-string-normalization = true -target-version = ["py38"] 
+warn_unreachable = true [tool.ruff] -target-version = "py38" line-length = 100 + +[tool.ruff.lint] select = [ "A", "B", @@ -197,7 +184,6 @@ select = [ "FBT", "I", "ICN", - "ISC", "N", "PLC", "PLE", @@ -251,6 +237,11 @@ ignore = [ "PLW0603", # Mutable class attributes should be annotated with `typing.ClassVar` "RUF012", + # non-pep585-annotation + "UP006", + # non-pep604-annotation + "UP007", + ] unfixable = [ # Don't touch print statements @@ -261,7 +252,7 @@ unfixable = [ "F401", ] -[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] # B011 Do not call assert False since python -O removes these calls # F841 local variable 'foo' is assigned to but never used # C408 Unnecessary `dict` call @@ -287,3 +278,6 @@ ignore-nested-functions=true ignore-nested-classes=true fail-under=90 exclude = ["docs", "test"] + +[tool.repo-review] +ignore = ["PY007", "PP308", "GH102"] diff --git a/tests/conftest.py b/tests/conftest.py index 4352bf04..b154b32a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,7 +7,7 @@ import pytest # Must be set before importing from `jupyter_core`. -os.environ['JUPYTER_PLATFORM_DIRS'] = '1' +os.environ["JUPYTER_PLATFORM_DIRS"] = "1" pytest_plugins = ["pytest_jupyter", "pytest_jupyter.jupyter_client"] diff --git a/tests/problemkernel.py b/tests/problemkernel.py index f9cfd910..a20cf708 100644 --- a/tests/problemkernel.py +++ b/tests/problemkernel.py @@ -18,7 +18,7 @@ class ProblemTestKernel(Kernel): class ProblemTestApp(IPKernelApp): - kernel_class = ProblemTestKernel + kernel_class = ProblemTestKernel # type:ignore[assignment] def init_io(self): # Overridden to disable stdout/stderr capture @@ -26,7 +26,7 @@ def init_io(self): def init_sockets(self): if os.environ.get("FAIL_ON_START") == "1": - # Simulates e.g. a port binding issue (Adress already in use) + # Simulates e.g. 
a port binding issue (Address already in use) raise RuntimeError("Failed for testing purposes") return super().init_sockets() diff --git a/tests/signalkernel.py b/tests/signalkernel.py index 887d4d32..65fdb687 100644 --- a/tests/signalkernel.py +++ b/tests/signalkernel.py @@ -62,7 +62,7 @@ def do_execute( class SignalTestApp(IPKernelApp): - kernel_class = SignalTestKernel + kernel_class = SignalTestKernel # type:ignore[assignment] def init_io(self): # Overridden to disable stdout/stderr capture diff --git a/tests/test_client.py b/tests/test_client.py index 65da4263..53ae5253 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -124,11 +124,11 @@ def _check_reply(self, reply_type, reply): assert reply["parent_header"]["msg_type"] == reply_type + "_request" @pytest.mark.skipif( - sys.platform != 'linux' or platform.python_implementation().lower() == 'pypy', - reason='only works with cpython on ubuntu in ci', + sys.platform != "linux" or platform.python_implementation().lower() == "pypy", + reason="only works with cpython on ubuntu in ci", ) async def test_input_request(self, kc): - with mock.patch('builtins.input', return_value='test\n'): + with mock.patch("builtins.input", return_value="test\n"): reply = await kc.execute_interactive("a = input()", timeout=TIMEOUT) assert reply["content"]["status"] == "ok" @@ -137,7 +137,7 @@ async def test_output_hook(self, kc): def output_hook(msg): nonlocal called - if msg['header']['msg_type'] == 'stream': + if msg["header"]["msg_type"] == "stream": called = True reply = await kc.execute_interactive( @@ -192,7 +192,7 @@ async def test_shutdown_id(self, kc): class ThreadedKernelManager(KernelManager): - client_class = DottedObjectName('tests.test_client.CustomThreadedKernelClient') + client_class = DottedObjectName("tests.test_client.CustomThreadedKernelClient") class CustomThreadedZMQSocketChannel(ThreadedZMQSocketChannel): @@ -208,10 +208,10 @@ def call_handlers(self, msg): class CustomThreadedKernelClient(ThreadedKernelClient): - iopub_channel_class = Type(CustomThreadedZMQSocketChannel) - shell_channel_class = Type(CustomThreadedZMQSocketChannel) - stdin_channel_class = Type(CustomThreadedZMQSocketChannel) - control_channel_class = Type(CustomThreadedZMQSocketChannel) + iopub_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] + shell_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] + stdin_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] + control_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] class TestThreadedKernelClient(TestKernelClient): @@ -235,7 +235,7 @@ def _check_reply(self, reply_type, reply): self.assertEqual(reply["parent_header"]["msg_type"], reply_type + "_request") def test_execute_interactive(self): - pytest.skip('Not supported') + pytest.skip("Not supported") def test_history(self): kc = self.kc @@ -298,4 +298,4 @@ def test_validate_string_dict(): with pytest.raises(ValueError): validate_string_dict(dict(a=1)) # type:ignore with pytest.raises(ValueError): - validate_string_dict({1: 'a'}) # type:ignore + validate_string_dict({1: "a"}) # type:ignore diff --git a/tests/test_jsonutil.py b/tests/test_jsonutil.py index 7c9f2a8e..992eb870 100644 --- a/tests/test_jsonutil.py +++ b/tests/test_jsonutil.py @@ -162,7 +162,7 @@ def test_json_default(): (1, None), # start with scalars (1.123, None), (1.0, None), - ('a', None), + ("a", None), (True, None), (False, None), (None, None), @@ -172,7 +172,7 @@ def 
test_json_default(): ((1, 2), [1, 2]), ({1, 2}, [1, 2]), (dict(x=1), None), - ({'x': 1, 'y': [1, 2, 3], '1': 'int'}, None), + ({"x": 1, "y": [1, 2, 3], "1": "int"}, None), # More exotic objects ((x for x in range(3)), [0, 1, 2]), (iter([1, 2]), [1, 2]), diff --git a/tests/test_kernelspec.py b/tests/test_kernelspec.py index 115d9a29..480d13ae 100644 --- a/tests/test_kernelspec.py +++ b/tests/test_kernelspec.py @@ -41,13 +41,13 @@ def test_find_kernel_specs(self): def test_allowed_kernel_names(self): ksm = kernelspec.KernelSpecManager() - ksm.allowed_kernelspecs = ["foo"] + ksm.allowed_kernelspecs = {"foo"} kernels = ksm.find_kernel_specs() assert not len(kernels) def test_deprecated_whitelist(self): ksm = kernelspec.KernelSpecManager() - ksm.whitelist = ["bar"] + ksm.whitelist = {"bar"} kernels = ksm.find_kernel_specs() assert not len(kernels) diff --git a/tests/test_kernelspecapp.py b/tests/test_kernelspecapp.py index 7fa161ee..a8cb9718 100644 --- a/tests/test_kernelspecapp.py +++ b/tests/test_kernelspecapp.py @@ -15,8 +15,8 @@ def test_kernelspec_sub_apps(jp_kernel_dir): app = InstallKernelSpec() - prefix = os.path.dirname(os.environ['JUPYTER_DATA_DIR']) - kernel_dir = os.path.join(prefix, 'share/jupyter/kernels') + prefix = os.path.dirname(os.environ["JUPYTER_DATA_DIR"]) + kernel_dir = os.path.join(prefix, "share/jupyter/kernels") app.kernel_spec_manager.kernel_dirs.append(kernel_dir) app.prefix = prefix app.initialize([str(jp_kernel_dir)]) @@ -27,16 +27,16 @@ def test_kernelspec_sub_apps(jp_kernel_dir): app1 = ListKernelSpecs() app1.kernel_spec_manager.kernel_dirs.append(kernel_dir) specs = app1.start() - assert 'echo' in specs + assert specs and "echo" in specs - app2 = RemoveKernelSpec(spec_names=['echo'], force=True) + app2 = RemoveKernelSpec(spec_names=["echo"], force=True) app2.kernel_spec_manager.kernel_dirs.append(kernel_dir) app2.start() app3 = ListKernelSpecs() app3.kernel_spec_manager.kernel_dirs.append(kernel_dir) specs = app3.start() - assert 'echo' not in specs + assert specs and "echo" not in specs def test_kernelspec_app(): diff --git a/tests/test_localinterfaces.py b/tests/test_localinterfaces.py index 86edc4e5..100a7152 100644 --- a/tests/test_localinterfaces.py +++ b/tests/test_localinterfaces.py @@ -11,19 +11,19 @@ def test_load_ips(): # Override the machinery that skips it if it was called before - localinterfaces._load_ips.called = False + localinterfaces._load_ips.called = False # type:ignore[attr-defined] # Just check this doesn't error localinterfaces._load_ips(suppress_exceptions=False) - localinterfaces.is_local_ip('8.8.8.8') - localinterfaces.is_public_ip('127.0.0.1') + localinterfaces.is_local_ip("8.8.8.8") + localinterfaces.is_public_ip("127.0.0.1") ips = localinterfaces.local_ips() - assert '127.0.0.1' in ips + assert "127.0.0.1" in ips localinterfaces._load_ips_gethostbyname() localinterfaces._load_ips_dumb() - if sys.platform == 'linux': + if sys.platform == "linux": localinterfaces._load_ips_ip() localinterfaces._load_ips_ifconfig() diff --git a/tests/test_manager.py b/tests/test_manager.py index e3d6ea22..23c246af 100644 --- a/tests/test_manager.py +++ b/tests/test_manager.py @@ -32,3 +32,33 @@ def test_connection_file_real_path(): km._launch_args = {} cmds = km.format_kernel_cmd() assert cmds[4] == "foobar" + + +def test_env_update_launch_args_not_set(): + km = KernelManager() + km.update_env(env={"A": "A"}) + + +def test_env_update_launch_args_not_dict(): + km = KernelManager() + km._launch_args = None + km.update_env(env={"B": "B"}) + + +def
test_env_update_launch_args_no_env(): + km = KernelManager() + km._launch_args = {} + km.update_env(env={"C": "C"}) + + +def test_env_update_launch_args_env_not_dict(): + km = KernelManager() + km._launch_args = {"env": None} + km.update_env(env={"D": "D"}) + + +def test_env_update_launch_args_env_dic(): + km = KernelManager() + km._launch_args = {"env": {}} + km.update_env(env={"E": "E"}) + assert km._launch_args["env"]["E"] == "E" diff --git a/tests/test_multikernelmanager.py b/tests/test_multikernelmanager.py index fc82d544..b3fc5797 100644 --- a/tests/test_multikernelmanager.py +++ b/tests/test_multikernelmanager.py @@ -170,7 +170,7 @@ def test_start_parallel_thread_kernels(self): ) @pytest.mark.skipif( sys.platform == "linux", - reason='Kernel refuses to start in process pool', + reason="Kernel refuses to start in process pool", ) def test_start_parallel_process_kernels(self): self.test_tcp_lifecycle() diff --git a/tests/test_provisioning.py b/tests/test_provisioning.py index a49841bb..f9c847d0 100644 --- a/tests/test_provisioning.py +++ b/tests/test_provisioning.py @@ -61,7 +61,7 @@ async def wait(self) -> Optional[int]: # Process is no longer alive, wait and clear ret = self.process.wait() # Make sure all the fds get closed. - for attr in ['stdout', 'stderr', 'stdin']: + for attr in ["stdout", "stderr", "stdin"]: fid = getattr(self.process, attr) if fid: fid.close() @@ -70,7 +70,7 @@ async def wait(self) -> Optional[int]: async def send_signal(self, signum: int) -> None: if self.process: - if signum == signal.SIGINT and sys.platform == 'win32': + if signum == signal.SIGINT and sys.platform == "win32": from jupyter_client.win_interrupt import send_interrupt send_interrupt(self.process.win32_interrupt_event) @@ -99,7 +99,7 @@ async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: # save kwargs for use in restart km._launch_args = kwargs.copy() # build the Popen cmd - extra_arguments = kwargs.pop('extra_arguments', []) + extra_arguments = kwargs.pop("extra_arguments", []) # write connection file / get default ports km.write_connection_file() @@ -136,57 +136,57 @@ class NewTestProvisioner(CustomTestProvisioner): # type:ignore def build_kernelspec(name: str, provisioner: Optional[str] = None) -> None: spec: dict = { - 'argv': [ + "argv": [ sys.executable, - '-m', - 'tests.signalkernel', - '-f', - '{connection_file}', + "-m", + "tests.signalkernel", + "-f", + "{connection_file}", ], - 'display_name': f"Signal Test Kernel w {provisioner}", - 'env': {'TEST_VARS': '${TEST_VARS}:test_var_2'}, - 'metadata': {}, + "display_name": f"Signal Test Kernel w {provisioner}", + "env": {"TEST_VARS": "${TEST_VARS}:test_var_2"}, + "metadata": {}, } if provisioner: - kernel_provisioner = {'kernel_provisioner': {'provisioner_name': provisioner}} - spec['metadata'].update(kernel_provisioner) - if provisioner != 'local-provisioner': - spec['metadata']['kernel_provisioner']['config'] = { - 'config_var_1': 42, - 'config_var_2': name, + kernel_provisioner = {"kernel_provisioner": {"provisioner_name": provisioner}} + spec["metadata"].update(kernel_provisioner) + if provisioner != "local-provisioner": + spec["metadata"]["kernel_provisioner"]["config"] = { + "config_var_1": 42, + "config_var_2": name, } - kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', name) + kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name) os.makedirs(kernel_dir) - with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f: + with open(pjoin(kernel_dir, "kernel.json"), "w") as f: f.write(json.dumps(spec)) def 
new_provisioner(): - build_kernelspec('new_provisioner', 'new-test-provisioner') + build_kernelspec("new_provisioner", "new-test-provisioner") def custom_provisioner(): - build_kernelspec('custom_provisioner', 'custom-test-provisioner') + build_kernelspec("custom_provisioner", "custom-test-provisioner") @pytest.fixture def all_provisioners(): - build_kernelspec('no_provisioner') - build_kernelspec('missing_provisioner', 'missing-provisioner') - build_kernelspec('default_provisioner', 'local-provisioner') - build_kernelspec('subclassed_provisioner', 'subclassed-test-provisioner') + build_kernelspec("no_provisioner") + build_kernelspec("missing_provisioner", "missing-provisioner") + build_kernelspec("default_provisioner", "local-provisioner") + build_kernelspec("subclassed_provisioner", "subclassed-test-provisioner") custom_provisioner() @pytest.fixture( params=[ - 'no_provisioner', - 'default_provisioner', - 'missing_provisioner', - 'custom_provisioner', - 'subclassed_provisioner', + "no_provisioner", + "default_provisioner", + "missing_provisioner", + "custom_provisioner", + "subclassed_provisioner", ] ) def akm(request, all_provisioners): @@ -194,9 +194,9 @@ def akm(request, all_provisioners): initial_provisioner_map = { - 'local-provisioner': 'jupyter_client.provisioning:LocalProvisioner', - 'subclassed-test-provisioner': 'tests.test_provisioning:SubclassedTestProvisioner', - 'custom-test-provisioner': 'tests.test_provisioning:CustomTestProvisioner', + "local-provisioner": "jupyter_client.provisioning:LocalProvisioner", + "subclassed-test-provisioner": "tests.test_provisioning:SubclassedTestProvisioner", + "custom-test-provisioner": "tests.test_provisioning:CustomTestProvisioner", } @@ -208,26 +208,26 @@ def mock_get_all_provisioners() -> List[EntryPoint]: def mock_get_provisioner(_: str, name: str) -> EntryPoint: - if name == 'new-test-provisioner': + if name == "new-test-provisioner": return EntryPoint( - 'new-test-provisioner', - 'tests.test_provisioning:NewTestProvisioner', + "new-test-provisioner", + "tests.test_provisioning:NewTestProvisioner", KernelProvisionerFactory.GROUP_NAME, ) if name in initial_provisioner_map: return EntryPoint(name, initial_provisioner_map[name], KernelProvisionerFactory.GROUP_NAME) - raise ValueError('No such entry point') + raise ValueError("No such entry point") @pytest.fixture def kpf(monkeypatch): """Setup the Kernel Provisioner Factory, mocking the entrypoint fetch calls.""" monkeypatch.setattr( - KernelProvisionerFactory, '_get_all_provisioners', mock_get_all_provisioners + KernelProvisionerFactory, "_get_all_provisioners", mock_get_all_provisioners ) - monkeypatch.setattr(KernelProvisionerFactory, '_get_provisioner', mock_get_provisioner) + monkeypatch.setattr(KernelProvisionerFactory, "_get_provisioner", mock_get_provisioner) factory = KernelProvisionerFactory.instance() return factory @@ -239,23 +239,23 @@ def test_find_all_specs(self, kpf, all_provisioners): # Ensure specs for initial provisioners exist, # and missing_provisioner & new_provisioner don't - assert 'no_provisioner' in kernels - assert 'default_provisioner' in kernels - assert 'subclassed_provisioner' in kernels - assert 'custom_provisioner' in kernels - assert 'missing_provisioner' not in kernels - assert 'new_provisioner' not in kernels + assert "no_provisioner" in kernels + assert "default_provisioner" in kernels + assert "subclassed_provisioner" in kernels + assert "custom_provisioner" in kernels + assert "missing_provisioner" not in kernels + assert "new_provisioner" not in 
kernels def test_get_missing(self, all_provisioners): ksm = KernelSpecManager() with pytest.raises(NoSuchKernel): - ksm.get_kernel_spec('missing_provisioner') + ksm.get_kernel_spec("missing_provisioner") def test_get_new(self, kpf): new_provisioner() # Introduce provisioner after initialization of KPF ksm = KernelSpecManager() - kernel = ksm.get_kernel_spec('new_provisioner') - assert kernel.metadata['kernel_provisioner']['provisioner_name'] == 'new-test-provisioner' + kernel = ksm.get_kernel_spec("new_provisioner") + assert kernel.metadata["kernel_provisioner"]["provisioner_name"] == "new-test-provisioner" class TestRuntime: @@ -263,7 +263,7 @@ async def akm_test(self, kernel_mgr): """Starts a kernel, validates the associated provisioner's config, shuts down kernel""" assert kernel_mgr.provisioner is None - if kernel_mgr.kernel_name == 'missing_provisioner': + if kernel_mgr.kernel_name == "missing_provisioner": with pytest.raises(NoSuchKernel): await kernel_mgr.start_kernel() else: @@ -280,12 +280,12 @@ async def test_existing(self, kpf, akm): async def test_new(self, kpf): new_provisioner() # Introduce provisioner after initialization of KPF - new_km = AsyncKernelManager(kernel_name='new_provisioner') + new_km = AsyncKernelManager(kernel_name="new_provisioner") await self.akm_test(new_km) async def test_custom_lifecycle(self, kpf): custom_provisioner() - async_km = AsyncKernelManager(kernel_name='custom_provisioner') + async_km = AsyncKernelManager(kernel_name="custom_provisioner") await async_km.start_kernel(stdout=PIPE, stderr=PIPE) is_alive = await async_km.is_alive() assert is_alive @@ -300,8 +300,8 @@ async def test_custom_lifecycle(self, kpf): assert async_km.context.closed async def test_default_provisioner_config(self, kpf, all_provisioners): - kpf.default_provisioner_name = 'custom-test-provisioner' - async_km = AsyncKernelManager(kernel_name='no_provisioner') + kpf.default_provisioner_name = "custom-test-provisioner" + async_km = AsyncKernelManager(kernel_name="no_provisioner") await async_km.start_kernel(stdout=PIPE, stderr=PIPE) is_alive = await async_km.is_alive() assert is_alive @@ -320,22 +320,22 @@ def validate_provisioner(akm: AsyncKernelManager) -> None: assert akm.provisioner is not None and akm.provisioner.has_process # Validate provisioner config - if akm.kernel_name in ['no_provisioner', 'default_provisioner']: - assert not hasattr(akm.provisioner, 'config_var_1') - assert not hasattr(akm.provisioner, 'config_var_2') + if akm.kernel_name in ["no_provisioner", "default_provisioner"]: + assert not hasattr(akm.provisioner, "config_var_1") + assert not hasattr(akm.provisioner, "config_var_2") else: assert akm.provisioner.config_var_1 == 42 # type:ignore assert akm.provisioner.config_var_2 == akm.kernel_name # type:ignore # Validate provisioner class - if akm.kernel_name in ['no_provisioner', 'default_provisioner', 'subclassed_provisioner']: + if akm.kernel_name in ["no_provisioner", "default_provisioner", "subclassed_provisioner"]: assert isinstance(akm.provisioner, LocalProvisioner) - if akm.kernel_name == 'subclassed_provisioner': + if akm.kernel_name == "subclassed_provisioner": assert isinstance(akm.provisioner, SubclassedTestProvisioner) else: assert not isinstance(akm.provisioner, SubclassedTestProvisioner) else: assert isinstance(akm.provisioner, CustomTestProvisioner) assert not isinstance(akm.provisioner, LocalProvisioner) - if akm.kernel_name == 'new_provisioner': + if akm.kernel_name == "new_provisioner": assert isinstance(akm.provisioner, NewTestProvisioner) 
diff --git a/tests/test_restarter.py b/tests/test_restarter.py index b216842f..78997c3d 100644 --- a/tests/test_restarter.py +++ b/tests/test_restarter.py @@ -80,7 +80,7 @@ def debug_logging(): win_skip = pytest.mark.skipif( - os.name == 'nt', + os.name == "nt", reason='"RuntimeError: Cannot run the event loop while another loop is running" error on Windows', ) @@ -88,7 +88,7 @@ def debug_logging(): @win_skip async def test_restart_check(config, install_kernel, debug_logging): """Test that the kernel is restarted and recovers""" - # If this test failes, run it with --log-cli-level=DEBUG to inspect + # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 1 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True @@ -106,7 +106,7 @@ def cb(): try: km.start_kernel() - km.add_restart_callback(cb, 'restart') + km.add_restart_callback(cb, "restart") except BaseException: if km.has_kernel: km.shutdown_kernel() @@ -144,7 +144,7 @@ def cb(): @win_skip async def test_restarter_gives_up(config, install_fail_kernel, debug_logging): """Test that the restarter gives up after reaching the restart limit""" - # If this test failes, run it with --log-cli-level=DEBUG to inspect + # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 1 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True @@ -167,8 +167,8 @@ def on_death(): try: km.start_kernel() - km.add_restart_callback(cb, 'restart') - km.add_restart_callback(on_death, 'dead') + km.add_restart_callback(cb, "restart") + km.add_restart_callback(on_death, "dead") except BaseException: if km.has_kernel: km.shutdown_kernel() @@ -188,7 +188,7 @@ def on_death(): async def test_async_restart_check(config, install_kernel, debug_logging): """Test that the kernel is restarted and recovers""" - # If this test failes, run it with --log-cli-level=DEBUG to inspect + # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 1 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True @@ -206,7 +206,7 @@ def cb(): try: await km.start_kernel() - km.add_restart_callback(cb, 'restart') + km.add_restart_callback(cb, "restart") except BaseException: if km.has_kernel: await km.shutdown_kernel() @@ -243,7 +243,7 @@ def cb(): async def test_async_restarter_gives_up(config, install_slow_fail_kernel, debug_logging): """Test that the restarter gives up after reaching the restart limit""" - # If this test failes, run it with --log-cli-level=DEBUG to inspect + # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 2 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True @@ -267,8 +267,8 @@ def on_death(): try: await km.start_kernel() - km.add_restart_callback(cb, 'restart') - km.add_restart_callback(on_death, 'dead') + km.add_restart_callback(cb, "restart") + km.add_restart_callback(on_death, "dead") except BaseException: if km.has_kernel: await km.shutdown_kernel() diff --git a/tests/test_session.py b/tests/test_session.py index e6cec015..f30f44f4 100644 --- a/tests/test_session.py +++ b/tests/test_session.py @@ -249,7 +249,7 @@ def test_args(self, session): self.assertEqual(s.session, u) self.assertEqual(s.username, "carrot") - @pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason='Test fails on PyPy') + @pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="Test fails on PyPy") def test_tracking_sync(self, session): """test tracking 
messages""" ctx = zmq.Context() @@ -278,7 +278,7 @@ def test_tracking_sync(self, session): b.close() ctx.term() - @pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason='Test fails on PyPy') + @pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="Test fails on PyPy") async def test_tracking(self, session): """test tracking messages""" ctx = zmq.asyncio.Context() @@ -509,9 +509,9 @@ def test_clone(self, session): def test_squash_unicode(): - assert ss.squash_unicode(dict(a='1')) == {b'a': b'1'} - assert ss.squash_unicode(['a', 1]) == [b'a', 1] - assert ss.squash_unicode('hi') == b'hi' + assert ss.squash_unicode(dict(a="1")) == {b"a": b"1"} + assert ss.squash_unicode(["a", 1]) == [b"a", 1] + assert ss.squash_unicode("hi") == b"hi" def test_json_packer(): @@ -527,9 +527,9 @@ def test_json_packer(): def test_message_cls(): m = ss.Message(dict(a=1)) foo = dict(m) # type:ignore - assert foo['a'] == 1 - assert m['a'] == 1, m['a'] - assert 'a' in m + assert foo["a"] == 1 + assert m["a"] == 1, m["a"] + assert "a" in m assert str(m) == "{'a': 1}"