From d9ff43cc057419086c7cf035ea0a451933b66121 Mon Sep 17 00:00:00 2001 From: Dan W Anderson Date: Mon, 6 Jul 2020 15:04:53 -0700 Subject: [PATCH 1/3] two-space indent (#1) --- src/black/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/black/__init__.py b/src/black/__init__.py index 2b2d3d88c73..a77a410fa42 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -1776,7 +1776,7 @@ def __str__(self) -> str: if not self: return "\n" - indent = "    " * self.depth + indent = "  " * self.depth leaves = iter(self.leaves) first = next(leaves) res = f"{first.prefix}{indent}{first.value}" @@ -2089,7 +2089,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: if prev_siblings_are( leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] ) and is_multiline_string(leaf): - prefix = "    " * self.current_line.depth + prefix = "  " * self.current_line.depth docstring = fix_docstring(leaf.value[3:-3], prefix) leaf.value = leaf.value[0:3] + docstring + leaf.value[-3:] normalize_string_quotes(leaf) From 23a7e4ef99a604ac5ef27a1bef52ad1c19ffc3e0 Mon Sep 17 00:00:00 2001 From: Taylor Jackle Spriggs Date: Mon, 31 Aug 2020 14:05:00 -0700 Subject: [PATCH 2/3] Update to 20.8b1 (#2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Find project root correctly (#1518) Ensure the root dir is a common parent of all inputs. Fixes #1493 * Add pip install from GitHub command to README.md (#1529) * Add pip install from GitHub command to README.md * Make it prettier ... * Mozilla uses black too (#1531) * add Quora to orgs that use Black (#1532) * ISSUE 1533: Fix --config argument description (#1534) Change the --config argument description to "Read configuration from FILE path." so the help line reads: --config FILE Read configuration from FILE path * Spelling fix in CONTRIBUTING.md (#1547) * Ensure path for finding root is absolute (#1550) As Path.resolve() is buggy on Windows (see https://bugs.python.org/issue38671), an absolute path is ensured by prepending Path.cwd() * Update curl command to use stable branch (#1543) * pre-commit: show diff on failure on CI (#1552) * pre-commit: --show-diff-on-failure * pre-commit: --show-diff-on-failure * docs: Improve pre-commit use (#1551) The stable tag wasn't available and crashed when attempting to set up initial pre-commit. Also, the Python version needs to be installed, so it is better to use the generic "python3" command. * Update to accommodate isort 5 release changes. (#1559) Isort 5 introduced profiles and ensure_newline_before_comments options; either needs to be added to work correctly with Black. Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * fix spelling (#1567) Co-authored-by: Hugo van Kemenade * Remove slow assertion (#1592) Partial fix for #1581. This assertion produces behavior quadratic in the number of leaves in a line, which was making Black extremely slow on files with very long expressions. On my benchmark file this change makes Black 10x faster. * Add the direnv base directory to the default excludes (#1564) Co-authored-by: Chris Rose * Make --exclude only apply to recursively found files (#1591) Ever since --force-exclude was added, --exclude started to touch files that were given to Black through the CLI too. This is neither documented nor expected behaviour, as --exclude and --force-exclude now behave the same!
Before this commit, get_sources(), when encountering a file that was passed explicitly through the CLI, would pass a single-Path list to gen_python_files(). This caused bad behaviour since that function doesn't treat the exclude and force_exclude regexes differently, which is fine for recursively found files but *not* for files given through the CLI. Now when get_sources() iterates through srcs and encounters a file, it checks whether the force_exclude regex matches; if not, the file is added to the computed sources set (see the Python sketch below). A new function had to be created since, before you can do regex matching, the path must be normalized. The full process of normalizing the path is somewhat long, as there is special error handling. I didn't want to duplicate this logic in get_sources() and gen_python_files(), so that's why there is a new helper function. * in verbose mode, print stack trace (#1594) Make Black failures easier to debug * fix some docstring crashes (#1593) Allow removing some trailing whitespace * don't strip brackets before lsqb (#1575) (#1590) If the string contains a PERCENT, it's not safe to remove brackets that follow an operator with the same or higher precedence than PERCENT * fix unary op detection (#1600) * Fix inline code style in README (#1608) Refer to `pyproject.toml` in HTML section of README with HTML code tags * Update all dependencies to latest versions * Disable string splitting/merging by default (#1609) * put experimental string stuff behind a flag * update tests * don't need an output section if it's the same as the input * Primer: Expect no formatting changes in attrs, hypothesis and poetry with --experimental-string-processing off Co-authored-by: Hugo van Kemenade * Upgrade docs to Sphinx 3+ and add doc build test (#1613) * Upgrade docs to Sphinx 3+ * Fix all the warnings... - Fixed bad docstrings - Fixed bad fenced code blocks in documentation - Blocklisted some sections from being generated from the README - Added missing documentation to index.rst - Fixed an invalid autofunction directive in reference/reference_functions.rst - Pin another documentation dependency * Add documentation build test * Reset trailing comma handling * Re-implement magic trailing comma handling: - when a trailing comma is specified in any bracket pair, that signals to Black that this bracket pair always needs to be exploded, i.e. presented as "one item per line"; - this causes some changes to previously formatted code that erroneously left trailing commas embedded in single-line expressions; - internally, Black needs to be able to tell trailing commas that it put itself apart from pre-existing trailing commas. We do this by using/abusing lib2to3's `was_checked` attribute. It's True for internally generated trailing commas and False for pre-existing ones (in fact, for all pre-existing leaves and nodes). Fixes #1288 * Reformat docs/conf.py according to the new style * Open file explicitly with UTF-8 so it works on Windows, too * Mark Primer projects that will change formatting * Require Sphinx 3 * Use properly renamed function name in docs * Fix dealing with generated files in docs * Property-based fuzz test * Update the changelog * Make doc generation a little smarter, update doc sections * Add more trailing comma test variants * Run trailing comma tests with TargetVersion.PY38 * Address pre-existing trailing commas when not in the rightmost bracket pair This required some hackery.
Long story short, we need to reuse the ability to omit rightmost bracket pairs (which glues them together and splits on something else instead) for pre-existing trailing commas. This form of user-controlled formatting is brittle, so we have to be careful not to cause a scenario where Black first formats code without trailing commas in one way, and then looks at the same file with pre-existing trailing commas (that it itself put there on the previous run) and decides to format the code again. One particularly ugly edge case here is the handling of optional parentheses. In particular, the long-standing `line_length=1` hack got in the way of pre-existing trailing commas and had to be removed. Instead, a more intelligent but costly solution was put in place: a "second opinion" if the formatting that omits optional parentheses ended up causing lines to be too long. Again, for efficiency purposes, Black reuses Leaf objects from blib2to3 and modifies them in place, which was invalid when producing two separate formattings. Line cloning was used to mitigate this. Fixes #1619 * Improve docstring re-indentation handling This addresses a few crashers, namely: * producing non-equivalent code due to mangling escaped newlines, * invalid hugging of quote characters in the docstring body against the docstring's outer triple quotes (causing a quadruple quote, which is a syntax error), * lack of handling for docstrings that start on the same line as the `def`, and * invalid stripping of outer triple quotes when the docstring contained a string prefix. As a bonus, tests now also run when string normalization is disabled. * Add links regarding Spotless integration for gradle/maven users (#1622) Co-authored-by: Łukasz Langa * Primer update config - enable pytest (#1626) Reformatted projects I have access to: - aioexabgp - bandersnatch - flake8-bugbear ``` -- primer results 📊 -- 13 / 16 succeeded (81.25%) ✅ 0 / 16 FAILED (0.0%) 💩 - 3 projects disabled by config - 0 projects skipped due to Python version - 0 skipped due to long checkout ``` * Also re-enable pytest ``` -- primer results 📊 -- 14 / 16 succeeded (87.5%) ✅ 0 / 16 FAILED (0.0%) 💩 - 2 projects disabled by config - 0 projects skipped due to Python version - 0 skipped due to long checkout real 2m26.207s user 17m55.404s sys 0m43.061s ``` * v20.8b0 * Treat all trailing commas as pre-existing, as they effectively are. On a second pass of Black on the same file, inserted trailing commas are now pre-existing. It doesn't make sense to differentiate between the passes then.
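To make the magic trailing comma behaviour described above concrete, here is a minimal, hand-written illustration (plain Python, not code or test data from this patch): a trailing comma inside any bracket pair tells Black to keep that pair exploded, one element per line, even when the collapsed form would fit within the line length.

```python
# Hand-written illustration of the magic trailing comma (not from this patch).

# Trailing comma present: Black keeps this list exploded, one element per
# line, even though "[1, 2, 3]" would easily fit within the line length.
numbers = [
    1,
    2,
    3,
]

# No trailing comma: Black collapses the pair because it fits on one line.
collapsed = [1, 2, 3]
```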
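The get_sources() split from #1591 referenced earlier can be sketched as follows. This is a hand-written approximation, not the patch's implementation: collect_sources, EXCLUDE, and FORCE_EXCLUDE are hypothetical names standing in for black.get_sources(), gen_python_files(), and the compiled --exclude/--force-exclude regexes.

```python
import re
from pathlib import Path
from typing import Iterable, Set

# Hypothetical stand-ins for the compiled --exclude / --force-exclude regexes.
EXCLUDE = re.compile(r"/(\.git|\.venv|build)/")  # recursive discovery only
FORCE_EXCLUDE = re.compile(r"/generated/")       # applies to every file

def collect_sources(srcs: Iterable[Path], root: Path) -> Set[Path]:
    """Approximation of black.get_sources() after #1591 (illustrative only).

    Assumes every input path lives under `root`; the real code also does
    path normalization with special error handling, omitted here.
    """
    sources: Set[Path] = set()
    for src in srcs:
        if src.is_file():
            # Files named explicitly on the CLI honour only --force-exclude.
            normalized = "/" + src.resolve().relative_to(root).as_posix()
            if not FORCE_EXCLUDE.search(normalized):
                sources.add(src)
        elif src.is_dir():
            # Recursively discovered files honour both regexes.
            for path in src.rglob("*.py"):
                normalized = "/" + path.resolve().relative_to(root).as_posix()
                if not (EXCLUDE.search(normalized) or FORCE_EXCLUDE.search(normalized)):
                    sources.add(path)
    return sources
```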
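The docstring crashers listed above are easiest to see with concrete inputs. The snippets below are constructed for illustration (they are not the patch's test data); each is valid Python of a shape that, per the commit message, earlier Black could crash on or rewrite into non-equivalent or invalid code.

```python
# Constructed examples of the docstring shapes fixed above (illustrative only).

def same_line(): """A docstring starting on the same line as the def."""

def escaped_newline():
    """An escaped newline must survive re-indentation unchanged: \
    this text still belongs to the same logical source line."""

def prefixed():
    r"""A prefixed docstring whose outer triple quotes must not be stripped."""

def body_quote():
    """Re-indentation must not hug a body quote against the closing quotes: " """
```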
* Include mode information for unstable formattings * Add expected failure tests with the unstable formattings * Make dependency on Click 7.0, regex 2020.1.8, and toml 0.10.1 explicit * v20.8b1 Co-authored-by: Lihu Ben-Ezri-Ravin Co-authored-by: Cooper Lees Co-authored-by: Sylvestre Ledru Co-authored-by: Jelle Zijlstra Co-authored-by: Olexiy Co-authored-by: jtpavlock Co-authored-by: dhaug-op <56020126+dhaug-op@users.noreply.github.com> Co-authored-by: Steven Maude Co-authored-by: Hugo van Kemenade Co-authored-by: Vinicius Gubiani Ferreira Co-authored-by: Maximilian Cosmo Sitter <48606431+mcsitter@users.noreply.github.com> Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> Co-authored-by: Kaligule Co-authored-by: Chris Rose Co-authored-by: Chris Rose Co-authored-by: David Szotten Co-authored-by: Daanyaal Syed Co-authored-by: Łukasz Langa Co-authored-by: Zac-HD Co-authored-by: Ned Twigg --- .github/workflows/doc.yml | 36 + .github/workflows/fuzz.yml | 31 + .github/workflows/lint.yml | 2 +- .gitignore | 1 + .travis.yml | 2 +- CHANGES.md | 51 +- CONTRIBUTING.md | 2 +- Pipfile | 10 +- Pipfile.lock | 647 +++++++--------- README.md | 26 +- docs/authors.md | 184 +++++ docs/black_primer.md | 6 +- docs/change_log.md | 503 ++++++++++++ docs/compatible_configs.md | 17 +- docs/conf.py | 66 +- docs/contributing_to_black.md | 70 ++ docs/editor_integration.md | 10 +- docs/github_actions.md | 19 + docs/ignoring_unmodified_files.md | 23 + docs/index.rst | 3 +- docs/installation_and_usage.md | 179 +++++ docs/pyproject_toml.md | 88 +++ docs/reference/reference_functions.rst | 4 +- docs/requirements.txt | 5 +- docs/show_your_style.md | 19 + docs/the_black_code_style.md | 13 +- docs/version_control_integration.md | 28 + fuzz.py | 59 ++ gallery/gallery.py | 5 +- setup.py | 5 +- src/black/__init__.py | 727 +++++++++++------- src/black_primer/primer.json | 10 +- src/blib2to3/pgen2/driver.py | 2 +- tests/data/cantfit.py | 12 +- tests/data/collections.py | 35 +- tests/data/comments2.py | 8 +- tests/data/comments7.py | 151 +++- tests/data/composition_no_trailing_comma.py | 367 +++++++++ tests/data/docstring.py | 77 +- tests/data/expression.diff | 27 +- tests/data/expression.py | 22 +- tests/data/fmtonoff4.py | 7 +- tests/data/function.py | 5 +- tests/data/function2.py | 5 +- tests/data/function_trailing_comma.py | 79 +- tests/data/import_spacing.py | 8 +- tests/data/long_strings.py | 32 +- tests/data/long_strings__regression.py | 12 +- tests/data/long_strings_flag_disabled.py | 289 +++++++ tests/data/percent_precedence.py | 41 + tests/data/trailing_comma_optional_parens1.py | 3 + tests/data/trailing_comma_optional_parens2.py | 3 + tests/data/trailing_comma_optional_parens3.py | 8 + tests/test_black.py | 357 ++++++--- 54 files changed, 3573 insertions(+), 828 deletions(-) create mode 100644 .github/workflows/doc.yml create mode 100644 .github/workflows/fuzz.yml create mode 100644 docs/authors.md create mode 100644 docs/change_log.md create mode 100644 docs/contributing_to_black.md create mode 100644 docs/github_actions.md create mode 100644 docs/ignoring_unmodified_files.md create mode 100644 docs/installation_and_usage.md create mode 100644 docs/pyproject_toml.md create mode 100644 docs/show_your_style.md create mode 100644 docs/version_control_integration.md create mode 100644 fuzz.py create mode 100644 tests/data/composition_no_trailing_comma.py create mode 100644 tests/data/long_strings_flag_disabled.py create mode 100644 tests/data/percent_precedence.py create mode 100644 
tests/data/trailing_comma_optional_parens1.py create mode 100644 tests/data/trailing_comma_optional_parens2.py create mode 100644 tests/data/trailing_comma_optional_parens3.py diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml new file mode 100644 index 00000000000..6023a02a7f7 --- /dev/null +++ b/.github/workflows/doc.yml @@ -0,0 +1,36 @@ +name: Documentation Build + +on: + push: + paths: + - "docs/**" + - "README.md" + - "CHANGES.md" + - "CONTRIBUTING.md" + pull_request: + paths: + - "docs/**" + - "README.md" + - "CHANGES.md" + - "CONTRIBUTING.md" + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + python -m pip install -e "." + python -m pip install -r "docs/requirements.txt" + + - name: Build documentation + run: sphinx-build -a -b html -W docs/ docs/_build/ diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml new file mode 100644 index 00000000000..92caa0fd5c1 --- /dev/null +++ b/.github/workflows/fuzz.yml @@ -0,0 +1,31 @@ +name: Fuzz + +on: [push, pull_request] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [3.6, 3.7, 3.8] + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade coverage + python -m pip install --upgrade hypothesmith + python -m pip install -e ".[d]" + + - name: Run fuzz tests + run: | + coverage run fuzz.py + coverage report diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index cb8c534c9ac..fa7286eec1f 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -24,4 +24,4 @@ jobs: python -m pip install -e '.[d]' - name: Lint - run: pre-commit run --all-files + run: pre-commit run --all-files --show-diff-on-failure diff --git a/.gitignore b/.gitignore index 509797e65c4..6b94cacd183 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ src/_black_version.py .eggs .dmypy.json *.swp +.hypothesis/ diff --git a/.travis.yml b/.travis.yml index b2b127cfeb0..86cf24df51b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,7 +20,7 @@ matrix: - name: "lint" python: 3.7 env: - - TEST_CMD="pre-commit run --all-files" + - TEST_CMD="pre-commit run --all-files --show-diff-on-failure" - name: "3.6" python: 3.6 - name: "3.7" diff --git a/CHANGES.md b/CHANGES.md index 6d418b9bec8..11341779f58 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,18 +1,57 @@ ## Change Log -### Unreleased +### 20.8b1 + +#### _Packaging_ + +- explicitly depend on Click 7.1.2 or newer as `Black` no longer works with versions + older than 7.0 + +### 20.8b0 #### _Black_ -- reindent docstrings when reindenting code around it (#1053) +- re-implemented support for explicit trailing commas: now it works consistently within + any bracket pair, including nested structures (#1288 and duplicates) + +- `Black` now reindents docstrings when reindenting code around it (#1053) + +- `Black` now shows colored diffs (#1266) + +- `Black` is now packaged using 'py3' tagged wheels (#1388) + +- `Black` now supports Python 3.8 code, e.g. 
star expressions in return statements + (#1121) + +- `Black` no longer normalizes capital R-string prefixes as those have a + community-accepted meaning (#1244) + +- `Black` now uses exit code 2 when specified configuration file doesn't exist (#1361) + +- `Black` now works on AWS Lambda (#1141) + +- added `--force-exclude` argument (#1032) + +- removed deprecated `--py36` option (#1236) + +- fixed `--diff` output when EOF is encountered (#526) + +- fixed `# fmt: off` handling around decorators (#560) + +- fixed unstable formatting with some `# type: ignore` comments (#1113) -- show colored diffs (#1266) +- fixed invalid removal on organizing brackets followed by indexing (#1575) -- move to 'py3' tagged wheels (#1388) +- introduced `black-primer`, a CI tool that allows us to run regression tests against + existing open source users of Black (#1402) -- remove deprecated `--py36` option (#1236) +- introduced property-based fuzzing to our test suite based on Hypothesis and + Hypothesmith (#1566) -- add `--force-exclude` argument (#1032) +- implemented experimental and disabled by default long string rewrapping (#1132), + hidden under a `--experimental-string-processing` flag while it's being worked on; + this is an undocumented and unsupported feature, you lose Internet points for + depending on it (#1609) #### Vim plugin diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 525cb9c183d..0687aaeee52 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,7 +45,7 @@ $ black-primer [-k -w /tmp/black_test_repos] ## black-primer `black-primer` is used by CI to pull down well-known _Black_ formatted projects and see -if we get soure code changes. It will error on formatting changes or errors. Please run +if we get source code changes. It will error on formatting changes or errors. Please run before pushing your PR to see if you get the actions you would expect from _Black_ with your PR.
You may need to change [primer.json](https://github.com/psf/black/blob/master/src/black_primer/primer.json) diff --git a/Pipfile b/Pipfile index 3afe3bb9efd..44f57f6773c 100644 --- a/Pipfile +++ b/Pipfile @@ -4,13 +4,13 @@ url = "https://pypi.python.org/simple" verify_ssl = true [dev-packages] -Sphinx = "*" +Sphinx = ">=3.1.2" coverage = "*" docutils = "==0.15" # not a direct dependency, see https://github.com/pypa/pipenv/issues/3865 flake8 = "*" flake8-bugbear = "*" flake8-mypy = "*" -mypy = ">=0.740" +mypy = ">=0.782" pre-commit = "*" readme_renderer = "*" recommonmark = "*" @@ -18,16 +18,16 @@ setuptools = ">=39.2.0" setuptools-scm = "*" twine = ">=1.11.0" wheel = ">=0.31.1" -black = {editable = true,extras = ["d"],path = "."} +black = {editable = true, extras = ["d"], path = "."} [packages] aiohttp = ">=3.3.2" aiohttp-cors = "*" appdirs = "*" -click = ">=6.5" +click = ">=7.0" mypy_extensions = ">=0.4.3" pathspec = ">=0.6" -regex = ">=2019.8" +regex = ">=2020.1.8" toml = ">=0.10.1" typed-ast = "==1.4.0" typing_extensions = ">=3.7.4" diff --git a/Pipfile.lock b/Pipfile.lock index b0da4743793..32b8012ff0e 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "4a6956c7c81b496d3fd7a4e3395b332d4dc9a5bed468e36e729a4039c739ad2d" + "sha256": "61d09a6b8a8c310becd5e108ed08e0eeae50c7323c08c8040367abded0cb1031" }, "pipfile-spec": 6, "requires": {}, @@ -42,25 +42,27 @@ }, "appdirs": { "hashes": [ - "sha256:9e5896d1372858f8dd3344faf4e5014d21849c756c8d5701f78f8a103b372d92", - "sha256:d8b24664561d0d34ddfaec54636d502d7cea6e29c3eaf68f3df6180863e2166e" + "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41", + "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128" ], "index": "pypi", - "version": "==1.4.3" + "version": "==1.4.4" }, "async-timeout": { "hashes": [ "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f", "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3" ], + "markers": "python_full_version >= '3.5.3'", "version": "==3.0.1" }, "attrs": { "hashes": [ - "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", - "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" + "sha256:0ef97238856430dcf9228e07f316aefc17e8939fc8507e18c6501b761ef1a42a", + "sha256:2867b7b9f8326499ab5b0e2d12801fa5c98842d2cbd22b35112ae04bf85b4dff" ], - "version": "==19.3.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==20.1.0" }, "black": { "editable": true, @@ -78,11 +80,11 @@ }, "click": { "hashes": [ - "sha256:8a18b4ea89d8820c5d0c7da8a64b2c324b4dabb695804dbfea19b9be9d88c0cc", - "sha256:e345d143d80bf5ee7534056164e5e112ea5e22716bbb1ce727941f4c8b471b9a" + "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", + "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], "index": "pypi", - "version": "==7.1.1" + "version": "==7.1.2" }, "dataclasses": { "hashes": [ @@ -90,36 +92,40 @@ "sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84" ], "index": "pypi", - "version": "==0.6" + "python_version <": "3.7", + "version": "==0.6", + "version >": "0.6" }, "idna": { "hashes": [ - "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", - "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" + "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", + 
"sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], - "version": "==2.9" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.10" }, "multidict": { "hashes": [ - "sha256:317f96bc0950d249e96d8d29ab556d01dd38888fbe68324f46fd834b430169f1", - "sha256:42f56542166040b4474c0c608ed051732033cd821126493cf25b6c276df7dd35", - "sha256:4b7df040fb5fe826d689204f9b544af469593fb3ff3a069a6ad3409f742f5928", - "sha256:544fae9261232a97102e27a926019100a9db75bec7b37feedd74b3aa82f29969", - "sha256:620b37c3fea181dab09267cd5a84b0f23fa043beb8bc50d8474dd9694de1fa6e", - "sha256:6e6fef114741c4d7ca46da8449038ec8b1e880bbe68674c01ceeb1ac8a648e78", - "sha256:7774e9f6c9af3f12f296131453f7b81dabb7ebdb948483362f5afcaac8a826f1", - "sha256:85cb26c38c96f76b7ff38b86c9d560dea10cf3459bb5f4caf72fc1bb932c7136", - "sha256:a326f4240123a2ac66bb163eeba99578e9d63a8654a59f4688a79198f9aa10f8", - "sha256:ae402f43604e3b2bc41e8ea8b8526c7fa7139ed76b0d64fc48e28125925275b2", - "sha256:aee283c49601fa4c13adc64c09c978838a7e812f85377ae130a24d7198c0331e", - "sha256:b51249fdd2923739cd3efc95a3d6c363b67bbf779208e9f37fd5e68540d1a4d4", - "sha256:bb519becc46275c594410c6c28a8a0adc66fe24fef154a9addea54c1adb006f5", - "sha256:c2c37185fb0af79d5c117b8d2764f4321eeb12ba8c141a95d0aa8c2c1d0a11dd", - "sha256:dc561313279f9d05a3d0ffa89cd15ae477528ea37aa9795c4654588a3287a9ab", - "sha256:e439c9a10a95cb32abd708bb8be83b2134fa93790a4fb0535ca36db3dda94d20", - "sha256:fc3b4adc2ee8474cb3cd2a155305d5f8eda0a9c91320f83e55748e1fcb68f8e3" - ], - "version": "==4.7.5" + "sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a", + "sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000", + "sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2", + "sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507", + "sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5", + "sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7", + "sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d", + "sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463", + "sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19", + "sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3", + "sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b", + "sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c", + "sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87", + "sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7", + "sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430", + "sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255", + "sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d" + ], + "markers": "python_version >= '3.5'", + "version": "==4.7.6" }, "mypy-extensions": { "hashes": [ @@ -131,38 +137,38 @@ }, "pathspec": { "hashes": [ - "sha256:163b0632d4e31cef212976cf57b43d9fd6b0bac6e67c26015d611a647d5e7424", - "sha256:562aa70af2e0d434367d9790ad37aed893de47f1693e4201fd1d3dca15d19b96" + "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0", + "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061" ], "index": "pypi", - "version": "==0.7.0" + "version": "==0.8.0" }, "regex": { "hashes": [ - "sha256:01b2d70cbaed11f72e57c1cfbaca71b02e3b98f739ce33f5f26f71859ad90431", - 
"sha256:046e83a8b160aff37e7034139a336b660b01dbfe58706f9d73f5cdc6b3460242", - "sha256:113309e819634f499d0006f6200700c8209a2a8bf6bd1bdc863a4d9d6776a5d1", - "sha256:200539b5124bc4721247a823a47d116a7a23e62cc6695744e3eb5454a8888e6d", - "sha256:25f4ce26b68425b80a233ce7b6218743c71cf7297dbe02feab1d711a2bf90045", - "sha256:269f0c5ff23639316b29f31df199f401e4cb87529eafff0c76828071635d417b", - "sha256:5de40649d4f88a15c9489ed37f88f053c15400257eeb18425ac7ed0a4e119400", - "sha256:7f78f963e62a61e294adb6ff5db901b629ef78cb2a1cfce3cf4eeba80c1c67aa", - "sha256:82469a0c1330a4beb3d42568f82dffa32226ced006e0b063719468dcd40ffdf0", - "sha256:8c2b7fa4d72781577ac45ab658da44c7518e6d96e2a50d04ecb0fd8f28b21d69", - "sha256:974535648f31c2b712a6b2595969f8ab370834080e00ab24e5dbb9d19b8bfb74", - "sha256:99272d6b6a68c7ae4391908fc15f6b8c9a6c345a46b632d7fdb7ef6c883a2bbb", - "sha256:9b64a4cc825ec4df262050c17e18f60252cdd94742b4ba1286bcfe481f1c0f26", - "sha256:9e9624440d754733eddbcd4614378c18713d2d9d0dc647cf9c72f64e39671be5", - "sha256:9ff16d994309b26a1cdf666a6309c1ef51ad4f72f99d3392bcd7b7139577a1f2", - "sha256:b33ebcd0222c1d77e61dbcd04a9fd139359bded86803063d3d2d197b796c63ce", - "sha256:bba52d72e16a554d1894a0cc74041da50eea99a8483e591a9edf1025a66843ab", - "sha256:bed7986547ce54d230fd8721aba6fd19459cdc6d315497b98686d0416efaff4e", - "sha256:c7f58a0e0e13fb44623b65b01052dae8e820ed9b8b654bb6296bc9c41f571b70", - "sha256:d58a4fa7910102500722defbde6e2816b0372a4fcc85c7e239323767c74f5cbc", - "sha256:f1ac2dc65105a53c1c2d72b1d3e98c2464a133b4067a51a3d2477b28449709a0" - ], - "index": "pypi", - "version": "==2020.2.20" + "sha256:0dc64ee3f33cd7899f79a8d788abfbec168410be356ed9bd30bbd3f0a23a7204", + "sha256:1269fef3167bb52631ad4fa7dd27bf635d5a0790b8e6222065d42e91bede4162", + "sha256:14a53646369157baa0499513f96091eb70382eb50b2c82393d17d7ec81b7b85f", + "sha256:3a3af27a8d23143c49a3420efe5b3f8cf1a48c6fc8bc6856b03f638abc1833bb", + "sha256:46bac5ca10fb748d6c55843a931855e2727a7a22584f302dd9bb1506e69f83f6", + "sha256:4c037fd14c5f4e308b8370b447b469ca10e69427966527edcab07f52d88388f7", + "sha256:51178c738d559a2d1071ce0b0f56e57eb315bcf8f7d4cf127674b533e3101f88", + "sha256:5ea81ea3dbd6767873c611687141ec7b06ed8bab43f68fad5b7be184a920dc99", + "sha256:6961548bba529cac7c07af2fd4d527c5b91bb8fe18995fed6044ac22b3d14644", + "sha256:75aaa27aa521a182824d89e5ab0a1d16ca207318a6b65042b046053cfc8ed07a", + "sha256:7a2dd66d2d4df34fa82c9dc85657c5e019b87932019947faece7983f2089a840", + "sha256:8a51f2c6d1f884e98846a0a9021ff6861bdb98457879f412fdc2b42d14494067", + "sha256:9c568495e35599625f7b999774e29e8d6b01a6fb684d77dee1f56d41b11b40cd", + "sha256:9eddaafb3c48e0900690c1727fba226c4804b8e6127ea409689c3bb492d06de4", + "sha256:bbb332d45b32df41200380fff14712cb6093b61bd142272a10b16778c418e98e", + "sha256:bc3d98f621898b4a9bc7fecc00513eec8f40b5b83913d74ccb445f037d58cd89", + "sha256:c11d6033115dc4887c456565303f540c44197f4fc1a2bfb192224a301534888e", + "sha256:c50a724d136ec10d920661f1442e4a8b010a4fe5aebd65e0c2241ea41dbe93dc", + "sha256:d0a5095d52b90ff38592bbdc2644f17c6d495762edf47d876049cfd2968fbccf", + "sha256:d6cff2276e502b86a25fd10c2a96973fdb45c7a977dca2138d661417f3728341", + "sha256:e46d13f38cfcbb79bfdb2964b0fe12561fe633caf964a77a5f8d4e45fe5d2ef7" + ], + "index": "pypi", + "version": "==2020.7.14" }, "setuptools-scm": { "hashes": [ @@ -180,10 +186,10 @@ }, "toml": { "hashes": [ - "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88", - "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f", 
"sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c", - "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e" + "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e", + "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f", + "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88" ], "index": "pypi", "version": "==0.10.1" @@ -216,34 +222,35 @@ }, "typing-extensions": { "hashes": [ - "sha256:091ecc894d5e908ac75209f10d5b4f118fbdb2eb1ede6a63544054bb1edb41f2", - "sha256:910f4656f54de5993ad9304959ce9bb903f90aadc7c67a0bef07e678014e892d", - "sha256:cf8b63fedea4d89bab840ecbb93e75578af28f76f66c35889bd7065f5af88575" + "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918", + "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c", + "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f" ], "index": "pypi", - "version": "==3.7.4.1" + "version": "==3.7.4.3" }, "yarl": { "hashes": [ - "sha256:0c2ab325d33f1b824734b3ef51d4d54a54e0e7a23d13b86974507602334c2cce", - "sha256:0ca2f395591bbd85ddd50a82eb1fde9c1066fafe888c5c7cc1d810cf03fd3cc6", - "sha256:2098a4b4b9d75ee352807a95cdf5f10180db903bc5b7270715c6bbe2551f64ce", - "sha256:25e66e5e2007c7a39541ca13b559cd8ebc2ad8fe00ea94a2aad28a9b1e44e5ae", - "sha256:26d7c90cb04dee1665282a5d1a998defc1a9e012fdca0f33396f81508f49696d", - "sha256:308b98b0c8cd1dfef1a0311dc5e38ae8f9b58349226aa0533f15a16717ad702f", - "sha256:3ce3d4f7c6b69c4e4f0704b32eca8123b9c58ae91af740481aa57d7857b5e41b", - "sha256:58cd9c469eced558cd81aa3f484b2924e8897049e06889e8ff2510435b7ef74b", - "sha256:5b10eb0e7f044cf0b035112446b26a3a2946bca9d7d7edb5e54a2ad2f6652abb", - "sha256:6faa19d3824c21bcbfdfce5171e193c8b4ddafdf0ac3f129ccf0cdfcb083e462", - "sha256:944494be42fa630134bf907714d40207e646fd5a94423c90d5b514f7b0713fea", - "sha256:a161de7e50224e8e3de6e184707476b5a989037dcb24292b391a3d66ff158e70", - "sha256:a4844ebb2be14768f7994f2017f70aca39d658a96c786211be5ddbe1c68794c1", - "sha256:c2b509ac3d4b988ae8769901c66345425e361d518aecbe4acbfc2567e416626a", - "sha256:c9959d49a77b0e07559e579f38b2f3711c2b8716b8410b320bf9713013215a1b", - "sha256:d8cdee92bc930d8b09d8bd2043cedd544d9c8bd7436a77678dd602467a993080", - "sha256:e15199cdb423316e15f108f51249e44eb156ae5dba232cb73be555324a1d49c2" - ], - "version": "==1.4.2" + "sha256:040b237f58ff7d800e6e0fd89c8439b841f777dd99b4a9cca04d6935564b9409", + "sha256:17668ec6722b1b7a3a05cc0167659f6c95b436d25a36c2d52db0eca7d3f72593", + "sha256:3a584b28086bc93c888a6c2aa5c92ed1ae20932f078c46509a66dce9ea5533f2", + "sha256:4439be27e4eee76c7632c2427ca5e73703151b22cae23e64adb243a9c2f565d8", + "sha256:48e918b05850fffb070a496d2b5f97fc31d15d94ca33d3d08a4f86e26d4e7c5d", + "sha256:9102b59e8337f9874638fcfc9ac3734a0cfadb100e47d55c20d0dc6087fb4692", + "sha256:9b930776c0ae0c691776f4d2891ebc5362af86f152dd0da463a6614074cb1b02", + "sha256:b3b9ad80f8b68519cc3372a6ca85ae02cc5a8807723ac366b53c0f089db19e4a", + "sha256:bc2f976c0e918659f723401c4f834deb8a8e7798a71be4382e024bcc3f7e23a8", + "sha256:c22c75b5f394f3d47105045ea551e08a3e804dc7e01b37800ca35b58f856c3d6", + "sha256:c52ce2883dc193824989a9b97a76ca86ecd1fa7955b14f87bf367a61b6232511", + "sha256:ce584af5de8830d8701b8979b18fcf450cef9a382b1a3c8ef189bedc408faf1e", + "sha256:da456eeec17fa8aa4594d9a9f27c0b1060b6a75f2419fe0c00609587b2695f4a", + "sha256:db6db0f45d2c63ddb1a9d18d1b9b22f308e52c83638c26b422d520a815c4b3fb", + "sha256:df89642981b94e7db5596818499c4b2219028f2a528c9c37cc1de45bf2fd3a3f", + 
"sha256:f18d68f2be6bf0e89f1521af2b1bb46e66ab0018faafa81d70f358153170a317", + "sha256:f379b7f83f23fe12823085cd6b906edc49df969eb99757f58ff382349a3303c6" + ], + "markers": "python_version >= '3.5'", + "version": "==1.5.1" } }, "develop": { @@ -282,25 +289,27 @@ }, "appdirs": { "hashes": [ - "sha256:9e5896d1372858f8dd3344faf4e5014d21849c756c8d5701f78f8a103b372d92", - "sha256:d8b24664561d0d34ddfaec54636d502d7cea6e29c3eaf68f3df6180863e2166e" + "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41", + "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128" ], "index": "pypi", - "version": "==1.4.3" + "version": "==1.4.4" }, "async-timeout": { "hashes": [ "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f", "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3" ], + "markers": "python_full_version >= '3.5.3'", "version": "==3.0.1" }, "attrs": { "hashes": [ - "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", - "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" + "sha256:0ef97238856430dcf9228e07f316aefc17e8939fc8507e18c6501b761ef1a42a", + "sha256:2867b7b9f8326499ab5b0e2d12801fa5c98842d2cbd22b35112ae04bf85b4dff" ], - "version": "==19.3.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==20.1.0" }, "babel": { "hashes": [ @@ -327,51 +336,18 @@ }, "certifi": { "hashes": [ - "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", - "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" - ], - "version": "==2020.4.5.2" - }, - "cffi": { - "hashes": [ - "sha256:001bf3242a1bb04d985d63e138230802c6c8d4db3668fb545fb5005ddf5bb5ff", - "sha256:00789914be39dffba161cfc5be31b55775de5ba2235fe49aa28c148236c4e06b", - "sha256:028a579fc9aed3af38f4892bdcc7390508adabc30c6af4a6e4f611b0c680e6ac", - "sha256:14491a910663bf9f13ddf2bc8f60562d6bc5315c1f09c704937ef17293fb85b0", - "sha256:1cae98a7054b5c9391eb3249b86e0e99ab1e02bb0cc0575da191aedadbdf4384", - "sha256:2089ed025da3919d2e75a4d963d008330c96751127dd6f73c8dc0c65041b4c26", - "sha256:2d384f4a127a15ba701207f7639d94106693b6cd64173d6c8988e2c25f3ac2b6", - "sha256:337d448e5a725bba2d8293c48d9353fc68d0e9e4088d62a9571def317797522b", - "sha256:399aed636c7d3749bbed55bc907c3288cb43c65c4389964ad5ff849b6370603e", - "sha256:3b911c2dbd4f423b4c4fcca138cadde747abdb20d196c4a48708b8a2d32b16dd", - "sha256:3d311bcc4a41408cf5854f06ef2c5cab88f9fded37a3b95936c9879c1640d4c2", - "sha256:62ae9af2d069ea2698bf536dcfe1e4eed9090211dbaafeeedf5cb6c41b352f66", - "sha256:66e41db66b47d0d8672d8ed2708ba91b2f2524ece3dee48b5dfb36be8c2f21dc", - "sha256:675686925a9fb403edba0114db74e741d8181683dcf216be697d208857e04ca8", - "sha256:7e63cbcf2429a8dbfe48dcc2322d5f2220b77b2e17b7ba023d6166d84655da55", - "sha256:8a6c688fefb4e1cd56feb6c511984a6c4f7ec7d2a1ff31a10254f3c817054ae4", - "sha256:8c0ffc886aea5df6a1762d0019e9cb05f825d0eec1f520c51be9d198701daee5", - "sha256:95cd16d3dee553f882540c1ffe331d085c9e629499ceadfbda4d4fde635f4b7d", - "sha256:99f748a7e71ff382613b4e1acc0ac83bf7ad167fb3802e35e90d9763daba4d78", - "sha256:b8c78301cefcf5fd914aad35d3c04c2b21ce8629b5e4f4e45ae6812e461910fa", - "sha256:c420917b188a5582a56d8b93bdd8e0f6eca08c84ff623a4c16e809152cd35793", - "sha256:c43866529f2f06fe0edc6246eb4faa34f03fe88b64a0a9a942561c8e22f4b71f", - "sha256:cab50b8c2250b46fe738c77dbd25ce017d5e6fb35d3407606e7a4180656a5a6a", - "sha256:cef128cb4d5e0b3493f058f10ce32365972c554572ff821e175dbc6f8ff6924f", - 
"sha256:cf16e3cf6c0a5fdd9bc10c21687e19d29ad1fe863372b5543deaec1039581a30", - "sha256:e56c744aa6ff427a607763346e4170629caf7e48ead6921745986db3692f987f", - "sha256:e577934fc5f8779c554639376beeaa5657d54349096ef24abe8c74c5d9c117c3", - "sha256:f2b0fa0c01d8a0c7483afd9f31d7ecf2d71760ca24499c8697aeb5ca37dc090c" - ], - "version": "==1.14.0" + "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", + "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" + ], + "version": "==2020.6.20" }, "cfgv": { "hashes": [ - "sha256:1ccf53320421aeeb915275a196e23b3b8ae87dea8ac6698b1638001d4a486d53", - "sha256:c8e8f552ffcc6194f4e18dd4f68d9aef0c0d58ae7e7be8c82bee3c5e9edfa513" + "sha256:32e43d604bbe7896fe7c248a9c2276447dbef840feb28fe20494f62af110211d", + "sha256:cf22deb93d4bcf92f345a5c3cd39d3d41d6340adc60c78bbbd6588c384fda6a1" ], "markers": "python_full_version >= '3.6.1'", - "version": "==3.1.0" + "version": "==3.2.0" }, "chardet": { "hashes": [ @@ -382,11 +358,19 @@ }, "click": { "hashes": [ - "sha256:8a18b4ea89d8820c5d0c7da8a64b2c324b4dabb695804dbfea19b9be9d88c0cc", - "sha256:e345d143d80bf5ee7534056164e5e112ea5e22716bbb1ce727941f4c8b471b9a" + "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", + "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], "index": "pypi", - "version": "==7.1.1" + "version": "==7.1.2" + }, + "colorama": { + "hashes": [ + "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff", + "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==0.4.3" }, "commonmark": { "hashes": [ @@ -397,71 +381,50 @@ }, "coverage": { "hashes": [ - "sha256:03f630aba2b9b0d69871c2e8d23a69b7fe94a1e2f5f10df5049c0df99db639a0", - "sha256:046a1a742e66d065d16fb564a26c2a15867f17695e7f3d358d7b1ad8a61bca30", - "sha256:0a907199566269e1cfa304325cc3b45c72ae341fbb3253ddde19fa820ded7a8b", - "sha256:165a48268bfb5a77e2d9dbb80de7ea917332a79c7adb747bd005b3a07ff8caf0", - "sha256:1b60a95fc995649464e0cd48cecc8288bac5f4198f21d04b8229dc4097d76823", - "sha256:1f66cf263ec77af5b8fe14ef14c5e46e2eb4a795ac495ad7c03adc72ae43fafe", - "sha256:2e08c32cbede4a29e2a701822291ae2bc9b5220a971bba9d1e7615312efd3037", - "sha256:3844c3dab800ca8536f75ae89f3cf566848a3eb2af4d9f7b1103b4f4f7a5dad6", - "sha256:408ce64078398b2ee2ec08199ea3fcf382828d2f8a19c5a5ba2946fe5ddc6c31", - "sha256:443be7602c790960b9514567917af538cac7807a7c0c0727c4d2bbd4014920fd", - "sha256:4482f69e0701139d0f2c44f3c395d1d1d37abd81bfafbf9b6efbe2542679d892", - "sha256:4a8a259bf990044351baf69d3b23e575699dd60b18460c71e81dc565f5819ac1", - "sha256:513e6526e0082c59a984448f4104c9bf346c2da9961779ede1fc458e8e8a1f78", - "sha256:5f587dfd83cb669933186661a351ad6fc7166273bc3e3a1531ec5c783d997aac", - "sha256:62061e87071497951155cbccee487980524d7abea647a1b2a6eb6b9647df9006", - "sha256:641e329e7f2c01531c45c687efcec8aeca2a78a4ff26d49184dce3d53fc35014", - "sha256:65a7e00c00472cd0f59ae09d2fb8a8aaae7f4a0cf54b2b74f3138d9f9ceb9cb2", - "sha256:6ad6ca45e9e92c05295f638e78cd42bfaaf8ee07878c9ed73e93190b26c125f7", - "sha256:73aa6e86034dad9f00f4bbf5a666a889d17d79db73bc5af04abd6c20a014d9c8", - "sha256:7c9762f80a25d8d0e4ab3cb1af5d9dffbddb3ee5d21c43e3474c84bf5ff941f7", - "sha256:85596aa5d9aac1bf39fe39d9fa1051b0f00823982a1de5766e35d495b4a36ca9", - "sha256:86a0ea78fd851b313b2e712266f663e13b6bc78c2fb260b079e8b67d970474b1", - "sha256:8a620767b8209f3446197c0e29ba895d75a1e272a36af0786ec70fe7834e4307", - 
"sha256:922fb9ef2c67c3ab20e22948dcfd783397e4c043a5c5fa5ff5e9df5529074b0a", - "sha256:9fad78c13e71546a76c2f8789623eec8e499f8d2d799f4b4547162ce0a4df435", - "sha256:a37c6233b28e5bc340054cf6170e7090a4e85069513320275a4dc929144dccf0", - "sha256:c3fc325ce4cbf902d05a80daa47b645d07e796a80682c1c5800d6ac5045193e5", - "sha256:cda33311cb9fb9323958a69499a667bd728a39a7aa4718d7622597a44c4f1441", - "sha256:db1d4e38c9b15be1521722e946ee24f6db95b189d1447fa9ff18dd16ba89f732", - "sha256:eda55e6e9ea258f5e4add23bcf33dc53b2c319e70806e180aecbff8d90ea24de", - "sha256:f372cdbb240e09ee855735b9d85e7f50730dcfb6296b74b95a3e5dea0615c4c1" - ], - "index": "pypi", - "version": "==5.0.4" - }, - "cryptography": { - "hashes": [ - "sha256:091d31c42f444c6f519485ed528d8b451d1a0c7bf30e8ca583a0cac44b8a0df6", - "sha256:18452582a3c85b96014b45686af264563e3e5d99d226589f057ace56196ec78b", - "sha256:1dfa985f62b137909496e7fc182dac687206d8d089dd03eaeb28ae16eec8e7d5", - "sha256:1e4014639d3d73fbc5ceff206049c5a9a849cefd106a49fa7aaaa25cc0ce35cf", - "sha256:22e91636a51170df0ae4dcbd250d318fd28c9f491c4e50b625a49964b24fe46e", - "sha256:3b3eba865ea2754738616f87292b7f29448aec342a7c720956f8083d252bf28b", - "sha256:651448cd2e3a6bc2bb76c3663785133c40d5e1a8c1a9c5429e4354201c6024ae", - "sha256:726086c17f94747cedbee6efa77e99ae170caebeb1116353c6cf0ab67ea6829b", - "sha256:844a76bc04472e5135b909da6aed84360f522ff5dfa47f93e3dd2a0b84a89fa0", - "sha256:88c881dd5a147e08d1bdcf2315c04972381d026cdb803325c03fe2b4a8ed858b", - "sha256:96c080ae7118c10fcbe6229ab43eb8b090fccd31a09ef55f83f690d1ef619a1d", - "sha256:a0c30272fb4ddda5f5ffc1089d7405b7a71b0b0f51993cb4e5dbb4590b2fc229", - "sha256:bb1f0281887d89617b4c68e8db9a2c42b9efebf2702a3c5bf70599421a8623e3", - "sha256:c447cf087cf2dbddc1add6987bbe2f767ed5317adb2d08af940db517dd704365", - "sha256:c4fd17d92e9d55b84707f4fd09992081ba872d1a0c610c109c18e062e06a2e55", - "sha256:d0d5aeaedd29be304848f1c5059074a740fa9f6f26b84c5b63e8b29e73dfc270", - "sha256:daf54a4b07d67ad437ff239c8a4080cfd1cc7213df57d33c97de7b4738048d5e", - "sha256:e993468c859d084d5579e2ebee101de8f5a27ce8e2159959b6673b418fd8c785", - "sha256:f118a95c7480f5be0df8afeb9a11bd199aa20afab7a96bcf20409b411a3a85f0" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==2.9.2" + "sha256:098a703d913be6fbd146a8c50cc76513d726b022d170e5e98dc56d958fd592fb", + "sha256:16042dc7f8e632e0dcd5206a5095ebd18cb1d005f4c89694f7f8aafd96dd43a3", + "sha256:1adb6be0dcef0cf9434619d3b892772fdb48e793300f9d762e480e043bd8e716", + "sha256:27ca5a2bc04d68f0776f2cdcb8bbd508bbe430a7bf9c02315cd05fb1d86d0034", + "sha256:28f42dc5172ebdc32622a2c3f7ead1b836cdbf253569ae5673f499e35db0bac3", + "sha256:2fcc8b58953d74d199a1a4d633df8146f0ac36c4e720b4a1997e9b6327af43a8", + "sha256:304fbe451698373dc6653772c72c5d5e883a4aadaf20343592a7abb2e643dae0", + "sha256:30bc103587e0d3df9e52cd9da1dd915265a22fad0b72afe54daf840c984b564f", + "sha256:40f70f81be4d34f8d491e55936904db5c527b0711b2a46513641a5729783c2e4", + "sha256:4186fc95c9febeab5681bc3248553d5ec8c2999b8424d4fc3a39c9cba5796962", + "sha256:46794c815e56f1431c66d81943fa90721bb858375fb36e5903697d5eef88627d", + "sha256:4869ab1c1ed33953bb2433ce7b894a28d724b7aa76c19b11e2878034a4e4680b", + "sha256:4f6428b55d2916a69f8d6453e48a505c07b2245653b0aa9f0dee38785939f5e4", + "sha256:52f185ffd3291196dc1aae506b42e178a592b0b60a8610b108e6ad892cfc1bb3", + "sha256:538f2fd5eb64366f37c97fdb3077d665fa946d2b6d95447622292f38407f9258", + "sha256:64c4f340338c68c463f1b56e3f2f0423f7b17ba6c3febae80b81f0e093077f59", + 
"sha256:675192fca634f0df69af3493a48224f211f8db4e84452b08d5fcebb9167adb01", + "sha256:700997b77cfab016533b3e7dbc03b71d33ee4df1d79f2463a318ca0263fc29dd", + "sha256:8505e614c983834239f865da2dd336dcf9d72776b951d5dfa5ac36b987726e1b", + "sha256:962c44070c281d86398aeb8f64e1bf37816a4dfc6f4c0f114756b14fc575621d", + "sha256:9e536783a5acee79a9b308be97d3952b662748c4037b6a24cbb339dc7ed8eb89", + "sha256:9ea749fd447ce7fb1ac71f7616371f04054d969d412d37611716721931e36efd", + "sha256:a34cb28e0747ea15e82d13e14de606747e9e484fb28d63c999483f5d5188e89b", + "sha256:a3ee9c793ffefe2944d3a2bd928a0e436cd0ac2d9e3723152d6fd5398838ce7d", + "sha256:aab75d99f3f2874733946a7648ce87a50019eb90baef931698f96b76b6769a46", + "sha256:b1ed2bdb27b4c9fc87058a1cb751c4df8752002143ed393899edb82b131e0546", + "sha256:b360d8fd88d2bad01cb953d81fd2edd4be539df7bfec41e8753fe9f4456a5082", + "sha256:b8f58c7db64d8f27078cbf2a4391af6aa4e4767cc08b37555c4ae064b8558d9b", + "sha256:c1bbb628ed5192124889b51204de27c575b3ffc05a5a91307e7640eff1d48da4", + "sha256:c2ff24df02a125b7b346c4c9078c8936da06964cc2d276292c357d64378158f8", + "sha256:c890728a93fffd0407d7d37c1e6083ff3f9f211c83b4316fae3778417eab9811", + "sha256:c96472b8ca5dc135fb0aa62f79b033f02aa434fb03a8b190600a5ae4102df1fd", + "sha256:ce7866f29d3025b5b34c2e944e66ebef0d92e4a4f2463f7266daa03a1332a651", + "sha256:e26c993bd4b220429d4ec8c1468eca445a4064a61c74ca08da7429af9bc53bb0" + ], + "index": "pypi", + "version": "==5.2.1" }, "distlib": { "hashes": [ - "sha256:2e166e231a26b36d6dfe35a48c4464346620f8645ed0ace01ee31822b288de21" + "sha256:8c09de2c67b3e7deef7184574fc060ab8a793e7adbb183d942c389c8b13c52fb", + "sha256:edf6116872c863e1aa9d5bb7cb5e05a022c519a4594dc703843343a9ddd9bff1" ], - "version": "==0.3.0" + "version": "==0.3.1" }, "docutils": { "hashes": [ @@ -472,13 +435,6 @@ "index": "pypi", "version": "==0.15" }, - "entrypoints": { - "hashes": [ - "sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19", - "sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451" - ], - "version": "==0.3" - }, "filelock": { "hashes": [ "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59", @@ -488,11 +444,11 @@ }, "flake8": { "hashes": [ - "sha256:45681a117ecc81e870cbf1262835ae4af5e7a8b08e40b944a8a6e6b895914cfb", - "sha256:49356e766643ad15072a789a20915d3c91dc89fd313ccd71802303fd67e4deca" + "sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c", + "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208" ], "index": "pypi", - "version": "==3.7.9" + "version": "==3.8.3" }, "flake8-bugbear": { "hashes": [ @@ -512,18 +468,19 @@ }, "identify": { "hashes": [ - "sha256:249ebc7e2066d6393d27c1b1be3b70433f824a120b1d8274d362f1eb419e3b52", - "sha256:781fd3401f5d2b17b22a8b18b493a48d5d948e3330634e82742e23f9c20234ef" + "sha256:9f5fcf22b665eaece583bd395b103c2769772a0f646ffabb5b1f155901b07de2", + "sha256:b1aa2e05863dc80242610d46a7b49105e2eafe00ef0c8ff311c1828680760c76" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.4.19" + "version": "==1.4.29" }, "idna": { "hashes": [ - "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", - "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" + "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", + "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], - "version": "==2.9" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": 
"==2.10" }, "imagesize": { "hashes": [ @@ -533,14 +490,6 @@ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, - "jeepney": { - "hashes": [ - "sha256:3479b861cc2b6407de5188695fa1a8d57e5072d7059322469b62628869b8e36e", - "sha256:d6c6b49683446d2407d2fe3acb7a368a77ff063f9182fe427da15d622adc24cf" - ], - "markers": "sys_platform == 'linux'", - "version": "==0.4.3" - }, "jinja2": { "hashes": [ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", @@ -551,11 +500,11 @@ }, "keyring": { "hashes": [ - "sha256:3401234209015144a5d75701e71cb47239e552b0882313e9f51e8976f9e27843", - "sha256:c53e0e5ccde3ad34284a40ce7976b5b3a3d6de70344c3f8ee44364cc340976ec" + "sha256:182f94fc0381546489e3e4d90384a8c1d43cc09ffe2eb4a826e7312df6e1be7c", + "sha256:cd4d486803d55bdb13e2d453eb61dbbc984773e4f2b98a455aa85b1f4bc421e4" ], "markers": "python_version >= '3.6'", - "version": "==21.2.1" + "version": "==21.3.1" }, "markupsafe": { "hashes": [ @@ -605,45 +554,46 @@ }, "multidict": { "hashes": [ - "sha256:317f96bc0950d249e96d8d29ab556d01dd38888fbe68324f46fd834b430169f1", - "sha256:42f56542166040b4474c0c608ed051732033cd821126493cf25b6c276df7dd35", - "sha256:4b7df040fb5fe826d689204f9b544af469593fb3ff3a069a6ad3409f742f5928", - "sha256:544fae9261232a97102e27a926019100a9db75bec7b37feedd74b3aa82f29969", - "sha256:620b37c3fea181dab09267cd5a84b0f23fa043beb8bc50d8474dd9694de1fa6e", - "sha256:6e6fef114741c4d7ca46da8449038ec8b1e880bbe68674c01ceeb1ac8a648e78", - "sha256:7774e9f6c9af3f12f296131453f7b81dabb7ebdb948483362f5afcaac8a826f1", - "sha256:85cb26c38c96f76b7ff38b86c9d560dea10cf3459bb5f4caf72fc1bb932c7136", - "sha256:a326f4240123a2ac66bb163eeba99578e9d63a8654a59f4688a79198f9aa10f8", - "sha256:ae402f43604e3b2bc41e8ea8b8526c7fa7139ed76b0d64fc48e28125925275b2", - "sha256:aee283c49601fa4c13adc64c09c978838a7e812f85377ae130a24d7198c0331e", - "sha256:b51249fdd2923739cd3efc95a3d6c363b67bbf779208e9f37fd5e68540d1a4d4", - "sha256:bb519becc46275c594410c6c28a8a0adc66fe24fef154a9addea54c1adb006f5", - "sha256:c2c37185fb0af79d5c117b8d2764f4321eeb12ba8c141a95d0aa8c2c1d0a11dd", - "sha256:dc561313279f9d05a3d0ffa89cd15ae477528ea37aa9795c4654588a3287a9ab", - "sha256:e439c9a10a95cb32abd708bb8be83b2134fa93790a4fb0535ca36db3dda94d20", - "sha256:fc3b4adc2ee8474cb3cd2a155305d5f8eda0a9c91320f83e55748e1fcb68f8e3" - ], - "version": "==4.7.5" + "sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a", + "sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000", + "sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2", + "sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507", + "sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5", + "sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7", + "sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d", + "sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463", + "sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19", + "sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3", + "sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b", + "sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c", + "sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87", + "sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7", + "sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430", + 
"sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255", + "sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d" + ], + "markers": "python_version >= '3.5'", + "version": "==4.7.6" }, "mypy": { "hashes": [ - "sha256:15b948e1302682e3682f11f50208b726a246ab4e6c1b39f9264a8796bb416aa2", - "sha256:219a3116ecd015f8dca7b5d2c366c973509dfb9a8fc97ef044a36e3da66144a1", - "sha256:3b1fc683fb204c6b4403a1ef23f0b1fac8e4477091585e0c8c54cbdf7d7bb164", - "sha256:3beff56b453b6ef94ecb2996bea101a08f1f8a9771d3cbf4988a61e4d9973761", - "sha256:7687f6455ec3ed7649d1ae574136835a4272b65b3ddcf01ab8704ac65616c5ce", - "sha256:7ec45a70d40ede1ec7ad7f95b3c94c9cf4c186a32f6bacb1795b60abd2f9ef27", - "sha256:86c857510a9b7c3104cf4cde1568f4921762c8f9842e987bc03ed4f160925754", - "sha256:8a627507ef9b307b46a1fea9513d5c98680ba09591253082b4c48697ba05a4ae", - "sha256:8dfb69fbf9f3aeed18afffb15e319ca7f8da9642336348ddd6cab2713ddcf8f9", - "sha256:a34b577cdf6313bf24755f7a0e3f3c326d5c1f4fe7422d1d06498eb25ad0c600", - "sha256:a8ffcd53cb5dfc131850851cc09f1c44689c2812d0beb954d8138d4f5fc17f65", - "sha256:b90928f2d9eb2f33162405f32dde9f6dcead63a0971ca8a1b50eb4ca3e35ceb8", - "sha256:c56ffe22faa2e51054c5f7a3bc70a370939c2ed4de308c690e7949230c995913", - "sha256:f91c7ae919bbc3f96cd5e5b2e786b2b108343d1d7972ea130f7de27fdd547cf3" + "sha256:2c6cde8aa3426c1682d35190b59b71f661237d74b053822ea3d748e2c9578a7c", + "sha256:3fdda71c067d3ddfb21da4b80e2686b71e9e5c72cca65fa216d207a358827f86", + "sha256:5dd13ff1f2a97f94540fd37a49e5d255950ebcdf446fb597463a40d0df3fac8b", + "sha256:6731603dfe0ce4352c555c6284c6db0dc935b685e9ce2e4cf220abe1e14386fd", + "sha256:6bb93479caa6619d21d6e7160c552c1193f6952f0668cdda2f851156e85186fc", + "sha256:81c7908b94239c4010e16642c9102bfc958ab14e36048fa77d0be3289dda76ea", + "sha256:9c7a9a7ceb2871ba4bac1cf7217a7dd9ccd44c27c2950edbc6dc08530f32ad4e", + "sha256:a4a2cbcfc4cbf45cd126f531dedda8485671545b43107ded25ce952aac6fb308", + "sha256:b7fbfabdbcc78c4f6fc4712544b9b0d6bf171069c6e0e3cb82440dd10ced3406", + "sha256:c05b9e4fb1d8a41d41dec8786c94f3b95d3c5f528298d769eb8e73d293abc48d", + "sha256:d7df6eddb6054d21ca4d3c6249cae5578cb4602951fd2b6ee2f5510ffb098707", + "sha256:e0b61738ab504e656d1fe4ff0c0601387a5489ca122d55390ade31f9ca0e252d", + "sha256:eff7d4a85e9eea55afa34888dfeaccde99e7520b51f867ac28a48492c0b1130c", + "sha256:f05644db6779387ccdb468cc47a44b4356fc2ffa9287135d05b70a98dc83b89a" ], "index": "pypi", - "version": "==0.770" + "version": "==0.782" }, "mypy-extensions": { "hashes": [ @@ -655,9 +605,10 @@ }, "nodeenv": { "hashes": [ - "sha256:4b0b77afa3ba9b54f4b6396e60b0c83f59eaeb2d63dc3cc7a70f7f4af96c82bc" + "sha256:5304d424c529c997bc888453aeaa6362d242b6b4631e90f3d4bf1b290f1c84a9", + "sha256:ab45090ae383b716c4ef89e690c41ff8c2b257b85b309f01f3654df3d084bd7c" ], - "version": "==1.4.0" + "version": "==1.5.0" }, "packaging": { "hashes": [ @@ -669,11 +620,11 @@ }, "pathspec": { "hashes": [ - "sha256:163b0632d4e31cef212976cf57b43d9fd6b0bac6e67c26015d611a647d5e7424", - "sha256:562aa70af2e0d434367d9790ad37aed893de47f1693e4201fd1d3dca15d19b96" + "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0", + "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061" ], "index": "pypi", - "version": "==0.7.0" + "version": "==0.8.0" }, "pkginfo": { "hashes": [ @@ -684,11 +635,11 @@ }, "pre-commit": { "hashes": [ - "sha256:487c675916e6f99d355ec5595ad77b325689d423ef4839db1ed2f02f639c9522", - "sha256:c0aa11bce04a7b46c5544723aedf4e81a4d5f64ad1205a30a9ea12d5e81969e1" + 
"sha256:810aef2a2ba4f31eed1941fc270e72696a1ad5590b9751839c90807d0fff6b9a", + "sha256:c54fd3e574565fe128ecc5e7d2f91279772ddb03f8729645fa812fe809084a70" ], "index": "pypi", - "version": "==2.2.0" + "version": "==2.7.1" }, "pycodestyle": { "hashes": [ @@ -698,14 +649,6 @@ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.6.0" }, - "pycparser": { - "hashes": [ - "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", - "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.20" - }, "pyflakes": { "hashes": [ "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92", @@ -755,11 +698,11 @@ }, "readme-renderer": { "hashes": [ - "sha256:1b6d8dd1673a0b293766b4106af766b6eff3654605f9c4f239e65de6076bc222", - "sha256:e67d64242f0174a63c3b727801a2fff4c1f38ebe5d71d95ff7ece081945a6cd4" + "sha256:cbe9db71defedd2428a1589cdc545f9bd98e59297449f69d721ef8f1cfced68d", + "sha256:cc4957a803106e820d05d14f71033092537a22daa4f406dfbdd61177e0936376" ], "index": "pypi", - "version": "==25.0" + "version": "==26.0" }, "recommonmark": { "hashes": [ @@ -771,38 +714,38 @@ }, "regex": { "hashes": [ - "sha256:01b2d70cbaed11f72e57c1cfbaca71b02e3b98f739ce33f5f26f71859ad90431", - "sha256:046e83a8b160aff37e7034139a336b660b01dbfe58706f9d73f5cdc6b3460242", - "sha256:113309e819634f499d0006f6200700c8209a2a8bf6bd1bdc863a4d9d6776a5d1", - "sha256:200539b5124bc4721247a823a47d116a7a23e62cc6695744e3eb5454a8888e6d", - "sha256:25f4ce26b68425b80a233ce7b6218743c71cf7297dbe02feab1d711a2bf90045", - "sha256:269f0c5ff23639316b29f31df199f401e4cb87529eafff0c76828071635d417b", - "sha256:5de40649d4f88a15c9489ed37f88f053c15400257eeb18425ac7ed0a4e119400", - "sha256:7f78f963e62a61e294adb6ff5db901b629ef78cb2a1cfce3cf4eeba80c1c67aa", - "sha256:82469a0c1330a4beb3d42568f82dffa32226ced006e0b063719468dcd40ffdf0", - "sha256:8c2b7fa4d72781577ac45ab658da44c7518e6d96e2a50d04ecb0fd8f28b21d69", - "sha256:974535648f31c2b712a6b2595969f8ab370834080e00ab24e5dbb9d19b8bfb74", - "sha256:99272d6b6a68c7ae4391908fc15f6b8c9a6c345a46b632d7fdb7ef6c883a2bbb", - "sha256:9b64a4cc825ec4df262050c17e18f60252cdd94742b4ba1286bcfe481f1c0f26", - "sha256:9e9624440d754733eddbcd4614378c18713d2d9d0dc647cf9c72f64e39671be5", - "sha256:9ff16d994309b26a1cdf666a6309c1ef51ad4f72f99d3392bcd7b7139577a1f2", - "sha256:b33ebcd0222c1d77e61dbcd04a9fd139359bded86803063d3d2d197b796c63ce", - "sha256:bba52d72e16a554d1894a0cc74041da50eea99a8483e591a9edf1025a66843ab", - "sha256:bed7986547ce54d230fd8721aba6fd19459cdc6d315497b98686d0416efaff4e", - "sha256:c7f58a0e0e13fb44623b65b01052dae8e820ed9b8b654bb6296bc9c41f571b70", - "sha256:d58a4fa7910102500722defbde6e2816b0372a4fcc85c7e239323767c74f5cbc", - "sha256:f1ac2dc65105a53c1c2d72b1d3e98c2464a133b4067a51a3d2477b28449709a0" - ], - "index": "pypi", - "version": "==2020.2.20" + "sha256:0dc64ee3f33cd7899f79a8d788abfbec168410be356ed9bd30bbd3f0a23a7204", + "sha256:1269fef3167bb52631ad4fa7dd27bf635d5a0790b8e6222065d42e91bede4162", + "sha256:14a53646369157baa0499513f96091eb70382eb50b2c82393d17d7ec81b7b85f", + "sha256:3a3af27a8d23143c49a3420efe5b3f8cf1a48c6fc8bc6856b03f638abc1833bb", + "sha256:46bac5ca10fb748d6c55843a931855e2727a7a22584f302dd9bb1506e69f83f6", + "sha256:4c037fd14c5f4e308b8370b447b469ca10e69427966527edcab07f52d88388f7", + "sha256:51178c738d559a2d1071ce0b0f56e57eb315bcf8f7d4cf127674b533e3101f88", + 
"sha256:5ea81ea3dbd6767873c611687141ec7b06ed8bab43f68fad5b7be184a920dc99", + "sha256:6961548bba529cac7c07af2fd4d527c5b91bb8fe18995fed6044ac22b3d14644", + "sha256:75aaa27aa521a182824d89e5ab0a1d16ca207318a6b65042b046053cfc8ed07a", + "sha256:7a2dd66d2d4df34fa82c9dc85657c5e019b87932019947faece7983f2089a840", + "sha256:8a51f2c6d1f884e98846a0a9021ff6861bdb98457879f412fdc2b42d14494067", + "sha256:9c568495e35599625f7b999774e29e8d6b01a6fb684d77dee1f56d41b11b40cd", + "sha256:9eddaafb3c48e0900690c1727fba226c4804b8e6127ea409689c3bb492d06de4", + "sha256:bbb332d45b32df41200380fff14712cb6093b61bd142272a10b16778c418e98e", + "sha256:bc3d98f621898b4a9bc7fecc00513eec8f40b5b83913d74ccb445f037d58cd89", + "sha256:c11d6033115dc4887c456565303f540c44197f4fc1a2bfb192224a301534888e", + "sha256:c50a724d136ec10d920661f1442e4a8b010a4fe5aebd65e0c2241ea41dbe93dc", + "sha256:d0a5095d52b90ff38592bbdc2644f17c6d495762edf47d876049cfd2968fbccf", + "sha256:d6cff2276e502b86a25fd10c2a96973fdb45c7a977dca2138d661417f3728341", + "sha256:e46d13f38cfcbb79bfdb2964b0fe12561fe633caf964a77a5f8d4e45fe5d2ef7" + ], + "index": "pypi", + "version": "==2020.7.14" }, "requests": { "hashes": [ - "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", - "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" + "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", + "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==2.23.0" + "version": "==2.24.0" }, "requests-toolbelt": { "hashes": [ @@ -811,13 +754,12 @@ ], "version": "==0.9.1" }, - "secretstorage": { + "rfc3986": { "hashes": [ - "sha256:15da8a989b65498e29be338b3b279965f1b8f09b9668bd8010da183024c8bff6", - "sha256:b5ec909dde94d4ae2fa26af7c089036997030f0cf0a5cb372b4cccabd81c143b" + "sha256:112398da31a3344dc25dbf477d8df6cb34f9278a94fee2625d89e4514be8bb9d", + "sha256:af9147e9aceda37c91a05f4deb128d4b4b49d6b199775fd2d2927768abdc8f50" ], - "markers": "sys_platform == 'linux'", - "version": "==3.1.2" + "version": "==1.4.0" }, "setuptools-scm": { "hashes": [ @@ -850,11 +792,11 @@ }, "sphinx": { "hashes": [ - "sha256:b4c750d546ab6d7e05bdff6ac24db8ae3e8b8253a3569b754e445110a0a12b66", - "sha256:fc312670b56cb54920d6cc2ced455a22a547910de10b3142276495ced49231cb" + "sha256:321d6d9b16fa381a5306e5a0b76cd48ffbc588e6340059a729c6fdd66087e0e8", + "sha256:ce6fd7ff5b215af39e2fcd44d4a321f6694b4530b6f2b2109b64d120773faea0" ], "index": "pypi", - "version": "==2.4.4" + "version": "==3.2.1" }, "sphinxcontrib-applehelp": { "hashes": [ @@ -906,29 +848,29 @@ }, "toml": { "hashes": [ - "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88", - "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f", "sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c", - "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e" + "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e", + "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f", + "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88" ], "index": "pypi", "version": "==0.10.1" }, "tqdm": { "hashes": [ - "sha256:07c06493f1403c1380b630ae3dcbe5ae62abcf369a93bbc052502279f189ab8c", - "sha256:cd140979c2bebd2311dfb14781d8f19bd5a9debb92dcab9f6ef899c987fcf71f" + "sha256:1a336d2b829be50e46b84668691e0a2719f26c97c62846298dd5ae2937e4d5cf", + 
"sha256:564d632ea2b9cb52979f7956e093e831c28d441c11751682f84c86fc46e4fd21" ], "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==4.46.1" + "version": "==4.48.2" }, "twine": { "hashes": [ - "sha256:c1af8ca391e43b0a06bbc155f7f67db0bf0d19d284bfc88d1675da497a946124", - "sha256:d561a5e511f70275e5a485a6275ff61851c16ffcb3a95a602189161112d9f160" + "sha256:34352fd52ec3b9d29837e6072d5a2a7c6fe4290e97bba46bb8d478b5c598f7ab", + "sha256:ba9ff477b8d6de0c89dd450e70b2185da190514e91c42cc62f96850025c10472" ], "index": "pypi", - "version": "==3.1.1" + "version": "==3.2.0" }, "typed-ast": { "hashes": [ @@ -958,28 +900,28 @@ }, "typing-extensions": { "hashes": [ - "sha256:091ecc894d5e908ac75209f10d5b4f118fbdb2eb1ede6a63544054bb1edb41f2", - "sha256:910f4656f54de5993ad9304959ce9bb903f90aadc7c67a0bef07e678014e892d", - "sha256:cf8b63fedea4d89bab840ecbb93e75578af28f76f66c35889bd7065f5af88575" + "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918", + "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c", + "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f" ], "index": "pypi", - "version": "==3.7.4.1" + "version": "==3.7.4.3" }, "urllib3": { "hashes": [ - "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527", - "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115" + "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", + "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", - "version": "==1.25.9" + "version": "==1.25.10" }, "virtualenv": { "hashes": [ - "sha256:5102fbf1ec57e80671ef40ed98a84e980a71194cedf30c87c2b25c3a9e0b0107", - "sha256:ccfb8e1e05a1174f7bd4c163700277ba730496094fe1a58bea9d4ac140a207c8" + "sha256:43add625c53c596d38f971a465553f6318decc39d98512bc100fa1b1e839c8dc", + "sha256:e0305af10299a7fb0d69393d8f04cb2965dda9351140d11ac8db4e5e3970451b" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==20.0.23" + "version": "==20.0.31" }, "webencodings": { "hashes": [ @@ -990,33 +932,34 @@ }, "wheel": { "hashes": [ - "sha256:8788e9155fe14f54164c1b9eb0a319d98ef02c160725587ad60f14ddc57b6f96", - "sha256:df277cb51e61359aba502208d680f90c0493adec6f0e848af94948778aed386e" + "sha256:497add53525d16c173c2c1c733b8f655510e909ea78cc0e29d374243544b77a2", + "sha256:99a22d87add3f634ff917310a3d87e499f19e663413a52eb9232c447aa646c9f" ], "index": "pypi", - "version": "==0.34.2" + "version": "==0.35.1" }, "yarl": { "hashes": [ - "sha256:0c2ab325d33f1b824734b3ef51d4d54a54e0e7a23d13b86974507602334c2cce", - "sha256:0ca2f395591bbd85ddd50a82eb1fde9c1066fafe888c5c7cc1d810cf03fd3cc6", - "sha256:2098a4b4b9d75ee352807a95cdf5f10180db903bc5b7270715c6bbe2551f64ce", - "sha256:25e66e5e2007c7a39541ca13b559cd8ebc2ad8fe00ea94a2aad28a9b1e44e5ae", - "sha256:26d7c90cb04dee1665282a5d1a998defc1a9e012fdca0f33396f81508f49696d", - "sha256:308b98b0c8cd1dfef1a0311dc5e38ae8f9b58349226aa0533f15a16717ad702f", - "sha256:3ce3d4f7c6b69c4e4f0704b32eca8123b9c58ae91af740481aa57d7857b5e41b", - "sha256:58cd9c469eced558cd81aa3f484b2924e8897049e06889e8ff2510435b7ef74b", - "sha256:5b10eb0e7f044cf0b035112446b26a3a2946bca9d7d7edb5e54a2ad2f6652abb", - "sha256:6faa19d3824c21bcbfdfce5171e193c8b4ddafdf0ac3f129ccf0cdfcb083e462", - "sha256:944494be42fa630134bf907714d40207e646fd5a94423c90d5b514f7b0713fea", - 
"sha256:a161de7e50224e8e3de6e184707476b5a989037dcb24292b391a3d66ff158e70", - "sha256:a4844ebb2be14768f7994f2017f70aca39d658a96c786211be5ddbe1c68794c1", - "sha256:c2b509ac3d4b988ae8769901c66345425e361d518aecbe4acbfc2567e416626a", - "sha256:c9959d49a77b0e07559e579f38b2f3711c2b8716b8410b320bf9713013215a1b", - "sha256:d8cdee92bc930d8b09d8bd2043cedd544d9c8bd7436a77678dd602467a993080", - "sha256:e15199cdb423316e15f108f51249e44eb156ae5dba232cb73be555324a1d49c2" - ], - "version": "==1.4.2" + "sha256:040b237f58ff7d800e6e0fd89c8439b841f777dd99b4a9cca04d6935564b9409", + "sha256:17668ec6722b1b7a3a05cc0167659f6c95b436d25a36c2d52db0eca7d3f72593", + "sha256:3a584b28086bc93c888a6c2aa5c92ed1ae20932f078c46509a66dce9ea5533f2", + "sha256:4439be27e4eee76c7632c2427ca5e73703151b22cae23e64adb243a9c2f565d8", + "sha256:48e918b05850fffb070a496d2b5f97fc31d15d94ca33d3d08a4f86e26d4e7c5d", + "sha256:9102b59e8337f9874638fcfc9ac3734a0cfadb100e47d55c20d0dc6087fb4692", + "sha256:9b930776c0ae0c691776f4d2891ebc5362af86f152dd0da463a6614074cb1b02", + "sha256:b3b9ad80f8b68519cc3372a6ca85ae02cc5a8807723ac366b53c0f089db19e4a", + "sha256:bc2f976c0e918659f723401c4f834deb8a8e7798a71be4382e024bcc3f7e23a8", + "sha256:c22c75b5f394f3d47105045ea551e08a3e804dc7e01b37800ca35b58f856c3d6", + "sha256:c52ce2883dc193824989a9b97a76ca86ecd1fa7955b14f87bf367a61b6232511", + "sha256:ce584af5de8830d8701b8979b18fcf450cef9a382b1a3c8ef189bedc408faf1e", + "sha256:da456eeec17fa8aa4594d9a9f27c0b1060b6a75f2419fe0c00609587b2695f4a", + "sha256:db6db0f45d2c63ddb1a9d18d1b9b22f308e52c83638c26b422d520a815c4b3fb", + "sha256:df89642981b94e7db5596818499c4b2219028f2a528c9c37cc1de45bf2fd3a3f", + "sha256:f18d68f2be6bf0e89f1521af2b1bb46e66ab0018faafa81d70f358153170a317", + "sha256:f379b7f83f23fe12823085cd6b906edc49df969eb99757f58ff382349a3303c6" + ], + "markers": "python_version >= '3.5'", + "version": "==1.5.1" } } } diff --git a/README.md b/README.md index beed8ba4943..20f6fa420b2 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,12 @@ _Contents:_ **[Installation and usage](#installation-and-usage)** | _Black_ can be installed by running `pip install black`. It requires Python 3.6.0+ to run but you can reformat Python 2 code with it, too. +#### Install from GitHub + +If you can't wait for the latest _hotness_ and want to install from GitHub, use: + +`pip install git+git://github.com/psf/black` + ### Usage To get started right away with sensible defaults: @@ -137,7 +143,7 @@ Options: --exclude=. --version Show the version and exit. - --config FILE Read configuration from PATH. + --config FILE Read configuration from FILE path. -h, --help Show this message and exit. ``` @@ -287,7 +293,7 @@ the equivalent of r-strings in Python. Multiline strings are treated as verbose expressions by Black. Use `[ ]` to denote a significant space character.
-Example `pyproject.toml` +Example pyproject.toml ```toml [tool.black] @@ -348,8 +354,8 @@ rolling. ## black-primer -`black-primer` is a tool built for CI (and huumans) to have _Black_ `--check` a number -of (configured in `primer.json`) Git accessible projects in parallel. +`black-primer` is a tool built for CI (and humans) to have _Black_ `--check` a number of +(configured in `primer.json`) Git accessible projects in parallel. [black_primer](https://github.com/psf/black/blob/master/docs/black_primer.md) has more information regarding its usage and configuration. @@ -364,10 +370,10 @@ Use [pre-commit](https://pre-commit.com/). Once you ```yaml repos: - repo: https://github.com/psf/black - rev: stable + rev: 19.10b0 # Replace by any tag/version: https://github.com/psf/black/tags hooks: - id: black - language_version: python3.6 + language_version: python3 # Should be a command that runs python3.6+ ``` Then run `pre-commit install` and you're ready to go. @@ -429,7 +435,7 @@ code style: pytest, tox, Pyramid, Django Channels, Hypothesis, attrs, SQLAlchemy Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv), pandas, Pillow, every Datadog Agent Integration, Home Assistant. -The following organizations use _Black_: Facebook, Dropbox. +The following organizations use _Black_: Facebook, Dropbox, Mozilla, Quora. Are we missing anyone? Let us know. @@ -458,7 +464,7 @@ Twisted and CPython: Use the badge in your project's README.md: -```markdown +```md [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) ``` @@ -542,6 +548,7 @@ Multiple contributions by: - [Christian Clauss](mailto:cclauss@bluewin.ch) - [Christian Heimes](mailto:christian@python.org) - [Chuck Wooters](mailto:chuck.wooters@microsoft.com) +- [Chris Rose](mailto:offline@offby1.net) - Codey Oxley - [Cong](mailto:congusbongus@gmail.com) - [Cooper Ry Lees](mailto:me@cooperlees.com) @@ -618,7 +625,7 @@ Multiple contributions by: - [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) - MomIsBestFriend - [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) -- [Nathan Hunt](mailtoneighthan.hunt@gmail.com) +- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) - [Neraste](mailto:neraste.herr10@gmail.com) - [Nikolaus Waxweiler](mailto:madigens@gmail.com) - [Ofek Lev](mailto:ofekmeister@gmail.com) @@ -677,3 +684,4 @@ Multiple contributions by: - Yazdan - [Yngve Høiseth](mailto:yngve@hoiseth.net) - [Yurii Karabas](mailto:1998uriyyo@gmail.com) +- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) diff --git a/docs/authors.md b/docs/authors.md new file mode 100644 index 00000000000..a5349b4b9df --- /dev/null +++ b/docs/authors.md @@ -0,0 +1,184 @@ +[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" + +# Authors + +Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). + +Maintained with [Carol Willing](mailto:carolcode@willingconsulting.com), +[Carl Meyer](mailto:carl@oddbird.net), +[Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com), +[Mika Naylor](mailto:mail@autophagy.io), +[Zsolt Dollenstein](mailto:zsol.zsol@gmail.com), and +[Cooper Lees](mailto:me@cooperlees.com). 
+ +Multiple contributions by: + +- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) +- [Adam Johnson](mailto:me@adamj.eu) +- [Adam Williamson](mailto:adamw@happyassassin.net) +- [Alexander Huynh](mailto:github@grande.coffee) +- [Alex Vandiver](mailto:github@chmrr.net) +- [Allan Simon](mailto:allan.simon@supinfo.com) +- Anders-Petter Ljungquist +- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) +- [Andrew Zhou](mailto:andrewfzhou@gmail.com) +- [Andrey](mailto:dyuuus@yandex.ru) +- [Andy Freeland](mailto:andy@andyfreeland.net) +- [Anthony Sottile](mailto:asottile@umich.edu) +- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) +- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) +- [Artem Malyshev](mailto:proofit404@gmail.com) +- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) +- [Augie Fackler](mailto:raf@durin42.com) +- [Aviskar KC](mailto:aviskarkc10@gmail.com) +- Batuhan Taşkaya +- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) +- [Benjamin Woodruff](mailto:github@benjam.info) +- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) +- [Brandt Bucher](mailto:brandtbucher@gmail.com) +- [Brett Cannon](mailto:brett@python.org) +- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) +- [Bryan Forbes](mailto:bryan@reigndropsfall.net) +- [Calum Lind](mailto:calumlind@gmail.com) +- [Charles](mailto:peacech@gmail.com) +- Charles Reid +- [Christian Clauss](mailto:cclauss@bluewin.ch) +- [Christian Heimes](mailto:christian@python.org) +- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) +- [Chris Rose](mailto:offline@offby1.net) +- Codey Oxley +- [Cong](mailto:congusbongus@gmail.com) +- [Cooper Ry Lees](mailto:me@cooperlees.com) +- [Dan Davison](mailto:dandavison7@gmail.com) +- [Daniel Hahler](mailto:github@thequod.de) +- [Daniel M. Capella](mailto:polycitizen@gmail.com) +- Daniele Esposti +- [David Hotham](mailto:david.hotham@metaswitch.com) +- [David Lukes](mailto:dafydd.lukes@gmail.com) +- [David Szotten](mailto:davidszotten@gmail.com) +- [Denis Laxalde](mailto:denis@laxalde.org) +- [Douglas Thor](mailto:dthor@transphormusa.com) +- dylanjblack +- [Eli Treuherz](mailto:eli@treuherz.com) +- [Emil Hessman](mailto:emil@hessman.se) +- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) +- [Florent Thiery](mailto:fthiery@gmail.com) +- Francisco +- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) +- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) +- [Gregory P. 
Smith](mailto:greg@krypto.org)
+- Gustavo Camargo
+- hauntsaninja
+- [Heaford](mailto:dan@heaford.com)
+- [Hugo Barrera](mailto:hugo@barrera.io)
+- Hugo van Kemenade
+- [Hynek Schlawack](mailto:hs@ox.cx)
+- [Ivan Katanić](mailto:ivan.katanic@gmail.com)
+- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net)
+- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com)
+- [Jan Hnátek](mailto:jan.hnatek@gmail.com)
+- [Jason Fried](mailto:me@jasonfried.info)
+- [Jason Friedland](mailto:jason@friedland.id.au)
+- [jgirardet](mailto:ijkl@netc.fr)
+- Jim Brännlund
+- [Jimmy Jia](mailto:tesrin@gmail.com)
+- [Joe Antonakakis](mailto:jma353@cornell.edu)
+- [Jon Dufresne](mailto:jon.dufresne@gmail.com)
+- [Jonas Obrist](mailto:ojiidotch@gmail.com)
+- [Jonty Wareing](mailto:jonty@jonty.co.uk)
+- [Jose Nazario](mailto:jose.monkey.org@gmail.com)
+- [Joseph Larson](mailto:larson.joseph@gmail.com)
+- [Josh Bode](mailto:joshbode@fastmail.com)
+- [Josh Holland](mailto:anowlcalledjosh@gmail.com)
+- [José Padilla](mailto:jpadilla@webapplicate.com)
+- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space)
+- [kaiix](mailto:kvn.hou@gmail.com)
+- [Katie McLaughlin](mailto:katie@glasnt.com)
+- Katrin Leinweber
+- [Keith Smiley](mailto:keithbsmiley@gmail.com)
+- [Kenyon Ralph](mailto:kenyon@kenyonralph.com)
+- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com)
+- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com)
+- [Kyle Sunden](mailto:sunden@wisc.edu)
+- Lawrence Chan
+- [Linus Groh](mailto:mail@linusgroh.de)
+- [Loren Carvalho](mailto:comradeloren@gmail.com)
+- [Luka Sterbic](mailto:luka.sterbic@gmail.com)
+- [LukasDrude](mailto:mail@lukas-drude.de)
+- Mahmoud Hossam
+- Mariatta
+- [Matt VanEseltine](mailto:vaneseltine@gmail.com)
+- [Matthew Clapp](mailto:itsayellow+dev@gmail.com)
+- [Matthew Walster](mailto:matthew@walster.org)
+- Max Smolens
+- [Michael Aquilina](mailto:michaelaquilina@gmail.com)
+- [Michael Flaxman](mailto:michael.flaxman@gmail.com)
+- [Michael J.
Sullivan](mailto:sully@msully.net) +- [Michael McClimon](mailto:michael@mcclimon.org) +- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) +- [Mike](mailto:roshi@fedoraproject.org) +- [mikehoyio](mailto:mikehoy@gmail.com) +- [Min ho Kim](mailto:minho42@gmail.com) +- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) +- MomIsBestFriend +- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) +- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) +- [Neraste](mailto:neraste.herr10@gmail.com) +- [Nikolaus Waxweiler](mailto:madigens@gmail.com) +- [Ofek Lev](mailto:ofekmeister@gmail.com) +- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) +- [otstrel](mailto:otstrel@gmail.com) +- [Pablo Galindo](mailto:Pablogsal@gmail.com) +- [Paul Ganssle](mailto:p.ganssle@gmail.com) +- [Paul Meinhardt](mailto:mnhrdt@gmail.com) +- [Peter Bengtsson](mailto:mail@peterbe.com) +- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) +- pmacosta +- [Quentin Pradet](mailto:quentin@pradet.me) +- [Ralf Schmitt](mailto:ralf@systemexit.de) +- [Ramón Valles](mailto:mroutis@protonmail.com) +- [Richard Fearn](mailto:richardfearn@gmail.com) +- Richard Si +- [Rishikesh Jha](mailto:rishijha424@gmail.com) +- [Rupert Bedford](mailto:rupert@rupertb.com) +- Russell Davis +- [Rémi Verschelde](mailto:rverschelde@gmail.com) +- [Sami Salonen](mailto:sakki@iki.fi) +- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) +- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) +- Sergi +- [Scott Stevenson](mailto:scott@stevenson.io) +- Shantanu +- [shaoran](mailto:shaoran@sakuranohana.org) +- [Shinya Fujino](mailto:shf0811@gmail.com) +- springstan +- [Stavros Korokithakis](mailto:hi@stavros.io) +- [Stephen Rosen](mailto:sirosen@globus.org) +- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com) +- [Sunil Kapil](mailto:snlkapil@gmail.com) +- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) +- [Tal Amuyal](mailto:TalAmuyal@gmail.com) +- [Terrance](mailto:git@terrance.allofti.me) +- [Thom Lu](mailto:thomas.c.lu@gmail.com) +- [Thomas Grainger](mailto:tagrain@gmail.com) +- [Tim Gates](mailto:tim.gates@iress.com) +- [Tim Swast](mailto:swast@google.com) +- [Timo](mailto:timo_tk@hotmail.com) +- Toby Fleming +- [Tom Christie](mailto:tom@tomchristie.com) +- [Tony Narlock](mailto:tony@git-pull.com) +- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) +- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) +- [Tzu-ping Chung](mailto:uranusjr@gmail.com) +- [Utsav Shah](mailto:ukshah2@illinois.edu) +- utsav-dbx +- vezeli +- [Ville Skyttä](mailto:ville.skytta@iki.fi) +- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) +- [Vlad Emelianov](mailto:volshebnyi@gmail.com) +- [williamfzc](mailto:178894043@qq.com) +- [wouter bolsterlee](mailto:wouter@bolsterl.ee) +- Yazdan +- [Yngve Høiseth](mailto:yngve@hoiseth.net) +- [Yurii Karabas](mailto:1998uriyyo@gmail.com) +- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) diff --git a/docs/black_primer.md b/docs/black_primer.md index af4184233e2..a2dd964b7dc 100644 --- a/docs/black_primer.md +++ b/docs/black_primer.md @@ -71,7 +71,7 @@ each parameter is explained below: "expect_formatting_changes": true, "git_clone_url": "https://github.com/cooperlees/aioexabgp.git", "long_checkout": false, - "py_versions": ["all", "3.8"] // "all" ignores all other versions + "py_versions": ["all", "3.8"] } } } @@ -103,9 +103,9 @@ Failed projects: +++ tests/b303_b304.py 2020-05-17 20:06:42.753851 +0000 @@ -26,11 +26,11 @@ maxint = 5 # this is okay - # the following shouldn't crash + # the following should not crash (a, b, c) 
= list(range(3))
-    # it's different than this
+    # it is different than this
     a, b, c = list(range(3))
 
-    a, b, c, = list(range(3))
+    a, b, c = list(range(3))
diff --git a/docs/change_log.md b/docs/change_log.md
new file mode 100644
index 00000000000..658414bf967
--- /dev/null
+++ b/docs/change_log.md
@@ -0,0 +1,503 @@
+[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM CHANGES.md"
+
+## Change Log
+
+### 20.8b1
+
+#### _Packaging_
+
+- explicitly depend on Click 7.1.2 or newer as `Black` no longer works with versions
+  older than 7.0
+
+### 20.8b0
+
+#### _Black_
+
+- re-implemented support for explicit trailing commas: now it works consistently within
+  any bracket pair, including nested structures (#1288 and duplicates)
+
+- `Black` now reindents docstrings when reindenting code around them (#1053)
+
+- `Black` now shows colored diffs (#1266)
+
+- `Black` is now packaged using 'py3' tagged wheels (#1388)
+
+- `Black` now supports Python 3.8 code, e.g. star expressions in return statements
+  (#1121)
+
+- `Black` no longer normalizes capital R-string prefixes as those have a
+  community-accepted meaning (#1244)
+
+- `Black` now uses exit code 2 when specified configuration file doesn't exist (#1361)
+
+- `Black` now works on AWS Lambda (#1141)
+
+- added `--force-exclude` argument (#1032)
+
+- removed deprecated `--py36` option (#1236)
+
+- fixed `--diff` output when EOF is encountered (#526)
+
+- fixed `# fmt: off` handling around decorators (#560)
+
+- fixed unstable formatting with some `# type: ignore` comments (#1113)
+
+- fixed invalid removal on organizing brackets followed by indexing (#1575)
+
+- introduced `black-primer`, a CI tool that allows us to run regression tests against
+  existing open source users of Black (#1402)
+
+- introduced property-based fuzzing to our test suite based on Hypothesis and
+  Hypothesmith (#1566)
+
+- implemented experimental and disabled by default long string rewrapping (#1132),
+  hidden under a `--experimental-string-processing` flag while it's being worked on;
+  this is an undocumented and unsupported feature, you lose Internet points for
+  depending on it (#1609)
+
+#### Vim plugin
+
+- prefer virtualenv packages over global packages (#1383)
+
+### 19.10b0
+
+- added support for PEP 572 assignment expressions (#711)
+
+- added support for PEP 570 positional-only arguments (#943)
+
+- added support for async generators (#593)
+
+- added support for pre-splitting collections by putting an explicit trailing comma
+  inside (#826)
+
+- added `black -c` as a way to format code passed from the command line (#761)
+
+- `--safe` now works with Python 2 code (#840)
+
+- fixed grammar selection for Python 2-specific code (#765)
+
+- fixed feature detection for trailing commas in function definitions and call sites
+  (#763)
+
+- `# fmt: off`/`# fmt: on` comment pairs placed multiple times within the same block of
+  code now behave correctly (#1005)
+
+- _Black_ no longer crashes on Windows machines with more than 61 cores (#838)
+
+- _Black_ no longer crashes on standalone comments prepended with a backslash (#767)
+
+- _Black_ no longer crashes on `from` ...
`import` blocks with comments (#829)
+
+- _Black_ no longer crashes on Python 3.7 on some platform configurations (#494)
+
+- _Black_ no longer fails on comments in from-imports (#671)
+
+- _Black_ no longer fails when the file starts with a backslash (#922)
+
+- _Black_ no longer merges regular comments with type comments (#1027)
+
+- _Black_ no longer splits long lines that contain type comments (#997)
+
+- removed unnecessary parentheses around `yield` expressions (#834)
+
+- added parentheses around long tuples in unpacking assignments (#832)
+
+- added parentheses around complex powers when they are prefixed by a unary operator
+  (#646)
+
+- fixed bug that led _Black_ to format some code with a line length target of 1 (#762)
+
+- _Black_ no longer introduces quotes in f-string subexpressions on string boundaries
+  (#863)
+
+- if _Black_ puts parentheses around a single expression, it moves comments to the
+  wrapped expression instead of after the brackets (#872)
+
+- `blackd` now returns the version of _Black_ in the response headers (#1013)
+
+- `blackd` can now output the diff of formats on source code when the `X-Diff` header is
+  provided (#969)
+
+### 19.3b0
+
+- new option `--target-version` to control which Python versions _Black_-formatted code
+  should target (#618)
+
+- deprecated `--py36` (use `--target-version=py36` instead) (#724)
+
+- _Black_ no longer normalizes numeric literals to include `_` separators (#696)
+
+- long `del` statements are now split into multiple lines (#698)
+
+- type comments are no longer mangled in function signatures
+
+- improved performance of formatting deeply nested data structures (#509)
+
+- _Black_ now properly formats multiple files in parallel on Windows (#632)
+
+- _Black_ now creates cache files atomically which allows it to be used in parallel
+  pipelines (like `xargs -P8`) (#673)
+
+- _Black_ now correctly indents comments in files that were previously formatted with
+  tabs (#262)
+
+- `blackd` now supports CORS (#622)
+
+### 18.9b0
+
+- numeric literals are now formatted by _Black_ (#452, #461, #464, #469):
+
+  - numeric literals are normalized to include `_` separators on Python 3.6+ code
+
+  - added `--skip-numeric-underscore-normalization` to disable the above behavior and
+    leave numeric underscores as they were in the input
+
+  - code with `_` in numeric literals is recognized as Python 3.6+
+
+  - most letters in numeric literals are lowercased (e.g., in `1e10`, `0x01`)
+
+  - hexadecimal digits are always uppercased (e.g.
`0xBADC0DE`)
+
+- added `blackd`, see [its documentation](#blackd) for more info (#349)
+
+- adjacent string literals are now correctly split into multiple lines (#463)
+
+- trailing comma is now added to single imports that don't fit on a line (#250)
+
+- cache is now populated when `--check` is successful for a file which speeds up
+  consecutive checks of properly formatted unmodified files (#448)
+
+- whitespace at the beginning of the file is now removed (#399)
+
+- fixed mangling [pweave](http://mpastell.com/pweave/) and
+  [Spyder IDE](https://www.spyder-ide.org/) special comments (#532)
+
+- fixed unstable formatting when unpacking big tuples (#267)
+
+- fixed parsing of `__future__` imports with renames (#389)
+
+- fixed scope of `# fmt: off` when directly preceding `yield` and other nodes (#385)
+
+- fixed formatting of lambda expressions with default arguments (#468)
+
+- fixed `async for` statements: _Black_ no longer breaks them into separate lines (#372)
+
+- note: the Vim plugin stopped registering `,=` as a default chord as it turned out to
+  be a bad idea (#415)
+
+### 18.6b4
+
+- hotfix: don't freeze when multiple comments directly precede `# fmt: off` (#371)
+
+### 18.6b3
+
+- typing stub files (`.pyi`) now have blank lines added after constants (#340)
+
+- `# fmt: off` and `# fmt: on` are now much more dependable:
+
+  - they now work also within bracket pairs (#329)
+
+  - they now correctly work across function/class boundaries (#335)
+
+  - they now work when an indentation block starts with empty lines or misaligned
+    comments (#334)
+
+- made Click not fail on invalid environments; note that Click is right but the
+  likelihood we'll need to access non-ASCII file paths when dealing with Python source
+  code is low (#277)
+
+- fixed improper formatting of f-strings with quotes inside interpolated expressions
+  (#322)
+
+- fixed unnecessary slowdown when long list literals were found in a file
+
+- fixed unnecessary slowdown on AST nodes with very many siblings
+
+- fixed cannibalizing backslashes during string normalization
+
+- fixed a crash due to symbolic links pointing outside of the project directory (#338)
+
+### 18.6b2
+
+- added `--config` (#65)
+
+- added `-h` equivalent to `--help` (#316)
+
+- fixed improper unmodified file caching when `-S` was used
+
+- fixed extra space in string unpacking (#305)
+
+- fixed formatting of empty triple quoted strings (#313)
+
+- fixed unnecessary slowdown in comment placement calculation on lines without comments
+
+### 18.6b1
+
+- hotfix: don't output human-facing information on stdout (#299)
+
+- hotfix: don't output cake emoji on non-zero return code (#300)
+
+### 18.6b0
+
+- added `--include` and `--exclude` (#270)
+
+- added `--skip-string-normalization` (#118)
+
+- added `--verbose` (#283)
+
+- the header output in `--diff` now actually conforms to the unified diff spec
+
+- fixed long trivial assignments being wrapped in unnecessary parentheses (#273)
+
+- fixed unnecessary parentheses when a line contained multiline strings (#232)
+
+- fixed stdin handling not working correctly if an old version of Click was used (#276)
+
+- _Black_ now preserves line endings when formatting a file in place (#258)
+
+### 18.5b1
+
+- added `--pyi` (#249)
+
+- added `--py36` (#249)
+
+- Python grammar pickle caches are stored with the formatting caches, making _Black_
+  work in environments where site-packages is not user-writable (#192)
+
+- _Black_ now enforces a PEP 257 empty line between a class-level docstring (and/or
+  fields) and
the first method
+
+- fixed invalid code produced when standalone comments were present in a trailer that
+  was omitted from line splitting on a large expression (#237)
+
+- fixed optional parentheses being removed within `# fmt: off` sections (#224)
+
+- fixed invalid code produced when stars in very long imports were incorrectly wrapped
+  in optional parentheses (#234)
+
+- fixed unstable formatting when inline comments were moved around in a trailer that was
+  omitted from line splitting on a large expression (#238)
+
+- fixed extra empty line between a class declaration and the first method if no class
+  docstring or fields are present (#219)
+
+- fixed extra empty line between a function signature and an inner function or inner
+  class (#196)
+
+### 18.5b0
+
+- call chains are now formatted according to the
+  [fluent interfaces](https://en.wikipedia.org/wiki/Fluent_interface) style (#67)
+
+- data structure literals (tuples, lists, dictionaries, and sets) are now also always
+  exploded like imports when they don't fit in a single line (#152)
+
+- slices are now formatted according to PEP 8 (#178)
+
+- parentheses are now also managed automatically on the right-hand side of assignments
+  and return statements (#140)
+
+- math operators now use their respective priorities for delimiting multiline
+  expressions (#148)
+
+- optional parentheses are now omitted on expressions that start or end with a bracket
+  and only contain a single operator (#177)
+
+- empty parentheses in a class definition are now removed (#145, #180)
+
+- string prefixes are now standardized to lowercase and `u` is removed on Python 3.6+
+  only code and Python 2.7+ code with the `unicode_literals` future import (#188, #198,
+  #199)
+
+- typing stub files (`.pyi`) are now formatted in a style that is consistent with PEP
+  484 (#207, #210)
+
+- progress when reformatting many files is now reported incrementally
+
+- fixed trailers (content with brackets) being unnecessarily exploded into their own
+  lines after a dedented closing bracket (#119)
+
+- fixed an invalid trailing comma sometimes left in imports (#185)
+
+- fixed non-deterministic formatting when multiple pairs of removable parentheses were
+  used (#183)
+
+- fixed multiline strings being unnecessarily wrapped in optional parentheses in long
+  assignments (#215)
+
+- fixed not splitting long from-imports with only a single name
+
+- fixed Python 3.6+ file discovery by also looking at function calls with unpacking.
+  This fixed non-deterministic formatting if trailing commas were used both in function
+  signatures with stars and function calls with stars but the former would be
+  reformatted to a single line.
+
+- fixed crash on dealing with optional parentheses (#193)
+
+- fixed "is", "is not", "in", and "not in" not being considered operators for splitting
+  purposes
+
+- fixed crash when dead symlinks were encountered
+
+### 18.4a4
+
+- don't populate the cache on `--check` (#175)
+
+### 18.4a3
+
+- added a "cache"; files already reformatted that haven't changed on disk won't be
+  reformatted again (#109)
+
+- `--check` and `--diff` are no longer mutually exclusive (#149)
+
+- generalized star expression handling, including double stars; this fixes
+  multiplication making expressions "unsafe" for trailing commas (#132)
+
+- _Black_ no longer enforces putting empty lines behind control flow statements (#90)
+
+- _Black_ now splits imports like "Mode 3 + trailing comma" of isort (#127)
+
+- fixed comment indentation when a standalone comment closes a block (#16, #32)
+
+- fixed standalone comments receiving extra empty lines if immediately preceding a
+  class, def, or decorator (#56, #154)
+
+- fixed `--diff` not showing entire path (#130)
+
+- fixed parsing of complex expressions after star and double stars in function calls
+  (#2)
+
+- fixed invalid splitting on comma in lambda arguments (#133)
+
+- fixed missing splits of ternary expressions (#141)
+
+### 18.4a2
+
+- fixed parsing of unaligned standalone comments (#99, #112)
+
+- fixed placement of dictionary unpacking inside dictionary literals (#111)
+
+- Vim plugin now works on Windows, too
+
+- fixed unstable formatting when encountering unnecessarily escaped quotes in a string
+  (#120)
+
+### 18.4a1
+
+- added `--quiet` (#78)
+
+- added automatic parentheses management (#4)
+
+- added [pre-commit](https://pre-commit.com) integration (#103, #104)
+
+- fixed reporting on `--check` with multiple files (#101, #102)
+
+- fixed removing backslash escapes from raw strings (#100, #105)
+
+### 18.4a0
+
+- added `--diff` (#87)
+
+- add line breaks before all delimiters, except in cases like commas, to better comply
+  with PEP 8 (#73)
+
+- standardize string literals to use double quotes (almost) everywhere (#75)
+
+- fixed handling of standalone comments within nested bracketed expressions; _Black_
+  will no longer produce super long lines or put all standalone comments at the end of
+  the expression (#22)
+
+- fixed 18.3a4 regression: don't crash and burn on empty lines with trailing whitespace
+  (#80)
+
+- fixed 18.3a4 regression: `# yapf: disable` usage as trailing comment would cause
+  _Black_ to not emit the rest of the file (#95)
+
+- when CTRL+C is pressed while formatting many files, _Black_ no longer freaks out with
+  a flurry of asyncio-related exceptions
+
+- only allow up to two empty lines on module level and only single empty lines within
+  functions (#74)
+
+### 18.3a4
+
+- `# fmt: off` and `# fmt: on` are implemented (#5)
+
+- automatic detection of deprecated Python 2 forms of print statements and exec
+  statements in the formatted file (#49)
+
+- use proper spaces for complex expressions in default values of typed function
+  arguments (#60)
+
+- only return exit code 1 when `--check` is used (#50)
+
+- don't remove single trailing commas from square bracket indexing (#59)
+
+- don't omit whitespace if the previous factor leaf wasn't a math operator (#55)
+
+- omit extra space in kwarg unpacking if it's the first argument (#46)
+
+- omit extra space in
+  [Sphinx auto-attribute comments](http://www.sphinx-doc.org/en/stable/ext/autodoc.html#directive-autoattribute)
+  (#68)
+
+### 18.3a3
+
+- don't remove single empty lines outside of
bracketed expressions (#19)
+
+- added ability to pipe formatting from stdin to stdout (#25)
+
+- restored ability to format code with legacy usage of `async` as a name (#20, #42)
+
+- even better handling of numpy-style array indexing (#33, again)
+
+### 18.3a2
+
+- changed positioning of binary operators to occur at beginning of lines instead of at
+  the end, following
+  [a recent change to PEP 8](https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b)
+  (#21)
+
+- ignore empty bracket pairs while splitting. This avoids very weird-looking formatting
+  (#34, #35)
+
+- remove a trailing comma if there is a single argument to a call
+
+- if top level functions were separated by a comment, don't put four empty lines after
+  the upper function
+
+- fixed unstable formatting of newlines with imports
+
+- fixed unintentional folding of post scriptum standalone comments into last statement
+  if it was a simple statement (#18, #28)
+
+- fixed missing space in numpy-style array indexing (#33)
+
+- fixed spurious space after star-based unary expressions (#31)
+
+### 18.3a1
+
+- added `--check`
+
+- only put trailing commas in function signatures and calls if it's safe to do so. If
+  the file is Python 3.6+ it's always safe, otherwise only safe if there are no `*args`
+  or `**kwargs` used in the signature or call. (#8)
+
+- fixed invalid spacing of dots in relative imports (#6, #13)
+
+- fixed invalid splitting after comma on unpacked variables in for-loops (#23)
+
+- fixed spurious space in parenthesized set expressions (#7)
+
+- fixed spurious space after opening parentheses and in default arguments (#14, #17)
+
+- fixed spurious space after unary operators when the operand was a complex expression
+  (#15)
+
+### 18.3a0
+
+- first published version, Happy 🍰 Day 2018!
+
+- alpha quality
+
+- date-versioned (see: https://calver.org/)
diff --git a/docs/compatible_configs.md b/docs/compatible_configs.md
index aa7cd96f165..25e959e3281 100644
--- a/docs/compatible_configs.md
+++ b/docs/compatible_configs.md
@@ -23,6 +23,7 @@ multi_line_output = 3
 include_trailing_comma = True
 force_grid_wrap = 0
 use_parentheses = True
+ensure_newline_before_comments = True
 line_length = 88
 ```
 
@@ -62,7 +63,15 @@ The option `force_grid_wrap = 0` is just to tell isort to only wrap imports that
 the `line_length` limit.
 
 Finally, isort should be told to wrap imports when they surpass _Black_'s default limit
-of 88 characters via `line_length = 88`.
+of 88 characters via `line_length = 88`, as well as
+`ensure_newline_before_comments = True` to ensure that the spacing of import sections
+with comments works the same as with _Black_.
+
+**Please note** `ensure_newline_before_comments = True` only takes effect with isort 5
+or later, but it does not break older versions, so you can keep it even if you run an
+older version. If you use only isort >= 5, you can add `profile = black` instead of all
+the options above, since
+[profiles](https://timothycrosley.github.io/isort/docs/configuration/profiles/) do the
+configuring for you, as sketched below.
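+
+For example, if you run isort 5 or later, a minimal `.isort.cfg` relying on the profile
+could look like the following sketch (the `[settings]` section name is the standard one
+for `.isort.cfg`; the same `profile = black` key works in the other formats shown
+below):
+
+```cfg
+[settings]
+profile = black
+```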
### Formats @@ -75,6 +84,7 @@ multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0 use_parentheses = True +ensure_newline_before_comments = True line_length = 88 ``` @@ -89,6 +99,7 @@ multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0 use_parentheses = True +ensure_newline_before_comments = True line_length = 88 ``` @@ -103,6 +114,7 @@ multi_line_output = 3 include_trailing_comma = true force_grid_wrap = 0 use_parentheses = true +ensure_newline_before_comments = true line_length = 88 ``` @@ -117,6 +129,7 @@ multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0 use_parentheses = True +ensure_newline_before_comments = True line_length = 88 ``` @@ -228,7 +241,7 @@ characters via `max-line-length = 88`.
pylintrc -```rc +```ini [MESSAGES CONTROL] disable = C0330, C0326 diff --git a/docs/conf.py b/docs/conf.py index 3343087cfbd..7381c9d6423 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -15,20 +15,17 @@ from pathlib import Path import re import string -from typing import Callable, List, Optional, Pattern, Tuple, Set +from typing import Callable, Dict, List, Optional, Pattern, Tuple, Set from dataclasses import dataclass -import os import logging from pkg_resources import get_distribution -from recommonmark.parser import CommonMarkParser logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO) LOG = logging.getLogger(__name__) -# Get a relative path so logs printing out SRC isn't too long. -CURRENT_DIR = Path(__file__).parent.relative_to(os.getcwd()) +CURRENT_DIR = Path(__file__).parent README = CURRENT_DIR / ".." / "README.md" REFERENCE_DIR = CURRENT_DIR / "reference" STATIC_DIR = CURRENT_DIR / "_static" @@ -102,7 +99,13 @@ def get_contents(section: DocSection) -> str: for lineno, line in enumerate(f, start=1): if lineno >= start_line and lineno < end_line: contents.append(line) - return "".join(contents) + result = "".join(contents) + # Let's make Prettier happy with the amount of trailing newlines in the sections. + if result.endswith("\n\n"): + result = result[:-1] + if not result.endswith("\n"): + result = result + "\n" + return result def get_sections_from_readme() -> List[DocSection]: @@ -162,18 +165,19 @@ def process_sections( It processes custom sections before the README generated sections so sections in the README can be overwritten with custom options. """ - processed_sections: Set[str] = set() + processed_sections: Dict[str, DocSection] = {} modified_files: Set[Path] = set() sections: List[DocSection] = custom_sections sections.extend(readme_sections) for section in sections: - LOG.info(f"Processing '{section.name}' from {section.src}") if section.name in processed_sections: - LOG.info( + LOG.warning( f"Skipping '{section.name}' from '{section.src}' as it is a duplicate" + f" of a custom section from '{processed_sections[section.name].src}'" ) continue + LOG.info(f"Processing '{section.name}' from '{section.src}'") target_path: Path = CURRENT_DIR / section.get_out_filename() if target_path in modified_files: LOG.warning( @@ -187,20 +191,18 @@ def process_sections( contents = fix_headers(contents) with open(target_path, "w", encoding="utf-8") as f: - if section.src.suffix == ".md": - f.write( - "[//]: # (NOTE: THIS FILE WAS AUTOGENERATED FROM" - f" {section.src})\n\n" - ) + if section.src.suffix == ".md" and section.src != target_path: + rel = section.src.resolve().relative_to(CURRENT_DIR.parent) + f.write(f'[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM {rel}"\n\n') f.write(contents) - processed_sections.add(section.name) + processed_sections[section.name] = section modified_files.add(target_path) # -- Project information ----------------------------------------------------- project = "Black" -copyright = "2018, Łukasz Langa and contributors to Black" +copyright = "2020, Łukasz Langa and contributors to Black" author = "Łukasz Langa and contributors to Black" # Autopopulate version @@ -212,8 +214,7 @@ def process_sections( version = version.split(sp)[0] custom_sections = [ - DocSection("the_black_code_style", CURRENT_DIR / "the_black_code_style.md",), - DocSection("pragmatism", CURRENT_DIR / "the_black_code_style.md",), + DocSection("the_black_code_style", CURRENT_DIR / "the_black_code_style.md"), DocSection("editor_integration", CURRENT_DIR / 
"editor_integration.md"), DocSection("blackd", CURRENT_DIR / "blackd.md"), DocSection("black_primer", CURRENT_DIR / "black_primer.md"), @@ -221,28 +222,47 @@ def process_sections( DocSection("change_log", CURRENT_DIR / ".." / "CHANGES.md"), ] +# Sphinx complains when there is a source file that isn't referenced in any of the docs. +# Since some sections autogenerated from the README are unused warnings will appear. +# +# Sections must be listed to what their name is when passed through make_filename(). +blocklisted_sections_from_readme = { + "license", + "pragmatism", + "testimonials", + "used_by", +} make_pypi_svg(release) readme_sections = get_sections_from_readme() +readme_sections = [ + x for x in readme_sections if x.name not in blocklisted_sections_from_readme +] + process_sections(custom_sections, readme_sections) # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' +needs_sphinx = "3.0" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.napoleon"] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.napoleon", + "recommonmark", +] + +# If you need extensions of a certain version or higher, list them here. +needs_extensions = {"recommonmark": "0.5"} # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] -source_parsers = {".md": CommonMarkParser} - # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: source_suffix = [".rst", ".md"] diff --git a/docs/contributing_to_black.md b/docs/contributing_to_black.md new file mode 100644 index 00000000000..e5307adb5d0 --- /dev/null +++ b/docs/contributing_to_black.md @@ -0,0 +1,70 @@ +[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM CONTRIBUTING.md" + +# Contributing to _Black_ + +Welcome! Happy to see you willing to make the project better. Have you read the entire +[user documentation](https://black.readthedocs.io/en/latest/) yet? + +## Bird's eye view + +In terms of inspiration, _Black_ is about as configurable as _gofmt_. This is +deliberate. + +Bug reports and fixes are always welcome! Please follow the +[issue template on GitHub](https://github.com/psf/black/issues/new) for best results. + +Before you suggest a new feature or configuration knob, ask yourself why you want it. If +it enables better integration with some workflow, fixes an inconsistency, speeds things +up, and so on - go for it! On the other hand, if your answer is "because I don't like a +particular formatting" then you're not ready to embrace _Black_ yet. Such changes are +unlikely to get accepted. You can still try but prepare to be disappointed. + +## Technicalities + +Development on the latest version of Python is preferred. As of this writing it's 3.8. +You can use any operating system. I am using macOS myself and CentOS at work. + +Install all development dependencies using: + +```console +$ pipenv install --dev +$ pipenv shell +$ pre-commit install +``` + +If you haven't used `pipenv` before but are comfortable with virtualenvs, just run +`pip install pipenv` in the virtualenv you're already using and invoke the command above +from the cloned _Black_ repo. It will do the correct thing. 
+
+Before submitting pull requests, run lints and tests with:
+
+```console
+$ pre-commit run -a
+$ python -m unittest
+$ black-primer [-k -w /tmp/black_test_repos]
+```
+
+## black-primer
+
+`black-primer` is used by CI to pull down well-known _Black_ formatted projects and see
+if we get source code changes. It will fail on formatting changes or errors. Please run
+it before pushing your PR to see whether _Black_ behaves as you expect on your changes.
+You may need to change the
+[primer.json](https://github.com/psf/black/blob/master/src/black_primer/primer.json)
+configuration for it to pass.
+
+For more `black-primer` information visit the
+[documentation](https://github.com/psf/black/blob/master/docs/black_primer.md).
+
+## Hygiene
+
+If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug,
+run it again to confirm it's really fixed.
+
+If adding a new feature, add a test. In fact, always add a test. But before adding any
+large feature, first open an issue so we can discuss the idea.
+
+## Finally
+
+Thanks again for your interest in improving the project! You're taking action when most
+people decide to sit and watch.
diff --git a/docs/editor_integration.md b/docs/editor_integration.md
index 00241f23335..73107d6a4a1 100644
--- a/docs/editor_integration.md
+++ b/docs/editor_integration.md
@@ -146,7 +146,7 @@ or you can copy the plugin from
 ```
 mkdir -p ~/.vim/pack/python/start/black/plugin
-curl https://raw.githubusercontent.com/psf/black/master/plugin/black.vim -o ~/.vim/pack/python/start/black/plugin/black.vim
+curl https://raw.githubusercontent.com/psf/black/stable/plugin/black.vim -o ~/.vim/pack/python/start/black/plugin/black.vim
 ```
 
 Let me know if this requires any changes to work with Vim 8's builtin `packadd`, or
@@ -255,6 +255,10 @@ Sublime Text, Visual Studio Code and many more), you can use the
 
 Use [python-black](https://atom.io/packages/python-black).
 
+## Gradle (the build tool)
+
+Use the [Spotless](https://github.com/diffplug/spotless/tree/main/plugin-gradle) plugin.
+
 ## Kakoune
 
 Add the following hook to your kakrc, then run _Black_ with `:format`.
@@ -269,9 +273,9 @@ Use
 [Thonny-black-code-format](https://github.com/Franccisco/thonny-black-code-format).
 
-## Other editors
+## Other integrations
 
-Other editors will require external contributions.
+Other editors and tools will require external contributions.
 
 Patches welcome! ✨ 🍰 ✨
 
diff --git a/docs/github_actions.md b/docs/github_actions.md
new file mode 100644
index 00000000000..7ff87540242
--- /dev/null
+++ b/docs/github_actions.md
@@ -0,0 +1,19 @@
+[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md"
+
+# GitHub Actions
+
+Create a file named `.github/workflows/black.yml` inside your repository with:
+
+```yaml
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - uses: psf/black@stable
+```
diff --git a/docs/ignoring_unmodified_files.md b/docs/ignoring_unmodified_files.md
new file mode 100644
index 00000000000..a915f4e8678
--- /dev/null
+++ b/docs/ignoring_unmodified_files.md
@@ -0,0 +1,23 @@
+[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md"
+
+# Ignoring unmodified files
+
+_Black_ remembers files it has already formatted, unless the `--diff` flag is used or
+code is passed via standard input. This information is stored per-user.
The exact
+location of the file depends on the _Black_ version and the system on which _Black_ is
+run. The file is non-portable. The standard location on common operating systems is:
+
+- Windows:
+  `C:\\Users\<username>\AppData\Local\black\black\Cache\<version>\cache.<line-length>.<file-mode>.pickle`
+- macOS:
+  `/Users/<username>/Library/Caches/black/<version>/cache.<line-length>.<file-mode>.pickle`
+- Linux:
+  `/home/<username>/.cache/black/<version>/cache.<line-length>.<file-mode>.pickle`
+
+`file-mode` is an int flag that determines whether the file was formatted as 3.6+ only,
+as .pyi, and whether string normalization was omitted.
+
+To override the location of these files on macOS or Linux, set the environment variable
+`XDG_CACHE_HOME` to your preferred location. For example, if you want to put the cache
+in the directory you're running _Black_ from, set `XDG_CACHE_HOME=.cache`. _Black_ will
+then write the above files to `.cache/black/<version>/`.
diff --git a/docs/index.rst b/docs/index.rst
index 676644ec6c6..f03d247d949 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -51,10 +51,12 @@ Contents
    installation_and_usage
    the_black_code_style
    pyproject_toml
+   compatible_configs
    editor_integration
    blackd
    black_primer
    version_control_integration
+   github_actions
    ignoring_unmodified_files
    contributing_to_black
    show_your_style
@@ -66,5 +68,4 @@ Indices and tables
 ==================
 
 * :ref:`genindex`
-* :ref:`modindex`
 * :ref:`search`
diff --git a/docs/installation_and_usage.md b/docs/installation_and_usage.md
new file mode 100644
index 00000000000..cc0269198a2
--- /dev/null
+++ b/docs/installation_and_usage.md
@@ -0,0 +1,179 @@
+[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md"
+
+# Installation and usage
+
+## Installation
+
+_Black_ can be installed by running `pip install black`. It requires Python 3.6.0+ to
+run but you can reformat Python 2 code with it, too.
+
+### Install from GitHub
+
+If you can't wait for the latest _hotness_ and want to install from GitHub, use:
+
+`pip install git+git://github.com/psf/black`
+
+## Usage
+
+To get started right away with sensible defaults:
+
+```sh
+black {source_file_or_directory}
+```
+
+You can run _Black_ as a package if running it as a script doesn't work:
+
+```sh
+python -m black {source_file_or_directory}
+```
+
+## Command line options
+
+_Black_ doesn't provide many options. You can list them by running `black --help`:
+
+```text
+Usage: black [OPTIONS] [SRC]...
+
+  The uncompromising code formatter.
+
+Options:
+  -c, --code TEXT                 Format the code passed in as a string.
+  -l, --line-length INTEGER       How many characters per line to allow.
+                                  [default: 88]
+
+  -t, --target-version [py27|py33|py34|py35|py36|py37|py38]
+                                  Python versions that should be supported by
+                                  Black's output. [default: per-file auto-
+                                  detection]
+
+  --pyi                           Format all input files like typing stubs
+                                  regardless of file extension (useful when
+                                  piping source on standard input).
+
+  -S, --skip-string-normalization
+                                  Don't normalize string quotes or prefixes.
+  --check                         Don't write the files back, just return the
+                                  status. Return code 0 means nothing would
+                                  change. Return code 1 means some files
+                                  would be reformatted. Return code 123 means
+                                  there was an internal error.
+
+  --diff                          Don't write the files back, just output a
+                                  diff for each file on stdout.
+
+  --color / --no-color            Show colored diff. Only applies when
+                                  `--diff` is given.
+
+  --fast / --safe                 If --fast given, skip temporary sanity
+                                  checks. [default: --safe]
+
+  --include TEXT                  A regular expression that matches files and
+                                  directories that should be included on
+                                  recursive searches.
An empty value means
+                                  all files are included regardless of the
+                                  name. Use forward slashes for directories
+                                  on all platforms (Windows, too). Exclusions
+                                  are calculated first, inclusions later.
+                                  [default: \.pyi?$]
+
+  --exclude TEXT                  A regular expression that matches files and
+                                  directories that should be excluded on
+                                  recursive searches. An empty value means no
+                                  paths are excluded. Use forward slashes for
+                                  directories on all platforms (Windows, too).
+                                  Exclusions are calculated first, inclusions
+                                  later. [default: /(\.eggs|\.git|\.hg|\.mypy
+                                  _cache|\.nox|\.tox|\.venv|\.svn|_build|buck-
+                                  out|build|dist)/]
+
+  --force-exclude TEXT            Like --exclude, but files and directories
+                                  matching this regex will be excluded even
+                                  when they are passed explicitly as arguments
+
+  -q, --quiet                     Don't emit non-error messages to stderr.
+                                  Errors are still emitted; silence those with
+                                  2>/dev/null.
+
+  -v, --verbose                   Also emit messages to stderr about files
+                                  that were not changed or were ignored due to
+                                  --exclude=.
+
+  --version                       Show the version and exit.
+  --config FILE                   Read configuration from FILE path.
+  -h, --help                      Show this message and exit.
+```
+
+_Black_ is a well-behaved Unix-style command-line tool:
+
+- it does nothing if no sources are passed to it;
+- it will read from standard input and write to standard output if `-` is used as the
+  filename;
+- it only outputs messages to users on standard error;
+- it exits with code 0 unless an internal error occurred (or `--check` was used).
+
+## Using _Black_ with other tools
+
+While _Black_ enforces formatting that conforms to PEP 8, other tools may raise warnings
+about _Black_'s changes or will overwrite _Black_'s changes. A good example of this is
+[isort](https://pypi.org/p/isort). Since _Black_ is barely configurable, these tools
+should be configured to neither warn about nor overwrite _Black_'s changes.
+
+Actual details on _Black_ compatible configurations for various tools can be found in
+[compatible_configs](https://github.com/psf/black/blob/master/docs/compatible_configs.md).
+
+## Migrating your code style without ruining git blame
+
+A long-standing argument against moving to automated code formatters like _Black_ is
+that the migration will clutter up the output of `git blame`. This was a valid argument,
+but since Git version 2.23, Git natively supports
+[ignoring revisions in blame](https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revltrevgt)
+with the `--ignore-rev` option. You can also pass a file listing the revisions to ignore
+using the `--ignore-revs-file` option. The changes made by the revision will be ignored
+when assigning blame. Lines modified by an ignored revision will be blamed on the
+previous revision that modified those lines.
+
+So when migrating your project's code style to _Black_, reformat everything and commit
+the changes (preferably in one massive commit). Then put the full 40-character commit
+identifier(s) into a file.
+
+```
+# Migrate code style to Black
+5b4ab991dede475d393e9d69ec388fd6bd949699
+```
+
+Afterwards, you can pass that file to `git blame` and see clean and meaningful blame
+information.
+
+```console
+$ git blame important.py --ignore-revs-file .git-blame-ignore-revs
+7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 1) def very_important_function(text, file):
+abdfd8b0 (Alice Doe 2019-09-23 11:39:32 -0400 2)     text = text.lstrip()
+7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 3)     with open(file, "r+") as f:
+7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 4)         f.write(formatted)
+```
+
+You can even configure `git` to automatically ignore revisions listed in a file on every
+call to `git blame`.
+
+```console
+$ git config blame.ignoreRevsFile .git-blame-ignore-revs
+```
+
+**The one caveat is that GitHub and GitLab do not yet support ignoring revisions in
+their native blame UI.** So blame information will be cluttered with a reformatting
+commit on those platforms. (If you'd like this feature, there's an open issue for
+[GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423); please let GitHub know
+you'd like it too!)
+
+## NOTE: This is a beta product
+
+_Black_ is already [successfully used](#used-by) by many projects, small and big. It
+also sports a decent test suite. However, it is still very new. Things will probably be
+wonky for a while. This is made explicit by the "Beta" trove classifier, as well as by
+the "b" in the version number. What this means for you is that **until the formatter
+becomes stable, you should expect some formatting to change in the future**. That being
+said, no drastic stylistic changes are planned, mostly responses to bug reports.
+
+Also, as a temporary safety measure, _Black_ will check that the reformatted code still
+produces a valid AST that is equivalent to the original. This slows it down. If you're
+feeling confident, use `--fast`.
diff --git a/docs/pyproject_toml.md b/docs/pyproject_toml.md
new file mode 100644
index 00000000000..cd313452b1e
--- /dev/null
+++ b/docs/pyproject_toml.md
@@ -0,0 +1,88 @@
+[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md"
+
+# pyproject.toml
+
+_Black_ is able to read project-specific default values for its command line options
+from a `pyproject.toml` file. This is especially useful for specifying custom
+`--include` and `--exclude` patterns for your project.
+
+**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is
+"No". _Black_ is all about sensible defaults.
+
+## What on Earth is a `pyproject.toml` file?
+
+[PEP 518](https://www.python.org/dev/peps/pep-0518/) defines `pyproject.toml` as a
+configuration file to store build system requirements for Python projects. With the
+help of tools like [Poetry](https://python-poetry.org/) or
+[Flit](https://flit.readthedocs.io/en/latest/) it can fully replace the need for
+`setup.py` and `setup.cfg` files.
+
+## Where _Black_ looks for the file
+
+By default _Black_ looks for `pyproject.toml` starting from the common base directory of
+all files and directories passed on the command line. If it's not there, it looks in
+parent directories. It stops looking when it finds the file, or a `.git` directory, or a
+`.hg` directory, or the root of the file system, whichever comes first.
+
+If you're formatting standard input, _Black_ will look for configuration starting from
+the current working directory.
+
+You can also explicitly specify the path to the file you want to use with `--config`.
+In this situation _Black_ will not look for any other file.
+
+If you're running with `--verbose`, you will see a blue message if a file was found and
+used.
+
+Please note `blackd` will not use `pyproject.toml` configuration.
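+
+To make the lookup concrete, here is a minimal sketch of the search described above. It
+is an illustration, not _Black_'s actual implementation; the helper name
+`find_pyproject_toml` and its signature are assumptions made for this example:
+
+```python
+from pathlib import Path
+from typing import Optional
+
+
+def find_pyproject_toml(common_base: Path) -> Optional[Path]:
+    """Hypothetical sketch: walk from `common_base` towards the filesystem root.
+
+    Stop at the first directory that contains pyproject.toml; give up when a
+    `.git` or `.hg` directory marks the project root, or at the filesystem root.
+    """
+    for directory in (common_base, *common_base.parents):
+        candidate = directory / "pyproject.toml"
+        if candidate.is_file():
+            return candidate
+        if (directory / ".git").exists() or (directory / ".hg").exists():
+            return None  # project root reached without finding a config file
+    return None  # filesystem root reached without finding anything
+```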
+
+## Configuration format
+
+As the file extension suggests, `pyproject.toml` is a
+[TOML](https://github.com/toml-lang/toml) file. It contains separate sections for
+different tools. _Black_ uses the `[tool.black]` section. The option keys are the same
+as the long names of the options on the command line.
+
+Note that you have to use single-quoted strings in TOML for regular expressions. It's
+the equivalent of r-strings in Python. Multiline strings are treated as verbose regular
+expressions by _Black_. Use `[ ]` to denote a significant space character.
+
+Example pyproject.toml
+
+```toml
+[tool.black]
+line-length = 88
+target-version = ['py37']
+include = '\.pyi?$'
+exclude = '''
+
+(
+  /(
+      \.eggs         # exclude a few common directories in the
+    | \.git          # root of the project
+    | \.hg
+    | \.mypy_cache
+    | \.tox
+    | \.venv
+    | _build
+    | buck-out
+    | build
+    | dist
+  )/
+  | foo.py           # also separately exclude a file named foo.py in
+                     # the root of the project
+)
+'''
+```
+
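+
+As a rough illustration of how such a section is consumed, the snippet below reads the
+example file with the `toml` package (which _Black_ itself depends on). The variable
+names are made up for this example:
+
+```python
+import toml
+
+# Parse the example configuration above. Option keys in [tool.black] carry the
+# same long names as the command-line options, e.g. `line-length`.
+pyproject = toml.load("pyproject.toml")
+black_config = pyproject.get("tool", {}).get("black", {})
+
+print(black_config["line-length"])  # 88
+print(black_config["target-version"])  # ['py37']
+# Single-quoted TOML strings keep their backslashes, like Python r-strings,
+# so the regular expressions arrive intact:
+print(black_config["include"])  # \.pyi?$
+```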
+ +## Lookup hierarchy + +Command-line options have defaults that you can see in `--help`. A `pyproject.toml` can +override those defaults. Finally, options provided by the user on the command line +override both. + +_Black_ will only ever use one `pyproject.toml` file during an entire run. It doesn't +look for multiple files, and doesn't compose configuration from different levels of the +file hierarchy. diff --git a/docs/reference/reference_functions.rst b/docs/reference/reference_functions.rst index b10eea9b01f..a7184115c94 100644 --- a/docs/reference/reference_functions.rst +++ b/docs/reference/reference_functions.rst @@ -89,7 +89,7 @@ Split functions .. autofunction:: black.standalone_comment_split -.. autofunction:: black.split_line +.. autofunction:: black.transform_line Caching ------- @@ -171,7 +171,7 @@ Utilities .. autofunction:: black.re_compile_maybe_verbose -.. autofunction:: black.should_explode +.. autofunction:: black.should_split_body_explode .. autofunction:: black.shutdown diff --git a/docs/requirements.txt b/docs/requirements.txt index a36fd8a675b..4cad9bc205b 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,3 @@ -recommonmark==0.4.0 -Sphinx==1.7.2 +recommonmark==0.6.0 +Sphinx==3.2.1 +Pygments==2.6.1 \ No newline at end of file diff --git a/docs/show_your_style.md b/docs/show_your_style.md new file mode 100644 index 00000000000..67b213c3965 --- /dev/null +++ b/docs/show_your_style.md @@ -0,0 +1,19 @@ +[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" + +# Show your style + +Use the badge in your project's README.md: + +```md +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +``` + +Using the badge in README.rst: + +``` +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black +``` + +Looks like this: +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) diff --git a/docs/the_black_code_style.md b/docs/the_black_code_style.md index 21f217d388e..09d58307a05 100644 --- a/docs/the_black_code_style.md +++ b/docs/the_black_code_style.md @@ -153,13 +153,14 @@ the following configuration.
A compatible `.isort.cfg`
 
-```
+```cfg
 [settings]
-multi_line_output=3
-include_trailing_comma=True
-force_grid_wrap=0
-use_parentheses=True
-line_length=88
+multi_line_output = 3
+include_trailing_comma = True
+force_grid_wrap = 0
+use_parentheses = True
+ensure_newline_before_comments = True
+line_length = 88
 ```
 
 The equivalent command line is:
diff --git a/docs/version_control_integration.md b/docs/version_control_integration.md
new file mode 100644
index 00000000000..25dac308c47
--- /dev/null
+++ b/docs/version_control_integration.md
@@ -0,0 +1,28 @@
+[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md"
+
+# Version control integration
+
+Use [pre-commit](https://pre-commit.com/). Once you
+[have it installed](https://pre-commit.com/#install), add this to the
+`.pre-commit-config.yaml` in your repository:
+
+```yaml
+repos:
+  - repo: https://github.com/psf/black
+    rev: 19.10b0 # Replace with any tag/version: https://github.com/psf/black/tags
+    hooks:
+      - id: black
+        language_version: python3 # Should be a command that runs python3.6+
+```
+
+Then run `pre-commit install` and you're ready to go.
+
+Avoid using `args` in the hook. Instead, store necessary configuration in
+`pyproject.toml` so that editors and command-line usage of Black all behave consistently
+for your project. See _Black_'s own
+[pyproject.toml](https://github.com/psf/black/blob/master/pyproject.toml) for an
+example.
+
+If you're already using Python 3.7, switch the `language_version` accordingly. Finally,
+`stable` is a branch that tracks the latest release on PyPI. If you'd rather run on
+master, this is also an option.
diff --git a/fuzz.py b/fuzz.py
new file mode 100644
index 00000000000..fdd4917f2ec
--- /dev/null
+++ b/fuzz.py
@@ -0,0 +1,59 @@
+"""Property-based tests for Black.
+
+By Zac Hatfield-Dodds, based on my Hypothesmith tool for source code
+generation. You can run this file with `python`, `pytest`, or (soon)
+a coverage-guided fuzzer I'm working on.
+"""
+
+import hypothesmith
+from hypothesis import HealthCheck, given, settings, strategies as st
+
+import black
+
+
+# This test uses the Hypothesis and Hypothesmith libraries to generate random
+# syntactically-valid Python source code and run Black in odd modes.
+@settings(
+    max_examples=1000,  # roughly 1k tests/minute, or half that under coverage
+    derandomize=True,  # deterministic mode to avoid CI flakiness
+    deadline=None,  # ignore Hypothesis' health checks; we already know that
+    suppress_health_check=HealthCheck.all(),  # this is slow and filter-heavy.
+)
+@given(
+    # Note that while Hypothesmith might generate code unlike that written by
+    # humans, it's a general test that should pass for any *valid* source code.
+    # (so e.g. running it against code scraped off the internet might also help)
+    src_contents=hypothesmith.from_grammar() | hypothesmith.from_node(),
+    # Using randomly-varied modes helps us to exercise less common code paths.
+    mode=st.builds(
+        black.FileMode,
+        line_length=st.just(88) | st.integers(0, 200),
+        string_normalization=st.booleans(),
+        is_pyi=st.booleans(),
+    ),
+)
+def test_idempotent_any_syntactically_valid_python(
+    src_contents: str, mode: black.FileMode
+) -> None:
+    # Before starting, let's confirm that the input string is valid Python:
+    compile(src_contents, "<string>", "exec")  # else the bug is in hypothesmith
+
+    # Then format the code...
+    try:
+        dst_contents = black.format_str(src_contents, mode=mode)
+    except black.InvalidInput:
+        # This is a bug - if it's valid Python code, as above, black should be
+        # able to cope with it. See issues #970, #1012, #1358, and #1557.
+        # TODO: remove this try-except block when issues are resolved.
+        return
+
+    # And check that we got equivalent and stable output.
+    black.assert_equivalent(src_contents, dst_contents)
+    black.assert_stable(src_contents, dst_contents, mode=mode)
+
+    # Future test: check that pure-python and mypyc versions of black
+    # give identical output for identical input?
+
+
+if __name__ == "__main__":
+    test_idempotent_any_syntactically_valid_python()
diff --git a/gallery/gallery.py b/gallery/gallery.py
index 2a56b4ed4c0..6b42ec3a6d4 100755
--- a/gallery/gallery.py
+++ b/gallery/gallery.py
@@ -127,7 +127,10 @@ def get_package(
 
 
 def download_and_extract_top_packages(
-    directory: Path, days: Days = 365, workers: int = 8, limit: slice = DEFAULT_SLICE,
+    directory: Path,
+    days: Days = 365,
+    workers: int = 8,
+    limit: slice = DEFAULT_SLICE,
 ) -> Generator[Path, None, None]:
     with ThreadPoolExecutor(max_workers=workers) as executor:
         bound_downloader = partial(get_package, version=None, directory=directory)
diff --git a/setup.py b/setup.py
index bff439c6d8c..12fde2568cf 100644
--- a/setup.py
+++ b/setup.py
@@ -68,10 +68,9 @@ def get_long_description() -> str:
     python_requires=">=3.6",
     zip_safe=False,
     install_requires=[
-        "click>=6.5",
-        "attrs>=18.1.0",
+        "click>=7.1.2",
         "appdirs",
-        "toml>=0.9.4",
+        "toml>=0.10.1",
         "typed-ast>=1.4.0",
         "regex>=2020.1.8",
         "pathspec>=0.6, <1",
diff --git a/src/black/__init__.py b/src/black/__init__.py
index a77a410fa42..1d20bb416ac 100644
--- a/src/black/__init__.py
+++ b/src/black/__init__.py
@@ -65,7 +65,7 @@
     import colorama  # noqa: F401
 
 DEFAULT_LINE_LENGTH = 88
-DEFAULT_EXCLUDES = r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/"  # noqa: B950
+DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/"  # noqa: B950
 DEFAULT_INCLUDES = r"\.pyi?$"
 CACHE_DIR = Path(user_cache_dir("black", version=__version__))
 
@@ -195,6 +195,7 @@ class Feature(Enum):
     ASYNC_KEYWORDS = 7
     ASSIGNMENT_EXPRESSIONS = 8
     POS_ONLY_ARGUMENTS = 9
+    FORCE_OPTIONAL_PARENTHESES = 50
 
 
 VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
@@ -240,6 +241,7 @@ class Mode:
     target_versions: Set[TargetVersion] = field(default_factory=set)
     line_length: int = DEFAULT_LINE_LENGTH
     string_normalization: bool = True
+    experimental_string_processing: bool = False
     is_pyi: bool = False
 
     def get_cache_key(self) -> str:
@@ -376,6 +378,15 @@ def target_version_option_callback(
     is_flag=True,
     help="Don't normalize string quotes or prefixes.",
 )
+@click.option(
+    "--experimental-string-processing",
+    is_flag=True,
+    hidden=True,
+    help=(
+        "Experimental option that performs more normalization on string literals."
+        " Currently disabled because it leads to some crashes."
+    ),
+)
 @click.option(
     "--check",
     is_flag=True,
@@ -471,7 +482,7 @@ def target_version_option_callback(
     ),
     is_eager=True,
     callback=read_pyproject_toml,
-    help="Read configuration from PATH.",
+    help="Read configuration from FILE path.",
 )
 @click.pass_context
 def main(
@@ -485,6 +496,7 @@ def main(
     fast: bool,
     pyi: bool,
     skip_string_normalization: bool,
+    experimental_string_processing: bool,
     quiet: bool,
     verbose: bool,
     include: str,
@@ -505,6 +517,7 @@ def main(
         line_length=line_length,
         is_pyi=pyi,
         string_normalization=not skip_string_normalization,
+        experimental_string_processing=experimental_string_processing,
     )
     if config and verbose:
         out(f"Using configuration from {config}.", bold=False, fg="blue")
@@ -583,9 +596,7 @@ def get_sources(
     root = find_project_root(src)
     sources: Set[Path] = set()
     path_empty(src, "No Path provided. Nothing to do 😴", quiet, verbose, ctx)
-    exclude_regexes = [exclude_regex]
-    if force_exclude_regex is not None:
-        exclude_regexes.append(force_exclude_regex)
+
     gitignore = get_gitignore(root)
     for s in src:
         p = Path(s)
@@ -595,19 +606,30 @@ def get_sources(
                     p.iterdir(),
                     root,
                     include_regex,
-                    exclude_regexes,
+                    exclude_regex,
+                    force_exclude_regex,
                     report,
-                    get_gitignore(root),
+                    gitignore,
                 )
             )
         elif s == "-":
             sources.add(p)
         elif p.is_file():
-            sources.update(
-                gen_python_files(
-                    [p], root, None, exclude_regexes, report, get_gitignore(root)
-                )
-            )
+            normalized_path = normalize_path_maybe_ignore(p, root, report)
+            if normalized_path is None:
+                continue
+
+            normalized_path = "/" + normalized_path
+            # Hard-exclude any files that match the `--force-exclude` regex.
+            if force_exclude_regex:
+                force_exclude_match = force_exclude_regex.search(normalized_path)
+            else:
+                force_exclude_match = None
+            if force_exclude_match and force_exclude_match.group(0):
+                report.path_ignored(p, "matches the --force-exclude regular expression")
+                continue
+
+            sources.add(p)
         else:
             err(f"invalid path: {s}")
     return sources
@@ -655,6 +677,8 @@ def reformat_one(
                 write_cache(cache, [src], mode)
         report.done(src, changed)
     except Exception as exc:
+        if report.verbose:
+            traceback.print_exc()
         report.failed(src, str(exc))
 
 
@@ -929,6 +953,7 @@ def f(arg: str = "") -> None:
     ...
 
     A more complex example:
+
     >>> print(
     ...   black.format_str(
     ...     "def f(arg:str='')->None: hey",
@@ -973,10 +998,7 @@ def f(
         before, after = elt.maybe_empty_lines(current_line)
         dst_contents.append(str(empty_line) * before)
         for line in transform_line(
-            current_line,
-            line_length=mode.line_length,
-            normalize_strings=mode.string_normalization,
-            features=split_line_features,
+            current_line, mode=mode, features=split_line_features
         ):
             dst_contents.append(str(line))
     return "".join(dst_contents)
@@ -1263,6 +1285,7 @@ class BracketTracker:
     previous: Optional[Leaf] = None
     _for_loop_depths: List[int] = field(default_factory=list)
     _lambda_argument_depths: List[int] = field(default_factory=list)
+    invisible: List[Leaf] = field(default_factory=list)
 
     def mark(self, leaf: Leaf) -> None:
         """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
@@ -1288,6 +1311,8 @@ def mark(self, leaf: Leaf) -> None: self.depth -= 1 opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) leaf.opening_bracket = opening_bracket + if not leaf.value: + self.invisible.append(leaf) leaf.bracket_depth = self.depth if self.depth == 0: delim = is_split_before_delimiter(leaf, self.previous) @@ -1300,6 +1325,8 @@ def mark(self, leaf: Leaf) -> None: if leaf.type in OPENING_BRACKETS: self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf self.depth += 1 + if not leaf.value: + self.invisible.append(leaf) self.previous = leaf self.maybe_increment_lambda_arguments(leaf) self.maybe_increment_for_loop_variable(leaf) @@ -1421,7 +1448,8 @@ def append(self, leaf: Leaf, preformatted: bool = False) -> None: ) if self.inside_brackets or not preformatted: self.bracket_tracker.mark(leaf) - self.maybe_remove_trailing_comma(leaf) + if self.maybe_should_explode(leaf): + self.should_explode = True if not self.append_comment(leaf): self.leaves.append(leaf) @@ -1473,69 +1501,6 @@ def is_stub_class(self) -> bool: Leaf(token.DOT, ".") for _ in range(3) ] - @property - def is_collection_with_optional_trailing_comma(self) -> bool: - """Is this line a collection literal with a trailing comma that's optional? - - Note that the trailing comma in a 1-tuple is not optional. - """ - if not self.leaves or len(self.leaves) < 4: - return False - - # Look for and address a trailing colon. - if self.leaves[-1].type == token.COLON: - closer = self.leaves[-2] - close_index = -2 - else: - closer = self.leaves[-1] - close_index = -1 - if closer.type not in CLOSING_BRACKETS or self.inside_brackets: - return False - - if closer.type == token.RPAR: - # Tuples require an extra check, because if there's only - # one element in the tuple removing the comma unmakes the - # tuple. - # - # We also check for parens before looking for the trailing - # comma because in some cases (eg assigning a dict - # literal) the literal gets wrapped in temporary parens - # during parsing. This case is covered by the - # collections.py test data. - opener = closer.opening_bracket - for _open_index, leaf in enumerate(self.leaves): - if leaf is opener: - break - - else: - # Couldn't find the matching opening paren, play it safe. - return False - - commas = 0 - comma_depth = self.leaves[close_index - 1].bracket_depth - for leaf in self.leaves[_open_index + 1 : close_index]: - if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA: - commas += 1 - if commas > 1: - # We haven't looked yet for the trailing comma because - # we might also have caught noop parens. - return self.leaves[close_index - 1].type == token.COMMA - - elif commas == 1: - return False # it's either a one-tuple or didn't have a trailing comma - - if self.leaves[close_index - 1].type in CLOSING_BRACKETS: - close_index -= 1 - closer = self.leaves[close_index] - if closer.type == token.RPAR: - # TODO: this is a gut feeling. Will we ever see this? - return False - - if self.leaves[close_index - 1].type != token.COMMA: - return False - - return True - @property def is_def(self) -> bool: """Is this a function definition? 
(Also returns True for async defs.)""" @@ -1660,42 +1625,28 @@ def contains_unsplittable_type_ignore(self) -> bool: def contains_multiline_strings(self) -> bool: return any(is_multiline_string(leaf) for leaf in self.leaves) - def maybe_remove_trailing_comma(self, closing: Leaf) -> bool: - """Remove trailing comma if there is one and it's safe.""" - if not (self.leaves and self.leaves[-1].type == token.COMMA): - return False - - # We remove trailing commas only in the case of importing a - # single name from a module. + def maybe_should_explode(self, closing: Leaf) -> bool: + """Return True if this line should explode (always be split), that is when: + - there's a trailing comma here; and + - it's not a one-tuple. + """ if not ( - self.leaves - and self.is_import - and len(self.leaves) > 4 + closing.type in CLOSING_BRACKETS + and self.leaves and self.leaves[-1].type == token.COMMA - and closing.type in CLOSING_BRACKETS - and self.leaves[-4].type == token.NAME - and ( - # regular `from foo import bar,` - self.leaves[-4].value == "import" - # `from foo import (bar as baz,) - or ( - len(self.leaves) > 6 - and self.leaves[-6].value == "import" - and self.leaves[-3].value == "as" - ) - # `from foo import bar as baz,` - or ( - len(self.leaves) > 5 - and self.leaves[-5].value == "import" - and self.leaves[-3].value == "as" - ) - ) - and closing.type == token.RPAR ): return False - self.remove_trailing_comma() - return True + if closing.type in {token.RBRACE, token.RSQB}: + return True + + if self.is_import: + return True + + if not is_one_tuple_between(closing.opening_bracket, closing, self.leaves): + return True + + return False def append_comment(self, comment: Leaf) -> bool: """Add an inline or standalone comment to the line.""" @@ -2085,13 +2036,20 @@ def visit_factor(self, node: Node) -> Iterator[Line]: yield from self.visit_default(node) def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: - # Check if it's a docstring - if prev_siblings_are( - leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] - ) and is_multiline_string(leaf): - prefix = " " * self.current_line.depth - docstring = fix_docstring(leaf.value[3:-3], prefix) - leaf.value = leaf.value[0:3] + docstring + leaf.value[-3:] + if is_docstring(leaf) and "\\\n" not in leaf.value: + # We're ignoring docstrings with backslash newline escapes because changing + # indentation of those changes the AST representation of the code. + prefix = get_string_prefix(leaf.value) + lead_len = len(prefix) + 3 + tail_len = -3 + indent = " " * 4 * self.current_line.depth + docstring = fix_docstring(leaf.value[lead_len:tail_len], indent) + if docstring: + if leaf.value[lead_len - 1] == docstring[0]: + docstring = " " + docstring + if leaf.value[tail_len + 1] == docstring[-1]: + docstring = docstring + " " + leaf.value = leaf.value[0:lead_len] + docstring + leaf.value[tail_len:] normalize_string_quotes(leaf) yield from self.visit_default(leaf) @@ -2638,10 +2596,7 @@ def make_comment(content: str) -> str: def transform_line( - line: Line, - line_length: int, - normalize_strings: bool, - features: Collection[Feature] = (), + line: Line, mode: Mode, features: Collection[Feature] = () ) -> Iterator[Line]: """Transform a `line`, potentially splitting it into many lines. 
@@ -2657,7 +2612,7 @@ def transform_line( def init_st(ST: Type[StringTransformer]) -> StringTransformer: """Initialize StringTransformer""" - return ST(line_length, normalize_strings) + return ST(mode.line_length, mode.string_normalization) string_merge = init_st(StringMerger) string_paren_strip = init_st(StringParenStripper) @@ -2668,72 +2623,79 @@ def init_st(ST: Type[StringTransformer]) -> StringTransformer: if ( not line.contains_uncollapsable_type_comments() and not line.should_explode - and not line.is_collection_with_optional_trailing_comma and ( - is_line_short_enough(line, line_length=line_length, line_str=line_str) + is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) or line.contains_unsplittable_type_ignore() ) - and not (line.contains_standalone_comments() and line.inside_brackets) + and not (line.inside_brackets and line.contains_standalone_comments()) ): # Only apply basic string preprocessing, since lines shouldn't be split here. - transformers = [string_merge, string_paren_strip] + if mode.experimental_string_processing: + transformers = [string_merge, string_paren_strip] + else: + transformers = [] elif line.is_def: transformers = [left_hand_split] else: def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]: - for omit in generate_trailers_to_omit(line, line_length): - lines = list(right_hand_split(line, line_length, features, omit=omit)) - if is_line_short_enough(lines[0], line_length=line_length): + """Wraps calls to `right_hand_split`. + + The calls increasingly `omit` right-hand trailers (bracket pairs with + content), meaning the trailers get glued together to split on another + bracket pair instead. + """ + for omit in generate_trailers_to_omit(line, mode.line_length): + lines = list( + right_hand_split(line, mode.line_length, features, omit=omit) + ) + # Note: this check is only able to figure out if the first line of the + # *current* transformation fits in the line length. This is true only + # for simple cases. All others require running more transforms via + # `transform_line()`. This check doesn't know if those would succeed. + if is_line_short_enough(lines[0], line_length=mode.line_length): yield from lines return # All splits failed, best effort split with no omits. # This mostly happens to multiline strings that are by definition - # reported as not fitting a single line. - # line_length=1 here was historically a bug that somehow became a feature. - # See #762 and #781 for the full story. - yield from right_hand_split(line, line_length=1, features=features) - - if line.inside_brackets: - transformers = [ - string_merge, - string_paren_strip, - delimiter_split, - standalone_comment_split, - string_split, - string_paren_wrap, - rhs, - ] + # reported as not fitting a single line, as well as lines that contain + # trailing commas (those have to be exploded). 
+            yield from right_hand_split(
+                line, line_length=mode.line_length, features=features
+            )
+
+    if mode.experimental_string_processing:
+        if line.inside_brackets:
+            transformers = [
+                string_merge,
+                string_paren_strip,
+                delimiter_split,
+                standalone_comment_split,
+                string_split,
+                string_paren_wrap,
+                rhs,
+            ]
+        else:
+            transformers = [
+                string_merge,
+                string_paren_strip,
+                string_split,
+                string_paren_wrap,
+                rhs,
+            ]
     else:
-        transformers = [
-            string_merge,
-            string_paren_strip,
-            string_split,
-            string_paren_wrap,
-            rhs,
-        ]
+        if line.inside_brackets:
+            transformers = [delimiter_split, standalone_comment_split, rhs]
+        else:
+            transformers = [rhs]
 
     for transform in transformers:
         # We are accumulating lines in `result` because we might want to abort
         # mission and return the original line in the end, or attempt a different
         # split altogether.
-        result: List[Line] = []
         try:
-            for transformed_line in transform(line, features):
-                if str(transformed_line).strip("\n") == line_str:
-                    raise CannotTransform(
-                        "Line transformer returned an unchanged result"
-                    )
-
-                result.extend(
-                    transform_line(
-                        transformed_line,
-                        line_length=line_length,
-                        normalize_strings=normalize_strings,
-                        features=features,
-                    )
-                )
+            result = run_transformer(line, transform, mode, features, line_str=line_str)
         except CannotTransform:
             continue
         else:
@@ -2774,6 +2736,7 @@ class StringTransformer(ABC):
 
     line_length: int
     normalize_strings: bool
+    __name__ = "StringTransformer"
 
     @abstractmethod
     def do_match(self, line: Line) -> TMatchResult:
@@ -3020,7 +2983,7 @@ def __remove_backslash_line_continuation_chars(
         )
 
         new_line = line.clone()
-        new_line.comments = line.comments
+        new_line.comments = line.comments.copy()
         append_leaves(new_line, line, LL)
 
         new_string_leaf = new_line.leaves[string_idx]
@@ -3238,7 +3201,9 @@ class StringParenStripper(StringTransformer):
     Requirements:
         The line contains a string which is surrounded by parentheses and:
         - The target string is NOT the only argument to a function call).
-        - The RPAR is NOT followed by an attribute access (i.e. a dot).
+        - If the target string contains a PERCENT, the brackets are not
+          preceded or followed by an operator with higher precedence than
+          PERCENT.
 
     Transformations:
         The parentheses mentioned in the 'Requirements' section are stripped.
@@ -3281,14 +3246,51 @@ def do_match(self, line: Line) -> TMatchResult:
 
             string_parser = StringParser()
             next_idx = string_parser.parse(LL, string_idx)
 
+            # if the leaves in the parsed string include a PERCENT, we need to
+            # make sure the initial LPAR is NOT preceded by an operator with
+            # higher or equal precedence to PERCENT
+            if is_valid_index(idx - 2):
+                # mypy can't quite follow unless we name this
+                before_lpar = LL[idx - 2]
+                if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
+                    (
+                        before_lpar.type
+                        in {
+                            token.STAR,
+                            token.AT,
+                            token.SLASH,
+                            token.DOUBLESLASH,
+                            token.PERCENT,
+                            token.TILDE,
+                            token.DOUBLESTAR,
+                            token.AWAIT,
+                            token.LSQB,
+                            token.LPAR,
+                        }
+                    )
+                    or (
+                        # only unary PLUS/MINUS
+                        before_lpar.parent
+                        and before_lpar.parent.type == syms.factor
+                        and (before_lpar.type in {token.PLUS, token.MINUS})
+                    )
+                ):
+                    continue
+
             # Should be followed by a non-empty RPAR...
             if (
                 is_valid_index(next_idx)
                 and LL[next_idx].type == token.RPAR
                 and not is_empty_rpar(LL[next_idx])
            ):
-                # That RPAR should NOT be followed by a '.' symbol.
- if is_valid_index(next_idx + 1) and LL[next_idx + 1].type == token.DOT: + # That RPAR should NOT be followed by anything with higher + # precedence than PERCENT + if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in { + token.DOUBLESTAR, + token.LSQB, + token.LPAR, + token.DOT, + }: continue return Ok(string_idx) @@ -3309,7 +3311,6 @@ def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: new_line = line.clone() new_line.comments = line.comments.copy() - append_leaves(new_line, line, LL[: string_idx - 1]) string_leaf = Leaf(token.STRING, LL[string_idx].value) @@ -3318,7 +3319,7 @@ def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: new_line.append(string_leaf) append_leaves( - new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :], + new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :] ) LL[rpar_idx].remove() @@ -4594,8 +4595,6 @@ def append_leaves(new_line: Line, old_line: Line, leaves: List[Leaf]) -> None: set(@leaves) is a subset of set(@old_line.leaves). """ for old_leaf in leaves: - assert old_leaf in old_line.leaves - new_leaf = Leaf(old_leaf.type, old_leaf.value) replace_child(old_leaf, new_leaf) new_line.append(new_leaf) @@ -4755,8 +4754,7 @@ def right_hand_split( tail = bracket_split_build_line(tail_leaves, line, opening_bracket) bracket_split_succeeded_or_raise(head, body, tail) if ( - # the body shouldn't be exploded - not body.should_explode + Feature.FORCE_OPTIONAL_PARENTHESES not in features # the opening bracket is an optional paren and opening_bracket.type == token.LPAR and not opening_bracket.value @@ -4769,7 +4767,7 @@ def right_hand_split( # there are no standalone comments in the body and not body.contains_standalone_comments(0) # and we can actually remove the parens - and can_omit_invisible_parens(body, line_length) + and can_omit_invisible_parens(body, line_length, omit_on_explode=omit) ): omit = {id(closing_bracket), *omit} try: @@ -4855,7 +4853,8 @@ def bracket_split_build_line( continue if leaves[i].type != token.COMMA: - leaves.insert(i + 1, Leaf(token.COMMA, ",")) + new_comma = Leaf(token.COMMA, ",") + leaves.insert(i + 1, new_comma) break # Populate the line @@ -4863,8 +4862,8 @@ def bracket_split_build_line( result.append(leaf, preformatted=True) for comment_after in original.comments_after(leaf): result.append(comment_after, preformatted=True) - if is_body: - result.should_explode = should_explode(result, opening_bracket) + if is_body and should_split_body_explode(result, opening_bracket): + result.should_explode = True return result @@ -4949,7 +4948,8 @@ def append_to_line(leaf: Leaf) -> Iterator[Line]: and current_line.leaves[-1].type != token.COMMA and current_line.leaves[-1].type != STANDALONE_COMMENT ): - current_line.append(Leaf(token.COMMA, ",")) + new_comma = Leaf(token.COMMA, ",") + current_line.append(new_comma) yield current_line @@ -5571,24 +5571,63 @@ def ensure_visible(leaf: Leaf) -> None: leaf.value = ")" -def should_explode(line: Line, opening_bracket: Leaf) -> bool: - """Should `line` immediately be split with `delimiter_split()` after RHS?""" +def should_split_body_explode(line: Line, opening_bracket: Leaf) -> bool: + """Should `line` be immediately split with `delimiter_split()` after RHS?""" - if not ( - opening_bracket.parent - and opening_bracket.parent.type in {syms.atom, syms.import_from} - and opening_bracket.value in "[{(" - ): + if not (opening_bracket.parent and opening_bracket.value in "[{("): return False + # We're essentially checking if the 
body is delimited by commas and there's more + # than one of them (we're excluding the trailing comma and if the delimiter priority + # is still commas, that means there's more). + exclude = set() + trailing_comma = False try: last_leaf = line.leaves[-1] - exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set() + if last_leaf.type == token.COMMA: + trailing_comma = True + exclude.add(id(last_leaf)) max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) except (IndexError, ValueError): return False - return max_priority == COMMA_PRIORITY + return max_priority == COMMA_PRIORITY and ( + trailing_comma + # always explode imports + or opening_bracket.parent.type in {syms.atom, syms.import_from} + ) + + +def is_one_tuple_between(opening: Leaf, closing: Leaf, leaves: List[Leaf]) -> bool: + """Return True if content between `opening` and `closing` looks like a one-tuple.""" + if opening.type != token.LPAR and closing.type != token.RPAR: + return False + + depth = closing.bracket_depth + 1 + for _opening_index, leaf in enumerate(leaves): + if leaf is opening: + break + + else: + raise LookupError("Opening paren not found in `leaves`") + + commas = 0 + _opening_index += 1 + for leaf in leaves[_opening_index:]: + if leaf is closing: + break + + bracket_depth = leaf.bracket_depth + if bracket_depth == depth and leaf.type == token.COMMA: + commas += 1 + if leaf.parent and leaf.parent.type in { + syms.arglist, + syms.typedargslist, + }: + commas += 1 + break + + return commas < 2 def get_features_used(node: Node) -> Set[Feature]: @@ -5655,11 +5694,13 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf a preceding closing bracket fits in one line. Yielded sets are cumulative (contain results of previous yields, too). First - set is empty. + set is empty, unless the line should explode, in which case bracket pairs until + the one that needs to explode are omitted. """ omit: Set[LeafID] = set() - yield omit + if not line.should_explode: + yield omit length = 4 * line.depth opening_bracket: Optional[Leaf] = None @@ -5678,9 +5719,23 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf if leaf is opening_bracket: opening_bracket = None elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if ( + line.should_explode + and prev + and prev.type == token.COMMA + and not is_one_tuple_between( + leaf.opening_bracket, leaf, line.leaves + ) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. + break + inner_brackets.add(id(leaf)) elif leaf.type in CLOSING_BRACKETS: - if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if prev and prev.type in OPENING_BRACKETS: # Empty brackets would fail a split so treat them as "inner" # brackets (e.g. only add them to the `omit` set if another # pair of brackets was good enough. @@ -5693,6 +5748,16 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf inner_brackets.clear() yield omit + if ( + line.should_explode + and prev + and prev.type == token.COMMA + and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. 
+ break + if leaf.value: opening_bracket = leaf.opening_bracket closing_bracket = leaf @@ -5759,16 +5824,40 @@ def get_gitignore(root: Path) -> PathSpec: return PathSpec.from_lines("gitwildmatch", lines) +def normalize_path_maybe_ignore( + path: Path, root: Path, report: "Report" +) -> Optional[str]: + """Normalize `path`. May return `None` if `path` was ignored. + + `report` is where "path ignored" output goes. + """ + try: + normalized_path = path.resolve().relative_to(root).as_posix() + except OSError as e: + report.path_ignored(path, f"cannot be read because {e}") + return None + + except ValueError: + if path.is_symlink(): + report.path_ignored(path, f"is a symbolic link that points outside {root}") + return None + + raise + + return normalized_path + + def gen_python_files( paths: Iterable[Path], root: Path, include: Optional[Pattern[str]], - exclude_regexes: Iterable[Pattern[str]], + exclude: Pattern[str], + force_exclude: Optional[Pattern[str]], report: "Report", gitignore: PathSpec, ) -> Iterator[Path]: """Generate all files under `path` whose paths are not excluded by the - `exclude` regex, but are included by the `include` regex. + `exclude_regex` or `force_exclude` regexes, but are included by the `include` regex. Symbolic links pointing outside of the `root` directory are ignored. @@ -5776,43 +5865,41 @@ def gen_python_files( """ assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" for child in paths: - # Then ignore with `exclude` option. - try: - normalized_path = child.resolve().relative_to(root).as_posix() - except OSError as e: - report.path_ignored(child, f"cannot be read because {e}") + normalized_path = normalize_path_maybe_ignore(child, root, report) + if normalized_path is None: continue - except ValueError: - if child.is_symlink(): - report.path_ignored( - child, f"is a symbolic link that points outside {root}" - ) - continue - - raise # First ignore files matching .gitignore if gitignore.match_file(normalized_path): report.path_ignored(child, "matches the .gitignore file content") continue + # Then ignore with `--exclude` and `--force-exclude` options. normalized_path = "/" + normalized_path if child.is_dir(): normalized_path += "/" - is_excluded = False - for exclude in exclude_regexes: - exclude_match = exclude.search(normalized_path) if exclude else None - if exclude_match and exclude_match.group(0): - report.path_ignored(child, "matches the --exclude regular expression") - is_excluded = True - break - if is_excluded: + exclude_match = exclude.search(normalized_path) if exclude else None + if exclude_match and exclude_match.group(0): + report.path_ignored(child, "matches the --exclude regular expression") + continue + + force_exclude_match = ( + force_exclude.search(normalized_path) if force_exclude else None + ) + if force_exclude_match and force_exclude_match.group(0): + report.path_ignored(child, "matches the --force-exclude regular expression") continue if child.is_dir(): yield from gen_python_files( - child.iterdir(), root, include, exclude_regexes, report, gitignore + child.iterdir(), + root, + include, + exclude, + force_exclude, + report, + gitignore, ) elif child.is_file(): @@ -5825,8 +5912,8 @@ def gen_python_files( def find_project_root(srcs: Iterable[str]) -> Path: """Return a directory containing .git, .hg, or pyproject.toml. - That directory can be one of the directories passed in `srcs` or their - common parent. + That directory will be a common parent of all files and directories + passed in `srcs`. 
If no directory in the tree contains a marker that would specify it's the project root, the root of the file system is returned. @@ -5834,11 +5921,20 @@ def find_project_root(srcs: Iterable[str]) -> Path: if not srcs: return Path("/").resolve() - common_base = min(Path(src).resolve() for src in srcs) - if common_base.is_dir(): - # Append a fake file so `parents` below returns `common_base_dir`, too. - common_base /= "fake-file" - for directory in common_base.parents: + path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] + + # A list of lists of parents for each 'src'. 'src' is included as a + # "parent" of itself if it is a directory + src_parents = [ + list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs + ] + + common_base = max( + set.intersection(*(set(parents) for parents in src_parents)), + key=lambda path: path.parts, + ) + + for directory in (common_base, *common_base.parents): if (directory / ".git").exists(): return directory @@ -6023,7 +6119,7 @@ def _stringify_ast( and field == "value" and isinstance(value, str) ): - normalized = re.sub(r" *\n[ \t]+", "\n ", value).strip() + normalized = re.sub(r" *\n[ \t]*", "\n", value).strip() else: normalized = value yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" @@ -6067,6 +6163,7 @@ def assert_stable(src: str, dst: str, mode: Mode) -> None: newdst = format_str(dst, mode=mode) if dst != newdst: log = dump_to_file( + str(mode), diff(src, dst, "source", "first pass"), diff(dst, newdst, "first pass", "second pass"), ) @@ -6243,7 +6340,11 @@ def can_be_split(line: Line) -> bool: return True -def can_omit_invisible_parens(line: Line, line_length: int) -> bool: +def can_omit_invisible_parens( + line: Line, + line_length: int, + omit_on_explode: Collection[LeafID] = (), +) -> bool: """Does `line` have a shape safe to reformat without optional parens around it? Returns True for only a subset of potentially nice looking formattings but @@ -6266,37 +6367,27 @@ def can_omit_invisible_parens(line: Line, line_length: int) -> bool: assert len(line.leaves) >= 2, "Stranded delimiter" - first = line.leaves[0] - second = line.leaves[1] - penultimate = line.leaves[-2] - last = line.leaves[-1] - # With a single delimiter, omit if the expression starts or ends with # a bracket. + first = line.leaves[0] + second = line.leaves[1] if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: - remainder = False - length = 4 * line.depth - for _index, leaf, leaf_length in enumerate_with_length(line): - if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: - remainder = True - if remainder: - length += leaf_length - if length > line_length: - break - - if leaf.type in OPENING_BRACKETS: - # There are brackets we can further split on. - remainder = False - - else: - # checked the entire string and line length wasn't exceeded - if len(line.leaves) == _index + 1: - return True + if _can_omit_opening_paren(line, first=first, line_length=line_length): + return True # Note: we are not returning False here because a line might have *both* # a leading opening bracket and a trailing closing bracket. If the # opening bracket doesn't match our rule, maybe the closing will. + penultimate = line.leaves[-2] + last = line.leaves[-1] + if line.should_explode: + try: + penultimate, last = last_two_except(line.leaves, omit=omit_on_explode) + except LookupError: + # Turns out we'd omit everything. We cannot skip the optional parentheses. 
+ return False + if ( last.type == token.RPAR or last.type == token.RBRACE @@ -6317,21 +6408,120 @@ def can_omit_invisible_parens(line: Line, line_length: int) -> bool: # unnecessary. return True - length = 4 * line.depth - seen_other_brackets = False - for _index, leaf, leaf_length in enumerate_with_length(line): + if line.should_explode and penultimate.type == token.COMMA: + # The rightmost non-omitted bracket pair is the one we want to explode on. + return True + + if _can_omit_closing_paren(line, last=last, line_length=line_length): + return True + + return False + + +def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + remainder = False + length = 4 * line.depth + _index = -1 + for _index, leaf, leaf_length in enumerate_with_length(line): + if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: + remainder = True + if remainder: length += leaf_length - if leaf is last.opening_bracket: - if seen_other_brackets or length <= line_length: - return True + if length > line_length: + break - elif leaf.type in OPENING_BRACKETS: + if leaf.type in OPENING_BRACKETS: # There are brackets we can further split on. - seen_other_brackets = True + remainder = False + + else: + # checked the entire string and line length wasn't exceeded + if len(line.leaves) == _index + 1: + return True + + return False + + +def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + length = 4 * line.depth + seen_other_brackets = False + for _index, leaf, leaf_length in enumerate_with_length(line): + length += leaf_length + if leaf is last.opening_bracket: + if seen_other_brackets or length <= line_length: + return True + + elif leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. 
+ seen_other_brackets = True return False +def last_two_except(leaves: List[Leaf], omit: Collection[LeafID]) -> Tuple[Leaf, Leaf]: + """Return (penultimate, last) leaves skipping brackets in `omit` and contents.""" + stop_after = None + last = None + for leaf in reversed(leaves): + if stop_after: + if leaf is stop_after: + stop_after = None + continue + + if last: + return leaf, last + + if id(leaf) in omit: + stop_after = leaf.opening_bracket + else: + last = leaf + else: + raise LookupError("Last two leaves were also skipped") + + +def run_transformer( + line: Line, + transform: Transformer, + mode: Mode, + features: Collection[Feature], + *, + line_str: str = "", +) -> List[Line]: + if not line_str: + line_str = line_to_string(line) + result: List[Line] = [] + for transformed_line in transform(line, features): + if str(transformed_line).strip("\n") == line_str: + raise CannotTransform("Line transformer returned an unchanged result") + + result.extend(transform_line(transformed_line, mode=mode, features=features)) + + if not ( + transform.__name__ == "rhs" + and line.bracket_tracker.invisible + and not any(bracket.value for bracket in line.bracket_tracker.invisible) + and not line.contains_multiline_strings() + and not result[0].contains_uncollapsable_type_comments() + and not result[0].contains_unsplittable_type_ignore() + and not is_line_short_enough(result[0], line_length=mode.line_length) + ): + return result + + line_copy = line.clone() + append_leaves(line_copy, line, line.leaves) + features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES} + second_opinion = run_transformer( + line_copy, transform, mode, features_fop, line_str=line_str + ) + if all( + is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion + ): + result = second_opinion + return result + + def get_cache_file(mode: Mode) -> Path: return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" @@ -6417,6 +6607,26 @@ def patched_main() -> None: main() +def is_docstring(leaf: Leaf) -> bool: + if not is_multiline_string(leaf): + # For the purposes of docstring re-indentation, we don't need to do anything + # with single-line docstrings. + return False + + if prev_siblings_are( + leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] + ): + return True + + # Multiline docstring on the same line as the `def`. + if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]): + # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python + # grammar. We're safe to return True without further checks. 
+ return True + + return False + + def fix_docstring(docstring: str, prefix: str) -> str: # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation if not docstring: @@ -6440,7 +6650,6 @@ def fix_docstring(docstring: str, prefix: str) -> str: trimmed.append(prefix + stripped_line) else: trimmed.append("") - # Return a single string: return "\n".join(trimmed) diff --git a/src/black_primer/primer.json b/src/black_primer/primer.json index f5cc3fdf931..546f47782cd 100644 --- a/src/black_primer/primer.json +++ b/src/black_primer/primer.json @@ -30,7 +30,7 @@ "py_versions": ["all"] }, "django": { - "disabled_reason": "black --check --diff returned 123", + "disabled_reason": "black --check --diff returned 123 on two files", "disabled": true, "cli_arguments": [], "expect_formatting_changes": true, @@ -53,10 +53,10 @@ "py_versions": ["all"] }, "pandas": { - "disabled_reason": "black --check --diff returned 123", + "disabled_reason": "black --check --diff returned 123 on one file", "disabled": true, "cli_arguments": [], - "expect_formatting_changes": false, + "expect_formatting_changes": true, "git_clone_url": "https://github.com/pandas-dev/pandas.git", "long_checkout": false, "py_versions": ["all"] @@ -83,10 +83,8 @@ "py_versions": ["all"] }, "pytest": { - "disabled_reason": "black --check --diff returned 123", - "disabled": true, "cli_arguments": [], - "expect_formatting_changes": false, + "expect_formatting_changes": true, "git_clone_url": "https://github.com/pytest-dev/pytest.git", "long_checkout": false, "py_versions": ["all"] diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py index 052c94883cf..81940f78f0f 100644 --- a/src/blib2to3/pgen2/driver.py +++ b/src/blib2to3/pgen2/driver.py @@ -128,7 +128,7 @@ def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL: return self.parse_stream_raw(stream, debug) def parse_file( - self, filename: Path, encoding: Optional[Text] = None, debug: bool = False, + self, filename: Path, encoding: Optional[Text] = None, debug: bool = False ) -> NL: """Parse a file and return the syntax tree.""" with io.open(filename, "r", encoding=encoding) as stream: diff --git a/tests/data/cantfit.py b/tests/data/cantfit.py index ef9b78e09a9..0849374f776 100644 --- a/tests/data/cantfit.py +++ b/tests/data/cantfit.py @@ -67,11 +67,15 @@ normal_name = ( but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying() ) -normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying( - arg1, arg2, arg3 +normal_name = ( + but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying( + arg1, arg2, arg3 + ) ) -normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying( - [1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3 +normal_name = ( + but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying( + [1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3 + ) ) # long arguments normal_name = normal_function_name( diff --git a/tests/data/collections.py b/tests/data/collections.py index ebe8d3c5200..68431665211 100644 --- a/tests/data/collections.py +++ b/tests/data/collections.py @@ -2,18 +2,18 @@ from . import A, B, C -# unwraps +# keeps existing trailing comma from foo import ( bar, ) -# stays wrapped +# also keeps existing structure from foo import ( baz, qux, ) -# as doesn't get confusing when unwrapped +# `as` works as well from foo import ( xyzzy as magic, ) @@ -77,17 +77,21 @@ from . 
import A, B, C -# unwraps -from foo import bar +# keeps existing trailing comma +from foo import ( + bar, +) -# stays wrapped +# also keeps existing structure from foo import ( baz, qux, ) -# as doesn't get confusing when unwrapped -from foo import xyzzy as magic +# `as` works as well +from foo import ( + xyzzy as magic, +) a = { 1, @@ -151,11 +155,20 @@ if True: ec2client.get_waiter("instance_stopped").wait( - InstanceIds=[instance.id], WaiterConfig={"Delay": 5,} + InstanceIds=[instance.id], + WaiterConfig={ + "Delay": 5, + }, ) ec2client.get_waiter("instance_stopped").wait( - InstanceIds=[instance.id], WaiterConfig={"Delay": 5,}, + InstanceIds=[instance.id], + WaiterConfig={ + "Delay": 5, + }, ) ec2client.get_waiter("instance_stopped").wait( - InstanceIds=[instance.id], WaiterConfig={"Delay": 5,}, + InstanceIds=[instance.id], + WaiterConfig={ + "Delay": 5, + }, ) diff --git a/tests/data/comments2.py b/tests/data/comments2.py index 89c29104bd8..221cb3fe143 100644 --- a/tests/data/comments2.py +++ b/tests/data/comments2.py @@ -316,7 +316,13 @@ def inline_comments_in_brackets_ruin_everything(): ) -CONFIG_FILES = [CONFIG_FILE,] + SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final +CONFIG_FILES = ( + [ + CONFIG_FILE, + ] + + SHARED_CONFIG_FILES + + USER_CONFIG_FILES +) # type: Final class Test: diff --git a/tests/data/comments7.py b/tests/data/comments7.py index 436df1a2a41..0e2bd35bf55 100644 --- a/tests/data/comments7.py +++ b/tests/data/comments7.py @@ -22,6 +22,12 @@ # resolve_to_config_type, # DEFAULT_TYPE_ATTRIBUTES, ) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent # NOT DRY +) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent as component # DRY +) result = 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @@ -46,6 +52,26 @@ def func(): 0.0789, a[-1], # type: ignore ) + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0789, + a[-1] # type: ignore + ) + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + a[-1] # type: ignore + ) # The type: ignore exception only applies to line length, not # other types of formatting. @@ -55,6 +81,54 @@ def func(): ) +class C: + @pytest.mark.parametrize( + ("post_data", "message"), + [ + # metadata_version errors. + ( + {}, + "None is an invalid value for Metadata-Version. Error: This field is" + " required. see" + " https://packaging.python.org/specifications/core-metadata" + ), + ( + {"metadata_version": "-1"}, + "'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata" + " Version see" + " https://packaging.python.org/specifications/core-metadata" + ), + # name errors. + ( + {"metadata_version": "1.2"}, + "'' is an invalid value for Name. Error: This field is required. see" + " https://packaging.python.org/specifications/core-metadata" + ), + ( + {"metadata_version": "1.2", "name": "foo-"}, + "'foo-' is an invalid value for Name. Error: Must start and end with a" + " letter or numeral and contain only ascii numeric and '.', '_' and" + " '-'. see https://packaging.python.org/specifications/core-metadata" + ), + # version errors. + ( + {"metadata_version": "1.2", "name": "example"}, + "'' is an invalid value for Version. Error: This field is required. 
see" + " https://packaging.python.org/specifications/core-metadata" + ), + ( + {"metadata_version": "1.2", "name": "example", "version": "dog"}, + "'dog' is an invalid value for Version. Error: Must start and end with" + " a letter or numeral and contain only ascii numeric and '.', '_' and" + " '-'. see https://packaging.python.org/specifications/core-metadata" + ) + ] + ) + def test_fails_invalid_post_data( + self, pyramid_config, db_request, post_data, message + ): + ... + # output from .config import ( @@ -81,6 +155,12 @@ def func(): # resolve_to_config_type, # DEFAULT_TYPE_ATTRIBUTES, ) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent, # NOT DRY +) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent as component, # DRY +) result = 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @@ -97,7 +177,27 @@ def func(): def func(): - c = call(0.0123, 0.0456, 0.0789, 0.0123, 0.0789, a[-1],) # type: ignore + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0789, + a[-1], # type: ignore + ) + c = call(0.0123, 0.0456, 0.0789, 0.0123, 0.0789, a[-1]) # type: ignore + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + a[-1], # type: ignore + ) # The type: ignore exception only applies to line length, not # other types of formatting. @@ -115,3 +215,52 @@ def func(): "aaaaaaaa", "aaaaaaaa", ) + + +class C: + @pytest.mark.parametrize( + ("post_data", "message"), + [ + # metadata_version errors. + ( + {}, + "None is an invalid value for Metadata-Version. Error: This field is" + " required. see" + " https://packaging.python.org/specifications/core-metadata", + ), + ( + {"metadata_version": "-1"}, + "'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata" + " Version see" + " https://packaging.python.org/specifications/core-metadata", + ), + # name errors. + ( + {"metadata_version": "1.2"}, + "'' is an invalid value for Name. Error: This field is required. see" + " https://packaging.python.org/specifications/core-metadata", + ), + ( + {"metadata_version": "1.2", "name": "foo-"}, + "'foo-' is an invalid value for Name. Error: Must start and end with a" + " letter or numeral and contain only ascii numeric and '.', '_' and" + " '-'. see https://packaging.python.org/specifications/core-metadata", + ), + # version errors. + ( + {"metadata_version": "1.2", "name": "example"}, + "'' is an invalid value for Version. Error: This field is required. see" + " https://packaging.python.org/specifications/core-metadata", + ), + ( + {"metadata_version": "1.2", "name": "example", "version": "dog"}, + "'dog' is an invalid value for Version. Error: Must start and end with" + " a letter or numeral and contain only ascii numeric and '.', '_' and" + " '-'. see https://packaging.python.org/specifications/core-metadata", + ), + ], + ) + def test_fails_invalid_post_data( + self, pyramid_config, db_request, post_data, message + ): + ... \ No newline at end of file diff --git a/tests/data/composition_no_trailing_comma.py b/tests/data/composition_no_trailing_comma.py new file mode 100644 index 00000000000..f17b89dea8d --- /dev/null +++ b/tests/data/composition_no_trailing_comma.py @@ -0,0 +1,367 @@ +class C: + def test(self) -> None: + with patch("black.out", print): + self.assertEqual( + unstyle(str(report)), "1 file reformatted, 1 file failed to reformat." 
+ ) + self.assertEqual( + unstyle(str(report)), + "1 file reformatted, 1 file left unchanged, 1 file failed to reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 1 file left unchanged, 1 file failed to" + " reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", + ) + for i in (a,): + if ( + # Rule 1 + i % 2 == 0 + # Rule 2 + and i % 3 == 0 + ): + while ( + # Just a comment + call() + # Another + ): + print(i) + xxxxxxxxxxxxxxxx = Yyyy2YyyyyYyyyyy( + push_manager=context.request.resource_manager, + max_items_to_push=num_items, + batch_size=Yyyy2YyyyYyyyyYyyy.FULL_SIZE + ).push( + # Only send the first n items. + items=items[:num_items] + ) + return ( + 'Utterly failed doctest test for %s\n File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def omitting_trailers(self) -> None: + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex] + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex][TwoLevelIndex][ThreeLevelIndex][FourLevelIndex] + d[0][1][2][3][4][5][6][7][8][9][10][11][12][13][14][15][16][17][18][19][20][21][ + 22 + ] + assignment = ( + some.rather.elaborate.rule() and another.rule.ending_with.index[123] + ) + + def easy_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } == expected, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } + + def tricky_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } == expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ), "Not what we expected" + + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } == expected, ( + "Not what we expected and the message is too long to fit in one line" + ) + + assert expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ) == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + }, ( + "Not what we expected and the message is too long to fit in one line" + " because it's too long" + ) + + dis_c_instance_method = """\ + %3d 0 LOAD_FAST 1 (x) + 2 LOAD_CONST 1 (1) + 4 COMPARE_OP 2 (==) + 6 LOAD_FAST 0 (self) + 8 STORE_ATTR 0 (x) + 10 LOAD_CONST 0 (None) + 12 RETURN_VALUE + """ % ( + _C.__init__.__code__.co_firstlineno + 1, + ) + + assert ( + expectedexpectedexpectedexpectedexpectedexpectedexpectedexpectedexpect + == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + 
key8: value8, + key9: value9 + } + ) + + + +# output + +class C: + def test(self) -> None: + with patch("black.out", print): + self.assertEqual( + unstyle(str(report)), "1 file reformatted, 1 file failed to reformat." + ) + self.assertEqual( + unstyle(str(report)), + "1 file reformatted, 1 file left unchanged, 1 file failed to reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 1 file left unchanged, 1 file failed to" + " reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", + ) + for i in (a,): + if ( + # Rule 1 + i % 2 == 0 + # Rule 2 + and i % 3 == 0 + ): + while ( + # Just a comment + call() + # Another + ): + print(i) + xxxxxxxxxxxxxxxx = Yyyy2YyyyyYyyyyy( + push_manager=context.request.resource_manager, + max_items_to_push=num_items, + batch_size=Yyyy2YyyyYyyyyYyyy.FULL_SIZE, + ).push( + # Only send the first n items. + items=items[:num_items] + ) + return ( + 'Utterly failed doctest test for %s\n File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def omitting_trailers(self) -> None: + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex] + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex][TwoLevelIndex][ThreeLevelIndex][FourLevelIndex] + d[0][1][2][3][4][5][6][7][8][9][10][11][12][13][14][15][16][17][18][19][20][21][ + 22 + ] + assignment = ( + some.rather.elaborate.rule() and another.rule.ending_with.index[123] + ) + + def easy_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } == expected, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } + + def tricky_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } == expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ), "Not what we expected" + + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } == expected, ( + "Not what we expected and the message is too long to fit in one line" + ) + + assert expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ) == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + }, ( + "Not what we expected and the message is too long to fit in one line" + " because it's too long" + ) + + dis_c_instance_method = """\ + %3d 0 LOAD_FAST 1 (x) + 2 LOAD_CONST 1 (1) + 4 COMPARE_OP 2 (==) + 6 LOAD_FAST 0 (self) + 8 STORE_ATTR 0 (x) + 10 LOAD_CONST 0 (None) + 12 RETURN_VALUE + """ % ( + 
_C.__init__.__code__.co_firstlineno + 1, + ) + + assert ( + expectedexpectedexpectedexpectedexpectedexpectedexpectedexpectedexpect + == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } + ) diff --git a/tests/data/docstring.py b/tests/data/docstring.py index f5adeb7bb7b..2d3d73a101c 100644 --- a/tests/data/docstring.py +++ b/tests/data/docstring.py @@ -46,7 +46,7 @@ def zort(): def poit(): """ - Lorem ipsum dolor sit amet. + Lorem ipsum dolor sit amet. Consectetur adipiscing elit: - sed do eiusmod tempor incididunt ut labore @@ -58,6 +58,14 @@ def poit(): pass +def under_indent(): + """ + These lines are indented in a way that does not +make sense. + """ + pass + + def over_indent(): """ This has a shallow indent @@ -73,6 +81,35 @@ def single_line(): """ pass + +def this(): + r""" + 'hey ho' + """ + + +def that(): + """ "hey yah" """ + + +def and_that(): + """ + "hey yah" """ + + +def and_this(): + ''' + "hey yah"''' + + +def believe_it_or_not_this_is_in_the_py_stdlib(): ''' +"hey yah"''' + + +def ignored_docstring(): + """a => \ +b""" + # output class MyClass: @@ -136,6 +173,14 @@ def poit(): pass +def under_indent(): + """ + These lines are indented in a way that does not + make sense. + """ + pass + + def over_indent(): """ This has a shallow indent @@ -148,3 +193,33 @@ def over_indent(): def single_line(): """But with a newline after it!""" pass + + +def this(): + r""" + 'hey ho' + """ + + +def that(): + """ "hey yah" """ + + +def and_that(): + """ + "hey yah" """ + + +def and_this(): + ''' + "hey yah"''' + + +def believe_it_or_not_this_is_in_the_py_stdlib(): + ''' + "hey yah"''' + + +def ignored_docstring(): + """a => \ +b""" \ No newline at end of file diff --git a/tests/data/expression.diff b/tests/data/expression.diff index f47ee1c6d2c..684f92cd3b7 100644 --- a/tests/data/expression.diff +++ b/tests/data/expression.diff @@ -130,15 +130,21 @@ call(**self.screen_kwargs) call(b, **self.screen_kwargs) lukasz.langa.pl -@@ -94,23 +127,25 @@ +@@ -94,26 +127,29 @@ 1.0 .real ....__class__ list[str] dict[str, int] tuple[str, ...] 
++tuple[str, int, float, dict[str, int]] + tuple[ +- str, int, float, dict[str, int] +-] -tuple[str, int, float, dict[str, int],] -+tuple[ -+ str, int, float, dict[str, int], ++ str, ++ int, ++ float, ++ dict[str, int], +] very_long_variable_name_filters: t.List[ t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]], @@ -160,7 +166,7 @@ slice[0:1:2] slice[:] slice[:-1] -@@ -134,112 +169,170 @@ +@@ -137,113 +173,180 @@ numpy[-(c + 1) :, d] numpy[:, l[-2]] numpy[:, ::-1] @@ -200,6 +206,7 @@ g = 1, *"ten" -what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set(vars_to_remove) -what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove) +-result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc()).all() -result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc(),).all() +what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set( + vars_to_remove @@ -212,7 +219,17 @@ + .filter( + models.Customer.account_id == account_id, models.Customer.email == email_address + ) -+ .order_by(models.Customer.id.asc(),) ++ .order_by(models.Customer.id.asc()) ++ .all() ++) ++result = ( ++ session.query(models.Customer.id) ++ .filter( ++ models.Customer.account_id == account_id, models.Customer.email == email_address ++ ) ++ .order_by( ++ models.Customer.id.asc(), ++ ) + .all() +) Ø = set() diff --git a/tests/data/expression.py b/tests/data/expression.py index 6a04db8b1c4..8e63bdcdf9b 100644 --- a/tests/data/expression.py +++ b/tests/data/expression.py @@ -96,6 +96,9 @@ list[str] dict[str, int] tuple[str, ...] +tuple[ + str, int, float, dict[str, int] +] tuple[str, int, float, dict[str, int],] very_long_variable_name_filters: t.List[ t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]], @@ -157,6 +160,7 @@ g = 1, *"ten" what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set(vars_to_remove) what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove) +result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc()).all() result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc(),).all() Ø = set() authors.łukasz.say_thanks() @@ -379,8 +383,12 @@ async def f(): list[str] dict[str, int] tuple[str, ...] 
+tuple[str, int, float, dict[str, int]] tuple[ - str, int, float, dict[str, int], + str, + int, + float, + dict[str, int], ] very_long_variable_name_filters: t.List[ t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]], @@ -459,7 +467,17 @@ async def f(): .filter( models.Customer.account_id == account_id, models.Customer.email == email_address ) - .order_by(models.Customer.id.asc(),) + .order_by(models.Customer.id.asc()) + .all() +) +result = ( + session.query(models.Customer.id) + .filter( + models.Customer.account_id == account_id, models.Customer.email == email_address + ) + .order_by( + models.Customer.id.asc(), + ) .all() ) Ø = set() diff --git a/tests/data/fmtonoff4.py b/tests/data/fmtonoff4.py index 54673c06b2d..4ca707965ad 100644 --- a/tests/data/fmtonoff4.py +++ b/tests/data/fmtonoff4.py @@ -25,7 +25,12 @@ def f(): @test( - [1, 2, 3, 4,] + [ + 1, + 2, + 3, + 4, + ] ) def f(): pass diff --git a/tests/data/function.py b/tests/data/function.py index 51234a1e9b4..2d642c8731b 100644 --- a/tests/data/function.py +++ b/tests/data/function.py @@ -230,7 +230,10 @@ def trailing_comma(): } -def f(a, **kwargs,) -> A: +def f( + a, + **kwargs, +) -> A: return ( yield from A( very_long_argument_name1=very_long_value_for_the_argument, diff --git a/tests/data/function2.py b/tests/data/function2.py index a6773d429cd..cfc259ea7bd 100644 --- a/tests/data/function2.py +++ b/tests/data/function2.py @@ -25,7 +25,10 @@ def inner(): # output -def f(a, **kwargs,) -> A: +def f( + a, + **kwargs, +) -> A: with cache_dir(): if something: result = CliRunner().invoke( diff --git a/tests/data/function_trailing_comma.py b/tests/data/function_trailing_comma.py index fcd81ad7d96..d15459cbeb5 100644 --- a/tests/data/function_trailing_comma.py +++ b/tests/data/function_trailing_comma.py @@ -1,25 +1,88 @@ def f(a,): - ... + d = {'key': 'value',} + tup = (1,) + +def f2(a,b,): + d = {'key': 'value', 'key2': 'value2',} + tup = (1,2,) def f(a:int=1,): - ... + call(arg={'explode': 'this',}) + call2(arg=[1,2,3],) + x = { + "a": 1, + "b": 2, + }["a"] + if a == {"a": 1,"b": 2,"c": 3,"d": 4,"e": 5,"f": 6,"g": 7,"h": 8,}["a"]: + pass def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ]: - pass + json = {"k": {"k2": {"k3": [1,]}}} # output -def f(a,): - ... +def f( + a, +): + d = { + "key": "value", + } + tup = (1,) + + +def f2( + a, + b, +): + d = { + "key": "value", + "key2": "value2", + } + tup = ( + 1, + 2, + ) -def f(a: int = 1,): - ... +def f( + a: int = 1, +): + call( + arg={ + "explode": "this", + } + ) + call2( + arg=[1, 2, 3], + ) + x = { + "a": 1, + "b": 2, + }["a"] + if a == { + "a": 1, + "b": 2, + "c": 3, + "d": 4, + "e": 5, + "f": 6, + "g": 7, + "h": 8, + }["a"]: + pass def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ]: - pass + json = { + "k": { + "k2": { + "k3": [ + 1, + ] + } + } + } \ No newline at end of file diff --git a/tests/data/import_spacing.py b/tests/data/import_spacing.py index 51cfda23ff3..8e6e23cc348 100644 --- a/tests/data/import_spacing.py +++ b/tests/data/import_spacing.py @@ -2,6 +2,9 @@ # flake8: noqa +from logging import ( + WARNING +) from logging import ( ERROR, ) @@ -53,7 +56,10 @@ # flake8: noqa -from logging import ERROR +from logging import WARNING +from logging import ( + ERROR, +) import sys # This relies on each of the submodules having an __all__ variable. 
diff --git a/tests/data/long_strings.py b/tests/data/long_strings.py index 5da460b65c0..e1ed90f22de 100644 --- a/tests/data/long_strings.py +++ b/tests/data/long_strings.py @@ -137,6 +137,20 @@ ), # comment after comma ) +func_with_bad_parens_that_wont_fit_in_one_line( + ("short string that should have parens stripped"), + x, + y, + z +) + +func_with_bad_parens_that_wont_fit_in_one_line( + x, + y, + ("short string that should have parens stripped"), + z +) + func_with_bad_parens( ("short string that should have parens stripped"), x, @@ -487,12 +501,26 @@ def foo(): " which should NOT be there.", # comment after comma ) +func_with_bad_parens_that_wont_fit_in_one_line( + "short string that should have parens stripped", x, y, z +) + +func_with_bad_parens_that_wont_fit_in_one_line( + x, y, "short string that should have parens stripped", z +) + func_with_bad_parens( - "short string that should have parens stripped", x, y, z, + "short string that should have parens stripped", + x, + y, + z, ) func_with_bad_parens( - x, y, "short string that should have parens stripped", z, + x, + y, + "short string that should have parens stripped", + z, ) annotated_variable: Final = ( diff --git a/tests/data/long_strings__regression.py b/tests/data/long_strings__regression.py index 8dbc58a4315..044bb4a5deb 100644 --- a/tests/data/long_strings__regression.py +++ b/tests/data/long_strings__regression.py @@ -528,17 +528,23 @@ def xxxx_xxx_xx_xxxxxxxxxx_xxxx_xxxxxxxxx(xxxx): xxxxxxxx = [ xxxxxxxxxxxxxxxx( "xxxx", - xxxxxxxxxxx={"xxxx": 1.0,}, + xxxxxxxxxxx={ + "xxxx": 1.0, + }, xxxxxx={"xxxxxx 1": xxxxxx(xxxx="xxxxxx 1", xxxxxx=600.0)}, xxxxxxxx_xxxxxxx=0.0, ), xxxxxxxxxxxxxxxx( "xxxxxxx", - xxxxxxxxxxx={"xxxx": 1.0,}, + xxxxxxxxxxx={ + "xxxx": 1.0, + }, xxxxxx={"xxxxxx 1": xxxxxx(xxxx="xxxxxx 1", xxxxxx=200.0)}, xxxxxxxx_xxxxxxx=0.0, ), - xxxxxxxxxxxxxxxx("xxxx",), + xxxxxxxxxxxxxxxx( + "xxxx", + ), ] diff --git a/tests/data/long_strings_flag_disabled.py b/tests/data/long_strings_flag_disabled.py new file mode 100644 index 00000000000..ef3094fd779 --- /dev/null +++ b/tests/data/long_strings_flag_disabled.py @@ -0,0 +1,289 @@ +x = "This is a really long string that can't possibly be expected to fit all together on one line. In fact it may even take up three or more lines... like four or five... but probably just three." + +x += "This is a really long string that can't possibly be expected to fit all together on one line. In fact it may even take up three or more lines... like four or five... but probably just three." + +y = "Short string" + +print( + "This is a really long string inside of a print statement with extra arguments attached at the end of it.", + x, + y, + z, +) + +print( + "This is a really long string inside of a print statement with no extra arguments attached at the end of it." +) + +D1 = { + "The First": "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a dictionary, so formatting is more difficult.", + "The Second": "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a dictionary.", +} + +D2 = { + 1.0: "This is a really long string that can't possibly be expected to fit all together on one line. 
Also it is inside a dictionary, so formatting is more difficult.", + 2.0: "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a dictionary.", +} + +D3 = { + x: "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a dictionary, so formatting is more difficult.", + y: "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a dictionary.", +} + +D4 = { + "A long and ridiculous {}".format( + string_key + ): "This is a really really really long string that has to go i,side of a dictionary. It is soooo bad.", + some_func( + "calling", "some", "stuff" + ): "This is a really really really long string that has to go inside of a dictionary. It is {soooo} bad (#{x}).".format( + sooo="soooo", x=2 + ), + "A %s %s" + % ( + "formatted", + "string", + ): "This is a really really really long string that has to go inside of a dictionary. It is %s bad (#%d)." + % ("soooo", 2), +} + +func_with_keywords( + my_arg, + my_kwarg="Long keyword strings also need to be wrapped, but they will probably need to be handled a little bit differently.", +) + +bad_split1 = ( + "But what should happen when code has already been formatted but in the wrong way? Like" + " with a space at the end instead of the beginning. Or what about when it is split too soon?" +) + +bad_split2 = ( + "But what should happen when code has already " + "been formatted but in the wrong way? Like " + "with a space at the end instead of the " + "beginning. Or what about when it is split too " + "soon? In the case of a split that is too " + "short, black will try to honer the custom " + "split." +) + +bad_split3 = ( + "What if we have inline comments on " # First Comment + "each line of a bad split? In that " # Second Comment + "case, we should just leave it alone." # Third Comment +) + +bad_split_func1( + "But what should happen when code has already " + "been formatted but in the wrong way? Like " + "with a space at the end instead of the " + "beginning. Or what about when it is split too " + "soon? In the case of a split that is too " + "short, black will try to honer the custom " + "split.", + xxx, + yyy, + zzz, +) + +bad_split_func2( + xxx, + yyy, + zzz, + long_string_kwarg="But what should happen when code has already been formatted but in the wrong way? Like " + "with a space at the end instead of the beginning. Or what about when it is split too " + "soon?", +) + +bad_split_func3( + ( + "But what should happen when code has already " + r"been formatted but in the wrong way? Like " + "with a space at the end instead of the " + r"beginning. Or what about when it is split too " + r"soon? In the case of a split that is too " + "short, black will try to honer the custom " + "split." + ), + xxx, + yyy, + zzz, +) + +raw_string = r"This is a long raw string. When re-formatting this string, black needs to make sure it prepends the 'r' onto the new string." 
+ +fmt_string1 = "We also need to be sure to preserve any and all {} which may or may not be attached to the string in question.".format( + "method calls" +) + +fmt_string2 = "But what about when the string is {} but {}".format( + "short", + "the method call is really really really really really really really really long?", +) + +old_fmt_string1 = ( + "While we are on the topic of %s, we should also note that old-style formatting must also be preserved, since some %s still uses it." + % ("formatting", "code") +) + +old_fmt_string2 = "This is a %s %s %s %s" % ( + "really really really really really", + "old", + "way to format strings!", + "Use f-strings instead!", +) + +old_fmt_string3 = "Whereas only the strings after the percent sign were long in the last example, this example uses a long initial string as well. This is another %s %s %s %s" % ( + "really really really really really", + "old", + "way to format strings!", + "Use f-strings instead!", +) + +fstring = f"f-strings definitely make things more {difficult} than they need to be for {{black}}. But boy they sure are handy. The problem is that some lines will need to have the 'f' whereas others do not. This {line}, for example, needs one." + +fstring_with_no_fexprs = f"Some regular string that needs to get split certainly but is NOT an fstring by any means whatsoever." + +comment_string = "Long lines with inline comments should have their comments appended to the reformatted string's enclosing right parentheses." # This comment gets thrown to the top. + +arg_comment_string = print( + "Long lines with inline comments which are apart of (and not the only member of) an argument list should have their comments appended to the reformatted string's enclosing left parentheses.", # This comment stays on the bottom. + "Arg #2", + "Arg #3", + "Arg #4", + "Arg #5", +) + +pragma_comment_string1 = "Lines which end with an inline pragma comment of the form `# : <...>` should be left alone." # noqa: E501 + +pragma_comment_string2 = "Lines which end with an inline pragma comment of the form `# : <...>` should be left alone." # noqa + +"""This is a really really really long triple quote string and it should not be touched.""" + +triple_quote_string = """This is a really really really long triple quote string assignment and it should not be touched.""" + +assert ( + some_type_of_boolean_expression +), "Followed by a really really really long string that is used to provide context to the AssertionError exception." + +assert ( + some_type_of_boolean_expression +), "Followed by a really really really long string that is used to provide context to the AssertionError exception, which uses dynamic string {}.".format( + "formatting" +) + +assert some_type_of_boolean_expression, ( + "Followed by a really really really long string that is used to provide context to the AssertionError exception, which uses dynamic string %s." + % "formatting" +) + +assert some_type_of_boolean_expression, ( + "Followed by a really really really long string that is used to provide context to the AssertionError exception, which uses dynamic %s %s." + % ("string", "formatting") +) + +some_function_call( + "With a reallly generic name and with a really really long string that is, at some point down the line, " + + added + + " to a variable and then added to another string." +) + +some_function_call( + "With a reallly generic name and with a really really long string that is, at some point down the line, " + + added + + " to a variable and then added to another string. 
But then what happens when the final string is also supppppperrrrr long?! Well then that second (realllllllly long) string should be split too.", + "and a second argument", + and_a_third, +) + +return "A really really really really really really really really really really really really really long {} {}".format( + "return", "value" +) + +func_with_bad_comma( + "This is a really long string argument to a function that has a trailing comma which should NOT be there.", +) + +func_with_bad_comma( + "This is a really long string argument to a function that has a trailing comma which should NOT be there.", # comment after comma +) + +func_with_bad_comma( + ( + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there." + ), +) + +func_with_bad_comma( + ( + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there." + ), # comment after comma +) + +func_with_bad_parens_that_wont_fit_in_one_line( + ("short string that should have parens stripped"), x, y, z +) + +func_with_bad_parens_that_wont_fit_in_one_line( + x, y, ("short string that should have parens stripped"), z +) + +func_with_bad_parens( + ("short string that should have parens stripped"), + x, + y, + z, +) + +func_with_bad_parens( + x, + y, + ("short string that should have parens stripped"), + z, +) + +annotated_variable: Final = ( + "This is a large " + + STRING + + " that has been " + + CONCATENATED + + "using the '+' operator." +) +annotated_variable: Final = "This is a large string that has a type annotation attached to it. A type annotation should NOT stop a long string from being wrapped." +annotated_variable: Literal[ + "fakse_literal" +] = "This is a large string that has a type annotation attached to it. A type annotation should NOT stop a long string from being wrapped." + +backslashes = "This is a really long string with \"embedded\" double quotes and 'single' quotes that also handles checking for an even number of backslashes \\" +backslashes = "This is a really long string with \"embedded\" double quotes and 'single' quotes that also handles checking for an even number of backslashes \\\\" +backslashes = "This is a really 'long' string with \"embedded double quotes\" and 'single' quotes that also handles checking for an odd number of backslashes \\\", like this...\\\\\\" + +short_string = "Hi" " there." + +func_call(short_string=("Hi" " there.")) + +raw_strings = r"Don't" " get" r" merged" " unless they are all raw." + + +def foo(): + yield "This is a really long string that can't possibly be expected to fit all together on one line. In fact it may even take up three or more lines... like four or five... but probably just three." + + +x = f"This is a {{really}} long string that needs to be split without a doubt (i.e. most definitely). In short, this {string} that can't possibly be {{expected}} to fit all together on one line. In {fact} it may even take up three or more lines... like four or five... but probably just four." + +long_unmergable_string_with_pragma = ( + "This is a really long string that can't be merged because it has a likely pragma at the end" # type: ignore + " of it." +) + +long_unmergable_string_with_pragma = ( + "This is a really long string that can't be merged because it has a likely pragma at the end" # noqa + " of it." 
+) + +long_unmergable_string_with_pragma = ( + "This is a really long string that can't be merged because it has a likely pragma at the end" # pylint: disable=some-pylint-check + " of it." +) diff --git a/tests/data/percent_precedence.py b/tests/data/percent_precedence.py new file mode 100644 index 00000000000..b895443fb46 --- /dev/null +++ b/tests/data/percent_precedence.py @@ -0,0 +1,41 @@ +("" % a) ** 2 +("" % a)[0] +("" % a)() +("" % a).b + +2 * ("" % a) +2 @ ("" % a) +2 / ("" % a) +2 // ("" % a) +2 % ("" % a) ++("" % a) +b + ("" % a) +-("" % a) +b - ("" % a) +b + -("" % a) +~("" % a) +2 ** ("" % a) +await ("" % a) +b[("" % a)] +b(("" % a)) +# output +("" % a) ** 2 +("" % a)[0] +("" % a)() +("" % a).b + +2 * ("" % a) +2 @ ("" % a) +2 / ("" % a) +2 // ("" % a) +2 % ("" % a) ++("" % a) +b + "" % a +-("" % a) +b - "" % a +b + -("" % a) +~("" % a) +2 ** ("" % a) +await ("" % a) +b[("" % a)] +b(("" % a)) diff --git a/tests/data/trailing_comma_optional_parens1.py b/tests/data/trailing_comma_optional_parens1.py new file mode 100644 index 00000000000..5ad29a8affd --- /dev/null +++ b/tests/data/trailing_comma_optional_parens1.py @@ -0,0 +1,3 @@ +if e1234123412341234.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + pass \ No newline at end of file diff --git a/tests/data/trailing_comma_optional_parens2.py b/tests/data/trailing_comma_optional_parens2.py new file mode 100644 index 00000000000..2817073816e --- /dev/null +++ b/tests/data/trailing_comma_optional_parens2.py @@ -0,0 +1,3 @@ +if (e123456.get_tk_patchlevel() >= (8, 6, 0, 'final') or + (8, 5, 8) <= get_tk_patchlevel() < (8, 6)): + pass \ No newline at end of file diff --git a/tests/data/trailing_comma_optional_parens3.py b/tests/data/trailing_comma_optional_parens3.py new file mode 100644 index 00000000000..e6a673ec537 --- /dev/null +++ b/tests/data/trailing_comma_optional_parens3.py @@ -0,0 +1,8 @@ +if True: + if True: + if True: + return _( + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweas " + + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwegqweasdzxcqweasdzxc.", + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwe", + ) % {"reported_username": reported_username, "report_reason": report_reason} \ No newline at end of file diff --git a/tests/test_black.py b/tests/test_black.py index 88839d86c5a..f5d4e1115a8 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -3,14 +3,27 @@ import logging from concurrent.futures import ThreadPoolExecutor from contextlib import contextmanager +from dataclasses import replace from functools import partial +import inspect from io import BytesIO, TextIOWrapper import os from pathlib import Path import regex as re import sys from tempfile import TemporaryDirectory -from typing import Any, BinaryIO, Dict, Generator, List, Tuple, Iterator, TypeVar +import types +from typing import ( + Any, + BinaryIO, + Callable, + Dict, + Generator, + List, + Tuple, + Iterator, + TypeVar, +) import unittest from unittest.mock import patch, MagicMock @@ -36,8 +49,9 @@ from .test_primer import PrimerCLITests # noqa: F401 -ff = partial(black.format_file_in_place, mode=black.FileMode(), fast=True) -fs = partial(black.format_str, mode=black.FileMode()) +DEFAULT_MODE = black.FileMode(experimental_string_processing=True) +ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True) +fs = partial(black.format_str, mode=DEFAULT_MODE) THIS_FILE = Path(__file__) THIS_DIR 
= THIS_FILE.parent PROJECT_ROOT = THIS_DIR.parent @@ -110,6 +124,20 @@ def skip_if_exception(e: str) -> Iterator[None]: raise +class FakeContext(click.Context): + """A fake click Context for when calling functions that need it.""" + + def __init__(self) -> None: + self.default_map: Dict[str, Any] = {} + + +class FakeParameter(click.Parameter): + """A fake click Parameter for when calling functions that need it.""" + + def __init__(self) -> None: + pass + + class BlackRunner(CliRunner): """Modify CliRunner so that stderr is not merged with stdout. @@ -137,6 +165,7 @@ def isolation(self, *args: Any, **kwargs: Any) -> Generator[BinaryIO, None, None class BlackTestCase(unittest.TestCase): maxDiff = None + _diffThreshold = 2 ** 20 def assertFormatEqual(self, expected: str, actual: str) -> None: if actual != expected and not os.environ.get("SKIP_AST_PRINT"): @@ -155,7 +184,7 @@ def assertFormatEqual(self, expected: str, actual: str) -> None: list(bdv.visit(exp_node)) except Exception as ve: black.err(str(ve)) - self.assertEqual(expected, actual) + self.assertMultiLineEqual(expected, actual) def invokeBlack( self, args: List[str], exit_code: int = 0, ignore_config: bool = True @@ -176,13 +205,13 @@ def invokeBlack( ) @patch("black.dump_to_file", dump_to_stderr) - def checkSourceFile(self, name: str) -> None: + def checkSourceFile(self, name: str, mode: black.FileMode = DEFAULT_MODE) -> None: path = THIS_DIR.parent / name source, expected = read_data(str(path), data=False) - actual = fs(source) + actual = fs(source, mode=mode) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, mode) self.assertFalse(ff(path)) @patch("black.dump_to_file", dump_to_stderr) @@ -191,7 +220,7 @@ def test_empty(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) def test_empty_ff(self) -> None: expected = "" @@ -253,7 +282,7 @@ def test_piping(self) -> None: self.assertEqual(result.exit_code, 0) self.assertFormatEqual(expected, result.output) black.assert_equivalent(source, result.output) - black.assert_stable(source, result.output, black.FileMode()) + black.assert_stable(source, result.output, DEFAULT_MODE) def test_piping_diff(self) -> None: diff_header = re.compile( @@ -306,7 +335,7 @@ def test_function(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_function2(self) -> None: @@ -314,6 +343,21 @@ def test_function2(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) + black.assert_stable(source, actual, DEFAULT_MODE) + + @patch("black.dump_to_file", dump_to_stderr) + def _test_wip(self) -> None: + source, expected = read_data("wip") + sys.settrace(tracefunc) + mode = replace( + DEFAULT_MODE, + experimental_string_processing=False, + target_versions={black.TargetVersion.PY38}, + ) + actual = fs(source, mode=mode) + sys.settrace(None) + self.assertFormatEqual(expected, actual) + black.assert_equivalent(source, actual) black.assert_stable(source, actual, black.FileMode()) @patch("black.dump_to_file", dump_to_stderr) @@ -322,7 +366,28 @@ def 
test_function_trailing_comma(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) + + @unittest.expectedFailure + @patch("black.dump_to_file", dump_to_stderr) + def test_trailing_comma_optional_parens_stability1(self) -> None: + source, _expected = read_data("trailing_comma_optional_parens1") + actual = fs(source) + black.assert_stable(source, actual, DEFAULT_MODE) + + @unittest.expectedFailure + @patch("black.dump_to_file", dump_to_stderr) + def test_trailing_comma_optional_parens_stability2(self) -> None: + source, _expected = read_data("trailing_comma_optional_parens2") + actual = fs(source) + black.assert_stable(source, actual, DEFAULT_MODE) + + @unittest.expectedFailure + @patch("black.dump_to_file", dump_to_stderr) + def test_trailing_comma_optional_parens_stability3(self) -> None: + source, _expected = read_data("trailing_comma_optional_parens3") + actual = fs(source) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_expression(self) -> None: @@ -330,14 +395,14 @@ def test_expression(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_pep_572(self) -> None: source, expected = read_data("pep_572") actual = fs(source) self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) if sys.version_info >= (3, 8): black.assert_equivalent(source, actual) @@ -361,7 +426,7 @@ def test_expression_ff(self) -> None: self.assertFormatEqual(expected, actual) with patch("black.dump_to_file", dump_to_stderr): black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) def test_expression_diff(self) -> None: source, _ = read_data("expression.py") @@ -413,14 +478,14 @@ def test_fstring(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_pep_570(self) -> None: source, expected = read_data("pep_570") actual = fs(source) self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) if sys.version_info >= (3, 8): black.assert_equivalent(source, actual) @@ -438,8 +503,8 @@ def test_string_quotes(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) - mode = black.FileMode(string_normalization=False) + black.assert_stable(source, actual, DEFAULT_MODE) + mode = replace(DEFAULT_MODE, string_normalization=False) not_normalized = fs(source, mode=mode) self.assertFormatEqual(source.replace("\\\n", ""), not_normalized) black.assert_equivalent(source, not_normalized) @@ -451,7 +516,12 @@ def test_docstring(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) 
+ black.assert_stable(source, actual, DEFAULT_MODE) + mode = replace(DEFAULT_MODE, string_normalization=False) + not_normalized = fs(source, mode=mode) + self.assertFormatEqual(expected, not_normalized) + black.assert_equivalent(source, not_normalized) + black.assert_stable(source, not_normalized, mode=mode) def test_long_strings(self) -> None: """Tests for splitting long strings.""" @@ -459,7 +529,15 @@ def test_long_strings(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) + + def test_long_strings_flag_disabled(self) -> None: + """Tests for turning off the string processing logic.""" + source, expected = read_data("long_strings_flag_disabled") + mode = replace(DEFAULT_MODE, experimental_string_processing=False) + actual = fs(source, mode=mode) + self.assertFormatEqual(expected, actual) + black.assert_stable(expected, actual, mode) @patch("black.dump_to_file", dump_to_stderr) def test_long_strings__edge_case(self) -> None: @@ -468,7 +546,7 @@ def test_long_strings__edge_case(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_long_strings__regression(self) -> None: @@ -477,7 +555,7 @@ def test_long_strings__regression(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_slices(self) -> None: @@ -485,7 +563,15 @@ def test_slices(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) + + @patch("black.dump_to_file", dump_to_stderr) + def test_percent_precedence(self) -> None: + source, expected = read_data("percent_precedence") + actual = fs(source) + self.assertFormatEqual(expected, actual) + black.assert_equivalent(source, actual) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comments(self) -> None: @@ -493,7 +579,7 @@ def test_comments(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comments2(self) -> None: @@ -501,7 +587,7 @@ def test_comments2(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comments3(self) -> None: @@ -509,7 +595,7 @@ def test_comments3(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comments4(self) -> None: @@ -517,7 +603,7 @@ def test_comments4(self) 
-> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comments5(self) -> None: @@ -525,7 +611,7 @@ def test_comments5(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comments6(self) -> None: @@ -533,15 +619,16 @@ def test_comments6(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comments7(self) -> None: source, expected = read_data("comments7") - actual = fs(source) + mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY38}) + actual = fs(source, mode=mode) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_comment_after_escaped_newline(self) -> None: @@ -549,7 +636,7 @@ def test_comment_after_escaped_newline(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_cantfit(self) -> None: @@ -557,7 +644,7 @@ def test_cantfit(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_import_spacing(self) -> None: @@ -565,7 +652,7 @@ def test_import_spacing(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_composition(self) -> None: @@ -573,7 +660,16 @@ def test_composition(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) + + @patch("black.dump_to_file", dump_to_stderr) + def test_composition_no_trailing_comma(self) -> None: + source, expected = read_data("composition_no_trailing_comma") + mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY38}) + actual = fs(source, mode=mode) + self.assertFormatEqual(expected, actual) + black.assert_equivalent(source, actual) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_empty_lines(self) -> None: @@ -581,7 +677,7 @@ def test_empty_lines(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", 
dump_to_stderr) def test_remove_parens(self) -> None: @@ -589,7 +685,7 @@ def test_remove_parens(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_string_prefixes(self) -> None: @@ -597,12 +693,12 @@ def test_string_prefixes(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_numeric_literals(self) -> None: source, expected = read_data("numeric_literals") - mode = black.FileMode(target_versions=black.PY36_VERSIONS) + mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) actual = fs(source, mode=mode) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) @@ -611,7 +707,7 @@ def test_numeric_literals(self) -> None: @patch("black.dump_to_file", dump_to_stderr) def test_numeric_literals_ignoring_underscores(self) -> None: source, expected = read_data("numeric_literals_skip_underscores") - mode = black.FileMode(target_versions=black.PY36_VERSIONS) + mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) actual = fs(source, mode=mode) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) @@ -622,7 +718,7 @@ def test_numeric_literals_py2(self) -> None: source, expected = read_data("numeric_literals_py2") actual = fs(source) self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_python2(self) -> None: @@ -630,12 +726,12 @@ def test_python2(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_python2_print_function(self) -> None: source, expected = read_data("python2_print_function") - mode = black.FileMode(target_versions={TargetVersion.PY27}) + mode = replace(DEFAULT_MODE, target_versions={TargetVersion.PY27}) actual = fs(source, mode=mode) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) @@ -647,11 +743,11 @@ def test_python2_unicode_literals(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_stub(self) -> None: - mode = black.FileMode(is_pyi=True) + mode = replace(DEFAULT_MODE, is_pyi=True) source, expected = read_data("stub.pyi") actual = fs(source, mode=mode) self.assertFormatEqual(expected, actual) @@ -666,7 +762,7 @@ def test_async_as_identifier(self) -> None: major, minor = sys.version_info[:2] if major < 3 or (major <= 3 and minor < 7): black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) # ensure black can parse this when the target is 3.6 self.invokeBlack([str(source_path), "--target-version", "py36"]) # but not on 3.7, because async/await is no longer 
an identifier @@ -681,7 +777,7 @@ def test_python37(self) -> None: major, minor = sys.version_info[:2] if major > 3 or (major == 3 and minor >= 7): black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) # ensure black can parse this when the target is 3.7 self.invokeBlack([str(source_path), "--target-version", "py37"]) # but not on 3.6, because we use async as a reserved keyword @@ -695,7 +791,7 @@ def test_python38(self) -> None: major, minor = sys.version_info[:2] if major > 3 or (major == 3 and minor >= 8): black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_fmtonoff(self) -> None: @@ -703,7 +799,7 @@ def test_fmtonoff(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_fmtonoff2(self) -> None: @@ -711,7 +807,7 @@ def test_fmtonoff2(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_fmtonoff3(self) -> None: @@ -719,7 +815,7 @@ def test_fmtonoff3(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_fmtonoff4(self) -> None: @@ -727,7 +823,7 @@ def test_fmtonoff4(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_remove_empty_parentheses_after_class(self) -> None: @@ -735,7 +831,7 @@ def test_remove_empty_parentheses_after_class(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_new_line_between_class_and_code(self) -> None: @@ -743,7 +839,7 @@ def test_new_line_between_class_and_code(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_bracket_match(self) -> None: @@ -751,7 +847,7 @@ def test_bracket_match(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_tuple_assign(self) -> None: @@ -759,7 +855,7 @@ def test_tuple_assign(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + 
black.assert_stable(source, actual, DEFAULT_MODE) @patch("black.dump_to_file", dump_to_stderr) def test_beginning_backslash(self) -> None: @@ -767,7 +863,7 @@ def test_beginning_backslash(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) def test_tab_comment_indentation(self) -> None: contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n" @@ -1196,7 +1292,7 @@ def err(msg: str, **kwargs: Any) -> None: def test_format_file_contents(self) -> None: empty = "" - mode = black.FileMode() + mode = DEFAULT_MODE with self.assertRaises(black.NothingChanged): black.format_file_contents(empty, mode=mode, fast=False) just_nl = "\n" @@ -1241,7 +1337,7 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual("".join(err_lines), "") def test_cache_broken_file(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir() as workspace: cache_file = black.get_cache_file(mode) with cache_file.open("w") as fobj: @@ -1255,7 +1351,7 @@ def test_cache_broken_file(self) -> None: self.assertIn(src, cache) def test_cache_single_file_already_cached(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir() as workspace: src = (workspace / "test.py").resolve() with src.open("w") as fobj: @@ -1267,7 +1363,7 @@ def test_cache_single_file_already_cached(self) -> None: @event_loop() def test_cache_multiple_files(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir() as workspace, patch( "black.ProcessPoolExecutor", new=ThreadPoolExecutor ): @@ -1288,7 +1384,7 @@ def test_cache_multiple_files(self) -> None: self.assertIn(two, cache) def test_no_cache_when_writeback_diff(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir() as workspace: src = (workspace / "test.py").resolve() with src.open("w") as fobj: @@ -1298,7 +1394,7 @@ def test_no_cache_when_writeback_diff(self) -> None: self.assertFalse(cache_file.exists()) def test_no_cache_when_stdin(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir(): result = CliRunner().invoke( black.main, ["-"], input=BytesIO(b"print('hello')") @@ -1308,12 +1404,12 @@ def test_no_cache_when_stdin(self) -> None: self.assertFalse(cache_file.exists()) def test_read_cache_no_cachefile(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir(): self.assertEqual(black.read_cache(mode), {}) def test_write_cache_read_cache(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir() as workspace: src = (workspace / "test.py").resolve() src.touch() @@ -1339,7 +1435,7 @@ def test_filter_cached(self) -> None: self.assertEqual(done, {cached}) def test_write_cache_creates_directory_if_needed(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir(exists=False) as workspace: self.assertFalse(workspace.exists()) black.write_cache({}, [], mode) @@ -1347,7 +1443,7 @@ def test_write_cache_creates_directory_if_needed(self) -> None: @event_loop() def test_failed_formatting_does_not_get_cached(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE with cache_dir() as workspace, patch( "black.ProcessPoolExecutor", new=ThreadPoolExecutor ): @@ -1363,7 +1459,7 @@ def test_failed_formatting_does_not_get_cached(self) -> None: self.assertIn(clean, cache) def test_write_cache_write_fail(self) -> None: - mode = black.FileMode() + mode = DEFAULT_MODE 
with cache_dir(), patch.object(Path, "open") as mock: mock.side_effect = OSError black.write_cache({}, [], mode) @@ -1406,8 +1502,8 @@ def test_broken_symlink(self) -> None: self.invokeBlack([str(workspace.resolve())]) def test_read_cache_line_lengths(self) -> None: - mode = black.FileMode() - short_mode = black.FileMode(line_length=1) + mode = DEFAULT_MODE + short_mode = replace(DEFAULT_MODE, line_length=1) with cache_dir() as workspace: path = (workspace / "file.py").resolve() path.touch() @@ -1422,11 +1518,11 @@ def test_tricky_unicode_symbols(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) def test_single_file_force_pyi(self) -> None: - reg_mode = black.FileMode() - pyi_mode = black.FileMode(is_pyi=True) + reg_mode = DEFAULT_MODE + pyi_mode = replace(DEFAULT_MODE, is_pyi=True) contents, expected = read_data("force_pyi") with cache_dir() as workspace: path = (workspace / "file.py").resolve() @@ -1444,8 +1540,8 @@ def test_single_file_force_pyi(self) -> None: @event_loop() def test_multi_file_force_pyi(self) -> None: - reg_mode = black.FileMode() - pyi_mode = black.FileMode(is_pyi=True) + reg_mode = DEFAULT_MODE + pyi_mode = replace(DEFAULT_MODE, is_pyi=True) contents, expected = read_data("force_pyi") with cache_dir() as workspace: paths = [ @@ -1477,8 +1573,8 @@ def test_pipe_force_pyi(self) -> None: self.assertFormatEqual(actual, expected) def test_single_file_force_py36(self) -> None: - reg_mode = black.FileMode() - py36_mode = black.FileMode(target_versions=black.PY36_VERSIONS) + reg_mode = DEFAULT_MODE + py36_mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) source, expected = read_data("force_py36") with cache_dir() as workspace: path = (workspace / "file.py").resolve() @@ -1496,8 +1592,8 @@ def test_single_file_force_py36(self) -> None: @event_loop() def test_multi_file_force_py36(self) -> None: - reg_mode = black.FileMode() - py36_mode = black.FileMode(target_versions=black.PY36_VERSIONS) + reg_mode = DEFAULT_MODE + py36_mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) source, expected = read_data("force_py36") with cache_dir() as workspace: paths = [ @@ -1524,7 +1620,7 @@ def test_collections(self) -> None: actual = fs(source) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) + black.assert_stable(source, actual, DEFAULT_MODE) def test_pipe_force_py36(self) -> None: source, expected = read_data("force_py36") @@ -1551,7 +1647,32 @@ def test_include_exclude(self) -> None: this_abs = THIS_DIR.resolve() sources.extend( black.gen_python_files( - path.iterdir(), this_abs, include, [exclude], report, gitignore + path.iterdir(), this_abs, include, exclude, None, report, gitignore + ) + ) + self.assertEqual(sorted(expected), sorted(sources)) + + @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) + def test_exclude_for_issue_1572(self) -> None: + # Exclude shouldn't touch files that were explicitly given to Black through the + # CLI. Exclude is supposed to only apply to the recursive discovery of files. 
+ # https://github.com/psf/black/issues/1572 + path = THIS_DIR / "data" / "include_exclude_tests" + include = "" + exclude = r"/exclude/|a\.py" + src = str(path / "b/exclude/a.py") + report = black.Report() + expected = [Path(path / "b/exclude/a.py")] + sources = list( + black.get_sources( + ctx=FakeContext(), + src=(src,), + quiet=True, + verbose=False, + include=include, + exclude=exclude, + force_exclude=None, + report=report, ) ) self.assertEqual(sorted(expected), sorted(sources)) @@ -1572,7 +1693,7 @@ def test_gitignore_exclude(self) -> None: this_abs = THIS_DIR.resolve() sources.extend( black.gen_python_files( - path.iterdir(), this_abs, include, [exclude], report, gitignore + path.iterdir(), this_abs, include, exclude, None, report, gitignore ) ) self.assertEqual(sorted(expected), sorted(sources)) @@ -1600,7 +1721,8 @@ def test_empty_include(self) -> None: path.iterdir(), this_abs, empty, - [re.compile(black.DEFAULT_EXCLUDES)], + re.compile(black.DEFAULT_EXCLUDES), + None, report, gitignore, ) @@ -1627,7 +1749,8 @@ def test_empty_exclude(self) -> None: path.iterdir(), this_abs, re.compile(black.DEFAULT_INCLUDES), - [empty], + empty, + None, report, gitignore, ) @@ -1684,7 +1807,7 @@ def test_symlink_out_of_root_directory(self) -> None: try: list( black.gen_python_files( - path.iterdir(), root, include, exclude, report, gitignore + path.iterdir(), root, include, exclude, None, report, gitignore ) ) except ValueError as ve: @@ -1698,7 +1821,7 @@ def test_symlink_out_of_root_directory(self) -> None: with self.assertRaises(ValueError): list( black.gen_python_files( - path.iterdir(), root, include, exclude, report, gitignore + path.iterdir(), root, include, exclude, None, report, gitignore ) ) path.iterdir.assert_called() @@ -1777,20 +1900,8 @@ def test_parse_pyproject_toml(self) -> None: def test_read_pyproject_toml(self) -> None: test_toml_file = THIS_DIR / "test.toml" - - # Fake a click context and parameter so mypy stays happy - class FakeContext(click.Context): - def __init__(self) -> None: - self.default_map: Dict[str, Any] = {} - - class FakeParameter(click.Parameter): - def __init__(self) -> None: - pass - fake_ctx = FakeContext() - black.read_pyproject_toml( - fake_ctx, FakeParameter(), str(test_toml_file), - ) + black.read_pyproject_toml(fake_ctx, FakeParameter(), str(test_toml_file)) config = fake_ctx.default_map self.assertEqual(config["verbose"], "1") self.assertEqual(config["check"], "no") @@ -1801,6 +1912,28 @@ def __init__(self) -> None: self.assertEqual(config["exclude"], r"\.pyi?$") self.assertEqual(config["include"], r"\.py?$") + def test_find_project_root(self) -> None: + with TemporaryDirectory() as workspace: + root = Path(workspace) + test_dir = root / "test" + test_dir.mkdir() + + src_dir = root / "src" + src_dir.mkdir() + + root_pyproject = root / "pyproject.toml" + root_pyproject.touch() + src_pyproject = src_dir / "pyproject.toml" + src_pyproject.touch() + src_python = src_dir / "foo.py" + src_python.touch() + + self.assertEqual( + black.find_project_root((src_dir, test_dir)), root.resolve() + ) + self.assertEqual(black.find_project_root((src_dir,)), src_dir.resolve()) + self.assertEqual(black.find_project_root((src_python,)), src_dir.resolve()) + class BlackDTestCase(AioHTTPTestCase): async def get_application(self) -> web.Application: @@ -1970,5 +2103,31 @@ async def test_blackd_response_black_version_header(self) -> None: self.assertIsNotNone(response.headers.get(blackd.BLACK_VERSION_HEADER)) +with open(black.__file__, "r", encoding="utf-8") as _bf: + 
black_source_lines = _bf.readlines() + + +def tracefunc(frame: types.FrameType, event: str, arg: Any) -> Callable: + """Show function calls from `black/__init__.py` as they happen. + + Register this with `sys.settrace()` in a test you're debugging. + """ + if event != "call": + return tracefunc + + stack = len(inspect.stack()) - 19 + stack *= 2 + filename = frame.f_code.co_filename + lineno = frame.f_lineno + func_sig_lineno = lineno - 1 + funcname = black_source_lines[func_sig_lineno].strip() + while funcname.startswith("@"): + func_sig_lineno += 1 + funcname = black_source_lines[func_sig_lineno].strip() + if "black/__init__.py" in filename: + print(f"{' ' * stack}{lineno}:{funcname}") + return tracefunc + + if __name__ == "__main__": unittest.main(module="test_black") From 81d333fa4f13e2508f5a8d4c746cdf0beb540588 Mon Sep 17 00:00:00 2001 From: dwanderson-intel Date: Thu, 6 Oct 2022 09:34:48 -0400 Subject: [PATCH 3/3] Bump from upstream (#3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Bump myst-parser from 0.15.2 to 0.16.0 in /docs (GH-2696) Bumps [myst-parser](https://github.com/executablebooks/MyST-Parser) from 0.15.2 to 0.16.0. - [Release notes](https://github.com/executablebooks/MyST-Parser/releases) - [Changelog](https://github.com/executablebooks/MyST-Parser/blob/master/CHANGELOG.md) - [Commits](https://github.com/executablebooks/MyST-Parser/compare/v0.15.2...v0.16.0) --- updated-dependencies: - dependency-name: myst-parser dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump pre-commit/action from 2.0.2 to 2.0.3 (GH-2695) Bumps [pre-commit/action](https://github.com/pre-commit/action) from 2.0.2 to 2.0.3. - [Release notes](https://github.com/pre-commit/action/releases) - [Commits](https://github.com/pre-commit/action/compare/v2.0.2...v2.0.3) --- updated-dependencies: - dependency-name: pre-commit/action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Unpacking on flow constructs (return/yield) now implies 3.8+ (#2700) * Use 'python -m build' to build wheel and source distributions (#2701) * Imply 3.8+ when annotated assignments used with unparenthesized tuples (#2708) * Bump sphinx from 4.3.1 to 4.3.2 in /docs (#2709) Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 4.3.1 to 4.3.2. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v4.3.1...v4.3.2) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump myst-parser from 0.16.0 to 0.16.1 in /docs (#2710) Bumps [myst-parser](https://github.com/executablebooks/MyST-Parser) from 0.16.0 to 0.16.1.
- [Release notes](https://github.com/executablebooks/MyST-Parser/releases) - [Changelog](https://github.com/executablebooks/MyST-Parser/blob/master/CHANGELOG.md) - [Commits](https://github.com/executablebooks/MyST-Parser/compare/v0.16.0...v0.16.1) --- updated-dependencies: - dependency-name: myst-parser dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Disable universal newlines when reading TOML (#2408) * Define is_name_token (and friends) to resolve some `type: ignore`s (GH-2714) Gets rid of a few # type: ignores by using TypeGuard. * Remove usage of Pipenv, rely on good ol' `pip` and `virtualenv` in docs (#2717) * Support multiple top-level as-expressions on case statements (#2716) * Update contributing wording (#2719) Co-authored-by: Jelle Zijlstra Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * remove all type: ignores in src/black (GH-2720) * Support pytest 7 by fixing broken imports (GH-2705) The tmp_path related changes are not necessary to make pytest 7 work, but it feels more complete this way. * Drop upper version bounds on dependencies (GH-2718) They mostly cause unnecessary trouble. Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Primer: exclude crashing sqlalchemy file for now (GH-2735) Until we can properly look into and fix it. -> https://github.com/psf/black/issues/2734 * Documentation: include Wing IDE 8 integrations (GH-2733) Wing IDE 8 now supports autoformatting w/ Black natively 🎉 Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Improve CLI reference wording (#2753) * Stubs: preserve blank line between attributes and methods (#2736) * Action: Support running in a docker container (#2748) see: https://github.com/actions/runner/issues/716 * Fix call patterns that contain as-expression on the kwargs (#2749) * Remove Python 2 support (#2740) *blib2to3's support was left untouched because: 1) I don't want to touch parsing machinery, and 2) it'll allow us to provide a more useful error message if someone does try to format Python 2 code. * Enhance `--verbose` (#2526) Black would now echo the location that it determined as the root path for the project if `--verbose` is enabled by the user, according to which it chooses the SRC paths, i.e. the absolute path of the project is `{root}/{src}`. Closes #1880 * Speed up new backtracking parser (#2728) * Fix handling of standalone match/case with newlines/comments (#2760) Resolves #2759 * Change git url for pip installation in README (#2761) * Change git url for pip installation in README Unauthenticated git protocol was disabled recently by Github and should not be used anymore. https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git * Update CHANGES.md * Change installation url to comply with git security change (#2765) Co-authored-by: Jeffrey Lazar * don't expect changes on poetry (#2769) They just made themselves ESP-compliant in https://github.com/python-poetry/poetry/commit/ecb030e1f0b7c13cc11971f00ee5012e82a892bc * Normalise string prefix order (#2297) Closes #2171 * Don't make redundant copies of the DFA (#2763) * Added decent coloring (#2712) * CI: add diff-shades integration (#2725) Hopefully this makes it much easier to gauge the impacts of future changes!
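To make the `is_name_token` change (GH-2714) concrete, here is a minimal, self-contained sketch of the TypeGuard pattern it relies on. The `Leaf`/`Node` classes and the `NAME` constant below are simplified stand-ins for blib2to3's real types, not Black's actual definitions:

```python
# Minimal sketch of the TypeGuard pattern from GH-2714. The types below are
# simplified stand-ins for blib2to3's Leaf/Node, not Black's real code.
from typing import TypeGuard, Union  # typing_extensions on Python < 3.10

NAME = 1  # stand-in token code for a NAME leaf


class Leaf:
    def __init__(self, type: int, value: str) -> None:
        self.type = type
        self.value = value


class Node:
    type = 256  # grammar symbols use codes >= 256


LN = Union[Leaf, Node]


def is_name_token(nl: LN) -> TypeGuard[Leaf]:
    # Returning True tells the type checker that `nl` is a Leaf.
    return isinstance(nl, Leaf) and nl.type == NAME


def describe(nl: LN) -> str:
    if is_name_token(nl):
        # mypy narrows `nl` to Leaf here, so no `# type: ignore` is needed.
        return f"name {nl.value!r}"
    return "not a name"


print(describe(Leaf(NAME, "x")))  # -> name 'x'
```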
* Dont require typing-extensions in 3.10 (GH-2772) 3.10 ships with TypeGuard which is the newest feature we need. Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * [trivial] Use proper test cases on `unittest` (#2775) * Bump sphinx from 4.3.2 to 4.4.0 in /docs (#2776) Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 4.3.2 to 4.4.0. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v4.3.2...v4.4.0) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Fix typo in diff_shades.yml workflow (#2778) * Create --preview CLI flag (#2752) * Fix and speedup diff-shades integration (#2773) * Deprecate ESP and move the functionality under --preview (#2789) * Hint at likely cause of ast parsing failure in error message (#2786) Co-authored-by: Batuhan Taskaya Co-authored-by: Jelle Zijlstra Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Add support for custom python cell magics (#2744) Fixes #2742. This PR adds the ability to configure additional python cell magics. This will allow formatting cells in Jupyter Notebooks that are using custom (python) magics. * Set `click` lower bound to `8.0.0` (#2791) Closes #2774 * add wind technology software projects using black (#2792) * Switch to Furo (#2793) - Add Furo dependency to docs/requirements.txt - Drop a fair bit of theme configuration - Fix the toctree declarations in index.rst - Move stuff around as Furo isn't 100% compatible with Alabaster Furo was chosen as it provides excellent mobile support, user controllable light/dark theming, and is overall easier to read * Allow setting custom cache directory on all platforms (#2739) Fixes #2506 ``XDG_CACHE_HOME`` does not work on Windows. To allow for users to set a custom cache directory on all systems I added a new environment variable ``BLACK_CACHE_DIR`` to set the cache directory. The default remains the same so users will only notice a change if that environment variable is set. The specific use case I have for this is I need to run black on in different processes at the same time. There is a race condition with the cache pickle file that made this rather difficult. A custom cache directory will remove the race condition. I created ``get_cache_dir`` function in order to test the logic. This is only used to set the ``CACHE_DIR`` constant. * Mark Felix and Batuhan as maintainers (#2794) Y'all deserve it :) * Refactor logic for stub empty lines (#2796) This PR is intended to have no change to semantics. This is in preparation for #2784 which will likely introduce more logic that depends on `current_line.depth`. Inlining the subtraction gets rid of offsetting and makes it much easier to see what the result will be. * Mention "skip news" label in CHANGELOG action (#2797) Co-authored-by: hauntsaninja <> * Enable pattern matching by default (#2758) Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Allow blackd to be run as a package (#2800) * Remove Beta mentions in README + Docs (#2801) - State we're now stable and that we'll uphold our formatting changes as per policy - Link to The Black Style doc. 
Co-authored-by: Jelle Zijlstra * Use `magic_trailing_comma` and `preview` for `FileMode` in `fuzz` (#2802) Co-authored-by: Jelle Zijlstra * Make SRC or code mandatory and mutually exclusive (#2360) (#2804) Closes #2360: I'd like to make passing SRC or `--code` mandatory and the arguments mutually exclusive. This will change our (partially already broken) promises of CLI behavior, but I'll comment below. * Hug power operators if its operands are "simple" (#2726) Since power operators almost always have the highest binding power in expressions, it's often more readable to hug it with its operands. The main exception to this is when its operands are non-trivial in which case the power operator will not hug, the rule for this is the following: > For power ops, an operand is considered "simple" if it's only a NAME, numeric CONSTANT, or attribute access (chained attribute access is allowed), with or without a preceding unary operator. Fixes GH-538. Closes GH-2095. diff-shades results: https://gist.github.com/ichard26/ca6c6ad4bd1de5152d95418c8645354b Co-authored-by: Diego Co-authored-by: Felix Hildén Co-authored-by: Jelle Zijlstra * properly run ourselves twice (#2807) The previous run-twice logic only affected the stability checks but not the output. Now, we actually output the twice-formatted code. * Fix crash on some power hugging cases (#2806) Found by the fuzzer. Repro case: python -m black -c 'importA;()<<0**0#' * black-primer: stop running it (#2809) At the moment, it's just a source of spurious CI failures and busywork updating the configuration file. Unlike diff-shades, it is run across many different platforms and Python versions, but that doesn't seem essential. We already run unit tests across platforms and versions. I chose to leave the code around for now in case somebody is using it, but CI will no longer run it. * more trailing comma tests (#2810) * Use parentheses on method access on float and int literals (#2799) Co-authored-by: Jelle Zijlstra Co-authored-by: Felix Hildén * Tests for unicode identifiers (#2816) * reorganize release notes for 22.1.0 (#2790) Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Elaborate on Python support policy (#2819) * Treat blank lines in stubs the same inside top-level `if` statements (#2820) * torture test (#2815) Fixes #2651. Fixes #2754. Fixes #2518. Fixes #2321. This adds a test that lists a number of cases of unstable formatting that we have seen in the issue tracker. Checking it in will ensure that we don't regress on these cases. * Formalise style preference description (#2818) Closes #1256: I reworded our style docs to be more explicit about the style we're aiming for and how it is changed (or isn't). * Fix arithmetic stability issue (#2817) It turns out "simple_stmt" isn't that simple: it can contain multiple statements separated by semicolons. Invisible parenthesis logic for arithmetic expressions only looked at the first child of simple_stmt. This causes instability in the presence of semicolons, since the next run through the statement following the semicolon will be the first child of another simple_stmt. I believe this along with #2572 fix the known stability issues. * Fix instability due to trailing comma logic (#2572) It was causing stability issues because the first pass could cause a "magic trailing comma" to appear, meaning that the second pass might get a different result. It's not critical. 
Some things format differently (with extra parens) * Add a test case to torture.py (#2822) Co-authored-by: hauntsaninja <> * Update classifiers to reflect stable (#2823) * Remove test suite from setup.py (#2824) We no longer use it * Fix changelog entries in the wrong release (#2825) * Prepare docs for release 22.1.0 (GH-2826) * Adjust `--preview` documentation (#2833) * Exclude __pypackages__ by default (GH-2836) PDM uses this as part of not-accepted-yet PEP 582. * Soft comparison of --required-version (#2832) Co-authored-by: Jelle Zijlstra Co-authored-by: Felix Hildén * release process: formalize the changelog template (#2837) I did this manually for the last few releases and I think it's going to be helpful in the future too. Unfortunately this adds a little more work during the release (sorry @cooperlees). This change will also improve the merge conflict situation a bit, because changes to different sections won't merge conflict. For the last release, the sections were in a kind of random order. In the template I put highlights and "Style" first because they're most important to users, and alphabetized the rest. * Surface links to Stability Policy (GH-2848) * Isolate command line tests from user-level config (#2851) * Update description for GitHub Action `options:` argument (GH-2858) It was missing --diff as one of the default arguments passed. * Create indentation FAQ entry (#2855) Co-authored-by: Jelle Zijlstra * Bump sphinx-copybutton from 0.4.0 to 0.5.0 in /docs (#2871) Bumps [sphinx-copybutton](https://github.com/executablebooks/sphinx-copybutton) from 0.4.0 to 0.5.0. - [Release notes](https://github.com/executablebooks/sphinx-copybutton/releases) - [Changelog](https://github.com/executablebooks/sphinx-copybutton/blob/master/CHANGELOG.md) - [Commits](https://github.com/executablebooks/sphinx-copybutton/compare/v0.4.0...v0.5.0) --- updated-dependencies: - dependency-name: sphinx-copybutton dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Format ourselves in preview mode (#2889) * separate CHANGELOG section for preview style (#2890) * fix new formatting issue (#2895) Race between #2889 and another PR. * README: fix "Pragmatism" link target (#2901) Fixes #2897 * replace md5 with sha256 (#2905) MD5 is unavailable on systems with active FIPS mode. That makes black crash when run on such systems. * Move test for g:load_black to improve plugin performance (GH-2896) If a vim/neovim user wishes to suppress loading the vim plugin by setting g:load_black in their VIMRC (for me, Arch linux automatically adds the plugin to Neovim's RTP, even though I'm not using it), the current location of the test comes after a call to has('python3'). This adds, in my tests, between 35 and 45 ms to Vim load time (which I know isn't a lot but it's also unnecessary). Moving the call to `exists('g:load_black')` to before the call to `has('python3')` removes this unnecessary test and speeds up loading. Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Bump furo from 2022.2.14.1 to 2022.3.4 in /docs (#2906) Bumps [furo](https://github.com/pradyunsg/furo) from 2022.2.14.1 to 2022.3.4. - [Release notes](https://github.com/pradyunsg/furo/releases) - [Changelog](https://github.com/pradyunsg/furo/blob/main/docs/changelog.md) - [Commits](https://github.com/pradyunsg/furo/compare/2022.02.14.1...2022.03.04) --- updated-dependencies: - dependency-name: furo dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Allow `for`'s target expression to be starred (#2879) Fixes #2878 * Bump actions/checkout from 2 to 3 (#2909) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump actions/setup-python from 2 to 3 (#2908) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2 to 3. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Cooper Lees * Fix handling of Windows junctions in normalize_path_maybe_ignore (#2904) Fixes #2569 * Use tomllib on Python 3.11 (#2903) * Bump mypy, flake8, and pre-commit-hooks in pre-commit (GH-2922) * Farewell black-primer, it was nice knowing you (#2924) Enjoy your retirement at https://github.com/cooperlees/black-primer * Remove power hugging formatting from preview (#2928) It is falsely placed in preview features and always formats the power operators, it was added in #2789 but there is no check for formatting added along with it. 
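The tomllib switch (#2903 above) comes down to a guarded import. A rough sketch of the pattern, with an illustrative `read_pyproject` helper rather than Black's actual loader:

```python
# Rough sketch of the guarded import behind #2903: prefer the stdlib tomllib
# on Python 3.11+, fall back to the tomli backport elsewhere. An explicit
# version check (not try/except) keeps mypy happy.
import sys
from typing import Any, Dict

if sys.version_info >= (3, 11):
    import tomllib
else:
    import tomli as tomllib  # same API as the stdlib module


def read_pyproject(path: str) -> Dict[str, Any]:
    # Both modules require binary mode, which also avoids universal
    # newlines when reading TOML (cf. #2408).
    with open(path, "rb") as f:
        return tomllib.load(f)
```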
* Update pylint config docs (#2931) * dont skip formatting #%% (#2919) Fixes #2588 * stub style: remove some possible future changes (#2940) Fixes #2938. All of these suggested future changes are out of scope for an autoformatter and should be handled by a linter instead. * Github now supports .git-blame-ignore-revs (GH-2948) It's in beta. https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view * Avoid magic-trailing-comma in single-element subscripts (#2942) Closes #2918. * Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) * Resolve new flake8-bugbear errors (B020) (GH-2950) Fixes a couple places where we were using the same variable name as we are iterating over. Co-authored-by: Jelle Zijlstra * Remove unnecessary parentheses from `except` clauses (#2939) Co-authored-by: Jelle Zijlstra * Enforce no formatting changes for PRs via CI (GH-2951) Now PRs will run two diff-shades jobs, "preview-changes" which formats all projects with preview=True, and "assert-no-changes" which formats all projects with preview=False. The latter also fails if any changes were made. Pushes to main will only run "preview-changes" Also the workflow_dispatch feature was dropped since it was complicating everything for little gain. * Bump sphinx from 4.4.0 to 4.5.0 in /docs (GH-2959) Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 4.4.0 to 4.5.0. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v4.4.0...v4.5.0) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Fix _unicodefun patch code for Click 8.1.0 (#2966) Fixes #2964 * Prepare release 22.3.0 (#2968) * Bump actions/cache from 2.1.7 to 3 (GH-2962) Bumps [actions/cache](https://github.com/actions/cache) from 2.1.7 to 3. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v2.1.7...v3) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Keep tests working w/ upcoming aiohttp 4.0.0 (#2974) aiohttp.test_utils.unittest_run_loop was deprecated since aiohttp 3.8 and aiohttp 4 (which isn't a thing quite yet) removes it. To maintain compatibility with the full range of versions we declare to support, test_blackd.py will now define a no-op replacement if it can't be imported. Also, mypy is painfully slow to use without a cache, let's reenable it. * Convert `index.rst` and `license.rst` to markdown (#2852) Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Add # type: ignore for click._unicodefun import (#2981) * Remove click pin in diff-shades workflow (#2979) Click 8.1.1 was released with a fix for pallets/click#2227. * Bump peter-evans/find-comment from 1.3.0 to 2 (#2960) Bumps [peter-evans/find-comment](https://github.com/peter-evans/find-comment) from 1.3.0 to 2. 
- [Release notes](https://github.com/peter-evans/find-comment/releases) - [Commits](https://github.com/peter-evans/find-comment/compare/d2dae40ed151c634e4189471272b57e76ec19ba8...1769778a0c5bd330272d749d12c036d65e70d39d) --- updated-dependencies: - dependency-name: peter-evans/find-comment dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump peter-evans/create-or-update-comment from 1.4.5 to 2 (#2961) Bumps [peter-evans/create-or-update-comment](https://github.com/peter-evans/create-or-update-comment) from 1.4.5 to 2. - [Release notes](https://github.com/peter-evans/create-or-update-comment/releases) - [Commits](https://github.com/peter-evans/create-or-update-comment/compare/a35cf36e5301d70b76f316e867e7788a55a31dae...c9fcb64660bc90ec1cc535646af190c992007c32) --- updated-dependencies: - dependency-name: peter-evans/create-or-update-comment dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * try-except tomllib import (#2987) See #2965 I left the version check in place because mypy doesn't generally like try-excepted imports. * Fix broken link in README.md (#2989) Broken when we converted some more RST docs to MyST * Remove unnecessary parentheses from `with` statements (#2926) Co-authored-by: Jelle Zijlstra Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Update test_black.shhh_click test for click 8+ (#2993) The 8.0.x series renamed its "die on LANG=C" function and the 8.1.x series straight up deleted it. Unfortunately this makes this test type check cleanly hard, so we'll just lint with click 8.1+ (the pre-commit hook configuration was changed mostly to just evict any now unsupported mypy environments) * Update FAQ: Mention formatting of custom jupyter cell magic (#2982) Co-authored-by: Jelle Zijlstra Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Top PyPI Packages: Use 30-days data, 365 is no longer available (#2995) * Output python version and implementation as part of `--version` flag (#2997) Example: black, 22.1.1.dev56+g421383d.d20220405 (compiled: no) Python (CPython) 3.9.12 Co-authored-by: Batuhan Taskaya * Better manage return annotation brackets (#2990) Allows us to better control placement of return annotations by: a) removing redundant parens b) moves very long type annotations onto their own line Co-authored-by: Jelle Zijlstra * Remove redundant parentheses around awaited coroutines/tasks (#2991) This is a tricky one as await is technically an expression and therefore in certain situations requires brackets for operator precedence. However, the vast majority of await usage is just await some_coroutine(...) and similar in format to return statements. Therefore this PR removes redundant parens around these await expressions. 
Co-authored-by: Jelle Zijlstra Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Correctly handle fmt: skip comments without internal spaces (#2970) Co-authored-by: Jelle Zijlstra * Explain our use of mypyc in the FAQ (#3002) I realized we don't have a FAQ entry about this, let's change that so compiled: yes/no doesn't surprise as many people :) Co-authored-by: Jelle Zijlstra * Bump actions/upload-artifact from 2 to 3 (#3004) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 2 to 3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Quote "black[jupyter]" in README.md (#3007) * Bump furo from 2022.3.4 to 2022.4.7 in /docs (#3003) Bumps [furo](https://github.com/pradyunsg/furo) from 2022.3.4 to 2022.4.7. - [Release notes](https://github.com/pradyunsg/furo/releases) - [Changelog](https://github.com/pradyunsg/furo/blob/main/docs/changelog.md) - [Commits](https://github.com/pradyunsg/furo/compare/2022.03.04...2022.04.07) --- updated-dependencies: - dependency-name: furo dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Quote black[jupyter] and black[d] in installation docs (#3006) We just got someone on Discord who was confused because the command as written caused their shell to try to do command expansion. Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Make ipynb tests compatible with ipython 8.3.0+ (#3008) * Support 3.11 / PEP 654 syntax (#3016) * Updated Black Docker Hub link in docs (#3023) Fixes #3022 * Fix strtobool function (#3025) * Fix strtobool function for vim plugin * Update CHANGES.md Co-authored-by: Cooper Lees * Stop pinning lark-parser (#3041) - Latest version works more Test: `tox -e fuzz` * Bump myst-parser from 0.16.1 to 0.17.2 in /docs (#3019) Bumps [myst-parser](https://github.com/executablebooks/MyST-Parser) from 0.16.1 to 0.17.2. - [Release notes](https://github.com/executablebooks/MyST-Parser/releases) - [Changelog](https://github.com/executablebooks/MyST-Parser/blob/master/CHANGELOG.md) - [Commits](https://github.com/executablebooks/MyST-Parser/compare/v0.16.1...v0.17.2) --- updated-dependencies: - dependency-name: myst-parser dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: Set permissions for GitHub actions (#3043) Restrict the GitHub token permissions only to the required ones; this way, even if the attackers will succeed in compromising your workflow, they won’t be able to do much. - Included permissions for the action. 
https://github.com/ossf/scorecard/blob/main/docs/checks.md#token-permissions https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs [Keeping your GitHub Actions and workflows secure Part 1: Preventing pwn requests](https://securitylab.github.com/research/github-actions-preventing-pwn-requests/) Signed-off-by: naveen <172697+naveensrinivasan@users.noreply.github.com> * Move imports of `ThreadPoolExecutor` into `reformat_many()`, allowing Black-in-the-browser (#3046) This is a slight perf win for use-cases that don't invoke `reformat_many()`, but more importantly to me today it means I can use Black in pyscript. * Docs: clarify fmt:on/off requirements (#2985) (#3048) * Put closing quote on a separate line if docstring is too long (#3044) Fixes #1632 Co-authored-by: Felix Hildén * Read simple data cases automatically (#3034) Co-authored-by: Felix Hildén * Bump docker/setup-qemu-action from 1 to 2 (#3056) Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 1 to 2. - [Release notes](https://github.com/docker/setup-qemu-action/releases) - [Commits](https://github.com/docker/setup-qemu-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/setup-qemu-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump docker/build-push-action from 2 to 3 (#3057) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 2 to 3. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump docker/login-action from 1 to 2 (#3059) Bumps [docker/login-action](https://github.com/docker/login-action) from 1 to 2. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump docker/setup-buildx-action from 1 to 2 (#3058) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 1 to 2. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Remove hard coded test cases (#3062) * Document new Microsoft Black Formatter extension for VSCode (#3063) Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Add more examples to exclude files in addition to the defaults (#3070) Co-authored-by: Jelle Zijlstra * Implement support for PEP 646 (#3071) * Add script to ease migration to black (#3038) * Add script to ease migration to black * Update CHANGES.md Co-authored-by: Cooper Lees * Fix minor typo (#3096) * Bump pre-commit/action from 2.0.3 to 3.0.0 (#3108) Bumps [pre-commit/action](https://github.com/pre-commit/action) from 2.0.3 to 3.0.0. - [Release notes](https://github.com/pre-commit/action/releases) - [Commits](https://github.com/pre-commit/action/compare/v2.0.3...v3.0.0) --- updated-dependencies: - dependency-name: pre-commit/action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Remove newline after code block open (#3035) Co-authored-by: Jelle Zijlstra * Update documentation dependencies (#3118) Furo, myst-parser, and Sphinx (had to pin docutils due to sphinx breakage) * Use is_number_token instead of assertion (#3069) * Bump actions/setup-python from 3 to 4 (#3121) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 3 to 4. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Test run black on self (#3114) * Add run_self environment in tox * Add run_self task as part of the lint CI flow * Remove hard coded sources list * Remove black from pre-commit Co-authored-by: Cooper Lees * Replace link to Requests documentation (#3125) * Bump sphinx from 5.0.1 to 5.0.2 in /docs (#3128) Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 5.0.1 to 5.0.2. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/5.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v5.0.1...v5.0.2) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Only call get_future_imports when needed (#3135) * Bump furo from 2022.6.4.1 to 2022.6.21 in /docs (#3138) Bumps [furo](https://github.com/pradyunsg/furo) from 2022.6.4.1 to 2022.6.21. - [Release notes](https://github.com/pradyunsg/furo/releases) - [Changelog](https://github.com/pradyunsg/furo/blob/main/docs/changelog.md) - [Commits](https://github.com/pradyunsg/furo/compare/2022.06.04.1...2022.06.21) --- updated-dependencies: - dependency-name: furo dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Update preview style docs to include recent changes (#3136) Covers GH-2926, GH-2990, GH-2991, and GH-3035. 
Co-authored-by: Jelle Zijlstra Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Prepare docs for release 22.6.0 (#3139) * Fix typo in CHANGES.md (#3142) * Use RTD's new build process and config (#3149) See the deprecation notice: https://docs.readthedocs.io/en/stable/config-file/v2.html#python-version * Stability policy: permit exceptional changes for unformatted code (#3155) * Recommend using BlackConnect in IntelliJ IDEs (#3150) * Recommend using BlackConnect in IntelliJ IDEs * IntelliJ IDEs integration docs: improve formatting * Add changelog for recommending BlackConnect * IntelliJ IDEs integration docs: improve formatting * Apply suggestions from code review Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Fix indentation * Apply italic to Black name Consequently with other places in the document * Move CHANGELOG entry to Unreleased section * IntelliJ IDEs integration docs: bring back a point with formatting a file * IntelliJ IDEs integration docs: fix extra whitespace and linebreak Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Copy over comments when hugging power ops (#2874) Otherwise they'd be deleted which was a regression in 22.1.0 (oops! my bad!). Also type comments are now tracked in the AST safety check on all compatible platforms to error out if this happens again. Overall the line rewriting code has been rewritten to do "the right thing (tm)", I hope this fixes other potential bugs in the code (fwiw I got to drop the bugfix in blib2to3.pytree.Leaf.clone since now bracket metadata is properly copied over). Fixes #2873 * Don't (ever) put a single-char closing docstring quote on a new line (#3166) Doing so is invalid. Note this only fixes the preview style since the logic putting closing docstring quotes on their own line if they violate the line length limit is quite new. Co-authored-by: Jelle Zijlstra * Add warning to not run blackd publicly in docs (#3167) Co-authored-by: Jelle Zijlstra * Move to explicitly creating a new loop (#3164) * Move to explicitly creating a new loop - >= 3.10 add a warning that `get_event_loop` will not automatically create a loop - Move to explicit API Test: - `python3.11 -m venv --upgrade-deps /tmp/tb` - `/tmp/tb/bin/pip install -e .` - Install deps and no blackd as aiohttp + yarl can't build still with 3.11 - https://github.com/aio-libs/aiohttp/issues/6600 - `export PYTHONWARNINGS=error` ``` cooper@l33t:~/repos/black$ /tmp/tb/bin/black . All done! ✨ 🍰 ✨ 44 files left unchanged. ``` Fixes #3110 * Add to CHANGES.md * Fix a cooper typo yet again * Set default asyncio loop to our explicitly created one + unset on exit * Update CHANGES.md Fix my silly typo. Co-authored-by: Thomas Grainger Co-authored-by: Cooper Ry Lees Co-authored-by: Thomas Grainger * Actually disable docstring prefix normalization with -S + fix instability (#3168) The former was a regression I introduced a long time ago. To avoid changing the stable style too much, the regression is only fixed if --preview is enabled Annoyingly enough, as we currently always enforce a second format pass if changes were made, there's no good way to prove the existence of the docstring quote normalization instability issue. 
For posterity, here's one failing example:

--- source
+++ first pass
@@ -1,7 +1,7 @@
 def some_function(self):
-    ''''
+    """ '
-    '''
+    """
     pass

--- first pass
+++ second pass
@@ -1,7 +1,7 @@
 def some_function(self):
-    """ '
+    """'
     """
     pass

Co-authored-by: Jelle Zijlstra * Fix typo in config docs for --extend-exclude (#3170) The old regex in the example was invalid and caused an error on startup. * configure strict pytest and filterwarnings=['error', ... (#3173) * configure strict pytest * ignore current warnings * Add pypy-3.8 to test matrix (#3174) * Improve warning filtering in tests (#3175) * Fix the handling of `# fmt: skip` when it's at a colon line (#3148) When the Leaf node with `# fmt: skip` is a NEWLINE inside a `suite` Node, the nodes to ignore should be from the siblings of the parent `suite` Node. There is also a special case for the ASYNC token, where it expands to the grandparent Node where the ASYNC token is. This fixes GH-2646, GH-3126, GH-2680, GH-2421, GH-2339, and GH-2138. * Fix an infinite loop when using `# fmt: on/off` ... (#3158) ... in the middle of an expression or code block by adding a missing return. Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> Co-authored-by: Jelle Zijlstra * Use underscores instead of a space in a test file's name (#3180) ... for *consistency* * Bump sphinx from 5.0.2 to 5.1.0 in /docs (#3183) Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 5.0.2 to 5.1.0.
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/5.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v5.1.0...v5.1.1) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * makes install available for all users in docker image (#3202) * makes install available for all users in docker image moves the installation path from /root/.local to a virtualenv. this way we still get the lightweight multistage build without excluding non-root users. * adds changelog entry for docker-image fix A changelog entry has been added under the Integration subheader * changes dockerfile to use the venv activate script we are now using the inbuilt venv activate script, as well as explicitly mentioning the binary location in the entrypoint cmd. Co-authored-by: Nicolò Co-authored-by: Cooper Lees * Remove invalid syntax in docstrings -S --preview test (#3205) uR is not a legal string prefix, so this test breaks (AssertionError: cannot use --safe with this file; failed to parse source file AST: invalid syntax) if changed to one in which the file is changed. I've changed the last test to have u alone, and added an R to the test above instead. * Use debug f-strings for feature detection (#3215) Fixes GH-2907. * Use --no-implicit-optional for type checking (#3220) This makes type checking PEP 484 compliant (as of 2018). mypy will change its defaults soon. See: https://github.com/python/mypy/issues/9091 https://github.com/python/mypy/pull/13401 * add preview option support for blackd (#3217) Fixes #3195 Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Port & upstream mypyc wheel build workflow (#3197) * Strip trailing commas in subscripts with -C (#3209) Fixes #2296, #3204 * Update email (#3235) This file gets scraped a lot, so create a distinct email for potential spam. * Add passing 3.11 CI by exempting blackd tests (#3234) - Had to exempt blackd tests for now due to aiohttp - Skip by using `sys.version_info` tuple - aiohttp does not compile in 3.11 yet - refer to #3230 - Add a deadsnakes ubuntu workflow to run 3.11-dev to ensure we don't regress - Have it also format ourselves Test: - `tox -e 311` Co-authored-by: Cooper Ry Lees Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Fix a string merging/split issue caused by standalone comments. (#3227) Fixes #2734: a standalone comment causes strings to be merged into one far too long (and requiring two passes to do so). Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Remove hacky subprocess call in action.yml (#3226) Updates action.yml to use the alternative $GITHUB_ACTION_PATH variable instead of the original ${{ github.action_path }} which caused issues with bash on the Windows runners. This removes the need for a Python subprocess to call the main.py script. * Fix misdetection of project root with `--stdin-filename` (#3216) There are a number of places this behaviour could be patched, for instance, it's quite tempting to patch it in `get_sources`. However I believe we generally have the invariant that project root contains all files we want to format, in which case it seems prudent to keep that invariant. This also improves the accuracy of the "sources to be formatted" log message with --stdin-filename. 
Fixes GH-3207. * Lazily import parallelized format modules `black.reformat_many` depends on a lot of slow-to-import modules. When formatting simply a single file, the time paid to import those modules is totally wasted. So I moved `black.reformat_many` and its helpers to `black.concurrency` which is now *only* imported if there's more than one file to reformat. This way, running Black over a single file is snappier. Here are the numbers before and after this patch running `python -m black --version`: - interpreted: 411 ms +- 9 ms -> 342 ms +- 7 ms: 1.20x faster - compiled: 365 ms +- 15 ms -> 304 ms +- 7 ms: 1.20x faster Co-authored-by: Fabio Zadrozny * Load .gitignore and exclude regex at time of use Loading .gitignore and compiling the exclude regex can take more than 15ms. We shouldn't and don't need to pay this cost if we're simply formatting files given on the command line directly. I would've loved to lazily import pathspec, but the patch won't be clean until the file collection and discovery logic is refactored first. Co-authored-by: Fabio Zadrozny * Delay worker count determination os.cpu_count() can return None (sounds like a super arcane edge case though) so the type annotation for the `workers` parameter of `black.main` is wrong. This *could* technically cause a runtime TypeError since it'd trip one of mypyc's runtime type checks so we might as well fix it. Reading the documentation (and cross-checking with the source code), you are actually allowed to pass None as `max_workers` to `concurrent.futures.ProcessPoolExecutor`. If it is None, the pool initializer will simply call os.cpu_count() [^1] (defaulting to 1 if it returns None [^2]). It'll even round down the worker count to a level that's safe for Windows. ... so theoretically we don't even need to call os.cpu_count() ourselves, but our Windows limit is 60 (unlike the stdlib's 61) and I'd prefer not accidentally reintroducing a crash on machines with many, many CPU cores. [^1]: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ProcessPoolExecutor [^2]: https://github.com/python/cpython/blob/a372a7d65320396d44e8beb976e3a6c382963d4e/Lib/concurrent/futures/process.py#L600 * Add parens around implicit string concatenations where it increases readability (#3162) Adds parentheses around implicit string concatenations when it's inside a list, set, or tuple, except when it's the only element and there's no trailing comma. Looking at the order of the transformers here, we need to "wrap in parens" before string_split runs. So my solution is to introduce a "collaboration" between StringSplitter and StringParenWrapper where the splitter "skips" the split until the wrapper adds the parens (and then the line after the paren is split by StringSplitter) in another pass. I have also considered an alternative approach, where I tried to add a different "string paren wrapper" class, and it runs before string_split. Then I found out it requires a different do_transform implementation than StringParenWrapper.do_transform, since the latter assumes it runs after the delimiter_split transform. So I stopped researching that route. Originally function calls were also included in this change, but given missing commas should usually result in a runtime error and the scary amount of changes this would cause on downstream code, they were removed in later revisions.
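A constructed before/after illustration of the #3162 preview behavior just described (not taken from the PR's test data, and the exact output may differ slightly):

```python
# Before: an implicit string concatenation as one element of a list.
value = [
    "implicit "
    "concatenation",
    "second element",
]

# After, roughly, with --preview: the concatenation is wrapped in parens so
# it reads as a single element rather than two elements missing a comma.
value = [
    (
        "implicit "
        "concatenation"
    ),
    "second element",
]
```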
* Use strict mypy checking (#3222) Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Use .gitignore files in the initial source directories (#3237) Solves https://github.com/psf/black/issues/2598 where Black wouldn't use .gitignore at folder/.gitignore if you ran `black folder` for example. Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> * Improve & update release process to reflect recent changes (#3242) - Formalise release cadence guidelines - Overhaul release steps to be easier to follow and more thorough - Reorder changelog template to something more sensible - Update release automation docs to reflect recent improvements (notably the addition of in-repo mypyc wheel builds) Co-authored-by: Felix Hildén Co-authored-by: Jelle Zijlstra * Update stable branch after publishing to PyPI (#3223) We've decided to a) convert stable back into a branch and b) to update it immediately as part of the release process. We may as well automate it. And about going back to a branch ... Git tags are not the right tool, at all[^1]. They come with the expectation that they will never change. Things will not work as expected if they do change, doubly so if they change regularly. Once you pull stable from the remote and it's copied in your local repository, no matter how many times you run git pull you'll never see it get updated automatically. Your only recourse is to delete the tag via `git tag -d stable` before pulling. This gets annoying really quickly since stable is supposed to be the solution for folks "who want to move along as Black developers deem the newest version reliable."[^2] See this comment for how this impacts users using our Vim plugin[^3]. It also affects us developers[^4]. If you have stable locally, once we cut a new release and update the stable tag, a simple `git pull` / `git fetch` will not pull down the updated stable tag. Unless you remember to delete stable before pulling, stable will become stale and useless. You can argue this is a good thing ("people should explicitly opt into updating stable"), but IMO it does not match user expectations nor developer expectations[^5]. Especially since not all our integrations that use stable are bound by this security measure, for example our GitHub Action (since it does a clean fetch of the repository every time it's used). I believe consistency would be good here. Finally, ever since we switched to a tag, we've been facing issues with ReadTheDocs not picking up updates to stable unless we force a rebuild. The initial rebuild on the stable update just pulls the commit the tag previously pointed to. I'm not sure if switching back to a branch will fix this, but I'd wager it will. [^1]: https://git-scm.com/docs/git-tag#_on_re_tagging [^2]: https://black.readthedocs.io/en/stable/contributing/release_process.html#moving-the-stable-tag [^3]: https://github.com/psf/black/issues/2503#issuecomment-1196357379 [^4]: In fairness, most folks working on Black probably don't use the `stable` ref anyway, especially us maintainers who'd know what is the latest version by heart, but it'd still be nice to make it usable for local dev though. [^5]: Also what benefit does a `stable` ref have over explicit version tags like `22.6.0`? If you're going to opt into some odd pin mechanism, might as well use explicit version tags for clarity and consistency. 
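A rough sketch of the behavior #3237 (earlier in this list) describes, assuming pathspec's gitwildmatch API; `gitignore_for` is an illustrative helper, not Black's actual function:

```python
# Rough sketch of #3237: also honor a .gitignore found in the source
# directory itself (e.g. folder/.gitignore when running `black folder`),
# not just the one at the project root.
from pathlib import Path

from pathspec import PathSpec


def gitignore_for(directory: Path) -> PathSpec:
    gitignore = directory / ".gitignore"
    lines = gitignore.read_text("utf-8").splitlines() if gitignore.is_file() else []
    return PathSpec.from_lines("gitwildmatch", lines)


spec = gitignore_for(Path("folder"))
# Paths are matched relative to the directory the .gitignore lives in.
print(spec.match_file("build/generated.py"))
```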
* Prepare docs for release 22.8.0 (#3248)

* docs: adds ExitStack alternative to future_style.md (#3247)

Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com>
Co-authored-by: Jelle Zijlstra

* Add preview flag to Vim plugin (#3246)

This allows configuring the --preview flag in the Vim plugin.

* Mitigate deprecation of aiohttp's `@middleware` decorator (#3259)

The decorator is deprecated as of aiohttp 4.0. If it doesn't exist, just define a no-op decorator that does nothing (after the other aiohttp imports, though!). By doing this, it's safe to ignore the DeprecationWarning without needing to require the latest aiohttp once they remove `@middleware`.

* Move 3.11 tests to install aiohttp without C extensions (#3258)

* Move 311 tests to install aiohttp without C extensions

- Configure tox to install aiohttp without extensions
- i.e. use `AIOHTTP_NO_EXTENSIONS=1` for pip install
- This allows us to re-enable blackd tests that use aiohttp testing helpers etc.
- Had to ignore the `cgi` module deprecation warning
- Filed an issue for aiohttp to fix: https://github.com/aio-libs/aiohttp/issues/6905

Test:
- `/tmp/tb/bin/tox -e 311`

* Fix formatting + linting

* Add latest aiohttp for loop fix + Try to exempt deprecation warning but failed - will ask for help

* Remove unnecessary warning ignore

Co-authored-by: Cooper Ry Lees
Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com>

* [FIX] migrate-black.py: don't fail on binary files (#3266)

* Fix a crash on dicts with paren-wrapped long string keys (#3262)

Fix a crash when formatting some dicts with parenthesis-wrapped long string keys. When LL[0] is an atom string, we need to check the atom node's siblings instead of LL[0] itself, e.g.:

dictsetmaker
  atom
    STRING '"This is a really long string that can\'t be expected to fit in one line and is used as a nested dict\'s key"'
  /atom
  COLON ':'
  atom
    LSQB ' ' '['
    listmaker
      STRING '"value"'
      COMMA ','
      STRING ' ' '"value"'
    /listmaker
    RSQB ']'
  /atom
  COMMA ','
/dictsetmaker

* Improve order of paragraphs on line splitting (#3270)

These two paragraphs were tucked away at the end of the section, after the diversion on backslashes. I nearly missed the first paragraph and opened a nonsense issue, and I think the second belongs higher up with it too.

* Fix mypyc build errors on newer manylinux2014_x86_64 images (#3272)

Make sure `gcc` is installed in the build env. The mypyc build requires `gcc` to be installed even if it's being built with `clang`; otherwise `clang` fails to find `libgcc`.

* Bump furo from 2022.6.21 to 2022.9.15 in /docs (#3277)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Build mypyc wheels for CPython 3.11 (#3276)

Bumps cibuildwheel from 2.8.1 to 2.10.0, which has 3.11 building enabled by default. Unfortunately mypyc errors out on 3.11:

src/black/files.py:29:9: error: Name "tomllib" already defined (by an import) [no-redef]

... so we also have to hide the fallback import of tomli on older 3.11 alphas from mypy[c].

* Make context manager examples in future style docs consistent (#3274)

* Support version specifiers in GH action (#3265)

Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com>

* Fix a crash when `# fmt: on` is used on a different block level than `# fmt: off` (#3281)

Previously, _Black_ produced invalid code because `# fmt: on` was used at a different block level.
While _Black_ requires `# fmt: off` and `# fmt: on` to be used at the same block level, incorrect usage shouldn't cause crashes. The behavior this PR introduces: when `# fmt: on` is used at a different level, or there is no `# fmt: on` at all, formatting is turned off for all code below the initial `# fmt: off` block level. This also matches the current behavior when `# fmt: off` is used at the top level without a matching `# fmt: on`: it turns off formatting for everything below `# fmt: off`.

- Fixes #2567
- Fixes #3184
- Fixes #2985
- Fixes #2882
- Fixes #2232
- Fixes #2140
- Fixes #1817
- Fixes #569

* Make README logo link to docs (#3285)

docs: Make README logo link to docs

* Switch build backend to Hatchling (#3233)

This implements PEP 621, obviating the need for `setup.py`, `setup.cfg`, and `MANIFEST.in`. The build backend Hatchling (of which I am a maintainer in the PyPA) is now used, as it is the default in the official Python packaging tutorial. Hatchling is available on all the major distribution channels such as Debian, Fedora, and many more.

## Python support

The earliest Python 3 version supported by Hatchling is 3.7, therefore I've also set that as the minimum here. Python 3.6 is EOL, and other build backends like flit-core and setuptools have also dropped support. Python 3.6 accounted for 3-4% of downloads in the last month.

## Plugins

Configuration is now completely static with the help of 3 plugins:

### Readme

hynek's hatch-fancy-pypi-readme allows for the dynamic construction of the readme, which was previously coded up in `setup.py`. Now it's simply:

```toml
[tool.hatch.metadata.hooks.fancy-pypi-readme]
content-type = "text/markdown"
fragments = [
  { path = "README.md" },
  { path = "CHANGES.md" },
]
```

### Versioning

hatch-vcs is currently just a wrapper around setuptools-scm (which, despite the legacy naming, is actually now decoupled from setuptools):

```toml
[tool.hatch.version]
source = "vcs"

[tool.hatch.build.hooks.vcs]
version-file = "src/_black_version.py"
template = '''
version = "{version}"
'''
```

### mypyc

hatch-mypyc offers many benefits over the existing approach:

- No need to manually select files for inclusion
- Avoids the need for the current CI workaround for https://github.com/mypyc/mypyc/issues/946
- Intermediate artifacts (like `build/`) from setuptools and mypyc itself no longer clutter the project directory
- Runtime dependencies required at build time no longer need to be manually redeclared, as this is a built-in option of Hatchling

Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com>
Co-authored-by: Jelle Zijlstra

* Fix outdated references to 3.6 and run pyupgrade (#3286)

I also missed the accidental removal of the 3.11 classifier in that PR.

* Always call freeze_support() if sys.frozen is True (#3275)

* Bump actions/upload-artifact from 2 to 3 (#3289)

updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Bump sphinx from 5.1.1 to 5.2.1 in /docs (#3288)

Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 5.1.1 to 5.2.1.
- [Release notes](https://github.com/sphinx-doc/sphinx/releases)
- [Changelog](https://github.com/sphinx-doc/sphinx/blob/5.x/CHANGES)
- [Commits](https://github.com/sphinx-doc/sphinx/compare/v5.1.1...v5.2.1)

---
updated-dependencies:
- dependency-name: sphinx
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Add option to format Jupyter Notebooks in GitHub Action (#3282)

To run the formatter on Jupyter Notebooks, Black must be installed with an extra dependency (`black[jupyter]`). This commit adds an optional argument to install Black with this dependency when using the official GitHub Action [1]. To enable the formatter on Jupyter Notebooks, just add `jupyter: true` as an argument. Feature requested at [2].

[1]: https://black.readthedocs.io/en/stable/integrations/github_actions.html
[2]: https://github.com/psf/black/issues/3280

Signed-off-by: Antonio Ossa Guerra

* Mention CHANGES.md in PR template explicitly (#3295)

This makes the location more explicit, which hopefully makes the PR process smoother for other first-time contributors.

Co-authored-by: Jelle Zijlstra

* Bump pypa/cibuildwheel from 2.10.0 to 2.10.2 (#3290)

updated-dependencies:
- dependency-name: pypa/cibuildwheel
  dependency-type: direct:production
  update-type: version-update:semver-patch

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Enable build isolation under CIWB (#3297)

No idea how this is still here after the Hatchling PR, but it is no longer useful and is breaking the build.

* Add .ipynb_checkpoints to DEFAULT_EXCLUDES (#3293)

Jupyter creates a checkpoint file every time you create an .ipynb file, and then updates it every time you manually save your progress for the initial .ipynb. These checkpoints are stored in a directory named `.ipynb_checkpoints`.

Co-authored-by: Batuhan Taskaya

* Bump myst-parser from 0.18.0 to 0.18.1 in /docs (#3303)

Bumps [myst-parser](https://github.com/executablebooks/MyST-Parser) from 0.18.0 to 0.18.1.

- [Release notes](https://github.com/executablebooks/MyST-Parser/releases)
- [Changelog](https://github.com/executablebooks/MyST-Parser/blob/master/CHANGELOG.md)
- [Commits](https://github.com/executablebooks/MyST-Parser/compare/v0.18.0...v0.18.1)

---
updated-dependencies:
- dependency-name: myst-parser
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Bump furo from 2022.9.15 to 2022.9.29 in /docs (#3304)

Bumps [furo](https://github.com/pradyunsg/furo) from 2022.9.15 to 2022.9.29.

- [Release notes](https://github.com/pradyunsg/furo/releases)
- [Changelog](https://github.com/pradyunsg/furo/blob/main/docs/changelog.md)
- [Commits](https://github.com/pradyunsg/furo/compare/2022.09.15...2022.09.29)

---
updated-dependencies:
- dependency-name: furo
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Cooper Lees

* Bump sphinx from 5.2.1 to 5.2.3 in /docs (#3305)

Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 5.2.1 to 5.2.3.
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/5.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v5.2.1...v5.2.3) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump docutils from 0.18.1 to 0.19 in /docs (#3161) Bumps [docutils](https://docutils.sourceforge.io/) from 0.18.1 to 0.19. --- updated-dependencies: - dependency-name: docutils dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Preserve crlf line endings in blackd (#3257) Co-authored-by: KotlinIsland * bump from upstream Signed-off-by: dependabot[bot] Signed-off-by: naveen <172697+naveensrinivasan@users.noreply.github.com> Signed-off-by: Antonio Ossa Guerra Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Batuhan Taskaya Co-authored-by: Mike Taves Co-authored-by: Taneli Hukkinen <3275109+hukkin@users.noreply.github.com> Co-authored-by: Marco Edward Gorelli Co-authored-by: Łukasz Langa Co-authored-by: Felix Hildén Co-authored-by: Jelle Zijlstra Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com> Co-authored-by: Miro Hrončok Co-authored-by: Gunung Pambudi Wibisono <55311527+gunungpw@users.noreply.github.com> Co-authored-by: Josh Owen Co-authored-by: Shivansh-007 Co-authored-by: cbows <32486983+cbows@users.noreply.github.com> Co-authored-by: Jeffrey Lazar Co-authored-by: Jeffrey Lazar Co-authored-by: VanSHOE <75690289+VanSHOE@users.noreply.github.com> Co-authored-by: emfdavid <84335963+emfdavid@users.noreply.github.com> Co-authored-by: Michael Marino Co-authored-by: Rob Hammond <13874373+RHammond2@users.noreply.github.com> Co-authored-by: Perry Vargas Co-authored-by: Shantanu <12621235+hauntsaninja@users.noreply.github.com> Co-authored-by: Cooper Lees Co-authored-by: Nikita Sobolev Co-authored-by: Diego Co-authored-by: Nipunn Koorapati Co-authored-by: Frédérik Paradis Co-authored-by: S. Co1 Co-authored-by: Peter Mescalchin Co-authored-by: Paolo Melchiorre Co-authored-by: Joachim Jablon Co-authored-by: Xuan (Sean) Hu Co-authored-by: Laurent Lyaudet Co-authored-by: Frédérik Paradis Co-authored-by: D. 
Ben Knoble Co-authored-by: Tomáš Jelínek Co-authored-by: oncomouse Co-authored-by: yoerg <73831825+yoerg@users.noreply.github.com> Co-authored-by: Joseph Young <80432516+jpy-git@users.noreply.github.com> Co-authored-by: Jan-Hendrik Müller <44469195+kolibril13@users.noreply.github.com> Co-authored-by: Hugo van Kemenade Co-authored-by: Ryan Siu Co-authored-by: Sam Ezeh Co-authored-by: JiriKr <33967184+JiriKr@users.noreply.github.com> Co-authored-by: Vadim Nikolaev Co-authored-by: Cooper Lees Co-authored-by: Naveen <172697+naveensrinivasan@users.noreply.github.com> Co-authored-by: Zac Hatfield-Dodds Co-authored-by: Iain Dorrington Co-authored-by: Sagi Shadur Co-authored-by: laundmo Co-authored-by: Yusuke Nishioka Co-authored-by: Holger Brunn Co-authored-by: Vivek Vashist Co-authored-by: Nate Prewitt Co-authored-by: Yilei "Dolee" Yang Co-authored-by: Dimitri Merejkowsky Co-authored-by: Jakub Kuczys <6032823+jack1142@users.noreply.github.com> Co-authored-by: Maciej Olko Co-authored-by: Nimrod <87605179+Panther-12@users.noreply.github.com> Co-authored-by: Cooper Ry Lees Co-authored-by: Thomas Grainger Co-authored-by: onescriptkid Co-authored-by: Yilei "Dolee" Yang Co-authored-by: Théophile Bastian Co-authored-by: Nicolò Intrieri <81313286+n-borges@users.noreply.github.com> Co-authored-by: Nicolò Co-authored-by: Tom Fryers <61272761+TomFryers@users.noreply.github.com> Co-authored-by: Alexandr Artemyev Co-authored-by: Alexander Huynh Co-authored-by: Ionite Co-authored-by: Fabio Zadrozny Co-authored-by: Martin de La Gorce Co-authored-by: James Salvatore Co-authored-by: PeterGrossmann Co-authored-by: Zsolt Dollenstein Co-authored-by: Blandes22 <96037855+Blandes22@users.noreply.github.com> Co-authored-by: Jakub Kuczys Co-authored-by: Stijn de Gooijer Co-authored-by: Ofek Lev Co-authored-by: Antonio Ossa-Guerra Co-authored-by: Ray Bell Co-authored-by: KotlinIsland <65446343+KotlinIsland@users.noreply.github.com> Co-authored-by: KotlinIsland --- .coveragerc | 4 + .flake8 | 6 +- .github/ISSUE_TEMPLATE/bug_report.md | 71 +- .github/ISSUE_TEMPLATE/config.yml | 12 + .github/ISSUE_TEMPLATE/docs-issue.md | 27 + .github/ISSUE_TEMPLATE/feature_request.md | 26 +- .github/ISSUE_TEMPLATE/style_issue.md | 22 +- .github/PULL_REQUEST_TEMPLATE.md | 36 + .github/dependabot.yml | 17 + .github/workflows/changelog.yml | 24 + .github/workflows/diff_shades.yml | 155 + .github/workflows/diff_shades_comment.yml | 49 + .github/workflows/doc.yml | 46 +- .github/workflows/docker.yml | 56 + .github/workflows/fuzz.yml | 27 +- .github/workflows/lint.yml | 28 +- .github/workflows/primer.yml | 31 - .github/workflows/pypi_upload.yml | 93 + .github/workflows/test-311.yml | 57 + .github/workflows/test.yml | 94 +- .github/workflows/upload_binary.yml | 63 + .gitignore | 11 +- .pre-commit-config.yaml | 63 +- .pre-commit-hooks.yaml | 14 +- .prettierrc.yaml | 3 + .readthedocs.yaml | 18 + AUTHORS.md | 194 + CHANGES.md | 699 +- CONTRIBUTING.md | 66 +- Dockerfile | 21 +- README.md | 637 +- action.yml | 45 +- action/main.py | 45 + autoload/black.vim | 225 + docs/Makefile | 2 +- docs/_static/custom.css | 38 - docs/_static/license.svg | 2 +- docs/_static/logo2-readme.png | Bin 80754 -> 99591 bytes docs/authors.md | 185 +- docs/change_log.md | 504 +- docs/compatible_configs/flake8/.flake8 | 3 + docs/compatible_configs/flake8/setup.cfg | 3 + docs/compatible_configs/flake8/tox.ini | 3 + docs/compatible_configs/isort/.editorconfig | 2 + docs/compatible_configs/isort/.isort.cfg | 2 + docs/compatible_configs/isort/pyproject.toml | 2 + 
docs/compatible_configs/isort/setup.cfg | 2 + docs/compatible_configs/pylint/pylintrc | 2 + docs/compatible_configs/pylint/pyproject.toml | 2 + docs/compatible_configs/pylint/setup.cfg | 2 + docs/conf.py | 271 +- docs/contributing/gauging_changes.md | 58 + docs/contributing/index.md | 49 + docs/contributing/issue_triage.md | 169 + .../reference/reference_classes.rst | 6 +- .../reference/reference_exceptions.rst | 2 +- .../reference/reference_functions.rst | 180 + .../reference/reference_summary.rst | 5 + docs/contributing/release_process.md | 212 + docs/contributing/the_basics.md | 80 + docs/faq.md | 138 + docs/getting_started.md | 48 + docs/guides/index.md | 16 + .../introducing_black_to_your_project.md | 52 + .../using_black_with_other_tools.md} | 142 +- docs/index.md | 139 + docs/integrations/editors.md | 317 + docs/integrations/github_actions.md | 70 + docs/integrations/index.md | 31 + docs/integrations/source_version_control.md | 34 + docs/license.md | 9 + docs/requirements.txt | 12 +- .../current_style.md} | 223 +- docs/the_black_code_style/future_style.md | 133 + docs/the_black_code_style/index.md | 52 + .../black_as_a_server.md} | 27 +- .../black_docker_image.md | 46 + .../file_collection_and_discovery.md | 38 + docs/usage_and_configuration/index.md | 28 + docs/usage_and_configuration/the_basics.md | 297 + gallery/gallery.py | 29 +- mypy.ini | 64 +- plugin/black.vim | 209 +- pyproject.toml | 212 +- readthedocs.yml | 7 - scripts/__init__.py | 0 scripts/check_pre_commit_rev_in_example.py | 54 + scripts/check_version_in_basics_example.py | 47 + scripts/diff_shades_gha_helper.py | 227 + scripts/fuzz.py | 92 + scripts/migrate-black.py | 96 + setup.cfg | 2 - src/black/__init__.py | 6707 ++--------------- src/black/brackets.py | 342 + src/black/cache.py | 97 + src/black/comments.py | 332 + src/black/concurrency.py | 191 + src/black/const.py | 4 + src/black/debug.py | 47 + src/black/files.py | 287 + src/black/handle_ipynb_magics.py | 459 ++ src/black/linegen.py | 1294 ++++ src/black/lines.py | 802 ++ src/black/mode.py | 216 + src/black/nodes.py | 850 +++ src/black/numerics.py | 60 + src/black/output.py | 105 + src/black/parsing.py | 278 + src/black/py.typed | 1 - src/black/report.py | 106 + src/black/rusty.py | 27 + src/black/strings.py | 238 + src/black/trans.py | 2222 ++++++ src/black_primer/cli.py | 136 - src/black_primer/lib.py | 332 - src/blackd/__init__.py | 61 +- src/blackd/__main__.py | 3 + src/blackd/middlewares.py | 45 + src/blib2to3/Grammar.txt | 75 +- src/blib2to3/README | 9 +- src/blib2to3/pgen2/conv.py | 2 +- src/blib2to3/pgen2/driver.py | 112 +- src/blib2to3/pgen2/grammar.py | 4 + src/blib2to3/pgen2/parse.py | 240 +- src/blib2to3/pgen2/pgen.py | 15 +- src/blib2to3/pgen2/token.py | 7 +- src/blib2to3/pgen2/tokenize.py | 39 +- src/blib2to3/pygram.py | 25 +- src/blib2to3/pytree.py | 44 +- test_requirements.txt | 6 + tests/conftest.py | 1 + .../data/fast/pep_572_do_not_remove_parens.py | 21 + tests/data/force_pyi.py | 6 - tests/data/include_exclude_tests/.gitignore | 1 + .../data/include_exclude_tests/pyproject.toml | 3 + tests/data/invalid_gitignore_tests/.gitignore | 1 + tests/data/invalid_gitignore_tests/a.py | 0 .../invalid_gitignore_tests/pyproject.toml | 1 + .../data/invalid_nested_gitignore_tests/a.py | 0 .../a/.gitignore | 1 + .../invalid_nested_gitignore_tests/a/a.py | 0 .../pyproject.toml | 1 + tests/data/jupyter/non_python_notebook.ipynb | 1 + .../jupyter/notebook_empty_metadata.ipynb | 27 + .../notebook_no_trailing_newline.ipynb | 39 + 
.../jupyter/notebook_trailing_newline.ipynb | 39 + .../notebook_which_cant_be_parsed.ipynb | 1 + .../jupyter/notebook_without_changes.ipynb | 46 + .../async_as_identifier.py | 0 .../data/{ => miscellaneous}/blackd_diff.diff | 0 tests/data/{ => miscellaneous}/blackd_diff.py | 0 .../{ => miscellaneous}/debug_visitor.out | 0 .../data/{ => miscellaneous}/debug_visitor.py | 0 tests/data/miscellaneous/decorators.py | 182 + .../docstring_no_string_normalization.py | 249 + ...cstring_preview_no_string_normalization.py | 10 + .../expression_skip_magic_trailing_comma.diff | 447 ++ tests/data/{ => miscellaneous}/force_py36.py | 0 tests/data/miscellaneous/force_pyi.py | 65 + .../long_strings_flag_disabled.py | 292 + .../miscellaneous/missing_final_newline.diff | 8 + .../miscellaneous/missing_final_newline.py | 3 + .../miscellaneous/pattern_matching_invalid.py | 18 + tests/data/miscellaneous/power_op_newline.py | 10 + tests/data/miscellaneous/python2_detection.py | 90 + .../data/{ => miscellaneous}/string_quotes.py | 10 + tests/data/miscellaneous/stub.pyi | 151 + .../nested_gitignore_tests/pyproject.toml | 3 + .../nested_gitignore_tests/root/.gitignore | 1 + tests/data/nested_gitignore_tests/root/a.py | 1 + tests/data/nested_gitignore_tests/root/b.py | 1 + tests/data/nested_gitignore_tests/root/c.py | 1 + .../root/child/.gitignore | 1 + .../nested_gitignore_tests/root/child/a.py | 1 + .../nested_gitignore_tests/root/child/b.py | 1 + .../nested_gitignore_tests/root/child/c.py | 1 + tests/data/nested_gitignore_tests/x.py | 0 tests/data/numeric_literals_py2.py | 16 - tests/data/{ => preview}/cantfit.py | 0 tests/data/preview/comments7.py | 285 + tests/data/preview/comments8.py | 15 + tests/data/preview/docstring_preview.py | 105 + tests/data/{ => preview}/long_strings.py | 215 +- .../{ => preview}/long_strings__edge_case.py | 30 + .../{ => preview}/long_strings__regression.py | 504 +- tests/data/preview/one_element_subscript.py | 36 + tests/data/preview/percent_precedence.py | 41 + tests/data/preview/remove_await_parens.py | 168 + tests/data/preview/remove_except_parens.py | 79 + tests/data/preview/remove_for_brackets.py | 48 + .../remove_newline_after_code_block_open.py | 189 + .../preview/return_annotation_brackets.py | 222 + .../data/preview/skip_magic_trailing_comma.py | 34 + .../preview_310/remove_newline_after_match.py | 34 + tests/data/preview_39/remove_with_brackets.py | 119 + .../py_310/parenthesized_context_managers.py | 21 + tests/data/py_310/pattern_matching_complex.py | 144 + tests/data/py_310/pattern_matching_extras.py | 119 + tests/data/py_310/pattern_matching_generic.py | 107 + tests/data/py_310/pattern_matching_simple.py | 92 + tests/data/py_310/pattern_matching_style.py | 91 + tests/data/py_310/pep_572_py310.py | 4 + tests/data/py_310/starred_for_target.py | 27 + tests/data/py_311/pep_646.py | 194 + tests/data/py_311/pep_654.py | 53 + tests/data/py_311/pep_654_style.py | 111 + tests/data/{ => py_36}/numeric_literals.py | 0 .../numeric_literals_skip_underscores.py | 0 tests/data/{ => py_37}/python37.py | 0 tests/data/{ => py_38}/pep_570.py | 0 tests/data/{ => py_38}/pep_572.py | 6 +- tests/data/py_38/pep_572_remove_parens.py | 105 + tests/data/py_38/python38.py | 45 + tests/data/py_39/pep_572_py39.py | 7 + tests/data/py_39/python39.py | 37 + tests/data/python2.py | 33 - tests/data/python2_print_function.py | 16 - tests/data/python2_unicode_literals.py | 20 - tests/data/python38.py | 27 - .../attribute_access_on_number_literals.py | 47 + .../{ => simple_cases}/beginning_backslash.py | 0 
tests/data/{ => simple_cases}/bracketmatch.py | 0 .../class_blank_parentheses.py | 0 .../class_methods_new_line.py | 0 tests/data/{ => simple_cases}/collections.py | 0 .../comment_after_escaped_newline.py | 0 tests/data/{ => simple_cases}/comments.py | 0 tests/data/{ => simple_cases}/comments2.py | 4 +- tests/data/{ => simple_cases}/comments3.py | 0 tests/data/{ => simple_cases}/comments4.py | 0 tests/data/{ => simple_cases}/comments5.py | 0 tests/data/{ => simple_cases}/comments6.py | 0 .../comments_non_breaking_space.py | 44 + tests/data/{ => simple_cases}/composition.py | 0 .../composition_no_trailing_comma.py | 367 + tests/data/simple_cases/docstring.py | 433 ++ tests/data/{ => simple_cases}/empty_lines.py | 0 tests/data/{ => simple_cases}/expression.diff | 81 +- tests/data/{ => simple_cases}/expression.py | 58 +- tests/data/{ => simple_cases}/fmtonoff.py | 0 tests/data/{ => simple_cases}/fmtonoff2.py | 0 tests/data/{ => simple_cases}/fmtonoff3.py | 0 tests/data/{ => simple_cases}/fmtonoff4.py | 0 tests/data/simple_cases/fmtonoff5.py | 158 + tests/data/simple_cases/fmtskip.py | 3 + tests/data/simple_cases/fmtskip2.py | 17 + tests/data/simple_cases/fmtskip3.py | 20 + tests/data/simple_cases/fmtskip4.py | 13 + tests/data/simple_cases/fmtskip5.py | 22 + tests/data/simple_cases/fmtskip6.py | 13 + tests/data/simple_cases/fmtskip7.py | 11 + tests/data/simple_cases/fmtskip8.py | 62 + tests/data/{ => simple_cases}/fstring.py | 0 tests/data/{ => simple_cases}/function.py | 0 tests/data/{ => simple_cases}/function2.py | 63 + .../simple_cases/function_trailing_comma.py | 153 + .../data/{ => simple_cases}/import_spacing.py | 0 tests/data/simple_cases/power_op_spacing.py | 131 + .../data/{ => simple_cases}/remove_parens.py | 2 - tests/data/{ => simple_cases}/slices.py | 2 +- tests/data/simple_cases/string_prefixes.py | 45 + tests/data/simple_cases/torture.py | 91 + .../trailing_comma_optional_parens1.py | 63 + .../trailing_comma_optional_parens2.py | 12 + .../trailing_comma_optional_parens3.py | 21 + .../tricky_unicode_symbols.py | 3 + tests/data/{ => simple_cases}/tupleassign.py | 0 tests/data/string_prefixes.py | 18 - tests/data/stub.pyi | 35 - tests/optional.py | 124 + tests/test.toml | 7 + tests/test_black.py | 2507 +++--- tests/test_blackd.py | 228 + tests/test_format.py | 177 + tests/test_ipynb.py | 524 ++ tests/test_no_ipynb.py | 37 + tests/test_primer.py | 216 - tests/test_trans.py | 51 + tests/util.py | 161 + tox.ini | 100 + 280 files changed, 24270 insertions(+), 10470 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/docs-issue.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/changelog.yml create mode 100644 .github/workflows/diff_shades.yml create mode 100644 .github/workflows/diff_shades_comment.yml create mode 100644 .github/workflows/docker.yml delete mode 100644 .github/workflows/primer.yml create mode 100644 .github/workflows/pypi_upload.yml create mode 100644 .github/workflows/test-311.yml create mode 100644 .github/workflows/upload_binary.yml create mode 100644 .prettierrc.yaml create mode 100644 .readthedocs.yaml create mode 100644 AUTHORS.md create mode 100644 action/main.py create mode 100644 autoload/black.vim delete mode 100644 docs/_static/custom.css create mode 100644 docs/compatible_configs/flake8/.flake8 create mode 100644 docs/compatible_configs/flake8/setup.cfg create mode 100644 docs/compatible_configs/flake8/tox.ini create 
mode 100644 docs/compatible_configs/isort/.editorconfig create mode 100644 docs/compatible_configs/isort/.isort.cfg create mode 100644 docs/compatible_configs/isort/pyproject.toml create mode 100644 docs/compatible_configs/isort/setup.cfg create mode 100644 docs/compatible_configs/pylint/pylintrc create mode 100644 docs/compatible_configs/pylint/pyproject.toml create mode 100644 docs/compatible_configs/pylint/setup.cfg create mode 100644 docs/contributing/gauging_changes.md create mode 100644 docs/contributing/index.md create mode 100644 docs/contributing/issue_triage.md rename docs/{ => contributing}/reference/reference_classes.rst (89%) rename docs/{ => contributing}/reference/reference_exceptions.rst (80%) create mode 100644 docs/contributing/reference/reference_functions.rst rename docs/{ => contributing}/reference/reference_summary.rst (51%) create mode 100644 docs/contributing/release_process.md create mode 100644 docs/contributing/the_basics.md create mode 100644 docs/faq.md create mode 100644 docs/getting_started.md create mode 100644 docs/guides/index.md create mode 100644 docs/guides/introducing_black_to_your_project.md rename docs/{compatible_configs.md => guides/using_black_with_other_tools.md} (64%) create mode 100644 docs/index.md create mode 100644 docs/integrations/editors.md create mode 100644 docs/integrations/github_actions.md create mode 100644 docs/integrations/index.md create mode 100644 docs/integrations/source_version_control.md create mode 100644 docs/license.md rename docs/{the_black_code_style.md => the_black_code_style/current_style.md} (70%) create mode 100644 docs/the_black_code_style/future_style.md create mode 100644 docs/the_black_code_style/index.md rename docs/{blackd.md => usage_and_configuration/black_as_a_server.md} (81%) create mode 100644 docs/usage_and_configuration/black_docker_image.md create mode 100644 docs/usage_and_configuration/file_collection_and_discovery.md create mode 100644 docs/usage_and_configuration/index.md create mode 100644 docs/usage_and_configuration/the_basics.md delete mode 100644 readthedocs.yml create mode 100644 scripts/__init__.py create mode 100644 scripts/check_pre_commit_rev_in_example.py create mode 100644 scripts/check_version_in_basics_example.py create mode 100644 scripts/diff_shades_gha_helper.py create mode 100644 scripts/fuzz.py create mode 100755 scripts/migrate-black.py delete mode 100644 setup.cfg create mode 100644 src/black/brackets.py create mode 100644 src/black/cache.py create mode 100644 src/black/comments.py create mode 100644 src/black/concurrency.py create mode 100644 src/black/const.py create mode 100644 src/black/debug.py create mode 100644 src/black/files.py create mode 100644 src/black/handle_ipynb_magics.py create mode 100644 src/black/linegen.py create mode 100644 src/black/lines.py create mode 100644 src/black/mode.py create mode 100644 src/black/nodes.py create mode 100644 src/black/numerics.py create mode 100644 src/black/output.py create mode 100644 src/black/parsing.py create mode 100644 src/black/report.py create mode 100644 src/black/rusty.py create mode 100644 src/black/strings.py create mode 100644 src/black/trans.py delete mode 100644 src/black_primer/cli.py delete mode 100644 src/black_primer/lib.py create mode 100644 src/blackd/__main__.py create mode 100644 src/blackd/middlewares.py create mode 100644 test_requirements.txt create mode 100644 tests/conftest.py create mode 100644 tests/data/fast/pep_572_do_not_remove_parens.py delete mode 100644 tests/data/force_pyi.py create mode 
100644 tests/data/include_exclude_tests/.gitignore create mode 100644 tests/data/include_exclude_tests/pyproject.toml create mode 100644 tests/data/invalid_gitignore_tests/.gitignore create mode 100644 tests/data/invalid_gitignore_tests/a.py create mode 100644 tests/data/invalid_gitignore_tests/pyproject.toml create mode 100644 tests/data/invalid_nested_gitignore_tests/a.py create mode 100644 tests/data/invalid_nested_gitignore_tests/a/.gitignore create mode 100644 tests/data/invalid_nested_gitignore_tests/a/a.py create mode 100644 tests/data/invalid_nested_gitignore_tests/pyproject.toml create mode 100644 tests/data/jupyter/non_python_notebook.ipynb create mode 100644 tests/data/jupyter/notebook_empty_metadata.ipynb create mode 100644 tests/data/jupyter/notebook_no_trailing_newline.ipynb create mode 100644 tests/data/jupyter/notebook_trailing_newline.ipynb create mode 100644 tests/data/jupyter/notebook_which_cant_be_parsed.ipynb create mode 100644 tests/data/jupyter/notebook_without_changes.ipynb rename tests/data/{ => miscellaneous}/async_as_identifier.py (100%) rename tests/data/{ => miscellaneous}/blackd_diff.diff (100%) rename tests/data/{ => miscellaneous}/blackd_diff.py (100%) rename tests/data/{ => miscellaneous}/debug_visitor.out (100%) rename tests/data/{ => miscellaneous}/debug_visitor.py (100%) create mode 100644 tests/data/miscellaneous/decorators.py create mode 100644 tests/data/miscellaneous/docstring_no_string_normalization.py create mode 100644 tests/data/miscellaneous/docstring_preview_no_string_normalization.py create mode 100644 tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff rename tests/data/{ => miscellaneous}/force_py36.py (100%) create mode 100644 tests/data/miscellaneous/force_pyi.py create mode 100644 tests/data/miscellaneous/long_strings_flag_disabled.py create mode 100644 tests/data/miscellaneous/missing_final_newline.diff create mode 100644 tests/data/miscellaneous/missing_final_newline.py create mode 100644 tests/data/miscellaneous/pattern_matching_invalid.py create mode 100644 tests/data/miscellaneous/power_op_newline.py create mode 100644 tests/data/miscellaneous/python2_detection.py rename tests/data/{ => miscellaneous}/string_quotes.py (81%) create mode 100644 tests/data/miscellaneous/stub.pyi create mode 100644 tests/data/nested_gitignore_tests/pyproject.toml create mode 100644 tests/data/nested_gitignore_tests/root/.gitignore create mode 100644 tests/data/nested_gitignore_tests/root/a.py create mode 100644 tests/data/nested_gitignore_tests/root/b.py create mode 100644 tests/data/nested_gitignore_tests/root/c.py create mode 100644 tests/data/nested_gitignore_tests/root/child/.gitignore create mode 100644 tests/data/nested_gitignore_tests/root/child/a.py create mode 100644 tests/data/nested_gitignore_tests/root/child/b.py create mode 100644 tests/data/nested_gitignore_tests/root/child/c.py create mode 100644 tests/data/nested_gitignore_tests/x.py delete mode 100644 tests/data/numeric_literals_py2.py rename tests/data/{ => preview}/cantfit.py (100%) create mode 100644 tests/data/preview/comments7.py create mode 100644 tests/data/preview/comments8.py create mode 100644 tests/data/preview/docstring_preview.py rename tests/data/{ => preview}/long_strings.py (73%) rename tests/data/{ => preview}/long_strings__edge_case.py (80%) rename tests/data/{ => preview}/long_strings__regression.py (56%) create mode 100644 tests/data/preview/one_element_subscript.py create mode 100644 tests/data/preview/percent_precedence.py create mode 100644 
tests/data/preview/remove_await_parens.py create mode 100644 tests/data/preview/remove_except_parens.py create mode 100644 tests/data/preview/remove_for_brackets.py create mode 100644 tests/data/preview/remove_newline_after_code_block_open.py create mode 100644 tests/data/preview/return_annotation_brackets.py create mode 100644 tests/data/preview/skip_magic_trailing_comma.py create mode 100644 tests/data/preview_310/remove_newline_after_match.py create mode 100644 tests/data/preview_39/remove_with_brackets.py create mode 100644 tests/data/py_310/parenthesized_context_managers.py create mode 100644 tests/data/py_310/pattern_matching_complex.py create mode 100644 tests/data/py_310/pattern_matching_extras.py create mode 100644 tests/data/py_310/pattern_matching_generic.py create mode 100644 tests/data/py_310/pattern_matching_simple.py create mode 100644 tests/data/py_310/pattern_matching_style.py create mode 100644 tests/data/py_310/pep_572_py310.py create mode 100644 tests/data/py_310/starred_for_target.py create mode 100644 tests/data/py_311/pep_646.py create mode 100644 tests/data/py_311/pep_654.py create mode 100644 tests/data/py_311/pep_654_style.py rename tests/data/{ => py_36}/numeric_literals.py (100%) rename tests/data/{ => py_36}/numeric_literals_skip_underscores.py (100%) rename tests/data/{ => py_37}/python37.py (100%) rename tests/data/{ => py_38}/pep_570.py (100%) rename tests/data/{ => py_38}/pep_572.py (90%) create mode 100644 tests/data/py_38/pep_572_remove_parens.py create mode 100644 tests/data/py_38/python38.py create mode 100644 tests/data/py_39/pep_572_py39.py create mode 100644 tests/data/py_39/python39.py delete mode 100644 tests/data/python2.py delete mode 100755 tests/data/python2_print_function.py delete mode 100644 tests/data/python2_unicode_literals.py delete mode 100644 tests/data/python38.py create mode 100644 tests/data/simple_cases/attribute_access_on_number_literals.py rename tests/data/{ => simple_cases}/beginning_backslash.py (100%) rename tests/data/{ => simple_cases}/bracketmatch.py (100%) rename tests/data/{ => simple_cases}/class_blank_parentheses.py (100%) rename tests/data/{ => simple_cases}/class_methods_new_line.py (100%) rename tests/data/{ => simple_cases}/collections.py (100%) rename tests/data/{ => simple_cases}/comment_after_escaped_newline.py (100%) rename tests/data/{ => simple_cases}/comments.py (100%) rename tests/data/{ => simple_cases}/comments2.py (98%) rename tests/data/{ => simple_cases}/comments3.py (100%) rename tests/data/{ => simple_cases}/comments4.py (100%) rename tests/data/{ => simple_cases}/comments5.py (100%) rename tests/data/{ => simple_cases}/comments6.py (100%) create mode 100644 tests/data/simple_cases/comments_non_breaking_space.py rename tests/data/{ => simple_cases}/composition.py (100%) create mode 100644 tests/data/simple_cases/composition_no_trailing_comma.py create mode 100644 tests/data/simple_cases/docstring.py rename tests/data/{ => simple_cases}/empty_lines.py (100%) rename tests/data/{ => simple_cases}/expression.diff (83%) rename tests/data/{ => simple_cases}/expression.py (89%) rename tests/data/{ => simple_cases}/fmtonoff.py (100%) rename tests/data/{ => simple_cases}/fmtonoff2.py (100%) rename tests/data/{ => simple_cases}/fmtonoff3.py (100%) rename tests/data/{ => simple_cases}/fmtonoff4.py (100%) create mode 100644 tests/data/simple_cases/fmtonoff5.py create mode 100644 tests/data/simple_cases/fmtskip.py create mode 100644 tests/data/simple_cases/fmtskip2.py create mode 100644 
tests/data/simple_cases/fmtskip3.py create mode 100644 tests/data/simple_cases/fmtskip4.py create mode 100644 tests/data/simple_cases/fmtskip5.py create mode 100644 tests/data/simple_cases/fmtskip6.py create mode 100644 tests/data/simple_cases/fmtskip7.py create mode 100644 tests/data/simple_cases/fmtskip8.py rename tests/data/{ => simple_cases}/fstring.py (100%) rename tests/data/{ => simple_cases}/function.py (100%) rename tests/data/{ => simple_cases}/function2.py (52%) create mode 100644 tests/data/simple_cases/function_trailing_comma.py rename tests/data/{ => simple_cases}/import_spacing.py (100%) create mode 100644 tests/data/simple_cases/power_op_spacing.py rename tests/data/{ => simple_cases}/remove_parens.py (99%) rename tests/data/{ => simple_cases}/slices.py (94%) create mode 100644 tests/data/simple_cases/string_prefixes.py create mode 100644 tests/data/simple_cases/torture.py create mode 100644 tests/data/simple_cases/trailing_comma_optional_parens1.py create mode 100644 tests/data/simple_cases/trailing_comma_optional_parens2.py create mode 100644 tests/data/simple_cases/trailing_comma_optional_parens3.py rename tests/data/{ => simple_cases}/tricky_unicode_symbols.py (76%) rename tests/data/{ => simple_cases}/tupleassign.py (100%) delete mode 100644 tests/data/string_prefixes.py delete mode 100644 tests/data/stub.pyi create mode 100644 tests/optional.py create mode 100644 tests/test_blackd.py create mode 100644 tests/test_format.py create mode 100644 tests/test_ipynb.py create mode 100644 tests/test_no_ipynb.py delete mode 100644 tests/test_primer.py create mode 100644 tests/test_trans.py create mode 100644 tests/util.py create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc index 32a8da521ba..5577e496a57 100644 --- a/.coveragerc +++ b/.coveragerc @@ -3,3 +3,7 @@ omit = src/blib2to3/* tests/data/* */site-packages/* + .tox/* + +[run] +relative_files = True diff --git a/.flake8 b/.flake8 index cee6db4446b..ae11a13347c 100644 --- a/.flake8 +++ b/.flake8 @@ -1,11 +1,7 @@ [flake8] ignore = E203, E266, E501, W503 # line length is intentionally set to 80 here because black uses Bugbear -# See https://github.com/psf/black/blob/master/README.md#line-length for more details +# See https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#line-length for more details max-line-length = 80 max-complexity = 18 select = B,C,E,F,W,T4,B9 -# We need to configure the mypy.ini because the flake8-mypy's default -# options don't properly override it, so if we don't specify it we get -# half of the config from mypy.ini and half from flake8-mypy. -mypy_config = mypy.ini diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index e652f17c94b..48aa9291b05 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,35 +1,64 @@ --- name: Bug report -about: Create a report to help us improve +about: Create a report to help us improve Black's quality title: "" -labels: bug +labels: "T: bug" assignees: "" --- -**Describe the bug** A clear and concise description of what the bug is. + -1. Take this file '...' -2. Run _Black_ on it with these arguments '....' -3. See error +**Describe the bug** -**Expected behavior** A clear and concise description of what you expected to happen. + -**Environment (please complete the following information):** +**To Reproduce** -- Version: [e.g. master] -- OS and Python version: [e.g. 
Linux/Python 3.7.4rc1] + -**Does this bug also happen on master?** To answer this, you have two options: +For example, take this code: -1. Use the online formatter at https://black.now.sh/?version=master, which will use the - latest master branch. -2. Or run _Black_ on your machine: - - create a new virtualenv (make sure it's the same Python version); - - clone this repository; - - run `pip install -e .`; - - make sure it's sane by running `python -m unittest`; and - - run `black` like you did last time. +```python +this = "code" +``` + +And run it with these arguments: + +```sh +$ black file.py --target-version py39 +``` + +The resulting error is: + +> cannot format file.py: INTERNAL ERROR: ... + +**Expected behavior** + + + +**Environment** + + + +- Black's version: +- OS and Python version: + +**Additional context** -**Additional context** Add any other context about the problem here. + diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000000..3f6641c91a0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,12 @@ +# See also: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#configuring-the-template-chooser + +# This is the default and blank issues are useful so let's keep 'em. +blank_issues_enabled: true + +contact_links: + - name: Chat on Python Discord + url: https://discord.gg/RtVdv86PrH + about: | + User support, questions, and other lightweight requests can be + handled via the #black-formatter text channel we have on Python + Discord. diff --git a/.github/ISSUE_TEMPLATE/docs-issue.md b/.github/ISSUE_TEMPLATE/docs-issue.md new file mode 100644 index 00000000000..d362b867eab --- /dev/null +++ b/.github/ISSUE_TEMPLATE/docs-issue.md @@ -0,0 +1,27 @@ +--- +name: Documentation +about: Report a problem with or suggest something for the documentation +title: "" +labels: "T: documentation" +assignees: "" +--- + +**Is this related to a problem? Please describe.** + + + +**Describe the solution you'd like** + + + +**Describe alternatives you've considered** + + + +**Additional context** + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 00dd5dd8fe5..a34e4a0e214 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -2,18 +2,26 @@ name: Feature request about: Suggest an idea for this project title: "" -labels: enhancement +labels: "T: enhancement" assignees: "" --- -**Is your feature request related to a problem? Please describe.** A clear and concise -description of what the problem is. Ex. I'm always frustrated when [...] +**Is your feature request related to a problem? Please describe.** -**Describe the solution you'd like** A clear and concise description of what you want to -happen. + -**Describe alternatives you've considered** A clear and concise description of any -alternative solutions or features you've considered. +**Describe the solution you'd like** -**Additional context** Add any other context or screenshots about the feature request -here. 
+ + +**Describe alternatives you've considered** + + + +**Additional context** + + diff --git a/.github/ISSUE_TEMPLATE/style_issue.md b/.github/ISSUE_TEMPLATE/style_issue.md index 6d1f246ed86..2e4343a3527 100644 --- a/.github/ISSUE_TEMPLATE/style_issue.md +++ b/.github/ISSUE_TEMPLATE/style_issue.md @@ -2,15 +2,19 @@ name: Style issue about: Help us improve the Black style title: "" -labels: design +labels: "T: design" assignees: "" --- -**Describe the style change** A clear and concise description of how the style can be -improved. +**Describe the style change** -**Examples in the current _Black_ style** Think of some short code snippets that show -how the current _Black_ style is not great: + + +**Examples in the current _Black_ style** + + ```python def f(): @@ -18,7 +22,9 @@ def f(): pass ``` -**Desired style** How do you think _Black_ should format the above snippets: +**Desired style** + + ```python def f( @@ -26,4 +32,6 @@ def f( pass ``` -**Additional context** Add any other context about the problem here. +**Additional context** + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..a039718cd70 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,36 @@ + + +### Description + + + +### Checklist - did you ... + + + +- [ ] Add an entry in `CHANGES.md` if necessary? +- [ ] Add / update tests if necessary? +- [ ] Add new / update outdated documentation? + + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000000..325cb31af1c --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,17 @@ +# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "github-actions" + # Workflow files in .github/workflows will be checked + directory: "/" + schedule: + interval: "weekly" + labels: ["skip news", "C: dependencies"] + + - package-ecosystem: "pip" + directory: "docs/" + schedule: + interval: "weekly" + labels: ["skip news", "C: dependencies", "T: documentation"] + reviewers: ["ichard26"] diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml new file mode 100644 index 00000000000..b3e1f0b9024 --- /dev/null +++ b/.github/workflows/changelog.yml @@ -0,0 +1,24 @@ +name: changelog + +on: + pull_request: + types: [opened, synchronize, labeled, unlabeled, reopened] + +permissions: + contents: read + +jobs: + build: + name: Changelog Entry Check + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Grep CHANGES.md for PR number + if: contains(github.event.pull_request.labels.*.name, 'skip news') != true + run: | + grep -Pz "\((\n\s*)?#${{ github.event.pull_request.number }}(\n\s*)?\)" CHANGES.md || \ + (echo "Please add '(#${{ github.event.pull_request.number }})' change line to CHANGES.md (or if appropriate, ask a maintainer to add the 'skip news' label)" && \ + exit 1) diff --git a/.github/workflows/diff_shades.yml b/.github/workflows/diff_shades.yml new file mode 100644 index 00000000000..a126756f102 --- /dev/null +++ b/.github/workflows/diff_shades.yml @@ -0,0 +1,155 @@ +name: diff-shades + +on: + push: + branches: [main] + paths: ["src/**", "pyproject.toml", ".github/workflows/*"] + + pull_request: + paths: ["src/**", "pyproject.toml", ".github/workflows/*"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} + cancel-in-progress: 
true + +jobs: + configure: + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-config.outputs.matrix }} + + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "*" + + - name: Install diff-shades and support dependencies + run: | + python -m pip install click packaging urllib3 + python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip + + - name: Calculate run configuration & metadata + id: set-config + env: + GITHUB_TOKEN: ${{ github.token }} + run: > + python scripts/diff_shades_gha_helper.py config ${{ github.event_name }} ${{ matrix.mode }} + + analysis: + name: analysis / ${{ matrix.mode }} + needs: configure + runs-on: ubuntu-latest + env: + HATCH_BUILD_HOOKS_ENABLE: "1" + # Clang is less picky with the C code it's given than gcc (and may + # generate faster binaries too). + CC: clang-12 + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.configure.outputs.matrix )}} + + steps: + - name: Checkout this repository (full clone) + uses: actions/checkout@v3 + with: + # The baseline revision could be rather old so a full clone is ideal. + fetch-depth: 0 + + - uses: actions/setup-python@v4 + with: + python-version: "*" + + - name: Install diff-shades and support dependencies + run: | + python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip + python -m pip install click packaging urllib3 + # After checking out old revisions, this might not exist so we'll use a copy. + cat scripts/diff_shades_gha_helper.py > helper.py + git config user.name "diff-shades-gha" + git config user.email "diff-shades-gha@example.com" + + - name: Attempt to use cached baseline analysis + id: baseline-cache + uses: actions/cache@v3 + with: + path: ${{ matrix.baseline-analysis }} + key: ${{ matrix.baseline-cache-key }} + + - name: Build and install baseline revision + if: steps.baseline-cache.outputs.cache-hit != 'true' + env: + GITHUB_TOKEN: ${{ github.token }} + run: > + ${{ matrix.baseline-setup-cmd }} + && python -m pip install . + + - name: Analyze baseline revision + if: steps.baseline-cache.outputs.cache-hit != 'true' + run: > + diff-shades analyze -v --work-dir projects-cache/ + ${{ matrix.baseline-analysis }} ${{ matrix.force-flag }} + + - name: Build and install target revision + env: + GITHUB_TOKEN: ${{ github.token }} + run: > + ${{ matrix.target-setup-cmd }} + && python -m pip install . 
+ + - name: Analyze target revision + run: > + diff-shades analyze -v --work-dir projects-cache/ + ${{ matrix.target-analysis }} --repeat-projects-from + ${{ matrix.baseline-analysis }} ${{ matrix.force-flag }} + + - name: Generate HTML diff report + run: > + diff-shades --dump-html diff.html compare --diff + ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }} + + - name: Upload diff report + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.mode }}-diff.html + path: diff.html + + - name: Upload baseline analysis + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.baseline-analysis }} + path: ${{ matrix.baseline-analysis }} + + - name: Upload target analysis + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.target-analysis }} + path: ${{ matrix.target-analysis }} + + - name: Generate summary file (PR only) + if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes' + run: > + python helper.py comment-body + ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }} + ${{ matrix.baseline-sha }} ${{ matrix.target-sha }} + ${{ github.event.pull_request.number }} + + - name: Upload summary file (PR only) + if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes' + uses: actions/upload-artifact@v3 + with: + name: .pr-comment.json + path: .pr-comment.json + + - name: Verify zero changes (PR only) + if: matrix.mode == 'assert-no-changes' + run: > + diff-shades compare --check ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }} + || (echo "Please verify you didn't change the stable code style unintentionally!" && exit 1) + + - name: Check for failed files for target revision + # Even if the previous step failed, we should still check for failed files. + if: always() + run: > + diff-shades show-failed --check --show-log ${{ matrix.target-analysis }} diff --git a/.github/workflows/diff_shades_comment.yml b/.github/workflows/diff_shades_comment.yml new file mode 100644 index 00000000000..a5d213875c7 --- /dev/null +++ b/.github/workflows/diff_shades_comment.yml @@ -0,0 +1,49 @@ +name: diff-shades-comment + +on: + workflow_run: + workflows: [diff-shades] + types: [completed] + +permissions: + pull-requests: write + +jobs: + comment: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "*" + + - name: Install support dependencies + run: | + python -m pip install pip --upgrade + python -m pip install click packaging urllib3 + + - name: Get details from initial workflow run + id: metadata + env: + GITHUB_TOKEN: ${{ github.token }} + run: > + python scripts/diff_shades_gha_helper.py comment-details + ${{github.event.workflow_run.id }} + + - name: Try to find pre-existing PR comment + if: steps.metadata.outputs.needs-comment == 'true' + id: find-comment + uses: peter-evans/find-comment@1769778a0c5bd330272d749d12c036d65e70d39d + with: + issue-number: ${{ steps.metadata.outputs.pr-number }} + comment-author: "github-actions[bot]" + body-includes: "diff-shades" + + - name: Create or update PR comment + if: steps.metadata.outputs.needs-comment == 'true' + uses: peter-evans/create-or-update-comment@c9fcb64660bc90ec1cc535646af190c992007c32 + with: + comment-id: ${{ steps.find-comment.outputs.comment-id }} + issue-number: ${{ steps.metadata.outputs.pr-number }} + body: ${{ steps.metadata.outputs.comment-body }} + edit-mode: replace diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 6023a02a7f7..fc94dea62d9 100644 --- 
a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -1,36 +1,38 @@ -name: Documentation Build - -on: - push: - paths: - - "docs/**" - - "README.md" - - "CHANGES.md" - - "CONTRIBUTING.md" - pull_request: - paths: - - "docs/**" - - "README.md" - - "CHANGES.md" - - "CONTRIBUTING.md" +name: Documentation + +on: [push, pull_request] + +permissions: + contents: read jobs: build: - runs-on: ubuntu-latest + # We want to run on external PRs, but not on our own internal PRs as they'll be run + # by the push to the branch. Without this if check, checks are duplicated since + # internal PRs match both the push and pull_request events. + if: + github.event_name == 'push' || github.event.pull_request.head.repo.full_name != + github.repository + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest] + runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - name: Set up Python 3.8 - uses: actions/setup-python@v2 + - name: Set up latest Python + uses: actions/setup-python@v4 with: - python-version: 3.8 + python-version: "*" - name: Install dependencies run: | python -m pip install --upgrade pip setuptools wheel - python -m pip install -e "." + python -m pip install -e ".[d]" python -m pip install -r "docs/requirements.txt" - name: Build documentation - run: sphinx-build -a -b html -W docs/ docs/_build/ + run: sphinx-build -a -b html -W --keep-going docs/ docs/_build diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 00000000000..a3106d04aae --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,56 @@ +name: docker + +on: + push: + branches: + - "main" + release: + types: [published] + +permissions: + contents: read + +jobs: + docker: + if: github.repository == 'psf/black' + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Check + set version tag + run: + echo "GIT_TAG=$(git describe --candidates=0 --tags 2> /dev/null || echo + latest_non_release)" >> $GITHUB_ENV + + - name: Build and push + uses: docker/build-push-action@v3 + with: + context: . + platforms: linux/amd64,linux/arm64 + push: true + tags: pyfound/black:latest,pyfound/black:${{ env.GIT_TAG }} + + - name: Build and push latest_release tag + if: ${{ github.event_name == 'release' && github.event.action == 'published' }} + uses: docker/build-push-action@v3 + with: + context: . + platforms: linux/amd64,linux/arm64 + push: true + tags: pyfound/black:latest_release + + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 92caa0fd5c1..ebb8a9fda9e 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -2,30 +2,41 @@ name: Fuzz on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true + +permissions: + contents: read + jobs: build: + # We want to run on external PRs, but not on our own internal PRs as they'll be run + # by the push to the branch. Without this if check, checks are duplicated since + # internal PRs match both the push and pull_request events. 
+ if: + github.event_name == 'push' || github.event.pull_request.head.repo.full_name != + github.repository + runs-on: ubuntu-latest strategy: fail-fast: false matrix: - python-version: [3.6, 3.7, 3.8] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install --upgrade coverage - python -m pip install --upgrade hypothesmith - python -m pip install -e ".[d]" + python -m pip install --upgrade tox - name: Run fuzz tests run: | - coverage run fuzz.py - coverage report + tox -e fuzz diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index fa7286eec1f..90c48013080 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -4,24 +4,32 @@ on: [push, pull_request] jobs: build: + # We want to run on external PRs, but not on our own internal PRs as they'll be run + # by the push to the branch. Without this if check, checks are duplicated since + # internal PRs match both the push and pull_request events. + if: + github.event_name == 'push' || github.event.pull_request.head.repo.full_name != + github.repository + runs-on: ubuntu-latest - strategy: - matrix: - python-version: [3.7] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + - name: Set up latest Python + uses: actions/setup-python@v4 with: - python-version: ${{ matrix.python-version }} + python-version: "*" - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install --upgrade pre-commit python -m pip install -e '.[d]' + python -m pip install tox + + - name: Run pre-commit hooks + uses: pre-commit/action@v3.0.0 - - name: Lint - run: pre-commit run --all-files --show-diff-on-failure + - name: Format ourselves + run: | + tox -e run_self diff --git a/.github/workflows/primer.yml b/.github/workflows/primer.yml deleted file mode 100644 index b5dea5e7139..00000000000 --- a/.github/workflows/primer.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Primer - -on: [push, pull_request] - -jobs: - build: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - python-version: [3.6, 3.7, 3.8] - os: [ubuntu-latest, macOS-latest, windows-latest] - - steps: - - uses: actions/checkout@v2 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - python -m pip install -e ".[d]" - - - name: Primer run - env: - pythonioencoding: utf-8 - run: | - black-primer diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml new file mode 100644 index 00000000000..76bcd33b55e --- /dev/null +++ b/.github/workflows/pypi_upload.yml @@ -0,0 +1,93 @@ +name: Publish to PyPI + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + main: + name: sdist + pure wheel + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Set up latest Python + uses: actions/setup-python@v4 + with: + python-version: "*" + + - name: Install latest pip, build, twine + run: | + python -m pip install --upgrade --disable-pip-version-check pip + python -m pip install --upgrade build 
twine + + - name: Build wheel and source distributions + run: python -m build + + - name: Upload to PyPI via Twine + env: + TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} + run: twine upload --verbose -u '__token__' dist/* + + mypyc: + name: mypyc wheels (${{ matrix.name }}) + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-latest + name: linux-x86_64 + - os: windows-2019 + name: windows-amd64 + - os: macos-11 + name: macos-x86_64 + macos_arch: "x86_64" + - os: macos-11 + name: macos-arm64 + macos_arch: "arm64" + - os: macos-11 + name: macos-universal2 + macos_arch: "universal2" + + steps: + - uses: actions/checkout@v3 + + - name: Build wheels via cibuildwheel + uses: pypa/cibuildwheel@v2.10.2 + env: + CIBW_ARCHS_MACOS: "${{ matrix.macos_arch }}" + + - name: Upload wheels as workflow artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.name }}-mypyc-wheels + path: ./wheelhouse/*.whl + + - name: Upload wheels to PyPI via Twine + env: + TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} + run: pipx run twine upload --verbose -u '__token__' wheelhouse/*.whl + + update-stable-branch: + name: Update stable branch + needs: [main, mypyc] + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout stable branch + uses: actions/checkout@v3 + with: + ref: stable + fetch-depth: 0 + + - name: Update stable branch to release tag & push + run: | + git reset --hard ${{ github.event.release.tag_name }} + git push diff --git a/.github/workflows/test-311.yml b/.github/workflows/test-311.yml new file mode 100644 index 00000000000..c2da2465ad5 --- /dev/null +++ b/.github/workflows/test-311.yml @@ -0,0 +1,57 @@ +name: Test 3.11 without aiohttp extensions + +on: + push: + paths-ignore: + - "docs/**" + - "*.md" + + pull_request: + paths-ignore: + - "docs/**" + - "*.md" + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} + cancel-in-progress: true + +jobs: + main: + # We want to run on external PRs, but not on our own internal PRs as they'll be run + # by the push to the branch. Without this if check, checks are duplicated since + # internal PRs match both the push and pull_request events. + if: + github.event_name == 'push' || github.event.pull_request.head.repo.full_name != + github.repository + + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + python-version: ["3.11.0-rc - 3.11"] + os: [ubuntu-latest, macOS-latest, windows-latest] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install tox + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade tox + + - name: Run tests via tox + run: | + python -m tox -e 311 + + - name: Format ourselves + run: | + python -m pip install . 
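+          # --check only reports; the step fails if Black would reformat its own sources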
+ python -m black --check src/ diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bd0af61e7f5..372d1fd5d38 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,30 +1,106 @@ name: Test -on: [push, pull_request] +on: + push: + paths-ignore: + - "docs/**" + - "*.md" + + pull_request: + paths-ignore: + - "docs/**" + - "*.md" + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} + cancel-in-progress: true jobs: - build: + main: + # We want to run on external PRs, but not on our own internal PRs as they'll be run + # by the push to the branch. Without this if check, checks are duplicated since + # internal PRs match both the push and pull_request events. + if: + github.event_name == 'push' || github.event.pull_request.head.repo.full_name != + github.repository + runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - python-version: [3.6, 3.7, 3.8] + python-version: ["3.7", "3.8", "3.9", "3.10", "pypy-3.7", "pypy-3.8"] os: [ubuntu-latest, macOS-latest, windows-latest] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies + - name: Install tox run: | python -m pip install --upgrade pip - python -m pip install --upgrade coverage - python -m pip install -e ".[d]" + python -m pip install --upgrade tox - name: Unit tests + if: "!startsWith(matrix.python-version, 'pypy')" + run: tox -e ci-py -- -v --color=yes + + - name: Unit tests (pypy) + if: "startsWith(matrix.python-version, 'pypy')" + run: tox -e ci-pypy3 -- -v --color=yes + + - name: Upload coverage to Coveralls + # Upload coverage if we are on the main repository and + # we're running on Linux (this action only supports Linux) + if: github.repository == 'psf/black' && matrix.os == 'ubuntu-latest' + uses: AndreMiras/coveralls-python-action@v20201129 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + parallel: true + flag-name: py${{ matrix.python-version }}-${{ matrix.os }} + debug: true + + coveralls-finish: + needs: main + if: github.repository == 'psf/black' + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Send finished signal to Coveralls + uses: AndreMiras/coveralls-python-action@v20201129 + with: + parallel-finished: true + debug: true + + uvloop: + if: + github.event_name == 'push' || github.event.pull_request.head.repo.full_name != + github.repository + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macOS-latest] + + steps: + - uses: actions/checkout@v3 + + - name: Set up latest Python + uses: actions/setup-python@v4 + with: + python-version: "*" + + - name: Install black with uvloop run: | - coverage run -m unittest + python -m pip install pip --upgrade --disable-pip-version-check + python -m pip install -e ".[uvloop]" + + - name: Format ourselves + run: python -m black --check src/ diff --git a/.github/workflows/upload_binary.yml b/.github/workflows/upload_binary.yml new file mode 100644 index 00000000000..22535a64c67 --- /dev/null +++ b/.github/workflows/upload_binary.yml @@ -0,0 +1,63 @@ +name: Publish executables + +on: + release: + types: [published] + +permissions: + contents: write # actions/upload-release-asset needs this. 
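+
+# Each matrix job below builds a self-contained Black binary with PyInstaller and
+# attaches it to the GitHub release as a platform-specific asset.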
+ +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [windows-2019, ubuntu-20.04, macos-latest] + include: + - os: windows-2019 + pathsep: ";" + asset_name: black_windows.exe + executable_mime: "application/vnd.microsoft.portable-executable" + - os: ubuntu-20.04 + pathsep: ":" + asset_name: black_linux + executable_mime: "application/x-executable" + - os: macos-latest + pathsep: ":" + asset_name: black_macos + executable_mime: "application/x-mach-binary" + + steps: + - uses: actions/checkout@v3 + + - name: Set up latest Python + uses: actions/setup-python@v4 + with: + python-version: "*" + + - name: Install Black and PyInstaller + run: | + python -m pip install --upgrade pip wheel + python -m pip install .[colorama] + python -m pip install pyinstaller + + - name: Build executable with PyInstaller + run: > + python -m PyInstaller -F --name ${{ matrix.asset_name }} --add-data + 'src/blib2to3${{ matrix.pathsep }}blib2to3' src/black/__main__.py + + - name: Quickly test executable + run: | + ./dist/${{ matrix.asset_name }} --version + ./dist/${{ matrix.asset_name }} src --verbose + + - name: Upload binary as release asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ github.event.release.upload_url }} + asset_path: dist/${{ matrix.asset_name }} + asset_name: ${{ matrix.asset_name }} + asset_content_type: ${{ matrix.executable_mime }} diff --git a/.gitignore b/.gitignore index 6b94cacd183..249499b135e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,17 +1,26 @@ +.venv .coverage +.coverage.* _build .DS_Store .vscode docs/_static/pypi.svg .tox __pycache__ + +# Packaging artifacts black.egg-info +black.dist-info build/ dist/ pip-wheel-metadata/ +.eggs + src/_black_version.py .idea -.eggs + .dmypy.json *.swp .hypothesis/ +venv/ +.ipynb_checkpoints/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 667b22d6328..0be8dc42890 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,30 +1,65 @@ # Note: don't use this config for your own repositories. Instead, see -# "Version control integration" in README.md. 
+# "Version control integration" in docs/integrations/source_version_control.md exclude: ^(src/blib2to3/|profiling/|tests/data/) repos: - repo: local hooks: - - id: black - name: black - language: system - entry: black - require_serial: true - types: [python] + - id: check-pre-commit-rev-in-example + name: Check pre-commit rev in example + language: python + entry: python -m scripts.check_pre_commit_rev_in_example + files: '(CHANGES\.md|source_version_control\.md)$' + additional_dependencies: + &version_check_dependencies [ + commonmark==0.9.1, + pyyaml==5.4.1, + beautifulsoup4==4.9.3, + ] - - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.1 + - id: check-version-in-the-basics-example + name: Check black version in the basics example + language: python + entry: python -m scripts.check_version_in_basics_example + files: '(CHANGES\.md|the_basics\.md)$' + additional_dependencies: *version_check_dependencies + + - repo: https://github.com/pycqa/isort + rev: 5.10.1 + hooks: + - id: isort + + - repo: https://github.com/pycqa/flake8 + rev: 4.0.1 hooks: - id: flake8 - additional_dependencies: [flake8-bugbear] + additional_dependencies: + - flake8-bugbear + - flake8-comprehensions + - flake8-simplify - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.770 + rev: v0.971 hooks: - id: mypy exclude: ^docs/conf.py + additional_dependencies: + - types-dataclasses >= 0.1.3 + - types-PyYAML + - tomli >= 0.2.6, < 2.0.0 + - types-typed-ast >= 1.4.1 + - click >= 8.1.0 + - platformdirs >= 2.1.0 + - pytest + - hypothesis - - repo: https://github.com/prettier/prettier - rev: 1.19.1 + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v2.7.1 hooks: - id: prettier - args: [--prose-wrap=always, --print-width=88] + exclude: \.github/workflows/diff_shades\.yml + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace diff --git a/.pre-commit-hooks.yaml b/.pre-commit-hooks.yaml index c59213e2632..137957045a6 100644 --- a/.pre-commit-hooks.yaml +++ b/.pre-commit-hooks.yaml @@ -3,6 +3,16 @@ description: "Black: The uncompromising Python code formatter" entry: black language: python - language_version: python3 + minimum_pre_commit_version: 2.9.2 require_serial: true - types: [python] + types_or: [python, pyi] +- id: black-jupyter + name: black-jupyter + description: + "Black: The uncompromising Python code formatter (with Jupyter Notebook support)" + entry: black + language: python + minimum_pre_commit_version: 2.9.2 + require_serial: true + types_or: [python, pyi, jupyter] + additional_dependencies: [".[jupyter]"] diff --git a/.prettierrc.yaml b/.prettierrc.yaml new file mode 100644 index 00000000000..beda5ba4da8 --- /dev/null +++ b/.prettierrc.yaml @@ -0,0 +1,3 @@ +proseWrap: always +printWidth: 88 +endOfLine: auto diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000000..fff2d6ed341 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,18 @@ +version: 2 + +formats: + - htmlzip + +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +python: + install: + - requirements: docs/requirements.txt + + - method: pip + path: . + extra_requirements: + - d diff --git a/AUTHORS.md b/AUTHORS.md new file mode 100644 index 00000000000..f2d599dd878 --- /dev/null +++ b/AUTHORS.md @@ -0,0 +1,194 @@ +# Authors + +Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). 
+ +Maintained with: + +- [Carol Willing](mailto:carolcode@willingconsulting.com) +- [Carl Meyer](mailto:carl@oddbird.net) +- [Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com) +- [Mika Naylor](mailto:mail@autophagy.io) +- [Zsolt Dollenstein](mailto:zsol.zsol@gmail.com) +- [Cooper Lees](mailto:me@cooperlees.com) +- Richard Si +- [Felix Hildén](mailto:felix.hilden@gmail.com) +- [Batuhan Taskaya](mailto:batuhan@python.org) + +Multiple contributions by: + +- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) +- [Adam Johnson](mailto:me@adamj.eu) +- [Adam Williamson](mailto:adamw@happyassassin.net) +- [Alexander Huynh](mailto:ahrex-gh-psf-black@e.sc) +- [Alexandr Artemyev](mailto:mogost@gmail.com) +- [Alex Vandiver](mailto:github@chmrr.net) +- [Allan Simon](mailto:allan.simon@supinfo.com) +- Anders-Petter Ljungquist +- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) +- [Andrew Zhou](mailto:andrewfzhou@gmail.com) +- [Andrey](mailto:dyuuus@yandex.ru) +- [Andy Freeland](mailto:andy@andyfreeland.net) +- [Anthony Sottile](mailto:asottile@umich.edu) +- [Antonio Ossa Guerra](mailto:aaossa+black@uc.cl) +- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) +- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) +- [Artem Malyshev](mailto:proofit404@gmail.com) +- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) +- [Augie Fackler](mailto:raf@durin42.com) +- [Aviskar KC](mailto:aviskarkc10@gmail.com) +- Batuhan Taşkaya +- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) +- [Benjamin Woodruff](mailto:github@benjam.info) +- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) +- [Brandt Bucher](mailto:brandtbucher@gmail.com) +- [Brett Cannon](mailto:brett@python.org) +- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) +- [Bryan Forbes](mailto:bryan@reigndropsfall.net) +- [Calum Lind](mailto:calumlind@gmail.com) +- [Charles](mailto:peacech@gmail.com) +- Charles Reid +- [Christian Clauss](mailto:cclauss@bluewin.ch) +- [Christian Heimes](mailto:christian@python.org) +- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) +- [Chris Rose](mailto:offline@offby1.net) +- Codey Oxley +- [Cong](mailto:congusbongus@gmail.com) +- [Cooper Ry Lees](mailto:me@cooperlees.com) +- [Dan Davison](mailto:dandavison7@gmail.com) +- [Daniel Hahler](mailto:github@thequod.de) +- [Daniel M. Capella](mailto:polycitizen@gmail.com) +- Daniele Esposti +- [David Hotham](mailto:david.hotham@metaswitch.com) +- [David Lukes](mailto:dafydd.lukes@gmail.com) +- [David Szotten](mailto:davidszotten@gmail.com) +- [Denis Laxalde](mailto:denis@laxalde.org) +- [Douglas Thor](mailto:dthor@transphormusa.com) +- dylanjblack +- [Eli Treuherz](mailto:eli@treuherz.com) +- [Emil Hessman](mailto:emil@hessman.se) +- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) +- [Florent Thiery](mailto:fthiery@gmail.com) +- Francisco +- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) +- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) +- [Gregory P. 
Smith](mailto:greg@krypto.org) +- Gustavo Camargo +- hauntsaninja +- [Hadi Alqattan](mailto:alqattanhadizaki@gmail.com) +- [Hassan Abouelela](mailto:hassan@hassanamr.com) +- [Heaford](mailto:dan@heaford.com) +- [Hugo Barrera](mailto::hugo@barrera.io) +- Hugo van Kemenade +- [Hynek Schlawack](mailto:hs@ox.cx) +- [Ionite](mailto:dev@ionite.io) +- [Ivan Katanić](mailto:ivan.katanic@gmail.com) +- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net) +- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com) +- [Jan Hnátek](mailto:jan.hnatek@gmail.com) +- [Jason Fried](mailto:me@jasonfried.info) +- [Jason Friedland](mailto:jason@friedland.id.au) +- [jgirardet](mailto:ijkl@netc.fr) +- Jim Brännlund +- [Jimmy Jia](mailto:tesrin@gmail.com) +- [Joe Antonakakis](mailto:jma353@cornell.edu) +- [Jon Dufresne](mailto:jon.dufresne@gmail.com) +- [Jonas Obrist](mailto:ojiidotch@gmail.com) +- [Jonty Wareing](mailto:jonty@jonty.co.uk) +- [Jose Nazario](mailto:jose.monkey.org@gmail.com) +- [Joseph Larson](mailto:larson.joseph@gmail.com) +- [Josh Bode](mailto:joshbode@fastmail.com) +- [Josh Holland](mailto:anowlcalledjosh@gmail.com) +- [Joshua Cannon](mailto:joshdcannon@gmail.com) +- [José Padilla](mailto:jpadilla@webapplicate.com) +- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) +- [kaiix](mailto:kvn.hou@gmail.com) +- [Katie McLaughlin](mailto:katie@glasnt.com) +- Katrin Leinweber +- [Keith Smiley](mailto:keithbsmiley@gmail.com) +- [Kenyon Ralph](mailto:kenyon@kenyonralph.com) +- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com) +- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com) +- [Kyle Sunden](mailto:sunden@wisc.edu) +- Lawrence Chan +- [Linus Groh](mailto:mail@linusgroh.de) +- [Loren Carvalho](mailto:comradeloren@gmail.com) +- [Luka Sterbic](mailto:luka.sterbic@gmail.com) +- [LukasDrude](mailto:mail@lukas-drude.de) +- Mahmoud Hossam +- Mariatta +- [Matt VanEseltine](mailto:vaneseltine@gmail.com) +- [Matthew Clapp](mailto:itsayellow+dev@gmail.com) +- [Matthew Walster](mailto:matthew@walster.org) +- Max Smolens +- [Michael Aquilina](mailto:michaelaquilina@gmail.com) +- [Michael Flaxman](mailto:michael.flaxman@gmail.com) +- [Michael J. 
Sullivan](mailto:sully@msully.net) +- [Michael McClimon](mailto:michael@mcclimon.org) +- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) +- [Mike](mailto:roshi@fedoraproject.org) +- [mikehoyio](mailto:mikehoy@gmail.com) +- [Min ho Kim](mailto:minho42@gmail.com) +- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) +- MomIsBestFriend +- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) +- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) +- [Neraste](mailto:neraste.herr10@gmail.com) +- [Nikolaus Waxweiler](mailto:madigens@gmail.com) +- [Ofek Lev](mailto:ofekmeister@gmail.com) +- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) +- [otstrel](mailto:otstrel@gmail.com) +- [Pablo Galindo](mailto:Pablogsal@gmail.com) +- [Paul Ganssle](mailto:p.ganssle@gmail.com) +- [Paul Meinhardt](mailto:mnhrdt@gmail.com) +- [Peter Bengtsson](mailto:mail@peterbe.com) +- [Peter Grayson](mailto:pete@jpgrayson.net) +- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) +- pmacosta +- [Quentin Pradet](mailto:quentin@pradet.me) +- [Ralf Schmitt](mailto:ralf@systemexit.de) +- [Ramón Valles](mailto:mroutis@protonmail.com) +- [Richard Fearn](mailto:richardfearn@gmail.com) +- [Rishikesh Jha](mailto:rishijha424@gmail.com) +- [Rupert Bedford](mailto:rupert@rupertb.com) +- Russell Davis +- [Sagi Shadur](mailto:saroad2@gmail.com) +- [Rémi Verschelde](mailto:rverschelde@gmail.com) +- [Sami Salonen](mailto:sakki@iki.fi) +- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) +- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) +- Sergi +- [Scott Stevenson](mailto:scott@stevenson.io) +- Shantanu +- [shaoran](mailto:shaoran@sakuranohana.org) +- [Shinya Fujino](mailto:shf0811@gmail.com) +- springstan +- [Stavros Korokithakis](mailto:hi@stavros.io) +- [Stephen Rosen](mailto:sirosen@globus.org) +- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com) +- [Sunil Kapil](mailto:snlkapil@gmail.com) +- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) +- [Tal Amuyal](mailto:TalAmuyal@gmail.com) +- [Terrance](mailto:git@terrance.allofti.me) +- [Thom Lu](mailto:thomas.c.lu@gmail.com) +- [Thomas Grainger](mailto:tagrain@gmail.com) +- [Tim Gates](mailto:tim.gates@iress.com) +- [Tim Swast](mailto:swast@google.com) +- [Timo](mailto:timo_tk@hotmail.com) +- Toby Fleming +- [Tom Christie](mailto:tom@tomchristie.com) +- [Tony Narlock](mailto:tony@git-pull.com) +- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) +- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) +- [Tzu-ping Chung](mailto:uranusjr@gmail.com) +- [Utsav Shah](mailto:ukshah2@illinois.edu) +- utsav-dbx +- vezeli +- [Ville Skyttä](mailto:ville.skytta@iki.fi) +- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) +- [Vlad Emelianov](mailto:volshebnyi@gmail.com) +- [williamfzc](mailto:178894043@qq.com) +- [wouter bolsterlee](mailto:wouter@bolsterl.ee) +- Yazdan +- [Yngve Høiseth](mailto:yngve@hoiseth.net) +- [Yurii Karabas](mailto:1998uriyyo@gmail.com) +- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) diff --git a/CHANGES.md b/CHANGES.md index 11341779f58..4ff181674b1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,15 +1,656 @@ -## Change Log +# Change Log -### 20.8b1 +## Unreleased -#### _Packaging_ +### Highlights + + + +- Runtime support for Python 3.6 has been removed. Formatting 3.6 code will still be + supported until further notice. 
+ +### Stable style + + + +- Fix a crash when `# fmt: on` is used on a different block level than `# fmt: off` + (#3281) + +### Preview style + + + +- Fix a crash when formatting some dicts with parenthesis-wrapped long string keys + (#3262) + +### Configuration + + + +- `.ipynb_checkpoints` directories are now excluded by default (#3293) + +### Packaging + + + +- Executables made with PyInstaller will no longer crash when formatting several files + at once on macOS. Native x86-64 executables for macOS are available once again. + (#3275) +- Hatchling is now used as the build backend. This will not have any effect for users + who install Black with its wheels from PyPI. (#3233) +- Faster compiled wheels are now available for CPython 3.11 (#3276) + +### Parser + + + +### Performance + + + +### Output + + + +### _Blackd_ + +- Windows style (CRLF) newlines will be preserved (#3257). + +### Integrations + + + +- Update GitHub Action to support formatting of Jupyter Notebook files via a `jupyter` + option (#3282) +- Update GitHub Action to support use of version specifiers (e.g. `<23`) for Black + version (#3265) + +### Documentation + + + +## 22.8.0 + +### Highlights + +- Python 3.11 is now supported, except for _blackd_ as aiohttp does not support 3.11 as + of publishing (#3234) +- This is the last release that supports running _Black_ on Python 3.6 (formatting 3.6 + code will continue to be supported until further notice) +- Reword the stability policy to say that we may, in rare cases, make changes that + affect code that was not previously formatted by _Black_ (#3155) + +### Stable style + +- Fix an infinite loop when using `# fmt: on/off` in the middle of an expression or code + block (#3158) +- Fix incorrect handling of `# fmt: skip` on colon (`:`) lines (#3148) +- Comments are no longer deleted when a line had spaces removed around power operators + (#2874) + +### Preview style + +- Single-character closing docstring quotes are no longer moved to their own line as + this is invalid. This was a bug introduced in version 22.6.0. 
(#3166) +- `--skip-string-normalization` / `-S` now prevents docstring prefixes from being + normalized as expected (#3168) +- When using `--skip-magic-trailing-comma` or `-C`, trailing commas are stripped from + subscript expressions with more than 1 element (#3209) +- Implicitly concatenated strings inside a list, set, or tuple are now wrapped inside + parentheses (#3162) +- Fix a string merging/split issue when a comment is present in the middle of implicitly + concatenated strings on its own line (#3227) + +### _Blackd_ + +- `blackd` now supports enabling the preview style via the `X-Preview` header (#3217) + +### Configuration + +- Black now uses the presence of debug f-strings to detect target version (#3215) +- Fix misdetection of project root and verbose logging of sources in cases involving + `--stdin-filename` (#3216) +- Immediate `.gitignore` files in source directories given on the command line are now + also respected, previously only `.gitignore` files in the project root and + automatically discovered directories were respected (#3237) + +### Documentation + +- Recommend using BlackConnect in IntelliJ IDEs (#3150) + +### Integrations + +- Vim plugin: prefix messages with `Black: ` so it's clear they come from Black (#3194) +- Docker: changed to a /opt/venv installation + added to PATH to be available to + non-root users (#3202) +- Vim plugin: add flag (`g:black_preview`) to enable/disable the preview style (#3246) + +### Output + +- Change from deprecated `asyncio.get_event_loop()` to create our event loop which + removes DeprecationWarning (#3164) +- Remove logging from internal `blib2to3` library since it regularly emits error logs + about failed caching that can and should be ignored (#3193) + +### Parser + +- Type comments are now included in the AST equivalence check consistently so accidental + deletion raises an error. Though type comments can't be tracked when running on PyPy + 3.7 due to standard library limitations. (#2874) + +### Performance + +- Reduce Black's startup time when formatting a single file by 15-30% (#3211) + +## 22.6.0 + +### Style + +- Fix unstable formatting involving `#fmt: skip` and `# fmt:skip` comments (notice the + lack of spaces) (#2970) + +### Preview style + +- Docstring quotes are no longer moved if it would violate the line length limit (#3044) +- Parentheses around return annotations are now managed (#2990) +- Remove unnecessary parentheses around awaited objects (#2991) +- Remove unnecessary parentheses in `with` statements (#2926) +- Remove trailing newlines after code block open (#3035) + +### Integrations + +- Add `scripts/migrate-black.py` script to ease introduction of Black to a Git project + (#3038) + +### Output + +- Output Python version and implementation as part of `--version` flag (#2997) + +### Packaging + +- Use `tomli` instead of `tomllib` on Python 3.11 builds where `tomllib` is not + available (#2987) + +### Parser + +- [PEP 654](https://peps.python.org/pep-0654/#except) syntax (for example, + `except *ExceptionGroup:`) is now supported (#3016) +- [PEP 646](https://peps.python.org/pep-0646) syntax (for example, + `Array[Batch, *Shape]` or `def fn(*args: *T) -> None`) is now supported (#3071) + +### Vim Plugin + +- Fix `strtobool` function. It didn't parse true/on/false/off. 
(#3025) + +## 22.3.0 + +### Preview style + +- Code cell separators `#%%` are now standardised to `# %%` (#2919) +- Remove unnecessary parentheses from `except` statements (#2939) +- Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) +- Avoid magic-trailing-comma in single-element subscripts (#2942) + +### Configuration + +- Do not format `__pypackages__` directories by default (#2836) +- Add support for specifying stable version with `--required-version` (#2832). +- Avoid crashing when the user has no homedir (#2814) +- Avoid crashing when md5 is not available (#2905) +- Fix handling of directory junctions on Windows (#2904) + +### Documentation + +- Update pylint config documentation (#2931) + +### Integrations + +- Move test to disable plugin in Vim/Neovim, which speeds up loading (#2896) + +### Output + +- In verbose mode, log when _Black_ is using user-level config (#2861) + +### Packaging + +- Fix Black to work with Click 8.1.0 (#2966) +- On Python 3.11 and newer, use the standard library's `tomllib` instead of `tomli` + (#2903) +- `black-primer`, the deprecated internal devtool, has been removed and copied to a + [separate repository](https://github.com/cooperlees/black-primer) (#2924) + +### Parser + +- Black can now parse starred expressions in the target of `for` and `async for` + statements, e.g `for item in *items_1, *items_2: pass` (#2879). + +## 22.1.0 + +At long last, _Black_ is no longer a beta product! This is the first non-beta release +and the first release covered by our new +[stability policy](https://black.readthedocs.io/en/stable/the_black_code_style/index.html#stability-policy). + +### Highlights + +- **Remove Python 2 support** (#2740) +- Introduce the `--preview` flag (#2752) + +### Style + +- Deprecate `--experimental-string-processing` and move the functionality under + `--preview` (#2789) +- For stubs, one blank line between class attributes and methods is now kept if there's + at least one pre-existing blank line (#2736) +- Black now normalizes string prefix order (#2297) +- Remove spaces around power operators if both operands are simple (#2726) +- Work around bug that causes unstable formatting in some cases in the presence of the + magic trailing comma (#2807) +- Use parentheses for attribute access on decimal float and int literals (#2799) +- Don't add whitespace for attribute access on hexadecimal, binary, octal, and complex + literals (#2799) +- Treat blank lines in stubs the same inside top-level `if` statements (#2820) +- Fix unstable formatting with semicolons and arithmetic expressions (#2817) +- Fix unstable formatting around magic trailing comma (#2572) + +### Parser + +- Fix mapping cases that contain as-expressions, like `case {"key": 1 | 2 as password}` + (#2686) +- Fix cases that contain multiple top-level as-expressions, like `case 1 as a, 2 as b` + (#2716) +- Fix call patterns that contain as-expressions with keyword arguments, like + `case Foo(bar=baz as quux)` (#2749) +- Tuple unpacking on `return` and `yield` constructs now implies 3.8+ (#2700) +- Unparenthesized tuples on annotated assignments (e.g + `values: Tuple[int, ...] = 1, 2, 3`) now implies 3.8+ (#2708) +- Fix handling of standalone `match()` or `case()` when there is a trailing newline or a + comment inside of the parentheses. (#2760) +- `from __future__ import annotations` statement now implies Python 3.7+ (#2690) + +### Performance + +- Speed-up the new backtracking parser about 4X in general (enabled when + `--target-version` is set to 3.10 and higher). 
(#2728) +- _Black_ is now compiled with [mypyc](https://github.com/mypyc/mypyc) for an overall 2x + speed-up. 64-bit Windows, MacOS, and Linux (not including musl) are supported. (#1009, + #2431) + +### Configuration + +- Do not accept bare carriage return line endings in pyproject.toml (#2408) +- Add configuration option (`python-cell-magics`) to format cells with custom magics in + Jupyter Notebooks (#2744) +- Allow setting custom cache directory on all platforms with environment variable + `BLACK_CACHE_DIR` (#2739). +- Enable Python 3.10+ by default, without any extra need to specify + `--target-version=py310`. (#2758) +- Make passing `SRC` or `--code` mandatory and mutually exclusive (#2804) + +### Output + +- Improve error message for invalid regular expression (#2678) +- Improve error message when parsing fails during AST safety check by embedding the + underlying SyntaxError (#2693) +- No longer color diff headers white as it's unreadable in light themed terminals + (#2691) +- Text coloring added in the final statistics (#2712) +- Verbose mode also now describes how a project root was discovered and which paths will + be formatted. (#2526) + +### Packaging + +- All upper version bounds on dependencies have been removed (#2718) +- `typing-extensions` is no longer a required dependency in Python 3.10+ (#2772) +- Set `click` lower bound to `8.0.0` (#2791) + +### Integrations + +- Update GitHub action to support containerized runs (#2748) + +### Documentation + +- Change protocol in pip installation instructions to `https://` (#2761) +- Change HTML theme to Furo primarily for its responsive design and mobile support + (#2793) +- Deprecate the `black-primer` tool (#2809) +- Document Python support policy (#2819) + +## 21.12b0 + +### _Black_ + +- Fix determination of f-string expression spans (#2654) +- Fix bad formatting of error messages about EOF in multi-line statements (#2343) +- Functions and classes in blocks now have more consistent surrounding spacing (#2472) + +#### Jupyter Notebook support + +- Cell magics are now only processed if they are known Python cell magics. Earlier, all + cell magics were tokenized, leading to possible indentation errors e.g. with + `%%writefile`. (#2630) +- Fix assignment to environment variables in Jupyter Notebooks (#2642) + +#### Python 3.10 support + +- Point users to using `--target-version py310` if we detect 3.10-only syntax (#2668) +- Fix `match` statements with open sequence subjects, like `match a, b:` or + `match a, *b:` (#2639) (#2659) +- Fix `match`/`case` statements that contain `match`/`case` soft keywords multiple + times, like `match re.match()` (#2661) +- Fix `case` statements with an inline body (#2665) +- Fix styling of starred expressions inside `match` subject (#2667) +- Fix parser error location on invalid syntax in a `match` statement (#2649) +- Fix Python 3.10 support on platforms without ProcessPoolExecutor (#2631) +- Improve parsing performance on code that uses `match` under `--target-version py310` + up to ~50% (#2670) + +### Packaging + +- Remove dependency on `regex` (#2644) (#2663) + +## 21.11b1 + +### _Black_ + +- Bumped regex version minimum to 2021.4.4 to fix Pattern class usage (#2621) + +## 21.11b0 + +### _Black_ + +- Warn about Python 2 deprecation in more cases by improving Python 2 only syntax + detection (#2592) +- Add experimental PyPy support (#2559) +- Add partial support for the match statement. 
As it's experimental, it's only enabled + when `--target-version py310` is explicitly specified (#2586) +- Add support for parenthesized with (#2586) +- Declare support for Python 3.10 for running Black (#2562) + +### Integrations + +- Fixed vim plugin with Python 3.10 by removing deprecated distutils import (#2610) +- The vim plugin now parses `skip_magic_trailing_comma` from pyproject.toml (#2613) + +## 21.10b0 + +### _Black_ + +- Document stability policy, that will apply for non-beta releases (#2529) +- Add new `--workers` parameter (#2514) +- Fixed feature detection for positional-only arguments in lambdas (#2532) +- Bumped typed-ast version minimum to 1.4.3 for 3.10 compatibility (#2519) +- Fixed a Python 3.10 compatibility issue where the loop argument was still being passed + even though it has been removed (#2580) +- Deprecate Python 2 formatting support (#2523) + +### _Blackd_ + +- Remove dependency on aiohttp-cors (#2500) +- Bump required aiohttp version to 3.7.4 (#2509) + +### _Black-Primer_ + +- Add primer support for --projects (#2555) +- Print primer summary after individual failures (#2570) + +### Integrations + +- Allow to pass `target_version` in the vim plugin (#1319) +- Install build tools in docker file and use multi-stage build to keep the image size + down (#2582) + +## 21.9b0 + +### Packaging + +- Fix missing modules in self-contained binaries (#2466) +- Fix missing toml extra used during installation (#2475) + +## 21.8b0 + +### _Black_ + +- Add support for formatting Jupyter Notebook files (#2357) +- Move from `appdirs` dependency to `platformdirs` (#2375) +- Present a more user-friendly error if .gitignore is invalid (#2414) +- The failsafe for accidentally added backslashes in f-string expressions has been + hardened to handle more edge cases during quote normalization (#2437) +- Avoid changing a function return type annotation's type to a tuple by adding a + trailing comma (#2384) +- Parsing support has been added for unparenthesized walruses in set literals, set + comprehensions, and indices (#2447). +- Pin `setuptools-scm` build-time dependency version (#2457) +- Exclude typing-extensions version 3.10.0.1 due to it being broken on Python 3.10 + (#2460) + +### _Blackd_ + +- Replace sys.exit(-1) with raise ImportError as it plays more nicely with tools that + scan installed packages (#2440) + +### Integrations + +- The provided pre-commit hooks no longer specify `language_version` to avoid overriding + `default_language_version` (#2430) + +## 21.7b0 + +### _Black_ + +- Configuration files using TOML features higher than spec v0.5.0 are now supported + (#2301) +- Add primer support and test for code piped into black via STDIN (#2315) +- Fix internal error when `FORCE_OPTIONAL_PARENTHESES` feature is enabled (#2332) +- Accept empty stdin (#2346) +- Provide a more useful error when parsing fails during AST safety checks (#2304) + +### Docker + +- Add new `latest_release` tag automation to follow latest black release on docker + images (#2374) + +### Integrations + +- The vim plugin now searches upwards from the directory containing the current buffer + instead of the current working directory for pyproject.toml. 
(#1871)
+- The vim plugin now reads the correct string normalization option in pyproject.toml
+  (#1869)
+- The vim plugin no longer crashes Black when there's boolean values in pyproject.toml
+  (#1869)
+
+## 21.6b0
+
+### _Black_
+
+- Fix failure caused by `fmt: skip` and indentation (#2281)
+- Account for += assignment when deciding whether to split string (#2312)
+- Correct max string length calculation when there are string operators (#2292)
+- Fixed option usage when using the `--code` flag (#2259)
+- Do not call `uvloop.install()` when _Black_ is used as a library (#2303)
+- Added `--required-version` option to require a specific version to be running (#2300)
+- Fix incorrect custom breakpoint indices when string group contains fake f-strings
+  (#2311)
+- Fix regression where `R` prefixes would be lowercased for docstrings (#2285)
+- Fix handling of named escapes (`\N{...}`) when `--experimental-string-processing` is
+  used (#2319)
+
+### Integrations
+
+- The official Black action now supports choosing what version to use, and supports the
+  major 3 OSes. (#1940)
+
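+As a quick illustration of the "used as a library" item above (#2303), a minimal
+sketch using the documented `format_str` API (assuming only that `black` is
+installed):
+
+```python
+import black
+
+# format_str() formats a snippet in memory; Mode() selects the default style
+# (88-character lines, double-quoted strings).
+print(black.format_str("x = { 'a':37,'b':42 }", mode=black.Mode()))
+# -> x = {"a": 37, "b": 42}
+```
+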
+## 21.5b2
+
+### _Black_
+
+- A space is no longer inserted into empty docstrings (#2249)
+- Fix handling of .gitignore files containing non-ASCII characters on Windows (#2229)
+- Respect `.gitignore` files in all levels, not only `root/.gitignore` file (apply
+  `.gitignore` rules like `git` does) (#2225)
+- Restored compatibility with Click 8.0 on Python 3.6 when LANG=C used (#2227)
+- Add extra uvloop install + import support if in python env (#2258)
+- Fix --experimental-string-processing crash when matching parens are not found (#2283)
+- Make sure to split lines that start with a string operator (#2286)
+- Fix regular expression that black uses to identify f-expressions (#2287)
+
+### _Blackd_
+
+- Add a lower bound for the `aiohttp-cors` dependency. Only 0.4.0 or higher is
+  supported. (#2231)
+
+### Packaging
+
+- Release self-contained x86_64 MacOS binaries as part of the GitHub release pipeline
+  (#2198)
+- Always build binaries with the latest available Python (#2260)
+
+### Documentation
+
+- Add discussion of magic comments to FAQ page (#2272)
+- `--experimental-string-processing` will be enabled by default in the future (#2273)
+- Fix typos discovered by codespell (#2228)
+- Fix Vim plugin installation instructions. (#2235)
+- Add new Frequently Asked Questions page (#2247)
+- Fix encoding + symlink issues preventing proper build on Windows (#2262)
+
+## 21.5b1
+
+### _Black_
+
+- Refactor `src/black/__init__.py` into many files (#2206)
+
+### Documentation
+
+- Replaced all remaining references to the
+  [`master`](https://github.com/psf/black/tree/main) branch with the
+  [`main`](https://github.com/psf/black/tree/main) branch. Some additional changes in
+  the source code were also made. (#2210)
+- Significantly reorganized the documentation to make much more sense. Check them out by
+  heading over to [the stable docs on RTD](https://black.readthedocs.io/en/stable/).
+  (#2174)
+
+## 21.5b0
+
+### _Black_
+
+- Set `--pyi` mode if `--stdin-filename` ends in `.pyi` (#2169)
+- Stop detecting target version as Python 3.9+ with pre-PEP-614 decorators that are
+  being called but with no arguments (#2182)
+
+### _Black-Primer_
+
+- Add `--no-diff` to black-primer to suppress formatting changes (#2187)
+
+## 21.4b2
+
+### _Black_
+
+- Fix crash if the user configuration directory is inaccessible. (#2158)
+
+- Clarify
+  [circumstances](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#pragmatism)
+  in which _Black_ may change the AST (#2159)
+
+- Allow `.gitignore` rules to be overridden by specifying `exclude` in `pyproject.toml`
+  or on the command line. (#2170)
+
+### _Packaging_
+
+- Install `primer.json` (used by `black-primer` by default) with black. (#2154)
+
+## 21.4b1
+
+### _Black_
+
+- Fix crash on docstrings ending with "\\ ". (#2142)
+
+- Fix crash when atypical whitespace is cleaned out of docstrings (#2120)
+
+- Reflect the `--skip-magic-trailing-comma` and `--experimental-string-processing` flags
+  in the name of the cache file. Without this fix, changes in these flags would not take
+  effect if the cache had already been populated. (#2131)
+
+- Don't remove necessary parentheses from assignment expression containing assert /
+  return statements. (#2143)
+
+### _Packaging_
+
+- Bump pathspec to >= 0.8.1 to solve invalid .gitignore exclusion handling
+
+## 21.4b0
+
+### _Black_
+
+- Fixed a rare but annoying formatting instability created by the combination of
+  optional trailing commas inserted by `Black` and optional parentheses looking at
+  pre-existing "magic" trailing commas. This fixes issue #1629 and all of its many many
+  duplicates. (#2126)
+
+- `Black` now processes one-line docstrings by stripping leading and trailing spaces,
+  and adding a padding space when needed to break up """". (#1740)
+
+- `Black` now cleans up leading non-breaking spaces in comments (#2092)
+
+- `Black` now respects `--skip-string-normalization` when normalizing multiline
+  docstring quotes (#1637)
+
+- `Black` no longer removes all empty lines between non-function code and decorators
+  when formatting typing stubs. Now `Black` enforces a single empty line. (#1646)
+
+- `Black` no longer adds an incorrect space after a parenthesized assignment expression
+  in if/while statements (#1655)
+
+- Added `--skip-magic-trailing-comma` / `-C` to avoid using trailing commas as a reason
+  to split lines (#1824)
+
+- fixed a crash when PWD=/ on POSIX (#1631)
+
+- fixed "I/O operation on closed file" when using --diff (#1664)
+
+- Prevent coloured diff output being interleaved with multiple files (#1673)
+
+- Added support for PEP 614 relaxed decorator syntax on python 3.9 (#1711)
+
+- Added parsing support for unparenthesized tuples and yield expressions in annotated
+  assignments (#1835)
+
+- added `--extend-exclude` argument (PR #2005)
+
+- speed up caching by avoiding pathlib (#1950)
+
+- `--diff` correctly indicates when a file doesn't end in a newline (#1662)
+
+- Added `--stdin-filename` argument to allow stdin to respect `--force-exclude` rules
+  (#1780)
+
+- Lines ending with `fmt: skip` will now be not formatted (#1800)
+
+- PR #2053: Black no longer relies on typed-ast for Python 3.8 and higher
+
+- PR #2053: Python 2 support is now optional, install with
+  `python3 -m pip install black[python2]` to maintain support.
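+
+To make the one-line docstring item above (#1740) concrete, a small sketch of the
+before/after (the padding space keeps the closing delimiter from becoming four quotes
+in a row):
+
+```python
+def before():
+    '''  He said "hi!"  '''
+
+# After formatting: surrounding spaces stripped, quotes normalized, and one padding
+# space kept because the docstring text itself ends with a quote:
+def after():
+    """He said "hi!" """
+```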
+ +- Exclude `venv` directory by default (#1683) + +- Fixed "Black produced code that is not equivalent to the source" when formatting + Python 2 docstrings (#2037) + +### _Packaging_ + +- Self-contained native _Black_ binaries are now provided for releases via GitHub + Releases (#1743) + +## 20.8b1 + +### _Packaging_ - explicitly depend on Click 7.1.2 or newer as `Black` no longer works with versions older than 7.0 -### 20.8b0 +## 20.8b0 -#### _Black_ +### _Black_ - re-implemented support for explicit trailing commas: now it works consistently within any bracket pair, including nested structures (#1288 and duplicates) @@ -53,11 +694,11 @@ this is an undocumented and unsupported feature, you lose Internet points for depending on it (#1609) -#### Vim plugin +### Vim plugin - prefer virtualenv packages over global packages (#1383) -### 19.10b0 +## 19.10b0 - added support for PEP 572 assignment expressions (#711) @@ -116,7 +757,7 @@ - `blackd` can now output the diff of formats on source code when the `X-Diff` header is provided (#969) -### 19.3b0 +## 19.3b0 - new option `--target-version` to control which Python versions _Black_-formatted code should target (#618) @@ -141,7 +782,7 @@ - `blackd` now supports CORS (#622) -### 18.9b0 +## 18.9b0 - numeric literals are now formatted by _Black_ (#452, #461, #464, #469): @@ -156,7 +797,9 @@ - hexadecimal digits are always uppercased (e.g. `0xBADC0DE`) -- added `blackd`, see [its documentation](#blackd) for more info (#349) +- added `blackd`, see + [its documentation](https://github.com/psf/black/blob/18.9b0/README.md#blackd) for + more info (#349) - adjacent string literals are now correctly split into multiple lines (#463) @@ -183,11 +826,11 @@ - note: the Vim plugin stopped registering `,=` as a default chord as it turned out to be a bad idea (#415) -### 18.6b4 +## 18.6b4 - hotfix: don't freeze when multiple comments directly precede `# fmt: off` (#371) -### 18.6b3 +## 18.6b3 - typing stub files (`.pyi`) now have blank lines added after constants (#340) @@ -215,7 +858,7 @@ - fixed a crash due to symbolic links pointing outside of the project directory (#338) -### 18.6b2 +## 18.6b2 - added `--config` (#65) @@ -229,13 +872,13 @@ - fixed unnecessary slowdown in comment placement calculation on lines without comments -### 18.6b1 +## 18.6b1 - hotfix: don't output human-facing information on stdout (#299) - hotfix: don't output cake emoji on non-zero return code (#300) -### 18.6b0 +## 18.6b0 - added `--include` and `--exclude` (#270) @@ -253,7 +896,7 @@ - _Black_ now preserves line endings when formatting a file in place (#258) -### 18.5b1 +## 18.5b1 - added `--pyi` (#249) @@ -282,7 +925,7 @@ - fixed extra empty line between a function signature and an inner function or inner class (#196) -### 18.5b0 +## 18.5b0 - call chains are now formatted according to the [fluent interfaces](https://en.wikipedia.org/wiki/Fluent_interface) style (#67) @@ -337,11 +980,11 @@ - fixed crash when dead symlinks where encountered -### 18.4a4 +## 18.4a4 - don't populate the cache on `--check` (#175) -### 18.4a3 +## 18.4a3 - added a "cache"; files already reformatted that haven't changed on disk won't be reformatted again (#109) @@ -369,7 +1012,7 @@ - fixed missing splits of ternary expressions (#141) -### 18.4a2 +## 18.4a2 - fixed parsing of unaligned standalone comments (#99, #112) @@ -380,7 +1023,7 @@ - fixed unstable formatting when encountering unnecessarily escaped quotes in a string (#120) -### 18.4a1 +## 18.4a1 - added `--quiet` (#78) @@ -392,7 +1035,7 @@ - fixed 
removing backslash escapes from raw strings (#100, #105) -### 18.4a0 +## 18.4a0 - added `--diff` (#87) @@ -417,7 +1060,7 @@ - only allow up to two empty lines on module level and only single empty lines within functions (#74) -### 18.3a4 +## 18.3a4 - `# fmt: off` and `# fmt: on` are implemented (#5) @@ -439,7 +1082,7 @@ [Sphinx auto-attribute comments](http://www.sphinx-doc.org/en/stable/ext/autodoc.html#directive-autoattribute) (#68) -### 18.3a3 +## 18.3a3 - don't remove single empty lines outside of bracketed expressions (#19) @@ -449,7 +1092,7 @@ - even better handling of numpy-style array indexing (#33, again) -### 18.3a2 +## 18.3a2 - changed positioning of binary operators to occur at beginning of lines instead of at the end, following @@ -473,7 +1116,7 @@ - fixed spurious space after star-based unary expressions (#31) -### 18.3a1 +## 18.3a1 - added `--check` @@ -492,10 +1135,10 @@ - fixed spurious space after unary operators when the operand was a complex expression (#15) -### 18.3a0 +## 18.3a0 - first published version, Happy 🍰 Day 2018! - alpha quality -- date-versioned (see: https://calver.org/) +- date-versioned (see: ) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0687aaeee52..10f60422f04 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,66 +3,8 @@ Welcome! Happy to see you willing to make the project better. Have you read the entire [user documentation](https://black.readthedocs.io/en/latest/) yet? -## Bird's eye view +Our [contributing documentation](https://black.readthedocs.org/en/latest/contributing/) +contains details on all you need to know about contributing to _Black_, the basics to +the internals of _Black_. -In terms of inspiration, _Black_ is about as configurable as _gofmt_. This is -deliberate. - -Bug reports and fixes are always welcome! Please follow the -[issue template on GitHub](https://github.com/psf/black/issues/new) for best results. - -Before you suggest a new feature or configuration knob, ask yourself why you want it. If -it enables better integration with some workflow, fixes an inconsistency, speeds things -up, and so on - go for it! On the other hand, if your answer is "because I don't like a -particular formatting" then you're not ready to embrace _Black_ yet. Such changes are -unlikely to get accepted. You can still try but prepare to be disappointed. - -## Technicalities - -Development on the latest version of Python is preferred. As of this writing it's 3.8. -You can use any operating system. I am using macOS myself and CentOS at work. - -Install all development dependencies using: - -```console -$ pipenv install --dev -$ pipenv shell -$ pre-commit install -``` - -If you haven't used `pipenv` before but are comfortable with virtualenvs, just run -`pip install pipenv` in the virtualenv you're already using and invoke the command above -from the cloned _Black_ repo. It will do the correct thing. - -Before submitting pull requests, run lints and tests with: - -```console -$ pre-commit run -a -$ python -m unittest -$ black-primer [-k -w /tmp/black_test_repos] -``` - -## black-primer - -`black-primer` is used by CI to pull down well-known _Black_ formatted projects and see -if we get source code changes. It will error on formatting changes or errors. Please run -before pushing your PR to see if you get the actions you would expect from _Black_ with -your PR. You may need to change -[primer.json](https://github.com/psf/black/blob/master/src/black_primer/primer.json) -configuration for it to pass. 
- -For more `black-primer` information visit the -[documentation](https://github.com/psf/black/blob/master/docs/black_primer.md). - -## Hygiene - -If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug, -run it again to confirm it's really fixed. - -If adding a new feature, add a test. In fact, always add a test. But wait, before adding -any large feature, first open an issue for us to discuss the idea first. - -## Finally - -Thanks again for your interest in improving the project! You're taking action when most -people decide to sit and watch. +We look forward to your contributions! diff --git a/Dockerfile b/Dockerfile index a03d23a1078..4e8f12f9798 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,19 @@ -FROM python:3 +FROM python:3-slim AS builder -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 +RUN mkdir /src +COPY . /src/ +ENV VIRTUAL_ENV=/opt/venv +RUN python -m venv $VIRTUAL_ENV +RUN . /opt/venv/bin/activate && pip install --no-cache-dir --upgrade pip setuptools wheel \ + # Install build tools to compile dependencies that don't have prebuilt wheels + && apt update && apt install -y git build-essential \ + && cd /src \ + && pip install --no-cache-dir .[colorama,d] -RUN pip install --upgrade --no-cache-dir black +FROM python:3-slim -ENTRYPOINT /usr/local/bin/black --check --diff . +# copy only Python packages to limit the image size +COPY --from=builder /opt/venv /opt/venv +ENV PATH="/opt/venv/bin:$PATH" + +CMD ["/opt/venv/bin/black"] diff --git a/README.md b/README.md index 20f6fa420b2..0ab059185f7 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,15 @@ -![Black Logo](https://raw.githubusercontent.com/psf/black/master/docs/_static/logo2-readme.png) +[![Black Logo](https://raw.githubusercontent.com/psf/black/main/docs/_static/logo2-readme.png)](https://black.readthedocs.io/en/stable/)

The Uncompromising Code Formatter

-Build Status Actions Status -Actions Status Documentation Status -Coverage Status -License: MIT +Coverage Status +License: MIT PyPI Downloads +conda-forge Code style: black

@@ -26,21 +25,12 @@ becomes transparent after a while and you can focus on the content instead. _Black_ makes code review faster by producing the smallest diffs possible. -Try it out now using the [Black Playground](https://black.now.sh). Watch the +Try it out now using the [Black Playground](https://black.vercel.app). Watch the [PyCon 2019 talk](https://youtu.be/esZLCuWs_2Y) to learn more. --- -_Contents:_ **[Installation and usage](#installation-and-usage)** | -**[Code style](#the-black-code-style)** | **[Pragmatism](#pragmatism)** | -**[pyproject.toml](#pyprojecttoml)** | **[Editor integration](#editor-integration)** | -**[blackd](#blackd)** | **[black-primer](#black-primer)** | -**[Version control integration](#version-control-integration)** | -**[GitHub Actions](#github-actions)** | -**[Ignoring unmodified files](#ignoring-unmodified-files)** | **[Used by](#used-by)** | -**[Testimonials](#testimonials)** | **[Show your style](#show-your-style)** | -**[Contributing](#contributing-to-black)** | **[Change log](#change-log)** | -**[Authors](#authors)** +**[Read the documentation on ReadTheDocs!](https://black.readthedocs.io/en/stable)** --- @@ -48,8 +38,12 @@ _Contents:_ **[Installation and usage](#installation-and-usage)** | ### Installation -_Black_ can be installed by running `pip install black`. It requires Python 3.6.0+ to -run but you can reformat Python 2 code with it, too. +_Black_ can be installed by running `pip install black`. It requires Python 3.7+ to run. +If you want to format Jupyter Notebooks, install with `pip install 'black[jupyter]'`. + +If you can't wait for the latest _hotness_ and want to install from GitHub, use: + +`pip install git+https://github.com/psf/black` #### Install from GitHub @@ -71,376 +65,98 @@ You can run _Black_ as a package if running it as a script doesn't work: python -m black {source_file_or_directory} ``` -### Command line options - -_Black_ doesn't provide many options. You can list them by running `black --help`: - -```text -Usage: black [OPTIONS] [SRC]... - - The uncompromising code formatter. - -Options: - -c, --code TEXT Format the code passed in as a string. - -l, --line-length INTEGER How many characters per line to allow. - [default: 88] - - -t, --target-version [py27|py33|py34|py35|py36|py37|py38] - Python versions that should be supported by - Black's output. [default: per-file auto- - detection] - - --pyi Format all input files like typing stubs - regardless of file extension (useful when - piping source on standard input). - - -S, --skip-string-normalization - Don't normalize string quotes or prefixes. - --check Don't write the files back, just return the - status. Return code 0 means nothing would - change. Return code 1 means some files - would be reformatted. Return code 123 means - there was an internal error. - - --diff Don't write the files back, just output a - diff for each file on stdout. - - --color / --no-color Show colored diff. Only applies when - `--diff` is given. - - --fast / --safe If --fast given, skip temporary sanity - checks. [default: --safe] - - --include TEXT A regular expression that matches files and - directories that should be included on - recursive searches. An empty value means - all files are included regardless of the - name. Use forward slashes for directories - on all platforms (Windows, too). Exclusions - are calculated first, inclusions later. - [default: \.pyi?$] - - --exclude TEXT A regular expression that matches files and - directories that should be excluded on - recursive searches. 
An empty value means no - paths are excluded. Use forward slashes for - directories on all platforms (Windows, too). - Exclusions are calculated first, inclusions - later. [default: /(\.eggs|\.git|\.hg|\.mypy - _cache|\.nox|\.tox|\.venv|\.svn|_build|buck- - out|build|dist)/] - - --force-exclude TEXT Like --exclude, but files and directories - matching this regex will be excluded even - when they are passed explicitly as arguments - - -q, --quiet Don't emit non-error messages to stderr. - Errors are still emitted; silence those with - 2>/dev/null. - - -v, --verbose Also emit messages to stderr about files - that were not changed or were ignored due to - --exclude=. - - --version Show the version and exit. - --config FILE Read configuration from FILE path. - -h, --help Show this message and exit. -``` - -_Black_ is a well-behaved Unix-style command-line tool: - -- it does nothing if no sources are passed to it; -- it will read from standard input and write to standard output if `-` is used as the - filename; -- it only outputs messages to users on standard error; -- exits with code 0 unless an internal error occurred (or `--check` was used). - -### Using _Black_ with other tools - -While _Black_ enforces formatting that conforms to PEP 8, other tools may raise warnings -about _Black_'s changes or will overwrite _Black_'s changes. A good example of this is -[isort](https://pypi.org/p/isort). Since _Black_ is barely configurable, these tools -should be configured to neither warn about nor overwrite _Black_'s changes. - -Actual details on _Black_ compatible configurations for various tools can be found in -[compatible_configs](https://github.com/psf/black/blob/master/docs/compatible_configs.md). - -### Migrating your code style without ruining git blame - -A long-standing argument against moving to automated code formatters like _Black_ is -that the migration will clutter up the output of `git blame`. This was a valid argument, -but since Git version 2.23, Git natively supports -[ignoring revisions in blame](https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revltrevgt) -with the `--ignore-rev` option. You can also pass a file listing the revisions to ignore -using the `--ignore-revs-file` option. The changes made by the revision will be ignored -when assigning blame. Lines modified by an ignored revision will be blamed on the -previous revision that modified those lines. - -So when migrating your project's code style to _Black_, reformat everything and commit -the changes (preferably in one massive commit). Then put the full 40 characters commit -identifier(s) into a file. - -``` -# Migrate code style to Black -5b4ab991dede475d393e9d69ec388fd6bd949699 -``` - -Afterwards, you can pass that file to `git blame` and see clean and meaningful blame -information. +Further information can be found in our docs: -```console -$ git blame important.py --ignore-revs-file .git-blame-ignore-revs -7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 1) def very_important_function(text, file): -abdfd8b0 (Alice Doe 2019-09-23 11:39:32 -0400 2) text = text.lstrip() -7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 3) with open(file, "r+") as f: -7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 4) f.write(formatted) -``` +- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html) -You can even configure `git` to automatically ignore revisions listed in a file on every -call to `git blame`. 
+_Black_ is already [successfully used](https://github.com/psf/black#used-by) by many
+projects, small and big. _Black_ has a comprehensive test suite, with efficient parallel
+tests, and our own auto formatting and parallel Continuous Integration runner. Now that
+we have become stable, you should not expect large changes to formatting in the future.
+Stylistic changes will mostly be responses to bug reports and support for new Python
+syntax. For more information please refer to
+[The Black Code Style](https://black.readthedocs.io/en/stable/the_black_code_style/index.html).

-```console
-$ git config blame.ignoreRevsFile .git-blame-ignore-revs
-```
+Also, as a safety measure which slows down processing, _Black_ will check that the
+reformatted code still produces a valid AST that is effectively equivalent to the
+original (see the
+[Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#ast-before-and-after-formatting)
+section for details). If you're feeling confident, use `--fast`.

-**The one caveat is that GitHub and GitLab do not yet support ignoring revisions using
-their native UI of blame.** So blame information will be cluttered with a reformatting
-commit on those platforms. (If you'd like this feature, there's an open issue for
-[GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423) and please let GitHub
-know!)
+## The _Black_ code style

-### NOTE: This is a beta product
+_Black_ is a PEP 8 compliant opinionated formatter. _Black_ reformats entire files in
+place. Style configuration options are deliberately limited and rarely added. It doesn't
+take previous formatting into account (see
+[Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#pragmatism)
+for exceptions).

-_Black_ is already [successfully used](#used-by) by many projects, small and big. It
-also sports a decent test suite. However, it is still very new. Things will probably be
-wonky for a while. This is made explicit by the "Beta" trove classifier, as well as by
-the "b" in the version number. What this means for you is that **until the formatter
-becomes stable, you should expect some formatting to change in the future**. That being
-said, no drastic stylistic changes are planned, mostly responses to bug reports.
+Our documentation covers the current _Black_ code style, but planned changes to it are
+also documented. They're both worth taking a look at:

-Also, as a temporary safety measure, _Black_ will check that the reformatted code still
-produces a valid AST that is equivalent to the original. This slows it down. If you're
-feeling confident, use `--fast`.
+- [The _Black_ Code Style: Current style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html)
+- [The _Black_ Code Style: Future style](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html)

-## The _Black_ code style
+Changes to the _Black_ code style are bound by the Stability Policy:

-_Black_ is a PEP 8 compliant opinionated formatter. _Black_ reformats entire files in
-place. It is not configurable. It doesn't take previous formatting into account. Your
-main option of configuring _Black_ is that it doesn't reformat blocks that start with
-`# fmt: off` and end with `# fmt: on`. `# fmt: on/off` have to be on the same level of
-indentation. To learn more about _Black_'s opinions, to go
-[the_black_code_style](https://github.com/psf/black/blob/master/docs/the_black_code_style.md).
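A minimal sketch of what the AST safety check described above amounts to, using only the
standard library `ast` module. This is illustrative only, not Black's actual
implementation, which also normalizes details such as docstring whitespace before
comparing:

```python
import ast


def asts_equivalent(src: str, dst: str) -> bool:
    """Return True if two source strings parse to structurally equal ASTs."""
    try:
        return ast.dump(ast.parse(src)) == ast.dump(ast.parse(dst))
    except SyntaxError:
        return False


# Formatting-only changes leave the AST untouched...
assert asts_equivalent("x=(1,2 ,3)", "x = (1, 2, 3)\n")
# ...while a change in meaning does not.
assert not asts_equivalent("x = 1", "x = 2")
```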
+- [The _Black_ Code Style: Stability Policy](https://black.readthedocs.io/en/stable/the_black_code_style/index.html#stability-policy) Please refer to this document before submitting an issue. What seems like a bug might be intended behaviour. -## Pragmatism +### Pragmatism Early versions of _Black_ used to be absolutist in some respects. They took after its initial author. This was fine at the time as it made the implementation simpler and there were not many users anyway. Not many edge cases were reported. As a mature tool, -_Black_ does make some exceptions to rules it otherwise holds. This -[section](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#pragmatism) -of `the_black_code_style` describes what those exceptions are and why this is the case. +_Black_ does make some exceptions to rules it otherwise holds. + +- [The _Black_ code style: Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#pragmatism) Please refer to this document before submitting an issue just like with the document above. What seems like a bug might be intended behaviour. -## pyproject.toml +## Configuration _Black_ is able to read project-specific default values for its command line options from a `pyproject.toml` file. This is especially useful for specifying custom -`--include` and `--exclude` patterns for your project. - -**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is -"No". _Black_ is all about sensible defaults. - -### What on Earth is a `pyproject.toml` file? - -[PEP 518](https://www.python.org/dev/peps/pep-0518/) defines `pyproject.toml` as a -configuration file to store build system requirements for Python projects. With the help -of tools like [Poetry](https://python-poetry.org/) or -[Flit](https://flit.readthedocs.io/en/latest/) it can fully replace the need for -`setup.py` and `setup.cfg` files. - -### Where _Black_ looks for the file - -By default _Black_ looks for `pyproject.toml` starting from the common base directory of -all files and directories passed on the command line. If it's not there, it looks in -parent directories. It stops looking when it finds the file, or a `.git` directory, or a -`.hg` directory, or the root of the file system, whichever comes first. - -If you're formatting standard input, _Black_ will look for configuration starting from -the current working directory. - -You can also explicitly specify the path to a particular file that you want with -`--config`. In this situation _Black_ will not look for any other file. - -If you're running with `--verbose`, you will see a blue message if a file was found and -used. - -Please note `blackd` will not use `pyproject.toml` configuration. - -### Configuration format - -As the file extension suggests, `pyproject.toml` is a -[TOML](https://github.com/toml-lang/toml) file. It contains separate sections for -different tools. _Black_ is using the `[tool.black]` section. The option keys are the -same as long names of options on the command line. - -Note that you have to use single-quoted strings in TOML for regular expressions. It's -the equivalent of r-strings in Python. Multiline strings are treated as verbose regular -expressions by Black. Use `[ ]` to denote a significant space character. - -
-Example pyproject.toml - -```toml -[tool.black] -line-length = 88 -target-version = ['py37'] -include = '\.pyi?$' -exclude = ''' - -( - /( - \.eggs # exclude a few common directories in the - | \.git # root of the project - | \.hg - | \.mypy_cache - | \.tox - | \.venv - | _build - | buck-out - | build - | dist - )/ - | foo.py # also separately exclude a file named foo.py in - # the root of the project -) -''' -``` - -
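To make the shape of such a configuration concrete, here is a small sketch of reading
the `[tool.black]` table from a `pyproject.toml` like the example above, using only the
standard-library `tomllib` (Python 3.11+). Black itself ships its own helpers for this;
the sketch is just for illustration:

```python
import tomllib

with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

# Option keys mirror the long command-line option names.
black_options = config.get("tool", {}).get("black", {})
print(black_options.get("line-length"))     # 88
print(black_options.get("target-version"))  # ['py37']
print(black_options.get("exclude"))         # the multiline verbose regex, as a string
```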
- -### Lookup hierarchy - -Command-line options have defaults that you can see in `--help`. A `pyproject.toml` can -override those defaults. Finally, options provided by the user on the command line -override both. - -_Black_ will only ever use one `pyproject.toml` file during an entire run. It doesn't -look for multiple files, and doesn't compose configuration from different levels of the -file hierarchy. +`--include` and `--exclude`/`--force-exclude`/`--extend-exclude` patterns for your +project. -## Editor integration +You can find more details in our documentation: -_Black_ can be integrated into many editors with plugins. They let you run _Black_ on -your code with the ease of doing it in your editor. To get started using _Black_ in your -editor of choice, please see -[editor_integration](https://github.com/psf/black/blob/master/docs/editor_integration.md). +- [The basics: Configuration via a file](https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#configuration-via-a-file) -Patches are welcome for editors without an editor integration or plugin! More -information can be found in -[editor_integration](https://github.com/psf/black/blob/master/docs/editor_integration.md#other-editors). +And if you're looking for more general configuration documentation: -## blackd +- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html) -`blackd` is a small HTTP server that exposes Black's functionality over a simple -protocol. The main benefit of using it is to avoid paying the cost of starting up a new -Black process every time you want to blacken a file. Please refer to -[blackd](https://github.com/psf/black/blob/master/docs/blackd.md) to get the ball -rolling. - -## black-primer - -`black-primer` is a tool built for CI (and humans) to have _Black_ `--check` a number of -(configured in `primer.json`) Git accessible projects in parallel. -[black_primer](https://github.com/psf/black/blob/master/docs/black_primer.md) has more -information regarding its usage and configuration. - -(A PR adding Mercurial support will be accepted.) - -## Version control integration - -Use [pre-commit](https://pre-commit.com/). Once you -[have it installed](https://pre-commit.com/#install), add this to the -`.pre-commit-config.yaml` in your repository: - -```yaml -repos: - - repo: https://github.com/psf/black - rev: 19.10b0 # Replace by any tag/version: https://github.com/psf/black/tags - hooks: - - id: black - language_version: python3 # Should be a command that runs python3.6+ -``` - -Then run `pre-commit install` and you're ready to go. - -Avoid using `args` in the hook. Instead, store necessary configuration in -`pyproject.toml` so that editors and command-line usage of Black all behave consistently -for your project. See _Black_'s own -[pyproject.toml](https://github.com/psf/black/blob/master/pyproject.toml) for an -example. - -If you're already using Python 3.7, switch the `language_version` accordingly. Finally, -`stable` is a branch that tracks the latest release on PyPI. If you'd rather run on -master, this is also an option. 
- -## GitHub Actions - -Create a file named `.github/workflows/black.yml` inside your repository with: - -```yaml -name: Lint - -on: [push, pull_request] - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - - uses: psf/black@stable -``` - -## Ignoring unmodified files - -_Black_ remembers files it has already formatted, unless the `--diff` flag is used or -code is passed via standard input. This information is stored per-user. The exact -location of the file depends on the _Black_ version and the system on which _Black_ is -run. The file is non-portable. The standard location on common operating systems is: - -- Windows: - `C:\\Users\\AppData\Local\black\black\Cache\\cache...pickle` -- macOS: - `/Users//Library/Caches/black//cache...pickle` -- Linux: - `/home//.cache/black//cache...pickle` - -`file-mode` is an int flag that determines whether the file was formatted as 3.6+ only, -as .pyi, and whether string normalization was omitted. - -To override the location of these files on macOS or Linux, set the environment variable -`XDG_CACHE_HOME` to your preferred location. For example, if you want to put the cache -in the directory you're running _Black_ from, set `XDG_CACHE_HOME=.cache`. _Black_ will -then write the above files to `.cache/black//`. +**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is +"No". _Black_ is all about sensible defaults. Applying those defaults will have your +code in compliance with many other _Black_ formatted projects. ## Used by The following notable open-source projects trust _Black_ with enforcing a consistent -code style: pytest, tox, Pyramid, Django Channels, Hypothesis, attrs, SQLAlchemy, -Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv), pandas, Pillow, -every Datadog Agent Integration, Home Assistant. +code style: pytest, tox, Pyramid, Django, Django Channels, Hypothesis, attrs, +SQLAlchemy, Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv), +pandas, Pillow, Twisted, LocalStack, every Datadog Agent Integration, Home Assistant, +Zulip, Kedro, OpenOA, FLORIS, ORBIT, WOMBAT, and many more. -The following organizations use _Black_: Facebook, Dropbox, Mozilla, Quora. +The following organizations use _Black_: Facebook, Dropbox, KeepTruckin, Mozilla, Quora, +Duolingo, QuantumBlack, Tesla. Are we missing anyone? Let us know. ## Testimonials +**Mike Bayer**, [author of `SQLAlchemy`](https://www.sqlalchemy.org/): + +> I can't think of any single tool in my entire programming career that has given me a +> bigger productivity increase by its introduction. I can now do refactorings in about +> 1% of the keystrokes that it would have taken me previously when we had no way for +> code to format itself. + **Dusty Phillips**, [writer](https://smile.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=dusty+phillips): @@ -455,8 +171,8 @@ Twisted and CPython: > At least the name is good. -**Kenneth Reitz**, creator of [`requests`](http://python-requests.org/) and -[`pipenv`](https://readthedocs.org/projects/pipenv/): +**Kenneth Reitz**, creator of [`requests`](https://requests.readthedocs.io/en/latest/) +and [`pipenv`](https://readthedocs.org/projects/pipenv/): > This vastly improves the formatting of our code. Thanks a ton! @@ -482,206 +198,39 @@ Looks like this: MIT -## Contributing to _Black_ +## Contributing + +Welcome! Happy to see you willing to make the project better. 
You can get started by +reading this: -In terms of inspiration, _Black_ is about as configurable as _gofmt_. This is -deliberate. +- [Contributing: The basics](https://black.readthedocs.io/en/latest/contributing/the_basics.html) -Bug reports and fixes are always welcome! However, before you suggest a new feature or -configuration knob, ask yourself why you want it. If it enables better integration with -some workflow, fixes an inconsistency, speeds things up, and so on - go for it! On the -other hand, if your answer is "because I don't like a particular formatting" then you're -not ready to embrace _Black_ yet. Such changes are unlikely to get accepted. You can -still try but prepare to be disappointed. +You can also take a look at the rest of the contributing docs or talk with the +developers: -More details can be found in -[CONTRIBUTING](https://github.com/psf/black/blob/master/CONTRIBUTING.md). +- [Contributing documentation](https://black.readthedocs.io/en/latest/contributing/index.html) +- [Chat on Discord](https://discord.gg/RtVdv86PrH) ## Change log -The log's become rather long. It moved to its own file. +The log has become rather long. It moved to its own file. -See [CHANGES](https://github.com/psf/black/blob/master/CHANGES.md). +See [CHANGES](https://black.readthedocs.io/en/latest/change_log.html). ## Authors -Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). - -Maintained with [Carol Willing](mailto:carolcode@willingconsulting.com), -[Carl Meyer](mailto:carl@oddbird.net), -[Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com), -[Mika Naylor](mailto:mail@autophagy.io), -[Zsolt Dollenstein](mailto:zsol.zsol@gmail.com), and -[Cooper Lees](mailto:me@cooperlees.com). - -Multiple contributions by: - -- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) -- [Adam Johnson](mailto:me@adamj.eu) -- [Adam Williamson](mailto:adamw@happyassassin.net) -- [Alexander Huynh](mailto:github@grande.coffee) -- [Alex Vandiver](mailto:github@chmrr.net) -- [Allan Simon](mailto:allan.simon@supinfo.com) -- Anders-Petter Ljungquist -- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) -- [Andrew Zhou](mailto:andrewfzhou@gmail.com) -- [Andrey](mailto:dyuuus@yandex.ru) -- [Andy Freeland](mailto:andy@andyfreeland.net) -- [Anthony Sottile](mailto:asottile@umich.edu) -- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) -- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) -- [Artem Malyshev](mailto:proofit404@gmail.com) -- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) -- [Augie Fackler](mailto:raf@durin42.com) -- [Aviskar KC](mailto:aviskarkc10@gmail.com) -- Batuhan Taşkaya -- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) -- [Benjamin Woodruff](mailto:github@benjam.info) -- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) -- [Brandt Bucher](mailto:brandtbucher@gmail.com) -- [Brett Cannon](mailto:brett@python.org) -- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) -- [Bryan Forbes](mailto:bryan@reigndropsfall.net) -- [Calum Lind](mailto:calumlind@gmail.com) -- [Charles](mailto:peacech@gmail.com) -- Charles Reid -- [Christian Clauss](mailto:cclauss@bluewin.ch) -- [Christian Heimes](mailto:christian@python.org) -- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) -- [Chris Rose](mailto:offline@offby1.net) -- Codey Oxley -- [Cong](mailto:congusbongus@gmail.com) -- [Cooper Ry Lees](mailto:me@cooperlees.com) -- [Dan Davison](mailto:dandavison7@gmail.com) -- [Daniel Hahler](mailto:github@thequod.de) -- [Daniel M. 
Capella](mailto:polycitizen@gmail.com) -- Daniele Esposti -- [David Hotham](mailto:david.hotham@metaswitch.com) -- [David Lukes](mailto:dafydd.lukes@gmail.com) -- [David Szotten](mailto:davidszotten@gmail.com) -- [Denis Laxalde](mailto:denis@laxalde.org) -- [Douglas Thor](mailto:dthor@transphormusa.com) -- dylanjblack -- [Eli Treuherz](mailto:eli@treuherz.com) -- [Emil Hessman](mailto:emil@hessman.se) -- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) -- [Florent Thiery](mailto:fthiery@gmail.com) -- Francisco -- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) -- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) -- [Gregory P. Smith](mailto:greg@krypto.org) -- Gustavo Camargo -- hauntsaninja -- [Heaford](mailto:dan@heaford.com) -- [Hugo Barrera](mailto::hugo@barrera.io) -- Hugo van Kemenade -- [Hynek Schlawack](mailto:hs@ox.cx) -- [Ivan Katanić](mailto:ivan.katanic@gmail.com) -- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net) -- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com) -- [Jan Hnátek](mailto:jan.hnatek@gmail.com) -- [Jason Fried](mailto:me@jasonfried.info) -- [Jason Friedland](mailto:jason@friedland.id.au) -- [jgirardet](mailto:ijkl@netc.fr) -- Jim Brännlund -- [Jimmy Jia](mailto:tesrin@gmail.com) -- [Joe Antonakakis](mailto:jma353@cornell.edu) -- [Jon Dufresne](mailto:jon.dufresne@gmail.com) -- [Jonas Obrist](mailto:ojiidotch@gmail.com) -- [Jonty Wareing](mailto:jonty@jonty.co.uk) -- [Jose Nazario](mailto:jose.monkey.org@gmail.com) -- [Joseph Larson](mailto:larson.joseph@gmail.com) -- [Josh Bode](mailto:joshbode@fastmail.com) -- [Josh Holland](mailto:anowlcalledjosh@gmail.com) -- [José Padilla](mailto:jpadilla@webapplicate.com) -- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) -- [kaiix](mailto:kvn.hou@gmail.com) -- [Katie McLaughlin](mailto:katie@glasnt.com) -- Katrin Leinweber -- [Keith Smiley](mailto:keithbsmiley@gmail.com) -- [Kenyon Ralph](mailto:kenyon@kenyonralph.com) -- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com) -- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com) -- [Kyle Sunden](mailto:sunden@wisc.edu) -- Lawrence Chan -- [Linus Groh](mailto:mail@linusgroh.de) -- [Loren Carvalho](mailto:comradeloren@gmail.com) -- [Luka Sterbic](mailto:luka.sterbic@gmail.com) -- [LukasDrude](mailto:mail@lukas-drude.de) -- Mahmoud Hossam -- Mariatta -- [Matt VanEseltine](mailto:vaneseltine@gmail.com) -- [Matthew Clapp](mailto:itsayellow+dev@gmail.com) -- [Matthew Walster](mailto:matthew@walster.org) -- Max Smolens -- [Michael Aquilina](mailto:michaelaquilina@gmail.com) -- [Michael Flaxman](mailto:michael.flaxman@gmail.com) -- [Michael J. 
Sullivan](mailto:sully@msully.net) -- [Michael McClimon](mailto:michael@mcclimon.org) -- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) -- [Mike](mailto:roshi@fedoraproject.org) -- [mikehoyio](mailto:mikehoy@gmail.com) -- [Min ho Kim](mailto:minho42@gmail.com) -- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) -- MomIsBestFriend -- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) -- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) -- [Neraste](mailto:neraste.herr10@gmail.com) -- [Nikolaus Waxweiler](mailto:madigens@gmail.com) -- [Ofek Lev](mailto:ofekmeister@gmail.com) -- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) -- [otstrel](mailto:otstrel@gmail.com) -- [Pablo Galindo](mailto:Pablogsal@gmail.com) -- [Paul Ganssle](mailto:p.ganssle@gmail.com) -- [Paul Meinhardt](mailto:mnhrdt@gmail.com) -- [Peter Bengtsson](mailto:mail@peterbe.com) -- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) -- pmacosta -- [Quentin Pradet](mailto:quentin@pradet.me) -- [Ralf Schmitt](mailto:ralf@systemexit.de) -- [Ramón Valles](mailto:mroutis@protonmail.com) -- [Richard Fearn](mailto:richardfearn@gmail.com) -- Richard Si -- [Rishikesh Jha](mailto:rishijha424@gmail.com) -- [Rupert Bedford](mailto:rupert@rupertb.com) -- Russell Davis -- [Rémi Verschelde](mailto:rverschelde@gmail.com) -- [Sami Salonen](mailto:sakki@iki.fi) -- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) -- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) -- Sergi -- [Scott Stevenson](mailto:scott@stevenson.io) -- Shantanu -- [shaoran](mailto:shaoran@sakuranohana.org) -- [Shinya Fujino](mailto:shf0811@gmail.com) -- springstan -- [Stavros Korokithakis](mailto:hi@stavros.io) -- [Stephen Rosen](mailto:sirosen@globus.org) -- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com) -- [Sunil Kapil](mailto:snlkapil@gmail.com) -- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) -- [Tal Amuyal](mailto:TalAmuyal@gmail.com) -- [Terrance](mailto:git@terrance.allofti.me) -- [Thom Lu](mailto:thomas.c.lu@gmail.com) -- [Thomas Grainger](mailto:tagrain@gmail.com) -- [Tim Gates](mailto:tim.gates@iress.com) -- [Tim Swast](mailto:swast@google.com) -- [Timo](mailto:timo_tk@hotmail.com) -- Toby Fleming -- [Tom Christie](mailto:tom@tomchristie.com) -- [Tony Narlock](mailto:tony@git-pull.com) -- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) -- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) -- [Tzu-ping Chung](mailto:uranusjr@gmail.com) -- [Utsav Shah](mailto:ukshah2@illinois.edu) -- utsav-dbx -- vezeli -- [Ville Skyttä](mailto:ville.skytta@iki.fi) -- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) -- [Vlad Emelianov](mailto:volshebnyi@gmail.com) -- [williamfzc](mailto:178894043@qq.com) -- [wouter bolsterlee](mailto:wouter@bolsterl.ee) -- Yazdan -- [Yngve Høiseth](mailto:yngve@hoiseth.net) -- [Yurii Karabas](mailto:1998uriyyo@gmail.com) -- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) +The author list is quite long nowadays, so it lives in its own file. + +See [AUTHORS.md](./AUTHORS.md) + +## Code of Conduct + +Everyone participating in the _Black_ project, and in particular in the issue tracker, +pull requests, and social media activity, is expected to treat other people with respect +and more generally to follow the guidelines articulated in the +[Python Community Code of Conduct](https://www.python.org/psf/codeofconduct/). + +At the same time, humor is encouraged. In fact, basic familiarity with Monty Python's +Flying Circus is expected. We are not savages. 
+ +And if you _really_ need to slap somebody, do it with a fish while dancing. diff --git a/action.yml b/action.yml index 2ce1c0bf2ef..35705e99414 100644 --- a/action.yml +++ b/action.yml @@ -1,9 +1,50 @@ name: "Black" description: "The uncompromising Python code formatter." author: "Łukasz Langa and contributors to Black" +inputs: + options: + description: + "Options passed to Black. Use `black --help` to see available options. Default: + '--check --diff'" + required: false + default: "--check --diff" + src: + description: "Source to run Black. Default: '.'" + required: false + default: "." + jupyter: + description: + "Set this option to true to include Jupyter Notebook files. Default: false" + required: false + default: false + black_args: + description: "[DEPRECATED] Black input arguments." + required: false + default: "" + deprecationMessage: + "Input `with.black_args` is deprecated. Use `with.options` and `with.src` instead." + version: + description: 'Python Version specifier (PEP440) - e.g. "21.5b1"' + required: false + default: "" branding: color: "black" icon: "check-circle" runs: - using: "docker" - image: "Dockerfile" + using: composite + steps: + - run: | + if [ "$RUNNER_OS" == "Windows" ]; then + python $GITHUB_ACTION_PATH/action/main.py + else + python3 $GITHUB_ACTION_PATH/action/main.py + fi + env: + # TODO: Remove once https://github.com/actions/runner/issues/665 is fixed. + INPUT_OPTIONS: ${{ inputs.options }} + INPUT_SRC: ${{ inputs.src }} + INPUT_JUPYTER: ${{ inputs.jupyter }} + INPUT_BLACK_ARGS: ${{ inputs.black_args }} + INPUT_VERSION: ${{ inputs.version }} + pythonioencoding: utf-8 + shell: bash diff --git a/action/main.py b/action/main.py new file mode 100644 index 00000000000..ff9d4112aed --- /dev/null +++ b/action/main.py @@ -0,0 +1,45 @@ +import os +import shlex +import sys +from pathlib import Path +from subprocess import PIPE, STDOUT, run + +ACTION_PATH = Path(os.environ["GITHUB_ACTION_PATH"]) +ENV_PATH = ACTION_PATH / ".black-env" +ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin") +OPTIONS = os.getenv("INPUT_OPTIONS", default="") +SRC = os.getenv("INPUT_SRC", default="") +JUPYTER = os.getenv("INPUT_JUPYTER") == "true" +BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="") +VERSION = os.getenv("INPUT_VERSION", default="") + +run([sys.executable, "-m", "venv", str(ENV_PATH)], check=True) + +version_specifier = VERSION +if VERSION and VERSION[0] in "0123456789": + version_specifier = f"=={VERSION}" +if JUPYTER: + extra_deps = "[colorama,jupyter]" +else: + extra_deps = "[colorama]" +req = f"black{extra_deps}{version_specifier}" +pip_proc = run( + [str(ENV_BIN / "python"), "-m", "pip", "install", req], + stdout=PIPE, + stderr=STDOUT, + encoding="utf-8", +) +if pip_proc.returncode: + print(pip_proc.stdout) + print("::error::Failed to install Black.", flush=True) + sys.exit(pip_proc.returncode) + + +base_cmd = [str(ENV_BIN / "black")] +if BLACK_ARGS: + # TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS. 
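+    # Illustrative note: shlex.split() honours shell-style quoting, so a value
+    # such as --include '\.pyi?$' reaches Black as a single argument.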
+ proc = run([*base_cmd, *shlex.split(BLACK_ARGS)]) +else: + proc = run([*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)]) + +sys.exit(proc.returncode) diff --git a/autoload/black.vim b/autoload/black.vim new file mode 100644 index 00000000000..e87a1e4edfa --- /dev/null +++ b/autoload/black.vim @@ -0,0 +1,225 @@ +python3 << EndPython3 +import collections +import os +import sys +import vim + +def strtobool(text): + if text.lower() in ['y', 'yes', 't', 'true', 'on', '1']: + return True + if text.lower() in ['n', 'no', 'f', 'false', 'off', '0']: + return False + raise ValueError(f"{text} is not convertable to boolean") + +class Flag(collections.namedtuple("FlagBase", "name, cast")): + @property + def var_name(self): + return self.name.replace("-", "_") + + @property + def vim_rc_name(self): + name = self.var_name + if name == "line_length": + name = name.replace("_", "") + return "g:black_" + name + + +FLAGS = [ + Flag(name="line_length", cast=int), + Flag(name="fast", cast=strtobool), + Flag(name="skip_string_normalization", cast=strtobool), + Flag(name="quiet", cast=strtobool), + Flag(name="skip_magic_trailing_comma", cast=strtobool), + Flag(name="preview", cast=strtobool), +] + + +def _get_python_binary(exec_prefix): + try: + default = vim.eval("g:pymode_python").strip() + except vim.error: + default = "" + if default and os.path.exists(default): + return default + if sys.platform[:3] == "win": + return exec_prefix / 'python.exe' + return exec_prefix / 'bin' / 'python3' + +def _get_pip(venv_path): + if sys.platform[:3] == "win": + return venv_path / 'Scripts' / 'pip.exe' + return venv_path / 'bin' / 'pip' + +def _get_virtualenv_site_packages(venv_path, pyver): + if sys.platform[:3] == "win": + return venv_path / 'Lib' / 'site-packages' + return venv_path / 'lib' / f'python{pyver[0]}.{pyver[1]}' / 'site-packages' + +def _initialize_black_env(upgrade=False): + pyver = sys.version_info[:3] + if pyver < (3, 6, 2): + print("Sorry, Black requires Python 3.6.2+ to run.") + return False + + from pathlib import Path + import subprocess + import venv + virtualenv_path = Path(vim.eval("g:black_virtualenv")).expanduser() + virtualenv_site_packages = str(_get_virtualenv_site_packages(virtualenv_path, pyver)) + first_install = False + if not virtualenv_path.is_dir(): + print('Please wait, one time setup for Black.') + _executable = sys.executable + _base_executable = getattr(sys, "_base_executable", _executable) + try: + executable = str(_get_python_binary(Path(sys.exec_prefix))) + sys.executable = executable + sys._base_executable = executable + print(f'Creating a virtualenv in {virtualenv_path}...') + print('(this path can be customized in .vimrc by setting g:black_virtualenv)') + venv.create(virtualenv_path, with_pip=True) + except Exception: + print('Encountered exception while creating virtualenv (see traceback below).') + print(f'Removing {virtualenv_path}...') + import shutil + shutil.rmtree(virtualenv_path) + raise + finally: + sys.executable = _executable + sys._base_executable = _base_executable + first_install = True + if first_install: + print('Installing Black with pip...') + if upgrade: + print('Upgrading Black with pip...') + if first_install or upgrade: + subprocess.run([str(_get_pip(virtualenv_path)), 'install', '-U', 'black'], stdout=subprocess.PIPE) + print('DONE! 
You are all set, thanks for waiting ✨ 🍰 ✨') + if first_install: + print('Pro-tip: to upgrade Black in the future, use the :BlackUpgrade command and restart Vim.\n') + if virtualenv_site_packages not in sys.path: + sys.path.insert(0, virtualenv_site_packages) + return True + +if _initialize_black_env(): + import black + import time + +def get_target_version(tv): + if isinstance(tv, black.TargetVersion): + return tv + ret = None + try: + ret = black.TargetVersion[tv.upper()] + except KeyError: + print(f"WARNING: Target version {tv!r} not recognized by Black, using default target") + return ret + +def Black(**kwargs): + """ + kwargs allows you to override ``target_versions`` argument of + ``black.FileMode``. + + ``target_version`` needs to be cleaned because ``black.FileMode`` + expects the ``target_versions`` argument to be a set of TargetVersion enums. + + Allow kwargs["target_version"] to be a string to allow + to type it more quickly. + + Using also target_version instead of target_versions to remain + consistent to Black's documentation of the structure of pyproject.toml. + """ + start = time.time() + configs = get_configs() + + black_kwargs = {} + if "target_version" in kwargs: + target_version = kwargs["target_version"] + + if not isinstance(target_version, (list, set)): + target_version = [target_version] + target_version = set(filter(lambda x: x, map(lambda tv: get_target_version(tv), target_version))) + black_kwargs["target_versions"] = target_version + + mode = black.FileMode( + line_length=configs["line_length"], + string_normalization=not configs["skip_string_normalization"], + is_pyi=vim.current.buffer.name.endswith('.pyi'), + magic_trailing_comma=not configs["skip_magic_trailing_comma"], + preview=configs["preview"], + **black_kwargs, + ) + quiet = configs["quiet"] + + buffer_str = '\n'.join(vim.current.buffer) + '\n' + try: + new_buffer_str = black.format_file_contents( + buffer_str, + fast=configs["fast"], + mode=mode, + ) + except black.NothingChanged: + if not quiet: + print(f'Black: already well formatted, good job. (took {time.time() - start:.4f}s)') + except Exception as exc: + print(f'Black: {exc}') + else: + current_buffer = vim.current.window.buffer + cursors = [] + for i, tabpage in enumerate(vim.tabpages): + if tabpage.valid: + for j, window in enumerate(tabpage.windows): + if window.valid and window.buffer == current_buffer: + cursors.append((i, j, window.cursor)) + vim.current.buffer[:] = new_buffer_str.split('\n')[:-1] + for i, j, cursor in cursors: + window = vim.tabpages[i].windows[j] + try: + window.cursor = cursor + except vim.error: + window.cursor = (len(window.buffer), 0) + if not quiet: + print(f'Black: reformatted in {time.time() - start:.4f}s.') + +def get_configs(): + filename = vim.eval("@%") + path_pyproject_toml = black.find_pyproject_toml((filename,)) + if path_pyproject_toml: + toml_config = black.parse_pyproject_toml(path_pyproject_toml) + else: + toml_config = {} + + return { + flag.var_name: toml_config.get(flag.name, flag.cast(vim.eval(flag.vim_rc_name))) + for flag in FLAGS + } + + +def BlackUpgrade(): + _initialize_black_env(upgrade=True) + +def BlackVersion(): + print(f'Black, version {black.__version__} on Python {sys.version}.') + +EndPython3 + +function black#Black(...) 
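+  " The optional arguments arrive as 'key=value' strings (for example
+  " 'target_version=py38') and are collected into a dict that is handed to the
+  " Python Black() helper defined above.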
+ let kwargs = {} + for arg in a:000 + let arg_list = split(arg, '=') + let kwargs[arg_list[0]] = arg_list[1] + endfor +python3 << EOF +import vim +kwargs = vim.eval("kwargs") +EOF + :py3 Black(**kwargs) +endfunction + +function black#BlackUpgrade() + :py3 BlackUpgrade() +endfunction + +function black#BlackVersion() + :py3 BlackVersion() +endfunction diff --git a/docs/Makefile b/docs/Makefile index 2e0e5eeaf9d..cb0463c842b 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -17,4 +17,4 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/custom.css b/docs/_static/custom.css deleted file mode 100644 index c06c40a2dfc..00000000000 --- a/docs/_static/custom.css +++ /dev/null @@ -1,38 +0,0 @@ -/* Make the sidebar scrollable. Fixes https://github.com/psf/black/issues/990 */ -div.sphinxsidebar { - max-height: calc(100% - 18px); - overflow-y: auto; -} - -/* Hide scrollbar for Chrome, Safari and Opera */ -div.sphinxsidebar::-webkit-scrollbar { - display: none; -} - -/* Hide scrollbar for IE 6, 7 and 8 */ -@media \0screen\, screen\9 { - div.sphinxsidebar { - -ms-overflow-style: none; - } -} - -/* Hide scrollbar for IE 9 and 10 */ -/* backslash-9 removes ie11+ & old Safari 4 */ -@media screen and (min-width: 0\0) { - div.sphinxsidebar { - -ms-overflow-style: none\9; - } -} - -/* Hide scrollbar for IE 11 and up */ -_:-ms-fullscreen, -:root div.sphinxsidebar { - -ms-overflow-style: none; -} - -/* Hide scrollbar for Edge */ -@supports (-ms-ime-align: auto) { - div.sphinxsidebar { - -ms-overflow-style: none; - } -} diff --git a/docs/_static/license.svg b/docs/_static/license.svg index 36bea29f654..25aa18b91af 100644 --- a/docs/_static/license.svg +++ b/docs/_static/license.svg @@ -1 +1 @@ -licenselicenseMITMIT \ No newline at end of file +licenselicenseMITMIT diff --git a/docs/_static/logo2-readme.png b/docs/_static/logo2-readme.png index ec5dafb64958d222ecbe9a29e9e13963acc6d73d..7704ec01ed98a803f59465020422ff6cd24c0958 100644 GIT binary patch literal 99591 zcmeFYj0ef?3thkKk(7k)Wwj( z!`8;mncqW*@?U501E2pn45Fm?*C{U6LX?_viWFk@PNo!GtemWDl)`8f6cmC^CT9G~ zU%vjIp99~7C@oxE9QZ*XcXxMIcMev2Cvy-xA0Hox?IY;pM;71=7H3a87efygJ7=nY zU*td6`C{s9>}2WSVrg$j@y~S)jqF`rgeWQhxzT@L{~o7{rP=@PWas>UrUgt8^v{1l z?5u2{|6UvTso+0H`4yZjO@TZAbA4fU!GE3k|2p=6?js2L=jQ*{VE#SRe;oy;DvTxw z`tPy{qseyDX+3)$q$~ABRMlhQaM?E0)ZF*k-*k)L>Wg_sWtGa(RFVx3lLWYxwr?NI z8Z@=(3lk|-wJjzlm3Ko7{>t?FFa%TLPArey9dx)Kk1Wlnk(%$Au9_tr9WH124vx{b z3AANk<0Pb}&UoAk?kt@-A+n;vqPW63$?y3PJHk7eJD5u%Pdg*y_Za`$)Bpd^|A$sU zB#&B3pJ4sctpEQ@?{BcO z1>BE~4Ifp^L75MAY!BtwkT)k+h7TfH0!+#01m9<&@eKmERm>|9z|d7Q$AVBGl% z{*vfvUF~rA(|u>Z)O=!{JBsi+9i&^uhijRSr`&^?N-X^h;YJ$OFEgcDn8vic#lvs-LIW!628+t96I@Xzh((?UgzzR zs^jBh*h8EInWuAejC*8cq@>?-VD$HFSq0Mv#>!jky%Fxhe}5t)I-VZyL~x7zYg<}w zV#>?QpK3fDY8tAm9WH(bAX}amF*zpE1%_MJN#brT?rd-Cvu*F}3|qKTjx=o#-{?q` zr`k904_B(MI#y=;Mi^i5`j;PMZuz*k-K7kF*Pv;cO|m$UZygPG?(-&E?^|DA-ya$p z(ug4!c?<@I>qGkswa2;nU~@15F}J+@Fdu}91=FuZ2S(~K8aAWMepA!X;A$RvmBsJ6 zkA+1pd=F=0VghUH>u1|FXk+nYa14&fhJK~8{FQ21FdUU;Up^K4p4EA;wlTNNc#>s>j#CR~`1sBtx+hO3uXDvhj>E=J{pB;S z=K$*wuI}h~fF8iBo^*Y-y@Y}B{0;wIn%SDaY}d*4s|tAeySTbu*e9o?AhIA3FeD?R zjgZOLjEF<3Lmx{?X1@VD%8Q0CNeyK*cu!ZKfh&W?b!aGe$f0%HX569Oj2#111!GJZ zW~^|5wZiOI&D<~4>z?lGgznt1o-Z8;niAg%3kaR`ARZn 
[base85-encoded GIT binary patch data for docs/_static/logo2-readme.png omitted; the payload is truncated in the source.]

5r|zS_hcEOuUJ(7jsMRBM8{4-xbPL1cED-W1a7WP6Ca;o6^evdoxAzRRR7o zysso)CxMkf4O{89VwEA-*bRspZUH)CI_O(>zT)3*su;nzPet)W^)Y5$2sU4%AUzrK zl)p1vZOEoozqGTLGoI$Za3j;s0|^84il#eVGZsvx1wio_zBBHo4w!M>?1f(9Q}E@T zpnj|M9sc&B=fG&W71d_Qyf8tlxZ!(ktJQ^TqaDrtivnJUqNX^_E)(oa;SD^S0qVt` zhe*ZESf3CG+>K5WA~dsX%jxt$Bh~3M20hoz?mK|VL;D{CaOP5lA{8 z6P59;I_kDi5K4#vnW*2;YC`bkrqJT%cEKpLAl>sLvenALf%Y)=zLO>4uxFaf<4E&b zV$G5--QSOvoi+N*t`iZQK1|2^!?etK+`_DLZwY0d6UgbXMRzLmD-AblXVY4F7n!NG zI-~)k!v-TOwOt_R{SaW{tEDBsXX{UMK$q$Ob!}jA={-0YM|+0 za~$k(wk;(&ufXK}4QC`cmQB;{TEAR+;h*$GFc24A-;@6GUk`{atiKfCIjw_QDzOm) zh+qU*L0whqkumfhR1uMw?J=rJn>b196rV#NV8&X-MPUepZ2T1-zsL`Dim}i*2^pSg zu^&Zv$=^NJy9rHL>d*kdeucctVa5t<0xjM^VWyDYzgOG-;X%{M(egzJayd>y#XPLP zx(nhS$>a?ydv}327p{@=37n`v^}h>RvKtE4P{ib}9t55PIBwbvpknv8h2iB1Bhv=7 z{lh=-0I@R)a#HTG_r81M!1bA*4-75Wyi^}tmQO;@&=H}to)0<-{6&uA*l!AJ=kgi;|=YzkouZ!1xFl@BpWx~ z;m>|1_WQOGAi`0(-;n!{o9#LeSiTLG{+*BX8AWLew<#BzJ`N988ht`!D z_Wq1Z1c>y3$R#>*qZ!a&9=^!+k+|+lHv7yd@>q-;Tf^V4I-{?c{alhQ!s?Lw*A#H~ zh`te-`~@q1A2YoMa9EKc8w0gs4DYSf|Mu?y{cE&3W+$`uA(GU%^;77}3n(d%Ur_Mb zk|*xJZW0llfFk*7_A$v^D*g+MkTwI#;D$<Px7{95#utwDq)0Xvj>CZRZm>jgI_vXIzW}T`S!WtVvq_* z(|oB!*EBN9$c(xo;WVqR0U8HJ?H_8ofl{)8z>MdSAZ^cu4D*D(*3W@l-vs5S!ngem z&Z#SQEF>}?0FO6OeC8_9vDbe9A(RR0_4%^B^l^x;Y*va3avXo!%S6H!gqeVTt!X+F(8DnxacQm!-m@ch}fB1Fpb<3eoZh;j2v?VRANI(R${jH41s809ulQ?CrlFHtcpZRJ@e1$M#*6(F3)y zr1Cj7u#klx0rbI8ra0e#Fd}XU>ZqfcSUOV1j8BzH%%q{{~1p}PzDZw1`r;q zCnz(zSaM=@6aJHFvLsP1g6DfRRFFd30CYr}*#B8lE!r({Y$cC~P%cEyW@+cMd+ykb zcgAKh`DQaM4I=kY0L$+&ffEj}<_Mmo zM}=pUnJC^HT7H`w8GisW9CS5Ni4TCo|KF&^GnQNC*6x$4@^hf?7#=_7NOtD5=H`Cx zneh-lZPgp-cCNpA(GC()#k~LWW$M?VBao1;$s}7j(cS8(Sc08%6mKy4VyjW3w^j}t z(@3`I8n$uaqfstwTR_gUdw1K8G)RRRw>rRrR?D7xd8F2j+t;&Qu@&Gu#y+lH8fy zcsH3(jHPl#pe|S0pc$21XNxgFah~|O^lsu1$i@iYS^c9yP-!tmMU;nCM+-LlQ={j8E5C?Zu5A>8;~O^x#J%bQpWEy?F9IR(qxFs3Hqvf{C4kEg{`mHevWN?XuAnV{U|)Hb}P16IZ32(R_uKs^Z}lMW2B~!!y@=q z``=tDpAE=kUSUVcAN}`*g61o6Dz^{6*Ze`dzR$mh`M30|=lEJR{;j{4TWIMdk%7h> zuPlbGyw5?tH!NZChINzdqKu9spV{rcYRT zkhoivGp38>X$-y_#7*BP=?l0TNZ?zZ`R|rH%?;2Ew@x|Q^>^uP_f@R?PTbY63|-dE zz?-D5SFw09ZsXg|jW z)L&yLs>7ei?4IcWdEKEDQmg@>f~s$Evk0gOarfL$D5 zybmQ!Q*I7%40ZBw2jqFm?)^Dv$X-Dt0R=caWthmclH3a3aE=J`>Lf{sHDG*^{0e`- zxBE+C_BCAa>c{$ip7#fVu*1*0(k`jPx{JtL@`kAs5x&>z>*)on1;_U5*X9&&06UlT zy4@ueTVUe5Z-2Jgs{NF(x|ildas!x2Hu(mbf6WkS{Gqk4A&%RnE!$@=992c2W+@(X zvwCo@R!lse5^or~NG6pu=0d$dv6k_<@KL`LQ-1s63Q8v#xN8z&iPXEF8{$HUS0=;9 z7nUKkse*f4W%gCl!Q4Yntr{{rfiF-G)jl_4LE~Ebj#6LAATk12PEA3S>`X#I4WuKZ z;t*ayx1xJXY~p`D3T&ZP`pp|aql24CXY?jAT)tYY|q8};B%095J0gBgjHt+ktN$~M*GCE z{<2?iYZu8-a<&#q#M7~YI7R3r_8Fbp0*u~l*|K;^pVK|I!%ZK^tt3U6lpi76fK?5M zq-KPT*-Ki3^AeNrp21W-6d4z63nJ_U?(~@@1(w1in#%xxN;(Kf^t|6@19q8zSuK2^IHlnUd==PbNe1CFIXh zJnMyvNYSI3S0HHjkRST+>lT-O4~+nrmwHcX8fYmyWmCh^dHw5HJv9Dt90}?QG0gkL z_1?#2M!EW9p_zA7Nhxi4v^`c%P_#5~2o7WWS$Y|O4rLt%mjjo|B^^(~)p=0aet|G<`zL`n7VY~@}v%^B#f+4q6u62i|ul`gvYRMCM=7gOFn0QtziQeOEV?g$Dt04Kh0VNfeTvV>11_54=1 z+|(Yj^rr(BrCUnJb&?T!O?#%rBN zVIJm3p-(++wgE(`TCC$W2fgB?pe#}Jw6IWbLl5XRJ35a$U0sv{H?3yvZ+XXJ!U+f&P<0xSQPqlX7? 
z!*j03vb9tQpOoSidHqoEvxL(vP;i7o?f3+o;%F%|ezm4$CyI`&{z{5wUuD0q$t=m+ z7EHF{oDJ~}>WZv(-XCu#i8x-U!1D-H&Pc{2^!V#B8)|p{PgLa{%2!D$~IHPYj-%6-{Pao{ol{Z##%c6VPC*OZEas^ z0aw%~(kYEv)_&6jB+8$apFiQqcbmiyUS`@eN@kxFtKFbBP9?c(e2kYyDHF^2DatSZF;Q7(S?H1~o8j;J zV4)RKC?YYP8C!25*$^oBHHf%V6!LsZ8kTNcZr~9vb1bakl6WP0~j_GSng*azB}SfE+|Z6h@0_=!qi!3rcZQe`G=s9BAo}JmBvo+ zDt+E>>vEKjvRHKCgN<$iZyriGGc{vh-ItwLXVegnBfv}Q{ohwImElyDo1U82H0*0z z4#bDd7~B;@&c{f>=}QelBcd%F<9vICBr}PF#x2LIqP3K9L(XfR*Uy{pWc%OONJASF zg4YUJ5{DM>h6u*(gKOt&HwJo%S*4y?-#0UJ*dn0PU{*IPH-O4Q=0CsiP@Jh<qo2rUIztUw?U8;h-Wj5+ziV#;_krfxhN^09$Ycx099ViS-ZdMTF1G^;h3f?Fb39X z&4UwRZrlXk3Z*sRBGZkZwR;*S2x*JBj@omNj|aPe(rhM}_@;Kx4U0ZAA@lMhtkSps z7Er-I{RWWvto6N@$4_T;k<436R?@beqFtK9mw`ZV01-NZvhzoQqM0|XVUf4p(F@B% z|5@w=&5Kxuif~2B_UxzK(jy*KL^}3vOa!Xfdh{&g6`HAO&OsYF-#@f`CO4lS|1vDe zM5i8Rbv2m}eW-G+H0mMNqu5%cekd4}tFx!nR|Q|NWlmWTC&!5>?g$l4(C!!TRoOS?4~n=aDrxEFhw%RVdCU9b&N{W4Hd%ar(WvWK$?; z!;Nyvjpeg4_zQaaNyUiU%zN5f)gy1@cb4BDHsuGu#s-|7V74Zcp8f#73W&+BNt3Wn z!k_4TqA+UgcXd@G;Ih%7WslG1O(~;^v~Bb11E|ZeX-_BdlF|}9Vgu>!>!1`qvfHvf zpkSoh0es0evClo4_dDqIB2@nEYtDb|kvFJe?Yb8%$gf1Q#jZVAbckI#Yz-t*wyU58 zgC^YDMZ--{x@lpKaRgMhqOoA+x^xa{om#;J*bB^nbb;W9MT+|1XW*?mX+{z z9LUCo-^J_9{JKUi8do6moel;Sac@~KacZLco^at z7{Po$EOh%rY=Kqy*oElk%aO>b=g*iwcFNajKGu)CQ}>Ytt{USHt69?{j$_+;x((8? zHJ`R>`vY)-Tnv2xI+>|(@3YLxkE%~iSy73cBuA-ptdUvGCQ6(`&JxEWPS%Pkf;5Tf z$gw)_?L}wA$@uqS`Xi}-zt{b$s0jpo+Jg;@%>RES!y@~1VF5w@-`<ndFWgqYiJ+ibBW;7A8nSRD%& z;mCf3G$pa4e4HUEeS10b6kt`7_sF~zs%x1R6Y8nUGplAhKUbfECU;cC$V=xbo5+=i zZq0Q{F!#IS8|P(JdfeKdk2|XA$GQTp`5ld^e{XV5ko#NsXJlQ2yys9n7})cVj$vY& zxs!h>{c{?B%1ob+k=w9#nOC@mUTBl`qp|%9)<^y~oBgTPq2KH*&>Q^-0;%<&@!y~C zz4B;;jFUMIR*04#b}-N5ITh=$jlLH(#l}2&>Y^2rEfPn7p!8}&7jgX#yjpvQ;;GO% zDV1{vcS4_afmf?Hb-9=OE~N{VXZ7Nbfuqj<;)w7$Hg^H%_3OSprcX%2a#-{h%0LP}IGbt3< zxfg!lhoSiG$^>x0D)BF)1@ausvDK!G!8;q=>UPs@%O?6xggZ%eZPfr8J==a86hPUR z#^=D6g{LDyp{NZglTxEqMtjdiJ31RuP&^Y@rczjC1wm{ogsAjdQ1}h`!aWB;!s&<{ zm(~OVL5JFN+aOlMWELZ4N|wPXK5Y9stg8>xY#2H!{SwWcd_t~AVqL)Xdn0-0XL^X5 zBY?n^RoDNFo?Vs6c(pl`h-Vrkm?2AH|3VU0eo#_TY~7;4{iA7I{;*eR!E^dk zT7_225W!iQ#X+$yaIkLrquL-FI&b~3+n%)TSa<9kCK-5*v=Z{(gdLEc{J_3!sSyu??j_e=Kum z^2afM-~KfgUGlUVU1mXw*^-wD>a~Giyt(@ zvpkM74c!+^I}6l`)EOyLJdQl`8r)2@JC}fl#$;Hp`&XlU6QEZm24MgV_sO9W>??&2 zud?N_m&L{Mf@8Akx=nBq8sPH!upiMXZw9-|!?%~91{3A*rBfDO#?t(PtG7OOC3#Jl zIht|Xkv@acI=p_Z3jy0{DIhtVBlAO|T-|p+gaUmqXi`iq2yp50f`O^TW3*~_(lAi( z%fY+p51pH1Nk0w?{}4EpPE&(<7OUW%yeH?Kp<~GEW;w>s$-AHW`e6!-MfmB2!{kKb zi~&`+XJ zeKmW)0SDw=Q*K?(i3Col|E?ke(lGbTL70c^rZZKVsmW|6%35tt)b^}w)18{Z!r zB=&;>MJgbsVnjlNz-zl@Lq{`jXI_*jDoC97?q^@zp!`CNqtXNgq~K%FV{kp3{6hsT zSe9?1Q-q6j(RG8uR0w?i{;jJl|ZA@LEXMqfH$%6-P z{q}xK%q7tyG>c<8Af*TJR~-C8m#mzgHyMq%`H&gUq><-seRXVkUH>QU+&UUWouZe8 zOjlVJJlXPl<$_F!=oAhpZ1H9FzNqv7Z9X0PcDfdvz;wKv+6Mtm9M(oHP&NMpj(T50 zlUOHG=>qTJLaDZAUiW@fm2`iJ} z?D>P~-+&3>Ljj!Hir0T{GtTLyF(T(9{B5(YF$M+Pah`E*7;OFsDe}xs)ZmOaF8hqk zR$3Qo{#3GV@{8%lpN}-C#70J0D%+>uIiDi6+J|L^Dk?iCyQU8IRAouugf;!mj^4GI zaYlUhu>78)$laSRw<`B_Ujck&{~p*MG*T=93t@1P9(zL#k5nO;DMr>FU}Hy0+_+Y% zj|dAFWP7J_9>kZJXv~u#f`%0+R*cjreE8oS^e=-?GfkFjO@<^GDlQjueCZy&S$tyf zVpJxmEo}rdyCr^L$8Fxk(`l-ph+6$uPb?}G;+Viot z`F)H?^oHI(DqEF)+483GHW%^eJ(l$QJwQcNQ*GG2V98nO$a2e+lXBRGP2!aLRP?0y zeCItbamz6kuiMfpOly|P9OH1I{<8b@t=+2~m^HNP@`}Vi6_Oh5gj;Tgvz%)Th&S8C z+;b6;Ev~x*f+6}SNXpe_ty$xMMOZUmDJ3q_uPp{!zfJOMEr@TcZonmQUdB6}b1;cz zFYS`87unOm?*RsW6&hXeSH#^r;kp#EQ>!~;1nK6}%{GuD0<{|i{O22YfT&rn58@Gj ztpd9W@o+v1ZH;z|W5J2wX7SRTR&m9+8Z>UBp32gtQR8+9Cd1*Lm+wd`g*1je3VF-u zIkwojL%`nyjFUoR zvRMkk6m1Nfz>f(gonza_e7zwn;OT>oVm7pVCW_{<(em03r+eh?$8d!2fgS&u1@>^2 
zTq5T`CF|iH%p{0Wg9qI}E$6}Xutqjs{uCBRtKNwgP)fakZ|1RmoOq8*8n&>QP=W6fm;O(Cn_n>sw{94>+%I&=!0lm27t5pbgA-v+ZF}g%8n_w~mUP&hrO2 zw@yDgoqjY+xgOv+L~t`8Fys7ZaO-Q&>w{m)c;U=I>BjGaZ)88PEh2ryA6qJrBTZ>?< z*~y^^eq8t;x>ct5Yr3v(s7aD(aC< zEB&&I{FiLGn(-#^2>|0*OdFM-*a@t552FBg`|cRJ?YB60))cEcs3=!`ONYM z4`UN0kkFWRe|K+_!XnzxphXkmWYgr!=wDYmXg!GYeqiA>QoRkmrzAu~oZq@R#1p*+ zOoPk^;x}_&h%KOVV3cwI;iK40a-AFkO(iw3FFrUX5?k^HidYg}L6Dpmfb<6eUlInb z={>;N3NN3`j5FomXPRJx&$$MWC`k4MDz4vSFI@mfh4K+XiNyDj)+5YqC@kJ-zmOf{ zwy9^gQ`Y>?P>}3b;#0!%nW4YEw{cg#qqqwD6ly+KFh|22O6` zvO&k8LvyovlZ9Y3w#aa+QNF5n2W$f~YnG6b_yaPFQub$Ji%}ZeFJ9#%Pn{}Hl3mFj zeTNFiSqY>Ljc3z01jk;X$Cv}T_9SH)#8U1@3nW32&2VdZr0@;^zf9#J`Ta|+gPt1&oO@3EJUnE zxsQVVu-AL#1}#6`XubmDng4rZqQDb;d%l)I4VA9bR!3{3f6zmc5Qn91(IFIUxFg;7OgAePkN9s%RAJnhw>Gt$&+r<||Rf$=}|etwntM=I`u`G~~V^608g&L>2nkeMTM--2jWDVeEyJu{j)cg8I-|AkYIi z!1;GsW(h(A^a70&T*Hd8@vxva^$#ln?|_m3-ilwVgfHmuTzp(xr9on))8@bIWpeu1 zMmlC13>{3<6Pz|X*O}G#TfaO6UjA;PSOfAD0!XA{r4i(t~MVU(N5@Q z@DCA0LLs_r)Ba%p6EmO=m|IB zo2`#chsmf8Z+GImZ5g#nzUh;uuQvy7oW0Ay?eTO4L4p&?Nh(WZNG0GgRvq7kS?|t% zr`;7psd0p*Lw^th4F)6Dy9O;$>An83vEs363>~e~7gA<`N|_KvgO}~_;DMsa$L-D7 z{LW_UdD23ASYpXxjGRMXbGUg`?jgpvM>Mta4F=PZo4)56;2~qw#!9FVkCLa>?~PUw z8Zh>=r$(Y0^m`E$IF%+pfEh|AzBgh=EMHr+wM$xJ(};`|UH*}!6H?1{TzcWF)!OB6 zNX=Ip;8NuAD=*zv)pRwd1B!~9>Eq{B3p|IV4Hlez_EdkQT6xC#B&FCT2!>ny5(< zC}2k`!bdmw@u_7}r}mbB|7239#&kc6-}e?&k=muT^ZaGpm)0*MaqJ3b+(*!lYz)32 z%}uu)!SLW4abwo8@D1Y4%*93RCg4>YX#eMPV*OtaRi+bBG|V(0h(Q+vFQn!N5^mZI zdOCCDwOx~Qr4S-tu<_#6VEm98?LfboI-L(o_y$d$&@Cyx+vAJWw_xe}yo^2$DPN&m z?`i2GZMpV=ALik2WVkk2!9nNBevkJG@bdr+Hy9duN~{kY*By)111A4V@cQL*!VA7c z7P2fk?4B3BSjs5vfR8(1vJLRBZnaVerN&Lkx5`t`I5H$9IF?LLg)r;TbyQGW$%9o@ z`+!{QJB(cI5<=DIz#X!3)?EEUmr%Y_A&XBh@0FZ{$Lit~Za!kOr?LnL^ zjo}j#LY)XDjGVL0kGqMTH~p=p6R@UIqv(?Qgq$z7Qy(#5Yrnwm)u=J-Hg&N6sUe;5 zgH5zM73tUx6$C3f01xOZ^K?}iQva|o)*Y4$uJb1QGA!P;eOYfblFg-ly^YSsIOEX> zfOkYJz)brS;wyE7ligZL@(#xmiRiHcQbOH`)b+V&FXFuBzAwi>Ywa_cAL;!;g;F#l z?*uN3AyXu)WM~(t&$-XvXp`hBxNGo)_OB{4Qclx3*N`|TE>gG75165`uQliNT<-%3 zYTlE|o?H%07jYhJnWxSr-X9T7{Z9G9^w?L|dcsD(9x0~PV2hx=PN8Rqp4dv1k@?d@ z#P8WEgl5w2VZ|f@){KaQR&i)zkk$?lmhJmyuS1yhfASrSIJRZJgjyLNQ~{$;6&n%K zT2e7XQAzsC1C4`91w)+CYwfHGHjRU(j0fVPK?iL-5iv|ov2#GoFFd|G1t7@7c;@*{ zn9jo@CoL>WeyuCzga47lT!H~|^A2pQqEv1oE46mlUoA-^6J*hb-TAPKPM<}}u9)?H zJzW!UrDup3j-L#>U7LZ5bn`Q=h2)cvbva|sfd@cwZ#UaQr6c{ z?3Krh^;;yZuS*i=YhL%H<9vb}9B{XkfrOG$A7~7>1+%QS=kTIDahfLBy89qa)u{=o z99ykL`v?Z`Q=334moeF0kl2{&=~KOjS{<*N_r>K$9UVkLsim7fI^AYJhYE^`M9H9wL{I68%&hB27!v9!a@bc)j9qpe@0ZYJop@ zomvm0qk^Xh_0d%j{^9oSBX<_hx$xTGZc9JQm4rR-uU?DEef`7D^JG^Wf5+VNR-6?< zFQ6g*6A1`+#&nVtA+P$U9dr>i#7Q6EeOF2NpxhSZBZ?hwiq1Y;##18r^u#gO#&Td* z^ted0fOugLKi|ELt^twHUq%9#@_bvdKiL931DA8#;{|(OIRg|MXw?v90pJT10(SGk zUs;k@a1nl~Y-aprkm5o56FO7_WAe5v6xHw$*!iDbf3}MU%djKj4#j&YS zyZ79>j~S9*0WAP-zHU?f8+;{hh<~%<7L}U)-nqjAe*#40-S#Ikly4z0X0W_%L!-%| zkXz6IGk!x46uyZ8gJhD)Fd=wx#@U}1^rk}=8c~V7O=RCHHU)>Gy_M{aa~zwGOj#V{ z(q6%$VK4D=isf0PY@4<36aF|uG=n;bDiDH-+qk_Ca1Cvp`aws=%9U);#Ckf)@PDs{ zWmXDSbe5@DNLL=)SJ6(s3w$~k9zwfyt+y>IVrfJ3ksv>ZT)Ai;bv-(+Zg1-nuyuMf zueO*eP!pRUO(>7?S#oJ$+0)m3^>JLGlrhQj ztpov)uw!0f0OU`ygeIwXxE=!5{?WGzh}5Pju_^!iP$)iupuumFFtDgfdbak)OHO*H zZmgX)ElNF(bc!Dv0X0`SYFpHN_;P?u8Cut0v%4#eRSd;DmS?yVPfr+aB4<_e!YDac zu+`{oJ;^D0yaDV9?wE^vlfQ@eCTD4;+mzYL!TA@2z{PvPs9E~iMZf*w9}yHf?HONe zDh9K}MY47W*tJ}~1t%8SUn&L<0$YZZPw#{&8Q$cPLiA6%@ey4R2iOTM6UtbJ6gk~8 z4>%}AfsH479gog_PvwN#j1jIuU1=G(CJH$JQ;37SA=<=YFHicJI%c_ zYqL3aFgZN+#Gz%eha>^Qxi>E=iPIKC)anE__gC)Fpjk6m3*%yZi*IL=U@4^l9r_F zmb(^$g0;J%dQ#|89;Ec`R6(*qm*3araS7?vnNb!#+mo>kT|5QWDWcYh@ygDsF4x=B 
z!cdRR2Vn!=m$Y)oW5dMkr=UA=1xotVl8?+va3dvJaujzS*Ap#g7l9<8*A*<$D{6TXS>W_l_T4caamh_9lBN*vdan zzZY-D5wjL5@6qh=DWSk*qx~ND80Xt>1Yp|$3bYW|;4se}+VNYVjDxBnH_6$*H_oR* zdAQ7~$ULZ^q~`=1@d`uKRL<-Ijx+B@ z^1)=U(odq|88K^ALsHeqCQW70ccPzqCJIta<^ROlPOn6BJ|)>1{R#K(o8aP_klb5V zq)dq=meAqmn4{4dANzWDwNi9Cr@+MV-QdAj(vafMb&6#MN%igZMHT#=+WIa;Y|D{X zWlH9K8S`yX5HlCna?X;^--nl#Hxe$Vgvx+lbi(Ypr`uu9;KADl`PY9*a(%`=!gXk2 z5Hg0yc+C2i(qI4IrU^W!T9?~7Ek8ELZb>NmMMzti4t$3mlMDfOyMeky2yb$BiRkuj zY{t@w-iLnG%~5XXfubdA+Ks<)x?@Q$OnW3v6;s68uTJe@zEGR@qZ|{t{Gvb=wNK@p zR)-PyP}ZDO3aTeu_yQTEF+_pYZ~_BNoU1NxgP!MZtc=x9@?ql(BxBU}#(U3Z|HIs# zrsEBOZi4`0F7qqDMDdI+t+!kg#MGOfh@s=&hokkS7t){efLl}&LMzL8VKg^X_H>On z*!PkDpR!(WbjD;%E3u10%`(98DjFvS1vH0xuGt#2d@c(Z&W;sfiG$v#v;D>y8kVtJ zop>!y`bk41N3j0Jb9FE$iV!;f)ZcvZ%M$gc1g~goFC8$^lQ-G zxlI4>^Mnry>RrQQ(iRnscylH650@}1Vspiqq6SlX%7j|sNm8W&6l2aZIm1E12wN4$6ZU}aHDZ5CiI|t)uI!? z*e`Yl{PWZ3$pkVWH zGCZT!Ri`=c-B^j+5UOAKHDXdmW|~H3{*w%!8)0;|`*w4#d+ErS^deb~C)p|bE1{cD z?JF^^uFT7o72N?__8Ga~x(Q}8#3u7{42c}OA8OHCAvi#g>z&$#?Biz!ccJJvFz zPSRG>kAzvLkX0P7&Fk@dets{Sp;gE3D|J3jb-vF&0;Ta-HBuDzTSBQuJ>Tdjf(48D zvsS4tM+Ac(LaLLGah8SYc9*7xK917O`p9gHn3kAv8yVR*iEt4rwF-_sy<6bQGbN6jidl7xc*oNRYAgm#m%`zrw%+I$a0Le+)ox&IH@@Q z|Jy(zfpogsRD((mL5|$+KRo`W%`hi`8ZM8_7i@Br`2x7xsX}q1%LGqLHA_N4^T)#e zSdPs{H$)=mR^tnG6Hc#6;-pG)O;b)sVrw~|g~Jinw*hh@(Fu_xCRtdCO5EY$L`KSsKS2y$zp_{CxNO+HmqnBB zVaYeYBXBa!PgitFP5rPVF@Y z;=*3v^XhIrVtaK&^?{DfD**Yp(YrIRH9AtY!B2^RsUWhWyf5fv?OtV~zE~1wx6M2z z&WN?kHskwLd~>ZbJpWDSUohnriF=OJdL9cP9@iX$eo0u4-#{|K^-&5Yl~q5DKb;fX zNL&49Tg6_k-13X*BVQ`llnAADD87v70s|N2(>>s+m5n5Q$M>%SKQ6Ta>ijCuoIE+34z$LH7yW{Ut(v3-J{q~) z!Ko0%vE>4Yk2}CVV`HerML9I$Nbtp_Su(u{L*O0HOa9lAU3cN$Rc5$-NgVUVi!T1zjI0SG@Ob01H zUsi25?nF~%l3?ZtiRDuI|MAo{BCt-wBrkp52p=qdY7Y=3*8&yut(ySW4wHdBxC`c9 z_^{uo6b25fRXhF7Z_oQa$YTg&zy5dg6-O7m=(NSRG1Ju?BeGhs=0)+aQO z2%0VQg7_^<_LkkW-DB~{R=D;E$+X&xBtNy?Ov|VEQ}F_g4&|;DEob~tBI6=&-piNg z0H@9!koT3;Zq|VYU?_v^8T~7V=3T*b?<-l|NK#w4FM&DVCw+JNqiOgkRZ&_POiv58 zbQ{w~K&N#J2nWy(eJUalnA&%@nV%rYArIbL%@9VfT>iMB_D25(^5kv+62RUm>5I$( z{}qh3`(^;V>h7lTA>0b4xWI-E?V}L zobBL#{{HzCHy<~r;(``lNQlbYp~W!y5$T$!%w7Mk)Kd4QV(7fPu{@U)Ot%*XKR8S{A!Gr8nXoyV47O(Yb6jd!`3kvHYC_i`>CJ+>3L?M450#rvg4@ z<9{KdAz6ds{kh}u)?}Wd$aoBSKPs_m1XJPthzA-Z;r<8@9vACqN==~>KdC@Q3k*49 zd(m6lpwOhZ2U!yMiwW`lM0nU1%tj`-AwU7NP?@t?k++nq5NO&PxhXHiQ9^~{5FZ3! z=#-VQ?lkhL2`UT_m5YCo+^(wja?sJ|K zs~4*airT)b+PMu|>d5uN@xq(XOZarDI@0TP)rCKB+Hwi^8Alxdapn9*&&{lko2YX@hu<`}t%&R4D?Z;^@OyN6

2dAm#Q5xDsV*ag^~|0q4s%Ix$|tsoQb~fXys9WyhIl zcB1{$n$YfDVqXI;3e$K#-di%e>ec1oOr@+Zxr1y9n369Lv_nKlH!vztyfMcR29UGe z;oAt!UA1_ri+s-i8e2eU#FW_|A?*Vv3;Kh%!~rD?6>Bw7kEpPMKqLe@*w zGXH50aLv0=YQCMvk+3$qgxt=KVE$k%u%tRMZtUqiEijp6lMJEfSHX?1a0MP{i8*CF zLjUVCK;oXetfsKh-)#Q-2iY>V5nAgH{A^2L2+-O>Q1qYGU<&aay;LedjIQ0&)EWaW zUNDm>`4geSd&Ou9rG>{4Og0y*1u{rGk#$15`s2_-t2s?-{DVR>3!9&OGTqhnb#3Wj zo1xAWzxFmk`$YCPT;eF#X<51{&d%0?H&mF~?E`WJ*p>GKdlUrRWdf@lmFH?~j`Z}n z@9s0`wo4t2mllX4WIq8HNmK!USW(0&;mMqI@++Bz=u+fW(qAG+pOKNwJ`tYdxxyBP z2s*T#2=yp2D?;B!S8)@#4G=P!#Dam;_lFJQp~9nhrGZaI=b7K-ejel$R?v71m=3&0 zMIn7b(}2OaxHJN86a7&%Q#Uvx_jSPaiv{^Thy0CYInazduW@EST!r(GfKei@gz~$I z%)GIVZr$P;C?BJo5H7(4^;tMRqdZX$9&WzN0+yME3Nci6LRv> z&L);m%t$n)QakaYzyCQbQ#;=L%%t&SHMElc8H~8I26&Br3)7?Iy%==(3M~=&zX+ld z4cM4%pQlkbQ4gi3(7uQpBIoii68y^U+ZTI_|oh2nQr0xYtqO}BN0Cf(VfW}eSYV$ zsCUAs=o8b-U`bwKBZnCO(#HjDN7Ooexzvm1Kk4#GdTo8S(HYj_ecCI$j$t1jeyC0m zf#AQuo?uK%1~*mXUr-SuJ3)8^NcCLif#?emX!#Js>OBEjzsME?6(${G+d)z-20NCs z-?@Gm@@-Yx7dxE>*N*J$GdqOkB-I6KD8~08ZPgYUAa|#((vmdn`*aN&%G!OBUw18? zfphqmpr%ZsF7%)S@gvNT8X_6Q80txRAH?K3O*2k3~6vA%O z(L@L{Kdi|j9v*gH%X6xCW-{usZ;~J-dGt+rOAgwXXp6HA9gjX(*0r1{#rFE`;wM#a z!T+kWU(3$&w7_UkZxnYjvR9t2IDP+___UaUAxgitot~eWQ zrq~6PPP~Qw_kyEb2Bd1gEqS_pir5|&8gEz<9^Y24#nPTt;A%kO3ef?K{ig6{NUPHB z$Plrwud7W#GVwIgWrHBbt#)r!K@qo;;qcah^r~F1R!jLjc1Rir6v%eqn}rR5YqtTI z+J6P;0C1_CCjccfgZuAY#1#NCdQc=71n`@3h8d9ojJ%+*&(*v9Fv#c7lkc{ZbM{+I z6$OAH6QivM>jZl}Jp#D~naos{w#g+IGw@l@R>S$a*0uzvCY zT#6EC?Pi`_j}*!zyJNJw=+sGxduv#VdC~X1>lz=ziW_{ADk(^wa(>9jPcIzQI7zac zmjW;QdH(R*s`Er1y^i;Zn2&|<#T0DjBA~6Iu~TvV=cXpP4&g@&fx^2aWj`4c;8&!5 zwN$f(UZ8#Re3@CvajiV{K^cUDb8)! zJgDY7ze7Y0jVn2@NgCwnohbZxkh2CD!kgc1TTj5}MJ*TtLcEkO0ekKm5aXv3kp0zL z+)lF#nNKExSolN}P_pT`KlXQ@)cd0A41iVii8n*|cr+U;o1@bIAF9qWEUT#9)-T=N zjf8ZEfOL0vcZYOHqkwdWbcyr}(%s$CE#2LD7JHv_zP*3=&r7^mYp(gk828}O!xDgl ziV0?H$xr=)IxZDp^af^G8IM5~=Bb48BK_>BKWN7`^CRDys3dxrFl%UX?#m-N-tlM} zT%G|M?>72+x&&vCVr&G7+enKcWB*M={EGf7*y4)l_BO6BE;(c}{baZbb~iu*)CHI{pa}*O5ia4VInBa`pnEjlfue<3R$%*3rfKrA!^RXsx7yV9`5_rV3S`5nIE`TG2D z3W$EKvp%jw-Uj~-^ueWSMRpna}=ArJSpk7jEiPtNPi&rGJhNzlk z$br(a@|D{_O`0g7Ji*Pip<|@#fFNV4(PTZqDZKE3l9}QSAk?MbMwMWb0gOm}CONN_Q}SCmSnuAyIgElQ5pgyvdMLyahO} z@kl*E)c!VI0(cD(nL0n#{b9C zIEIRWmQ)bp>fvv?N4fXSj2flUxqL-xAc&Hf2U3Mb&I3KXSTru_GJ>KbKgo07{K=QL z)#gYUJw`U@Zz1Fm0tgF4F$f>e1j@qy4WS))gx}~285ej2$~%wbKPS$X5}iROhV%9@ zRzkP={xrqB6+%J*BP6og)b1NaN1^Te45(nfbS&SjoL%%CM30<93z60z#JFv(A(=Zu#+zP}zgCc_#De@Wn=0$WrZ{8ndvVouP zhyEkAA+g=-!W80L4XVh;{0WIi4Uy{;({iV>4t=FnXEu*f7mP~;b6Ap4hs~_vjdRg@ z9<*$oy(uB>wSAU-G9dMpYVn^xC_)|g?uV!~+CohDt@Pp6PtKB_8?byEcx=L1Y{d?r@az7Ek~V!+^8BcL^YjU${z(*;zv1x*?C~TU7s;4*|Z~ z6h@tzUeFx1HqWKUR%WS5H|`BjJ0J(hfCTe>E?6C&d7f4?g1VcL@go?38Y*7eZ}Wqq z|C*zIX^ZJ)YR5>s04nVi%Y+N>{IQDWoCRf@@dL*bc3?P<0tN@i=i5PKt!&$Ttda&huEoknCq&xk*E9YjubZg#yc?-20*4{EVckDN-yJZYtQnuKG<7A> zsRaWaQmi@B3B)AGd*m*%`B(CHMqkk3H=C!ZBu^9sW&`BLp+}qN6OQp4n=Pp|o_Q@N zU(PeMsvx*~FNh6j2P0bS0xxe<`|h;Abe&hgXId7WqW=tOqx|2|N_2R8+5 z!(Strz|YFwVJ>|?43dAsf%y*Pc$hYLh4^ByRa7^6z1xd_wWcQdp707*5)NSOC;1!D zqWiXe949!7NYZLsy99vnN!7UXVig(ul^V&9vk4D$cQza1gACeHfnmzV_~76j3~Xsd zgH`bOVct=UvIp=&#CnbIn3TcSMRC<3&o=sxqS2};i^c#o!AhFdNg)tDCWlu)Nbev` z3nXei%--&Dmm`41hLgP%os8;G%m{)oqt=0Lv>v+CRJYxiw+?OqS1z?dSJm+wXY99s3}rL*;65JmpQ(n8FCqJ+0u{8eqQkZGhR@#(^ZjKn^vIF3{Ou?-o>6Rd>M%}vXcTnZnrGpjgVmL=eXw6|W zgKdfqsrq7;E$GJ=#M9R8lQIz4>sxRsaiDeKi!+hB`>b2`Ys`kLBA_g=Z0M==?n|NQT7!28D_9|-q(rO`8v|B4c%J(W*5jEnml z3Ie>KLG$^cFzu?^o9TuC##rr2>1%4vh2eVa)qKtfZ6x9$M!=@k*4|A`uWz}mGV24g z(TbkNJvp|3=Ev#47+VE3;>(AOu>h=caUGstxQtr$xJ(AkNsNJBVjz(BtSyrUuJ7WU zzhui^ZAlKaGJaxe+xgz{zED28HCE?wto7VDJvaFCAB>!A{P>(Kxp|c<` 
zv2Q?roj~MW5;;?&bIG3FOLC_AfI32+xdd!z%Wb&YA59b@0g(HMfl`)A zh`3WP#;G)__*&_Y(2Q4kg_hCOr!T$>CSfYP_0U0DEhqIvIx=j=P~@y^M(Qu>-rYPQ zugHP`2kA#S48~XHu`$$^xqLs#7-E?T<0!@5^tCG0r{7hB8EIeLbpAf&X41GU_26B_ zBO2#*1PZ;kB4Qw1Jy5CdQ)IYI=9-fD73K-I)qiU;$V+lyuS+m2>Df%cShW)`DoW>>IQj^8c8iQ$iCIZs+h zR2ZkUoc7{tETduFW_h2X*R!vZWSWl^(qO9X&Lc4FXJB<141m`Jwd-s(M49zqHoL~+ zj*PG>l@GnTI!^(D8|;{q{UC^UAJ4di9?DQuaQ0#dA1m4}n^-RCLUfSR6aj)NunygW z9>)3(V0@v2mt-FS0r=qe*e-$zd#j=e*@uf{XFY;HMdfri)4S2QHa~V79aIEi1g7k`KR{CK|qomm-y6bJUdq=L@fVJa$WC2$S8sw@`-8@jV=> zzaZ7*0XJ@@neBIgToY$D_}l%zpa^*Eh49!uXMa}nfg8R{`Qp#!z2 zZA&&~$YC$xk#d5XlU>r+ftB>22ABG%2ADFdA=!<$VmsF18M!6PRKEWWgA$d>F3qXq zrT)Z^pe5DuZyicdm3K|$jhxYp!kSUZ=j;!t*xOWg#VsC_0G0qjyxVvF10#-!jd`%3 z<+5>1pTL*doKH@M=Kyhn`{fh}ZQ})u??@C{niy1BJ2N9ciJmjP!PA5K3ysJrJ!~Nm zjDx)Wad<|))oe)RB%vd0VORm`2s^hv|Fg&vuLYR57s$rd6h9;wPBvjYzkm6_2QaPl zVIYjI^32OXxMv9iNXO!r*Kg&Mg=>00aMpWWbsmaWmhCTRd|O+OoEQI&q-kZFKkT4} zWc#d^=_#_DWHr%gR?6mKdH^g`r{o1KMlC>qMnV^6zX&4lsiMS7_r0FB_z=sY+?-3? z-3^J0R^joPLaWl@DP&F7NvsA>N7t*%ESAZU=*-Cf!J;X|C=7lDwY~U8isetNchj9X zqLYun&|wK--yOu+zn?IX42=g4HV;tfN}oFr@K_2Ro(i(=V2?DBG>p7uT4;3 zggMw?Er;t;hn=!gaUiFkU|6PNKPsl-^8=F>gwDb&W3+zsUNuS(Ytlcj0rjf9*Nl0o zuYD|m66cVeUo`y=o8q|TfY=D~&ZyOl_|C0skI1Dz4J&ddLAU*Xz_&!wMCWZ^=!n@& zzlHPe>`VlNq+TCZQor75doWIhf1p`E7}+*{$;VXk9)IQ7{X$NfYFq<@qSJwo?VNe^N+=q zKJ~7QufdSs%%7FA`38MQ1HaMMI?lj5bsq*@-lty8Q~2@LB2Ut7&kvOtP8p~VCIqH^ z`YZwdZ6od$l;6X43~R#anjBeew|}STmAnq^**u1 zn!z~LqQzp!2K$&9+jP>EInYaf{?5OVCJ2IL9VhSf`Mh1W=KU)4_4bA;gxI>pE`!-6 zGURE~cxeNmP|cXOxFZvYZWRq3)%5qsmOg8$dKut$ov2vW{`-c2)$Pgba9{~NHVH^6 z6i##EHKKDUuy*>N{y3vJK#`7jxcXZX^^g!$5B!WiNQN6WaG%w1+0*8o_`$6}6f*e{ zRCNycIfyAm7K4T{gc_`;C~6+Nz0&z6Gzp3CM!oac!+9kdkuQ0=ohQNEgGWvvtqaCE zGB3k1tV?7=7M+@H`Tunsu$eDCw-$)CkyF338{wv4EZiI=sVNSv?$`liuP(rJtAM0w zxX^-pyrETCqr7w=u=!IyLUp9WB5$MYjy;$1jlo-5I5+cBUPobP^x53|>xioJ^himn zh5%V=(fl9-Wb?3TkYkEoS%8y1bF`Ie{NIOpJJQtT$Lo~8>(o^eXW}gogVt^|4Ibzo ztHGa9^)B4q-GN4V&awZ^P{Yy(g_psOy{Il@>J)q%jCs@05e*M$@G?^LisaMTzXuZ5 zT)T|?xD+37{M{%KxCvnH7B#cqBQv1qz(*VC5T)i9+$nWy-+T{mSf31ZV(6}qQi@jo zWl0e`L!Wrf(}O=Kn1tFxsRuk5H6%dTG_E6i80FhU5W<@w>+@w9_oUS+o2t zp`Sq6L<;p_pqc6;LmS2d?l7)!L0=@ETKO-{Nr3hd@^sdb5g%;H zIBBofxV;JNWx5W!lxjeeM}2~nj*6zcg1_sarQVZr5Y*T;Ty(-oPkc-Ej#)-~HUUL$ zxmiGH%L8mqybEk$?oE0pt=3rUpe#t&l@8~gh3lr&Mx^j4e788`$0ITwQ+g*%9f}Cd;GKePk?h zBigBEs)5I1ZWnj5=R!KBLvJpMX#$rRP^_ikdbH8yW@fP87Dr!!q&FOYWSz!Mq+4YZ zJl`%mr9T^jjYYm3RW#IdetCM=nffY2HQ4R^dZ0?UlkP9@2wJ8MUT&C-PcebC^3Y zCs_aAP360i!gW6{EHuJhX&V-uxxM5n-3+KKtW_ptb(QXkcM7%# z-kjw-8vEbqnjrO6RELVLGW8p%M$#H%C)Cy|U2?o)Cr8~SyZQI-D9jb9%U)9L2B?2@G@3yJSuij`|11K!t)xH_ z%_NgLlS@`xjubMjtqMA=%Kq4U(uyp(Qhte!a&Zxz?;0j$A_ah`i`8Z7sG|3Tc{sHB zF49Y6Ln=FY(iovDSBd8zv=3g3Sl*d!jn!>gf?ZNmQd|#J?3RC+RYc$K80A)}7~*t- zjV2sTt#=Fa7hu^ZlggI_;SK`|s`;QS6q5AI1!aVU9|(yCpf209RY#MGR%dkQSH1c#6?(kR^f+ zKl}4trt2ZBos-9amLsKod2kc1Lx`vCR(qF}+W2;6(XEZUq1qm0HrA}!MR{gRpDQhz z7l-`3z=lOBf<*r}u|?O& zFcLPLW?W5sq2PHZRBhn*8c}j_twPR}%mi3w@`(i4kJO2js&nonQNu$MQxIk4R@x97 z!fV@)7};kB2Qk6SYMH+YsXZ1ak5oGp>8(?BLwzPb>F&)u5V za-AdMQo=iqc3Opf8KdLsi}lrEKRV+%xO3TXxeoJa`~$>6x)yFEGm57hEhposM`BBdVEM*6NOpspTRRvA4?XlF`g@XXN<-1m+#J~ zjN{tpO$Gh)$k)(VUGG}wt$yRGw%de2Bbfs{`wH*42P$}fFz4+1UwC^Q$ zZzScb(_$Ym2qnQm>y07xM;Od`!R75U9D`|DhO4NqlJ+uHhpY2vcn;eX0iw+>_36qZ zI!*uWE4q1^1NfuC9m@t8ZK^K%livj~`S<(_$&SGPxHz{-J6El>P}w<-JVgaf;~`-_ z9ZoV`wg}rlsj=NzKu+@2P2Xkn%~nv9*RA&XW*e~+)>(4WL+_`$1TrF7%Pum}TuGht zkjf#2x7zfZodW=DLcikZf|BUpC;7)R(pk=62@~+Tp2JkdfNg)x5eTVIvvz?dONQ-M z|IJQ|+z-0GpV`v3^{2Rz#UkgU9`X`j#4+8o)^^-7?9FEG7QP&S6_vs@K8Y&=_)1~r zzs~)y&1NDYno&)7LtF(`1!l&Tv#Y&@alt>W1g7C3w1F{E*6*?O`8ZczSc@2={xs@- 
zs1zKkKzb0x5lop=sPPS4Bwa0KKzQc`kR-}$jHD14Nb8vn!RUbOn&79A9*~(&8N>%l z_yWguG(51diY9yytW$IkOFy|EsvlM{%2`Xz6mR33RmSnIv;f;WxxhI_B-4KaulHNf zOU8~i+=GI9Vg9{rje|Ii*mrUc`bk^*{l4k!9oAQc#7Lvw4TY<*x7%#QZ(S_DGsB4r z+I*)qBg~vtrWJX+TDX;I0N|^yBFiKBKP{om^z*~jTZYG4>72PfapRM*3PLU{B7Ql} zC442Oa#VHaz$RCX8||<*ds4Alp>O1{Hxn=Kz-C9{V{77LX5tNLyt0tTvS>PnJ>G>a zdZ(WINgxr1N4y@e&*}S4Amra#yFh_HIymnffteXTye&>w#jkj&pZ^>j7ULDB^LC$^ zn4PMMs@2Pw&g*Jj z*Y3rnXOypVwkpbBqJ;1xbq!8|O{GD9&=GoNj5OdPA^VcOztwWuTvp4PHMo}00G}0^C`j%334E<=woDu%nnu^d@ zVP9EyiLaZ2`bot7L5JHsn7c(WUlmDo*axI(EY{pHT_Xf{`6OTBL)}pm`60DmfJNW- z9F!}aPLAe_I4^JLe67u2?nRvcF|&(KVK(qVUZLP$|BIK&3~$Cz+_!xLNCxQXae zWxS(pa4g4K7FPpSq@BFMITZcy)Gqyj#d^Ldf%Nqj+sE2j|AsP>13KY*k#NC<bA@wk)kGU{A3w z>r*0qcd##whh$}m-EfDLX;q`A5)GKn^OLly9_kn`qWfaBGUFoowBTQAqvyhUZ0R?- zSm=2LFNlUCVI{W-JATSm_Ib#%fj&wFB;M|mXK>n3)I>6{j%ZJI$ij4&*0)SMzInOl z>>tcv=sdR*2-@ucR=7jmCDH)P0sA0Z8;=RU>^0PfHxMe|16Y`@-}MD&d;sC#$oD*% zzp-E!`tC)C>3f`Gq(p$>O&k7bCZ%5i4rPQoycRwR@9rWW)=o-6*v@<$T&W9R!*&X6=6IL12#T1NG zyV0+x`ekC{o>E5^9b1Oh9$6ZExp1gnekH`>e&_bL{_6K0>DObCIqrM*HTyX07@J;A zZB=Ky;gH)kQ^Rzh>*m4lzM3SFGXAukK|V@TjSW)In`M0UMN;4`4_Ks$0_EL8>fmN1 zL$)G+^;_i!>PeO3W)10oD(pC}=D*T5J}kLh|5jo0bXE(Rz*6E@0cFPrqsDEmdajSD zo12V5fbw*-3Ti!jEEF{chbW}ug;4ZWgy(8swd9kJD2DI{Y=Y(f0!fP5Eg=WSL8-tp zF`kIgT-2RFe3e|u#t+Smu#-^VsjiMB)$aCqaXR?NNXXr30}ZIPoQ?4G*d%L8G8`+pZws*0B?XUc88)uhn{L63 z6X6aJ-)CAN?#ME*4FB-i8Nw`&j!Pv(LcWsXa2UGlN=|^<@(f2&n`vL7e5^&}#39^h z;e(?d-;1v6`wbYrpCOKZWjGF3Ab|wzQ>GYzNa?F@NFT<-`1$%OC;!v+*kD)Ev=o_= zFcVgT`YEsxkkmjZUl2An8g*(-dP0q7eydDVBS*0BsiAk+t~Hg6`@x(9=JwGl(xIm>?U=RU}@1ZgXP94>1^^Rs-mfA3v~Y5=>!% znu-(%=#u>9j!@aP;{+C2jVCjW&KrBnY2%1aw%2{~swh>{7~rktmS}CN3{Pvac8E#G z7u;`dTG0Q(~~=udP($I$okyXLnMUU3csA`t^Q``rVUe(TW8`+zabo8o}_m%lk5w*#i}SdmP)5IePNcN^$F)_8QT6g(PD z2W+>#_i$DKuC_UVrwZI#iZ6wjvLDfk>(%EeZWmoX;*jW%nh_obQF%tglqK8F&XPts z+6G(@exfIzZx_tQR+8N|{7(_u+J(H8{xI0r{NkWl2SA4U9fAKsE4R0m-zEgfaN?)7 z%u#{Ghnrl77~pLNqOaBfMN~@7DeNoBu zTR6}dbKF>}uS~S-KAS`3@`jJ_$Yhs@#G_o0=R?g6Jh-awNj>%_QFv70B!xUwp*|b? zv`)Nm({o+WOwz@+zK6-1vOX0HfeZu)r9PcHO^ZJWW}HZtd`}4WaB$sv1S@oo z84f;eRpDw|+*OoQgJQs`-|{|%H+fx+eIl}qN13zEyL4Pvovk8+B>@S|vy667d=WsW zHd4@nRAB#5GzMRI1{6qyt@$|sE17dlx!e>P;`)6jDg1W`)3IS*nSioC4C;0s_-Whx z>j1u{Gzh=NI`|p}xv&OV`>TO*R!y0%?h9kxSbDx;2RZJsS3Q#rx{Y070&K*z7kJHgnrhNX8vf-~E#u;!o;i>-92i+{_fs%iJ< zXhe3QoAWBr@q1IR18QIS#Gg3OZS>8rN$WSeWurzAcKBGJSO8~ZlPd^~l~il_EJdI5 zgRD-rHobmRZ`q;2VDD<;O~+hWIpJ(`#yY|?+1R8b)2YhMr)nSslYx{e%6j|fsEOnpm+vEto1suqj@x(aFDD} zCoI?DvmJ5|Is1j8{+l{^>gFZT77$9YANbbiK(_y*0iZCn_ASq`cI$u$*UVVEfNk^i z@6^3OYx!H>T%cx`p9kk*qV8b(=3T41T;&en4dA3pzSZsZ36A5Ziv{?V8}uC<|2e0- zwV&2~UFnHzxp(as7S)G8jFJo+;K3mAd+!X5Mzr}F^ZNz5 zsgUkm9B*=W9qM-@92oNcLy+u&N4-H+LsnJ8GS)HDkt=jx;#^uG)E2O!JoSVC122e` zYEofMDk^$Ub;6k&Kfmh1RU)QwmzNoP z11A}YGDvVG^6S81U#+M7Bz&{{&0bQ1QFP3@`NGeB>{_~@%4fHx+(rUBy;s@) z{j^ToU7~EQ?L`;GYuGHYi0$JL%7bS(w3gJSrLPh=({%5lTV|uU`yN*=Ngk~@7QS3% zTVe~>b-%$7U7puza^RXTzD34Zn=x7Z^QPyAKbN%!gd(Af7*L>$sk#M`a=+DSph%C$#vN57lpGW zF8pWcV}=_5$V!9szULRt3xCBlccFrqw;XBqIPy$g&)e%xQ%Gm;0}H}hK<@&xK^^*4 z>HeS@=~@@-)cvs=4oOxk_(6y&`G7D8@4r9woZliQ%CuSSAF@ujSZAZI)NoFKbv95Y zKNiH$u-TfIXF z`QDCgY_9D0W02c*ypr$to~nJTiG;$me4XKv8B*SINs@0)b|gZsIAB*@^V(8JtjdgR zLg0`i#XKkQ{R+qclW&HA9!2$-p`ZGG+!<=WT5uJ(3U&W7Gr*)ZjovKSwDc?KO)d4f zgG^OK;%|bu&_Y>{nMh$F^dY95nPvWP`mg_{*>!$iBYbgyQoVSRn8`jbb`OX?6}Y$? 
zm|Hxx%fIM9E0ngv$pqhweDh0o3{Zpm_S~@?ruH&E`hPFg-7&piBpwSUZ%MK$9$<+L zHpQL7(_fbWl81IBw>tqy8?~|T>@F>aY~=C6@>4o~qg1%tE1$3=$=~YsRLT)t-7i6o zAPC0i407+x#UFCeQ<56-tDrP_tCh`$L*Yo1Byy;YwfCH8jA}(Vx08hnuSGM` zvAfjOed@O~mL}4w(49?ZGYc&drUi-oa!Ni(oRqRoWaWjm2?WXXhw77!QU|N=lDU?X zUAT@9?HxEl+!uD3x|VDB3rY-xAAIf)BF8p1IhZbjx*<_botnpr`wLrj%tgf|g(*du z3C9{DJzH)|8b|oG69TRm9}sbum{FaC6^_;T=PJ1){E@!_S#jyr|qQr}G8t2$k=K&9N1N zS`0TxVfcHkT|(Hez00hE+?69GSokPJjot9v?#Xg`NWa$L)g&SRoe?=Ue&}ybnCNc( z5QyuLYmPoJCH*iZofi-u%jyVu_(B>=-ULE?egEARb@Kn!6~v04UW82i7c+7q^T`U!vi`qh=BEz3{^acrthlwizc?KZ7{E$Hoo6 zo^YMlAx1X6-GS7AOe;!ecr+J)bk;2Js_zx+W%z=T9u!<3=# zD(FY`sW*B-lKVcr+I#m6#oNeh(5kuP%D_5#a;?1hQG>{$s|5d}F7q#gm_XLXO3CBq z9bJWvPu)GIXjg0dXOd>Uh0{ZOgg+*x$@ViK1TB+4k#SzZzu@|cT&q%9r>G|Y1hvlt z;p8~1>Va^dh;&?BBh+Z&hv=DYp62u#m&7lkQwmFQJEHgN2@9`*1E z+2TZZW9}n%v~)xqdLRScX8$-UxAhlty$8lB?E#fL$-;AjE9ChP!mxDvhEu(|?SN$qTAELTNNLu4K%Wf*WOwt4M=v~vgn+&*&84X}Ma&2$~-C2XyRsG;&Ioi9A?dK~?VaXl_e zAh4esa=i!Bn};%*N|9A$2xRN-WgpqM-OTA?zjie56^oLW%Li^GzqQoV=0FNZ;`b%7|z9Y!GNZ{_o-2(e(@{0g?!B1G+PVKbXPk zJ>EEy_edJicUuqyp-`;YI~MhNuzMjvP8gf54Dv?2I+3Yr`l^2FdvbYv1XMt3bc|k%g57ieF9or0HkLA@D%#-&%@vb#`x}utzg|dcw;lczh)-W22O}Ad;z2Ywqmb9?D{>Ul6tXh|2s<;%XmwCM!jaNQQJ$4>g700vI$t<)p$1pYrl2u%VU1eWdKW@~60CXsY56w74Fe z%;nz?hTvC?Ab_TB}U_KCWDcovD=mT9mAP19D;Tf6igaf8hKALN#{< zF4(HSew?!UEk_)D=R;5O3-i!^(6E3~jDSxXszACFX$ds5%@tDHmEqVt#4Gt6jVsT@ zI6(^2OW%Q(=;!b_NSNsOg_e#H_(tBIC8z?E3MJcm*jG`#a~*u*m&YSOn^P>(WL|2} z<~Y_Ozm610x*8VkMHZpV5g2Lk!70uUE;kT0F0h;1Yv(p7^odWbf5K~U?Za4Esk1K- z8!Io-O4Vq2XJ;Uch!hS=f@&2Fu$lB5YI?^}s1rKcmLFAeHDN$0i(|?9YSEt*i5%(@ z`~Kr!?V#pD3q-`#n)c#x0T`f16Ol0#XK=y=9mXxMG>d@Vv#M_7TVCc*Pxc1F=;yCl z1#i)0T=ry6+JY7nt3Jnu_#IZbzFL%L?SH>i66hCz%fCycQZod`|B4>RYp1YReDYNV z{R51R+Pj{n@4fqfA0Jr)n{Utvsyhm^TY*7y-GoL4c9_Wsjb59jIw~hq%6GI)-X2hx zQ+J_48eZoOnBY>k!vyab=*oCjFB~6w~j3*mfyCH_`H5B5$db0>PMnFs(YdQ zyOHfQgYy#+T98`h>zHMkyPMItZp$YS1M`%>qIU>Sgbk-aGjdak*|oSqwJlm@B?GG( z0B4=fDTI(Bul#${IJxFgl7Rg0Y1!|bzOaKtA;)_u$}|Cav3-1UYV>Ecez=!3|3&9Q z=RA~eXg}2z@HPq?@8GbL)#lJmJBJl-=MgSU+*17m%c7vQk?1zdlE#3RQ>3AXV}7MI&+lh){>4XshMm_j$c$GSRz38_bQvzY0Pn_M@P5L2 z&w4kZ0U&_H@Td7teNmAVFX;uWcKrU<$eP3Dikv4#vk;8|qA`g~t-R~|-u>d`b6iPb%sCwBcNZw?!qWl#F z1dD@Ym@u!4_B4npKI%CWat1Sr6$%}dV?sEJI2|D=CbIoA`EkGwV7Qsd3d#8A3#cJ2 z;tdV!-H2}_?U#$iF+nc%8+^^aePgFl7#$4zF>V*NWr2eM)IpipJ8KUY~S3@u<(#oRygK`Rs-h_c@160r~!z;BZie zY?Oz4hsYh=z)F*A^nMMBFi0A-kxQ^u1Eh{lBNz^GH9d)2Tt~aqi1K{0IE5L-Pl@d$ z#n{#8M6|U7e#_NulKR`fI9mEDzsuBAv?c&<}~bb-ef|=lnD4e@z_!>qH1F0zQa- zbk#aF&tnC~E|?(v;7-8p$FOo-pdljkh_Y!>3TVr2*5u5ctVI0Va1|6s)^wr`rD{{)SS0PT!S}rO4Bzz9)C%B&kQ$ zb{~wyy#IW({ze$F4q>nACZ^g=1Nr!Co^cinpK^Am(I^}>M*z1FBXLQF2!bf_p#L87 zJ3{GUY^z&?VIXsA-oe04OmwUQyu2!mU%8L&+je$=OaC;AaB1)p))wSe)NrDOYKG^c z1zsZRscjXj%DUH9bj363|L8m?xIIr-Ixx@&gcE4;mLG?ZK9JZuk8{Bkc5hlKVdeUy`mRWOi*5F zoV&*r)2HGkxGQwl^t%O{5H5zDIQpbA&5Ba-;&Wi3I6G@<PSPM%}K3Gf~8-oK|fDAk6`VSt_a2 zMaXstzDYFDtXu`ev7WcE{Es(!8Tcr?!Ubo~GC?>NZ3ZEVe3Z{%H^xv}1dyNG!kv2B zNFL_}mr!;Z^vR0upXL8D{-@O_OO%=3m{9T24hajF3@Npb1e&XWZHzN$nCTb?D75U_mY z!S>?}-g+k!hiGvJk~;)BgS{>`+m|l^?B>xGH9?|9SA_Zcfhfwqrc;2&YV{-ykp)Z& zNU!MkFd^6KX??gsamFchICn79%JZ3pI!^DKYzqhI+;@I>w3oM`5d*`{&sVBd#DAXi zOX1u<)ji7Daj*>9)8PG4KUd>{3r2s!1kws`1G}|<=7xEma9@(m?JiqZX!Oe~exwz0 zAuYp^KLHko<740mHuFH(;jPo5IZoO?0>YlJuh}~z9f$TcLm#uMXZsPgol38Di;0g1 z-DKNcf}6bWl#?@lrrl!9!Ok7fsr|ABwG6@qJRZ=z7*pWOc@iG(TX&A0R+@!9W~KgT z=#8NFN%;v*X%Z$)g}yD_q96cZq@^JKe$Oo?86615;bN;mrHR8}2M6cwok79bQ@Jab zE)@ksCc}blC89$0PtuP$F`shLq)m7)da*ut+zg%4*%kiSfV+=e#lsxDB!q11{#K1r zFIKapUm+EplGO2=rwoE@_j&&qSqu&N9PxIf`KYpN5zgbwcpW4U2G89{Oo*R^?8h#9 zk0pz0Nfk`}o?@Gh5oz!3W)Qd(1hGSG%>_Y9^+H2>UFyqi=7mzurvCV}-0GbiNOtMn 
zd&}FfpEiq_mVe?Q*MVm|-uDRA3o(-==;yPpL7V8%_Aivk&1k8!S@ne-tWK{no!rmM zKFyj81sI}5X{qIM zNZ6#rg24gcL7R$^?mQ{5Wtetth-C7Or&z;7A1lHn)C(bV)2~Y5kAYCCeA`kQ&Fp>t zcO6#Km8@Y{TM1HGRK>3RACz0Xf~%HZ975E&8Sf|86OL>4`_aAa0aI(wb@H1nm}#x5Qm$B-Rr+?X$~So zuqkLZvOhUx&xKe2g&n(h3$Y09WDNX0pY(axNZ*LLM@pRypSF((pFxWZ0X=X+tE575 zb{G)HiRXOkYmYeCE)f-RhCoq#9x;;lvBcRz(T2Akcvw9y^U8qcT(!(_-~$>HosUpI z?$q1fgNn5HDw14NqPCTTqC4@?hh+P{||~)AjkZ91DShIJIEeut2T~fpK2pMvqT6uj?=7SMsRpC1^Df zjUR9yF9ji9$9cA*RAIXD_F#0HvyvHbNE=X zUg~d4e60P5#%Z&J&{w2Ua;={_O=I=LDE&q1&wyy>3DsABNf0^<%qlH_0b(I|Zv0-P zfzT;Y1Syd$eWglZLpuyf&SMFL2B0fsF=mlyW7AEYobzdt85wYpO=eZ2^dp!q+{c|D z(eutRXHo7{N*)|y$*BCqt8s=M8|}IK=B+XEI|8nyAmaLSpx7g`!Heq6XbpxYViZBD zB9`d&zM8WEd`OjkQ%z0#S3vlA-8yFaZzr;@4F4&v4u-WHag}Nrw9e~ESg0^z15w4j z?t7)(r*MIxiUCn|?gw2-lTeys6`n2Eti=>o^4n*&RW>MpxI|BaZw_5I7r4J=)~JlkjGQmIZ~P>hEbH~v-dQd{-|Bf z!s5MnkAC3&HnEmIAg@wk(~3%#LFT&=jWxw!**Z82ZNch@fuY`th#|o_9R5?~o7}g? z{LRoUOlCxgf|~H)!3{!AW?E5wVHj*YbZi{BH*^xL=#RFOZhC@&5g)E!~Zhg25A%+|zumAJ?uf!?IO~8uycc@7L zm%0-cf!$x4D%U%Zz4WU~6(3!Ew0qHUr%h|l(#RHEF01$D_O}xLlT6@zX5WE@_W!1H zSmDlDffP(PayawuKrE$(gfe7bQ0RZ-+AJx8=DIfHWo_dd0!9185U#u~)*~9A7WaYq zS0#b;_0D_YyHC8mXu^_?^Rfa8D=lLsD!RXyAe~^C<9LeoYFT_`E@Pin?TWzVj$nR(Q%ms+^Nk^$~v0CTj z{tavaSL^%)^__BQydTF{W1CvSh`#BI(A6kM`rItw$;0~2F#7aIQ-=Ytg0wSc){1`n ze~$$l6AT8l6Dv$qpqr07^eV7KL*Jz>rcD8uG~Y#K+xH2IF13Tuae)zsZDoESoa`3_ zWISj5v$6n&5EdrP#oLCCLtxM)!4pn_Oqt8}nFHgN4i&kd(Tvqs8Sa zX)qtcyd)`*79OT+?Hnb|dmjMTwcItG<#G@x2eQz`jywj2{L@ECe-ujXmWH%9Ho~ zDZ?FRoDf-7*<5OQPGZ4!6vH8tWS~13L|`1%@Z3%>W6B?+GynvGxJ|eC+}N&u+?E1q zg2+^oxhN6x{!APH*G{|AdMi6Ai3!7zLR^;-sisX%$k3FV~j6g>BDMPd}EUW7Oiupb4HL; z^zJa7NWii=aKCd5nVjULtA>4PiJEFnqA|eA5DY&vM4_bYgUk7~ABd@4fT`f0n||KE zUH*Tpc1BFJ<7Lm&fyaOERS6o1QL_aC(_!xzb0(aRTx{JBeNFh~V#Kp}Rf|p&;x(iT zYa#}Kn6NL6hOiP$*Dzd@UpI`!m1ea_3UlOR3Dxg`t&GcUAxRX-c8$e_Cb1rhTLC#9 zZ^HL_b1pW>>k3Lt=6aV@9l(P(jdhCRl{thk{`_+j)r#OL5vsl157f<>GDHzWCc^w!=HB`f!bZfETojS>fyZhXds!xJydv)F>zmTUzs-Xy>lTdgDF~_ z%mAu~%X}~ikOoJnxW%W6R}8qDZkJim886ZR9)Hg#>!JAnxzCWW^H0-&Y5jXri^^fI zt3V|o2aMmZxt7I0B!*e=XN1e1$<=wl!lR~HG7=Fl=p_7}%TjfyJyYEQFF4$aOn2<7 zq_mc1j|Z0r=i2IcR_U&fKJD0@bxL$%b*<-Or=XxIs){)6V7@eum{9KVpJ+6079W>A z9ID$9Dsb-&?*C+%WRjaO6+t$;CvSN^xnYu8K8_2 z(TTT`ukXd7xY~qug?fKpj97ReIRkzwSYm^hbVt@kV`*FkHD&%Fa78krmb(*qHtd@D zT!kLnnV-y86^aIfXg!dK7s8-Bz}PARej)2rYM{=oDHlhP8SQ*KpczI@Xd3ARvdnU; zb1X)QXJNHE;ofgU33`5nP}gx8WCH%t<>@ZsQ6)9iPzt7{`0992lnG*@!zo(YFHCbx z7g#;lKQNnjmK5$ffxzgwHU;Jb@nRgm-kSv=Im;yLcgLNZnvL9kIn+NMQ(#X{ z*BNFCLAJ;Oo9I)S6b6|y0BqI_=^HltJ6Iu5na$GahHJ2q(rqsXfwl}Z(WXKtJYwCII{ z1RijTcL15dK5ZmS^k$frW=}kk@F$z#tAY%uV(Lr&WncCFzinVh}Cg5ORV*5f|!~q{?SuoVHNe?^68X$YxZwXTy7-X<%<1%Hns(r_M6=mG?XnY-U5oJjx3zPeevKH+Lj05e-LGGu_!xNOY@VUIBRht2F;H~rSIywX9 z5IlLEx`W+H)!g5-9CDYa8e9u>Q5t6u(NgVA>b zuCbXX7tH`h1aWRuz2%lSo)lv=S5!5n{Ya}sxdnW$Rk}_c=GteA`_bV5(~ymrAC&r6KQ4>SCTK$7nmao;CbzgpQy!wz zeNzOkMzkAQH0E*?^u%g9@bezQLSU2o`vvp3N+OJA>i8PY6d2y;p^Zti3pp7( zy1+yMXyuFbD7r9e>)<15^7obT8Y6tKlcgge`MOjSOk&6rZCId=C*^iB2pSD`2^1!G z@aO%`CyfJ8sb=KgUzPIl7}ilHrH<8JZPG zk^hAdh(h!p*Vz+b*Il(_Qf3`~zCFg@?hR+ac>=3`t?WQk-;QUucP<$5hV5{z+bWGmaI{KJuSNB$qe>L11U6+p5yRl#9j`?x%OA`Dg71zF27l8`r zYIr*g1VMub{J>6q2Ndi|;q^WJ8mr~~WSpHd|*6h!+ieOfn9)ZV3a|Lgsj zvaXS=eU(s~9lm+PL>m$)f(wS^JMH-w0#3Eh^tofLuhVt`8AfFjPjS<}(2O@g2$g#& zVe!q>0Q5PGWFXzp>_)6yGWUFcK+LljP#tD%`T-5{T^AlYH%CV!UK4L%1_dcQW`f@R z1b8*<+0(0bK80`ed7EJ@meCXN-I%_e>JofV8XyGu0@(6<`MV{6T8D_*FkvG??{_#~ z{ef!s8VMpwiN(F!|D3kbiYHg41Vpxd4P*dRgDLeVdzi6z^?`}p^PWNKvJyRNX4Og~ z0KOx63$We-&3unr0$&O3x^` zY0BR=vraP+AG6bPG{p%2t;WAdipsyf<)~>H(q4urUn{Ugs%!c!GKqyvXx)*c=%;Gy zm*ENZks~7fcTFY14IV=2UV!~o$Bn|iMri{A<2>E(<--!_;V*VGk1I)IrL>-Ov&a3M 
z-S|PYC{9#vqSFrwvtZvP-E5Ai=Qju`!vtq_$C{mRJmT|&J2YPfrAMdr$2(q|Ny>ox zjOQ^tCz)QR4@?QJ+j8Bi@W8NdHs1Osa^VUQQh*7 zkuboZ-yZZNf$}gJ{iVH3YN|@*hC8Re8u)n&P9-DWB-6DS0Eo%U9nC4#D`Gpxp`wP_ zB=Q8T^h*g%);07tJh@d&Tpv@?Ba_~v2wJTC*lypSdH-(KL}fLQNsbN#m31pz=cjW< zHqx0et+CL@p99_D#m=C6?AHpO(AVaY=eWSJGSW_AbPRz1q%OPz#q8yXD{Mf29%@$ zK3p%QFh^i`RNLb1_(bptDhZ2}D;wG7v#Aa>ay1$s|Q4!JHzFfv~gi&>Dtrp*dvEJmSm6Wj@EClNW>4K{Mvu;&(FCPc=b8*ROHKLxro8FJRD_8_VMzE z&^o?OrMYi@0FFq^;<3I?-YjA6Njp9P#+;A#f>Qszux`iASK2rz*jF8i?q@I+_kZ&q^nse7YBbjy(IKL>0;O0vI? z30#hxH+6OXVekV?t&!c!$A)tNZJ1*h>+F6$D;N~R10;$2F0=R)D zxB7d*cv|68yPO(>pc)(5kz%5jsx@C!`BYuX*^w`);P$E?m= zEvon2rQAqOZvKuc99UtI8;3Ltz*bhJlG}7CE!yHFoa7^;h2u0LjeP8{qeh*l_isPY zFR3hFDEWl73OKJx+K2Vridl-=bIzGQPx^-hP+klPq)vC>uxy_2$L-KznozqjqI%Lv zJUBmGPX~rz&P|q$Ocs((J3*gYO*~F!_6;-ri)8yx2mmMW>|UylXVrQ#Ss22;UxM~^ z^5E!zWWqiLrsCa^%*X3F#priRi~#_->c*Cy0P%jp!q_q%mYE?wk)BF3#eN__2d&2o zBaPs;1CK3+#-y|t3x_^q7ERw0@id-n3c%nJqEy)4K*07D50!E>_mIJuL?Z3)E}$N= z`LV0bQym{l)mu-Os3$&V3ZW!_?*)7Pj5N2-k1*2j(-iZ#FTi0O(njZ}!b~PiJ_D9ruBA35pvT~#9loFqk#nM0E2euY` z+D_IHe#r6!oW*$8vS#CRyau~Nj%dX7y>2)HwV(&{5iC~5*p{ft4ZT9#X`_V+t{v-C17EeqoOB+Gpe zS30L@-*+it!hcJ^4UaG9K@G}6r2*BI$_COXg=lfu7`1`AIsW&y5E*W(9@-)+Io@RI zg2?)BNo9m;KRgL}ZPfOFPkp(X(~YghkOqDED;bF(dGqmET|_K>mI|sh4JWiT;F!bimUc z@x*62f>-Up$({YhrLUdY3CjIfmoP$#o$ptr_62*duTMA2-3<)#w>@Cq z@=0f-p@!EK?3%Na9ni%I>$8=1xLAralGb2AXcdA4ksxbhP=;mR=TtrrkU^n zeNE-^d3@?L^LK}u03L|GBq)2qPjPtSvtlDaCU%>S388sGWjf>+*k|O zirJmtAWUr#*`3aBq!U-NG*+NXcsmDHhAYgS zcv}O#6ImjaKH*TMKx3h_R_kapEvf0MB$&Qt&q87pcSww*bqh?4bpT^ejmQEP^84F) z16h4{x8_GDhef^Ft^lETQ@obW3?O#kPgA`touzoYD0V&F<)k9i#O|x!^%Jmi1p?Gr z?GXwdnF3yX9R1{(4*9c90Rpvkc%sIC3mFNO4xquHV7I83gTVnC7$T-fPS;$t^q$7G zuZ1B6nUwJdnwV=6OKRwhjwKWOcyB|S&kYK0Xd$)fX_cr`r&e@t6T=#wZv0mV@RAqB^eC)S0Ik8Wai>4MU!D zfX0?-^71rJqt9qmCFsgj9PyjNT z1|4eD800-1^X#OTmLSDrnFqzHGm;TlQkVraGT{dOqRMe7=9_6rv`us@IWFjV(qfKu z5)E`P=20keGDFD>5@i^+2vl*Ubaq3?SuS-E$yx)CdB0n%wIiIQGc>q^(54@+`@QsB z=OF*K%(kq>_uHh_d$M9|4t(7=QDRf|3O4d@tus#UQqUq)7rP=} z(wE%U=wiVDs77#UKif|9`Fe8r?6oWHijWPl1J4id9^aa=GZ|3DL&RMcO$y}Tpt{oQ zT_6>XRrTb$nZQNBLz0d1z3{(H%YYI_3e9Tna`#qAR*XrIbzM?x^Dwd_r8X(A2Dx_p zZG&2}YDc2)P1s$n)i|*u7AtGbbr~FjUh+r+D1HI7v9M~)b>+yyAQ72x2aCm!JK_?1$N0}Z*&C- zd%J;RvbE4u4;F%qhOO9VI0#XP6?f2Z#AY(ghPox=v(rTXmH6EDcs}|Wzl9g_8%PDx zH;@_T(lOS~P5GUCf@%}bu)cF+66K=WgA3gR57!rf!a!;6(bm z*6(ITCnQXhFIg$M$%F&$7i@oQ85&8B#XG$Yq&rrFri%E_(UU~>z4}7$?~DmGuZVfN^?dZ88p1$TytVWSc^D5u)EQyu9YBrZgT!u^K#R);vnkW5 ztMy~YxCI4d3vhqBuuCSfsS4q`I9K%OJ5)`Y(!Qrs9rcJ&<$cBB_(h>9xEQanJ zbf>+T={+xA_hI-?UU$M@$Ay~SJr06^AgG6gzwm{_TkvlT-c&G%*UTE;*x~U8Q!#0b zF(&@i`F_^gAu8$3q{VjOi9{Vi&{T{kyS7wg2wh?bz3V$A_hz=IszOVsAWc@o^881% zU9Ld3rk-BFxm%BFVcT4%%;(i+WrG3Ci+CA~yPa$?pJ%+c^8YPopNA;5=M&!w?{zCC zZE&M#d6a1LS)oU`6EgL2kHDzJ_ZU0(!rxP_4)>2xF4uR^L5N95ynM1}=MjalV=Dl6 zA2|j#TIRG1a7%pdz+V-6J;OU1rw6&{L50=lcYv8Nug9@rmTYtfEv6ccKPyfRXx00G zElV((sd1E4kXbStM<~rol*iGT&g~Z5Ta%AN5OB>U6&>v<0hq+A)(e3q1`vlZHH$zj zT6G!Z$jpIXJZtz+`yB!Q5nez#cQl)XAQ~C>28TX-SfB1~`l`;yMf_o*i$`Gkzy_yM z$iZm}WS37KFe5HijXnlG(Ljz9!l>j)Ow$`(tDsz`b2`fzWL3-C#z?eAh+Hybue08cnMzF2Mddm zRYEy*Pu8+6WfDncs0sO;H-mH>Z-9X#<7Y*#Ve7a8Ou(J-(zzD*sYfIV-YNWyst{q*7 z>`=fUSumMOoj)g2+-zLq>Me70lQ5LKNgb3f5|o2I$L1gp3_TuGXY#Y0_AMZXnA6qy z5~QP8-NiTvvg8O}PuIz?tz%#OVABkuuIK)Ul;+H1HTJ6~f>Q-|nrl1lrp*7S9$<1$ zl0eA^j&rZ7qxfhF>9%RmnqNRvTW^S=_@J~c_G~$1Jk?zQg~!3?J=jq#8T*-C4}3Zl z(Vo+yd^?)S`&XiXgpUrfy+iGU+apIbESJ}wQm0&U4uBvnFS zKU~3r-7<%rL&Ti>eVz(9aIoKmgbs(MB&tbXJrW^8n-WV-MK(byRz~M<@uVFvg>BM%B%D@6vgH?FaA=&**EZI$Uy9CKx0)X9!i*Wo*E-w zHxUp`yarG>ntbMl=se%7p!x4i*0w0t;?@ZQ{^O3`OA>h8c8;*6A&^i 
z0lQ|u8ho$g!!;S?1P5G3CW|hSGe=*>;XP{_q6)pwP=(c8Mp+0WS#zMPU87u^X8D~h z%DbW(k9Y2?66$n`dcpqs^YdXkwPHl^29ESB#m_*u#f;k{OJ^XhpKe6w-55dEaJOuZe%l%bu1XB(wCLxgJFaV>>#6upR)TtLTzaX4+ zteubwxmr9g*;hAOipsqFy?Pq#zWAmgC99D-5BGD$qa(5;Bn@}SoF?IKS=sb4%?GGc zV@Ht(t(A(RX`BeZ&)v3<8T=pJ&uLAEA6)6R^c0hF!tB4zcZ|1fS}P5g%P&)&XwkGy zN{}))5BlVe*JkoqPy8TWsscXd>o>g}Uk4mBg(e`0Ng`wTpo(MZP=kUjwTy$AC!s4e zCS^&<5-e$Lf93HnL{yq6t-|=>DBbS(M@$lvtkeYbw5xBq10`|JP1OGWlmZ2x>F>FF!iH!*gmI6TSAxni zcNOi(8#aB5!Ni@fH|g;x$kh4smE@Zz&llJM`~(9ZURB^iFM2-)jE!#kUrj`)@xZ(cK41s@1Qcl6cAQgRA5^(yT$UPa2aT;otOq~T z@lq%}bwzI&0`-NKlIsMw{e=5da_~rbnOmVGkgCNC8YwWvww)DZ<Yzq z6>5aQKLes1`=#mSV=Z?y!{{hTRWMRGS+jQh11boN`bS|&!*g|M#c~RsdB0@I1bTRq z&j_=9hxp}^*&C`gtS;$Gn28&-KmE=FU<0NquGcPUz+9gs=ph8KuvHrNq(ps1%S%N( z(K@;mZ}sK$L?jK^-7+0?Mq0S5(s@m##6JFM5r#I}7N+A;DWVLB(@KrR_YRs!3jEMG zT)-<6^S^sBxvNe9s^qI9)b|d+v}v6u^Bi0sK@&lnL}>b1OIt~rM1c{2`JD_OgK=f{CY zBW`kPMmI%(GboMVCx9Yr^u_V_12RsxfT+5_+TADpup;y?-}}O%>GW3@g1!!5-He9! zE+1k?Nz-XR%Fd$3Z<|w1!-L_uV_JdsRuwt(pSQk1ll~3}epKzk7r8pSVC{XQNhhUN zNRvWBPzRG&^<1M0r@NLYTY@ryY0Sea%(e)YbDj8=G@Nu7rQGkRE1L4!+UW+^ng!R( zSPKbXjF4>gHt))WzdJVfkFvM3dIgkdnnF(=h?SQr2XE?WOyugzgf6{op6S=cHgPPq z&`LdN9S)8;Zgi~s{RT^u`kag+B;lu@j^RIW=a!e3GmUlN*TgVY6CIn=AAcq8>ej2v z*~y&z=B{sgr?PRfEvTF9GKZ+jOicpZ51^4wlXOH2z?i4)> zk(;x>7{a@hzx|&9)fv|uwL4~EWms=Vp8KBoueLg1ocDgK@H5re%%Y1_QB!iIswrcX~WXZ*n;8$XG;~A&X%B7M{W1NEmPB;z`c`s?5w#k zaK0w!1S+l*f@0IUzWuX8N8_a>yvuiV`qP3gAV?-$>9m0fBUScBmAESGWsum6rbPyWq{LI|Gp944||3VU>dW( zNISizM_>!w{k$>G&tC`o@-z>nL1v#$P6Chpn)}n(<{T5`w^0?-fJ$x>Z{rX&6r32% zwDi~_26X-8d*$r0x+kdv0IlsEkXaC`mCzkR;z&Vj>;OGUJ?9R+S?i2u2|aD2v1B2Y zX$YY*qQqoU5cDJrA|)<&&f5q#?+MP@1mVP)gEXQ^>a%V=!e`vj?s!Nq0Gh$CM~Q4y zd$x^}x|K;Y=qfuIoYBva8H(HBiiBk#Q;D-5n>2f%MSJd{f6 zXZ5RmUnaM1p+SjmVw~BoNTq~VW=r&(xON}&7}c?I>@L4AB1<3@C~~TGYpDYO^*}6l z=uYA_J&sC)^P*SJ3k&8qOabDWbbC??NL2zQaDUYc#d)GnyV~wx1Z2HsT=Fg2tRh54QAU9%q!v!yp0R;A4nHY|XL%rUleLp96#2?9J%9cU(LLec9U`#r z=t7u}Tmf3ekH4Pu9(GX_USSfj8P3{iYk^Q5uuqJ;v>;MyMT%owz)cf1Ag7y1RJw;`}#oQWDW~2-Bp46OCrmYi_D2hFd4p z{Tz}-GY9E+sePG{@T!=KzXttsCqP=Agfp5l+#GTFJO8h~Ji{-a!o=G04uyb3JU3 z+qU`Gr*f}mc(ZvY=tIoWU}R#7USWC=UFNNx*htBKW|qjd7iF^pwPcIi2ev4=I;YLS zFVtFu7ubr$aTF3=;*&yyQkJY+WF|5W#3foVHPy6gEhjSg z%AawHZ_$E7I?5Ii0u}-8jKp5F{69N3g0i*>@PASGlI09iNT-$3OYSp^mDNoC>gby| zk?Y)*2RU&ec#dbJf1IPcq>Bm^f6j{cV^=DJF0^KY$3xsLFqcEwgF@zdGtqKRj`*0a z;OFuU+!|tY+VvL_UU32uD=UFMyKzHr#nY9{4PnSQp#inM<~F{?!oSEEoCZ1as3+xmEMvV0bq*wBB9uWqK|C^Bs^ox3mLQG z%}klg@zjv4Cd>3uaPd@;5)82il(jWODT8>U$qQt*p)u1DJw6hkgaz;rLEW4ckQFj? 
zY?_4(V%Hp4!jIxEfk3lIF^o%5R1A=KL5!liy`fl{Me^t^bNLJn%;y8bx7!5FO#5rp zdC=)fxZLf=#7tjM?0%j7Fg_jar&+Ku;U3>%{%Q$w#i1Qx` z{rnSb3OcjLWi&)I`~k{PaiXL7vx&Ca1|Ksbi9?7)iKE)Tt4&v1{5I>JksYi!kBi1QNQaPxzm`rWgnErW z&p>OBYLT66_jIooKhYuoJ|>I1To)krjc6EB|N95A5KJv&7Q?UL}UGS&a zUsmOBf>%hF%@k!bLHwZ(LHPCkzm_`2!J6)22LuTR2Jye?_89lOJ*0T&ET!vA9923f zptL3hMs`R+X-+}MRq>2()knaaZgG=CuMBWJ&JX>^EA6TRYVLyDqnWNBP3~@O&L|el zNgiE+#tqG<#{Bg{ArcL%4k^TXmsQ2Y=+^eTbUOC=>PH$K~G%%rm*aJEzIVH&+ z+Nd5UySu#Zk$`McSh0-oHLdLNGv9v&P^&KllpO%pNVD zxsDm78pn3yJtHl_p`sD(`ZA^mf~GLhOz;&h*MZ&VQ9~XtY$SVyUV2oik0eWSW7K=M z@t1yaUo;ljxaTZ!(9L7|*>m(LQ`twi9$?(nRLKU@utw%rZQE?4{XE9Z zY}EhuS^yBS-orB~>eerXNOlPuMcYk{f<6~57E2cF*na{H$b5GuvWB5g`T<4fIRHQi zMe>?ge1}JZX{UQq>+w-o-t9X_>Uy+#uH$gfuG9OjX{6oZk+b_?TZHNYSOIq(XQ(-d zv%j;@PI`eGE@}nOR$IPwq{McNQX?}jiZisk#iJA*d$Jyed1;e2G)kZu-RvBS4cjb| z`DG0V`}qn#q_063jXV|o^MJnZ7s3EzzL2r2G@f78N4frR)YK=!+D*y4A0`oA7-YbW zatGvXTmXns{H}FSA_r(_Kc<;~9ga9@wHEjMl?!@j71a=Rg9el|vFF!zCq6aa`4F?~~2^nO%o%sQ<5(4;Rp6K=LykuS$djslx!-o0g zLN_YRe(ub8?Mv+%Y$>x^d&bAdLg@UgC$lYrvJx~kF$E+pZW%k(;JDaON>rAq!6cQX zp^%pXSwpo-tuGxxvbG`a*hp*$RxjcWCg>WVn3OOvrYx)i+OcCrz}F#&9G14TUtH}X=8 z;*4m!Dps)EP|o^Jz2XQ^r22|wR_xlz{^xGa!!iL5c*rZ*A^0ngNhZu{EDLoPPrNrX zJDV(V^I>v3rCUkw7I!q2W0K+RHyc-l+XtJ@cmfM7Uka`Em}C^Wp(NxY_79bdKHLd( zYy=L5?DAB*q`UM*@ivRLsl+tB8e!4q$e4=ftDfDsim7cvUmd0sqs^*V-Nwe%Ww*zKjYos=s_n8IRm?%e3N zeR<$9HA~fh=Y|6YF?@5D?gEk`aNCUKR3GOIoVEeah$eOCg!nR#C3MC&ifQVfeE|!j zEf$?B(rOm0f6g{F_KN-4Gr)y0?iZLABSU#mK-D6EVufpYb(y8#EJFcEhzXOK0+UTS+VdMbTP{`P#wFX<^^76kxESTs(w5rpRcRH9&8M9L%Oa`a{zZ)QM~0 zMuk+cFt=#5t=v`U_uYZ%nD)0!4{c>Bw-z-w{31ORSj%cH(hW-F?eZl*dCh)g%=%pj zfk(>vAglG^)Lrv4{63AO89b7@RK&2IkSo4s!0?Lo%0=83jv{>*t*J-PV!idwYo5w< z{Rmh`;^{Kc%jyUSn}ckl3dmMD`Ts5JdOlCxYl(bp~|dw%-@4aAjo*3yX{)NKH{RWWb4l^D>fA z{N?02dP3UQq+2Lu)V!dCpw&mr!1#}+@u-sU6ZxNVU**0JhJ6 z9|4!`;bzBW?NY&8K+~!+^gv|&ZOVcrlqRV)E#P-WxMaSZYCgGCQm!itNUbtW9==^$ zyG9`o-XLp;JN+XfT%oLj772A|78BLzpWtu(lC$hzrR3gh%qbM=w*Jv`!Ya-9RZ$Zn z!cu)@Ux{uf;-u>%w;_xgfMJ;PMY9ek`TC7`rY&o=3Sm>6e14}TDz62+Stp~jGJ1&{=?4Uo%AA5$KHQRFyq$7fP_@8-2O z@?CfA$(yY+)tg93VhyyNHOeRTCa%Nu`fClbfprgQ0Y4H#?-0O1$t5)ASJ1bX=p(lt zVy!KkZJrh+y7L7lYq*C;awGfNnnfd20*;q<7UYdOOU=Q(Ohc91t@J#Kg4Efbt zM1B>O#VW}f*-fO>xP8bk!|==NJ7?T4`}884Z^@l99uetRf0MzQ_=S;)i0Wi3Eo>$L z)J!OCCHxX z)4};qV+>`WI#OWO(>w>DEI#qD{}O3?bAHSoHV)4bMbCcUnnuC3@$L?=MaXbktSjP< z8ZdiRu}{QFJbTI%FXrd#5D}a}5NxNSIK7}0&SKM|5rf83O@4{?!Sj}ZnIaimG(bqV z_@wPG^) zLcNoL`i_pZ@u0+VA*jM$@J%KeCy89F+NIGLQlk!|m$VQI-xk5sX8~8}H@INkYZ^h8 zA_3^Q?p~Q;dY>BsosI8$Ul{#l!Xy!n`oK=sg{R#}j+XXd5FaLbgH9>tPj@O^l!+q9J6G1{0~IuM|rzhispgLTH>jMKOP644}`( z(arWDa(N*#>>&qxI@oVG|? 
z=x${*sy$J0#f{Dn^Pz4L@e5$=@owR;Z5tUG-2sj{@qq0@D_#2j0xwBI3Gb~(WvfD^hhHFm;|J+j%?z< zwBzyV=C9MRb_}wAtd?o(wemDQU(oDfaE(ES1Oh`3XNC_$t;FH5hXu35)B#^ zUv_A(d2&;H)j8bn*zehWfyJv-qkOgfeb#}o2RdgsRy~3(CmxE4g=1~_()8pfofU&{ z{lIsky`pd-%f*o@*hc7i*eL$q2J=81(^|e`XzA~j~@b}zweTXNSp=esIvgE z?L8)itOf9;RWZqV1AnQhtLydJ+$)P$w9Rt(<3NL?k+_KW77F2%V0&@l8-8FfV8}1H z1;^59FOzgwAmUhuwX%6W=kSGU{768Cpbjbo+Y^CQl&Gf~_nTev3&VbYU=A!n>&I9EA=*u?M`e?ux{4)PWi`Z=SrhMSHja6hF}hl1lGM@M0M3S0>4BDwp& zzsJ!R?5=9sDGE5bRj?L2?gOUx$J?W+IqWaiuRrmBf9a#V)Ezls_uG~djx1)#Ywvwo z+Q@(z?w{>i8+qGySl3n7)g}I>g~W_ytT_q``!zvwwm>27TjQ^!4>)?2ET?d25$Iv7 zy>F1-4~53lIVV_iVs_P__Y;K0aNrIL>C(irN=CIYa>HFHs2tD*%k=$jS_?AW-U>5U zP$@UGr3&gM;uc$k9TayaMspnZkDZg*VzE#`DJ(41p$vkTPmmD;2|72EJm<*RVoP)C zMGVPIgde@@(u%Ju8vIID-US!|B?G~>>{>q}Fz0V_1rJ92p1*c&JSYQrV5Y@o^O@(u z9cPC9-=9lf1F*tU%m-uda&D1ZmA{1LLrOBnu&}Ic)wk~e!Coq3JMD{y`_WPnz8(-j zmEiA0tg^dJ>ffr(5oZR?{+WPvgw+$KL$6h34Rhu8bCOk92nCKGzoAkk)jmXa%zb?0<7t`_GoLi*s?gm zV*#DP3D)>}Lk5EN6=Nwfg5oq56ehO#yn67>4vXN7mV3ZUbEdLD;XvM?O=n7Mw7PAi zifvK`I2{NUzC3Pv=eo>i;$`OwgJ~_?Y1@8{pX}cndgxYZ+-(Mqt+I%BmQc zHp!J~W8wn=I}!Tb{(k#XmVo6!n`#+w?`HIjAPmp%uG5$`q05iOzVWYV6zVRK6*5CE z|EhHO!2}125w}z;!*q*2bB%U+1a}{!pbef;+vyBX@RX@ndX@{xHVc&wHeMquafW~Y zuXv^$fo+8k(%2OGYMugFOxtrKD zP3ny@pk-lMi-J=EeFIGM-jj*A&*c2h_d-?3{K(}pt2=Aa^qtww$$q)1vJF6i&*nfx zXMP*be|9sH|Ku$IpPsot?f&xk3Iw95noW%sr479}sb*+yXGa5uLV7?dtRg0vt(55r z5JxgE#BeF(@=>u$Zpys$48uB`(?#}AjTsz52#pC+gi$H9zcHq?K8oo_%*`W?4khqV zp*wecps^Ot?N3-xd`Ku`e7rO@H;Q`O3pt${sT}D*CeH;5pO!bRGK2EgnAOShYj}H#ltOf7oqB1XhQtK-cJ(F zYk~gpQs^z8oNTA^!@3Y<-&!)D93IaqIE1I)+(G~>pIpK6?_$Tnj11}nk&O81R9lp# zSwjdLn&$ZE=Cwz0UEjSg70A70k5<|Rt|;rB{)+QCt%~y5%`j#eNiWOFlA!YizLN?K zrkW6B?JM@KLmLJP6FF`B)bv=TrLt5BU*ag{H2r}!LU+ejK}H)1H0ls=Nps7V=GmDn z*j#^*UPO@f6ZFpsE87^|6m`M#W-Pp_$uDr(`OtfFAE8xZd3P89e z#+}}_@c0SZm%$f7uqdbs8MAwi_A70T89CTvZOu8fwf$GDxvc%Lqitx8IvlxmnV;L4~(jd_bx}2NcDwA%(*Fv&^8%r=MLJyL} zks}+i=4tfh-&UOxCPA!!?b{_W6j89pABnJ)=s8d2%a#4zPTe8Tj^E(_%f7qhwBqS0 zE2hH#1Bj~X><0d9Zh!sn#fDsCH(NFlQbl~phmPaolQ3RV)Ml)$#<_(r%2&pZtzwl)#gtC{~ktu2%06=Vk&2w@+_JAZgEqSA|&wr3FC!naq8m>Ps%e&!yA_pGK`P-NOF9l=3d3&}f{ zwxuWbO##6fMm%=o!(kON!n)wPB>5_?l5|P>=%V69cVi;QMND2J3nZNAvqZMoX{U}; z({e|VlcSOFt8_kHOIM~S<+qIdT>lXOZe`0JYlC12CwrZU=bpyh5_?2FuIJQKMMRBZXXvn0Rm1Fp&NJ)?2^3 z0=Qr$ugx>Q1Ubr46s8UY4AQifrZN8KusRJpJ1v?(GU;&?mAK^*>5M1R5okh#iFjYe z=7#;=Jd0>Tem21s*NP zAYZsi9inM8e<;&RaFXqzp)dwlDCGv@M#-PGT1*)7;DxIjbZcD^Y{fzgYS+jlH9+Z? 
zodM)_E2=AKlv7$7V!Oy;3hX}Z$=yUhMqr5#kR=$7$CWgeL|Oopo*T>6HjU#y|G_LU zy*Y%-%}!3jmLo|o#Z-ANa{Q@+xQbT>i zJ|m=8h?P58FqyX=gFvleD>rADS->0`FX>21MBY2n2%Q6h{UL8{TOYzwLu#m%6cIa2 zk`WVfygokbQd$ACbb=Pro^`4O<_XoHBSo_l76ft09{(w&z0vE6CJM(wU7(c+V$tV0;I?O|q)DQh z?G6aw1NfwkZaWkG{aWInMmcRMZ$|0f!~0&4k&)GS6{FCbCO#o;_}|w}w!h_9uHphZ z=-j3LxfmtY0s(B6<@!M8QDF}t?Xs$}xp(6%#5y&I0vdAaP(BW99p@t~@AXhsbpfqk z8!-N8Fbf2+Qcsx0469S4c61O|Q^#O~(LIXamTI9v>+v~*1=rB}I4xQ7&{8V6ne1a* zRXLe0pycNWseKT%h9x2z73Bv;7&)sslBk?ek!S6KcCaHga0O4lrQ#T5a!8UPI|NAX z32`QLgUsmQl+T9Q-6Tz|2n@&Lk&$X!)*GAnGpRlUGnbMC8ig`|s+qI^H)@GbyAgd1 ze|Hyl${RH3?$Q>ZsjaPsFrNwpMhPk(2LH4AK|M&s1Oyz$zEc2G%Hubs)OC-v>rmdM zltlx<_3o66YL2)OFxK;kiYVI1DKc5t`QWFu;=b%y1~6}hSkT0-zE8sS&?BvlJ-8-F zoi>E`eiI9gbo7B$RvkbB!W);@$YXmcRn-U@m0<8Oz=)|o^4((yQHYz_a3D9~@6h29 z^I)52(X$<|OK;Mk@72Ypa85lSbODJ?N-&}35#g3ZI6CC`lVk9 zG#US%?+rn;kGTPKidp+q($D%Tm<)j5cOyv$5jRHgW zSv_aXk;vE7Gd#jwb$fmPr@gmotFl}FK?6eUBu`uS?#)@LO?`Ib@XfiG+u~;ot!JuZ3H~F=l-A_ z+yP`m4InKujc6(nsF%-x_=?Q4Og_dM6VGWYsMIE2lZQ6FHby2wz{Qq?OODn;qu}V0 z7JdKSs2jW&Is@wfQl=KBe{FwSs5Jw(1w9MCq7<_(uj?rOU8i;yA!^8b?yNpcRpyM# z*QuoZw3U@I7`m2DNA*7pnkmPgBJ8qGj!$zAR{R7R2x9o0}W^M?)2E?TwO#cv2(P zmy40T$}o)?_f+WAKMl~q=13Zo=i z*IwwZLU^oJhh1J_)Sv|lwJDOWN1@sj`5z(ezLzjC)sh7wH>#+$6TzrAMpK26{$yS4 zI3W{wn(L2%#l_5&g+wpegQKa_;W*}lg-w5QJ;>dx*j8>CSzEMv zp3?spurwStb^y(!w60{_2q`+iA8_m(K=(F@@M&m@p@Fm_0s4L1g4oXrz=o;zjUjN2n!xmZQeia&S^zT>H!M}rs| z$7sCE^clN~Zt4=^k(vUx*hpFI8XP6n;&pOWGqEjRkNkfEptiHPf zDt7}%ll}}cAjH+NL6WVhn5$G}0C8;dP}Nh2)_rr3`owsCkXrlSC(u~{g|0M{nt4Ht z4u7?~fe7H_U^%QA}pAP*)dq>`*VQmEP~@%b)C8r21LRe#!$?wJHH)vFoN;s)kX z6edT33gPe6o>@ENMbTh9-!=&-^)ZFIM)D3+gX)HXGbU~yM=@e%Oy*PlGV`*fMi=4+ zmD~5P^|}U8Z$qB;JTpx-u|1V?Z(>=z(in=}X+jz^hI!aW1BBc%kR|8zNM z{-pnD@g7ZB{lR~tWkHAv%ldRjpQ+j}W$wK`Akn^k$&OkKq}t}SPJxwo8uOP~L9#^m zV-WH*LBFpL@LrSF6xi8)n8^B>^pZTBQ8EM6M2+9BGycjDssFAgH$pgBnb#xJHM2;Jix{_!?J5=HKctM= z&-@*d=eB@@Shr&^18h9#bIF32C?a082*pqH7|B`=3E8NL1Gk3^5zvQ>RlP6*WrE|rkBLl?!)E5XKw&d1YRFU#KheE;qaNqo*j#))ISyO zQJbu-2kg=EtJ4GsbS^i%UN*yhR`Xu`hO z?<)Kes2cRaYG3gCmg!2uPp9l9JJ~edgz*P zb;$UYbZ);cwu1V)G=>V9x-bTqZMVf1w@9FRUsmG-SRiP&G~}PYGd(|}Lf5hKu8Pk` zkn^yCtm;V{Al|(1R6)Y6K%P?IFI@emXVeDf^F%@xU*UuRt4rY{>3bK!-jFNIRYPtzs2UQFVFS^7MPM z;W)bI=jRR}@b3U^CvM6)vpZi@94ZaqlmX2Uam@!~UGqw%_(^lu#=K(#5ww%2sKMd+ z)TLb5@U8py4{3mHbawi-5#dTD2!N2M$*E&3nv3hUaZo8PzT%kY0= z;f6CJwHVq;IWCGgPCz%yfE|E#g-Oe}-4J_PH+>@oh|zjB7fK(gpXr^0#)8n`_(++F z(wWei6*~^iAq7#AiqgMNQ={2Oalr&#a}D>WExV6N*UxXNj9xM=YS3F3d0Z_*-#3lQ zK9OQqbuBUx!zFBXR9RmtSK;n8Eb*Z*sgSvQIN!>w=)83rtK@lvx~Lx*YI=PEYkZn= zNMnG&RBsa}GYeuMWqol-&@`O`z8_FRc8}Eq)zP`f^hSlOrUebEj(#jti7!YaIOmPG)R~#ghkR5EjX{_ z2i9s9I7#Scv(!=ScGWu0R5}2SlpU~KWT#BGK`gE7Z>$5($mc+8u#8?d%x(w|nQ20b zj8mY#eOpn*Dv*Jpuwg9EFOY|9L0+-zrw^+6?>04bs{y(o;{;;wN$I`^iP}wpz?Zz< z15#=G*s3Z7SEPzKplQY=Fwd`2G3aufdV4O?{8UUD4W-B&9TRrJ>mEF1cI z1If(6mb<(gMn5kf2c^!XkJJz&peS$y5Hj`qS2GOdXvz1N!2Gy`T)?(Og{NJo%3Vl- zD|#-gB1-0&fY&ZUf?kbS>P33_IGBX}{nMv!M%4~HRmED|(EmGB36c~@n6d-3itN`w zr(ak}l-Sr6$kA_pgYz%TwihXaPvR2rp(`uZb0686%qv4F1%`y?6$Ujj_u!$}P_n4$ z^~{dSrb2;*o8}|x1onElW9K#XLva)PXde`kfF^9^-adkWs*a<5QtkCxq>Yn|@``nq zko<`UzV`bbormV2BSuG>q)c86moqomr3Gttb76eJ3SY#!to>=xOh!cYS}(IrJa2CF zH1*AG`7=xO;W7Vwyt7~JzGXlMP!B-b)yi^J8*LU^<8{uxkA6!xxGag?r-PNjgXBde zl1LP6X*7Eeea@g;0HYIO5V22V47nFOfCk}2_6j~lK86mtTU#ecT8=~$Ph&_!F)x`J zLWo6dD9;!4q0h2L5sp(D0bRu!-~TK1U~AQwuv_M6;j32PTDIm>>+SYu0=l>J90Qx>SMi9C9)6%d{!@j1pc#0R*7?qWKjVXz$lo;jDD(V4(ze zS@4p9Ul1vKdwWY4mjg`jyK|~c|K;HuWEGmX26rB}+8ktr1cCEkzI#m~T~QIeQc_Yn z0Sfc+fByV=3$~t9mBainmRKU%1aEKew_s*e3!)}C(=S@@(X2@ng^6|Itj=FO6PzCl zsmP+sy%a;2B=tap!~ZQ3!pN_J1X&WmjBApuZ4Sb|Cb3<*+VgKjllp}wsm!>dSi~H} 
za!mz;caGQRUTD;LMXhWqfn>-_ja)SkzjPyx5T#^ui|nlnwhVJEZm)oTu2+lKv1JPw z>v|ur$}3oV*t9}}=zPIQVgrDBIDcXxUW!q%9+1PJp+gD#lWxE0=e=hlBcX0`dC&YM zfa7MsFz8+u!Dxiy#GL7$htpLJ_xF1MXpkQSfUc~2(~=Kmf|xQsK5;3=Q%D>U?;R3- zNFV}nA4UI@S!b7*O3)ux@Af!2Ig^Bti+&7_jj?80fj?${g))dij8@h9lw+5`Ce&~a zC%C_zKzdWW1@NGvFhu1qoZPwF(#5-rTa}UMd_q2!v(&{Oeb|wLl-d!(Z%JRVGThm? zqeJr5i7t{A8Ss=?5sWTP)GO0;b22{u_SF9{nmQelVmvmYeT(6XMZa-H-7D(UNf-TS z1J>d&#WC1i2K$ER0VnewMEb&dhvklT7u=B0no18p4U-iKmXPZ0ckHU@?-yl>k3(;V zL9}9+7$3iI`t{`+et%c?ng{Q{ho#E`34XnDePiSN@Mpnyd!p2DIALTiO-IYfqxnfx zid;4Va?;^j7T6R*4#~hgoY|eSJeGkL$FBpxdRnwVJ6Vg!vCs(4d=}Asa#{Vd7w~4C*+kazXQz7x{j}0f!3Rg{0W0 z;^N{#0BcLVc&rRz@(^OXhumkLu?Wl1lkS@4FW4OKd@U?3zoOuM%MuYNqdQ1%4k)P% z+0L4;vHEUaTU%?!#=!v_3$TB@GXHmT3Zgz#>Kj4km{?g~*^UJ~7&k3J@sJaOWyYQ? zO0!4g*?vGk%!NvjHp-Sj#oTw8wnj2ZH9|F0R)%dIu-1P83u?IzKp{E-h^$26rFVqH z4KO<7^iTv+NzLT&#PMOZQ%Kb=G~lE=?E^kHrWTvU)1z!QxJepw5c5A&^AQL>Vo-KBs z1llUC1Q>=yEwVkb<5#IOxSolTA8MhrdvYMC_+Q0(TsATipzHM{CKBw38B9od)&U~G z0O;2T<0Nh(h$tVO_Rg#~(dJ#X1FzB}G?HFLiI?GY`H4M;dM% zpUff+!Km4#O77O8OfKov2GJ!@4#^n6XMEvT2No#mKs73EwUh4OXX1*jp{GaE4oDyh zV47(Bfjex__H(k~cBKt#Tt9942TKCER4w^DULuz!r-jBRQ8Fdu>j1bS3mKX+CA#H| z;W25J^7sy>7yUr%TT8?blqdrf?mgIMw~+d$Jioe0wt_6lvuTuSh$$?Mi#)_Mx~{oNmA!m1JHH4R5{CCy`L#Em@u zjLWm8DsGTdGgD5!mWo5HG{+>)*l}!HmeXh4WwgwfJyCcNODeJGnEQXXY&qd0h%A6}hdy8RzQrD&U-`O>-TmQiy zE${w}3u$_@Nl~Npq3(qPx$p!qqk_iJ5?yP2meLG!``Jc2U8XSR9Z@0Xqw&t}gL(va z8)WOLB0XeEXoNq+|YiY!+0IhP4*dPuOU$l3>?PpxGRx55(KX8K0- zoQJ@(sipqSN;fG6CT0?(ZL<^O-(#a09oVhLX-8QO8qePm_>Ag}c0YqKmXD6gp7x+s ze2~N6uB{!tMo&j64Gy4%5zlDYt+*X$)FMB!5l?`r9Gy&AUS8fjv5^Oy7H04K-}~Ev zpk|OO#lE>LwI)pM0K9L?z`#I`%z`n|t%r9a@Swj0=zBqgS1qy*aqY@LMlziv3%tZ?><+t;(BA~ zTGR_`P`MxU^=_Hc0=tlzaVHl?lE!Ja%^!BSF%d_5W1yND1W=3nT5y;2qoSiJ1?>C& zub6o}RsnDM9Unbe0a?U%*iJx>zJ$ME(Lx-d-8#5{vnUn`tyzwUir0>Bd zUl;qfvlo*?hmw{F<3=NXwO~ID9^T*)G!?-T!4(TAeS3JU@-_10T7%)kh^7eh7Keav zcjvgWPWJ)oRKcKo)bXFY&(j(`VM^BbzCgx1^6t;lttJCZ3JprhDs~Eul(w1n&?Wq; z;Zw>D1=TZ4Cif_{@3H{HADR!R^*a;>kjl^jzj1MnX(fV|yb)QZ#EE&jmd=abqSqproLNNN zpkdSeMUSC`MdJ#GLNJ2JsQNlk3dXJ*nUqia9jOm9nFXMNlYl#A9v_f05w-YAn2cQ9 z+Rkiu`Z-yUtr4m!>nRr#a)qQ-4Co3}@#6RxPYYwNP z3;MAXLdyQYHn85`;(npsmlR({gJ1W&wIWJ9=&l^hU-|)HF`2*o2N!&{QcW12g3rv$ zss^%MGbFpW7R1%SM!^z?ud|xXYScB(VX?V}>Z#N0ANgn!abq2wcjBBC|0egf*C7Bb z(gy_iHUU${9h6_2ddyPlpe zx{Yau^AipC|CA|KuO>H`O0TkEYLfcW726CD5<4fU+1au?0JP{6kBoI171&K{O0d?> zq)#0z{?kq~CNh%ymFc8;Tx`CSKhb5SG7J_HsXJ*?d{$dQKzO06{+Gy>m;xIOHSly4 zjUyfwWz8i#cC+|Bxh|R_TuWQp28lU-i(*+_GC@rUXetdFFSNRIR-l(-v*G-_t}5>T zY%wiFl%PSEuVMk9(Q_mZZQgET<{)?75?{N{Ryjx#9`DehnSTI}|tj0OmyeKwk>F6*47w7kCXcZP*#sH5kkYFr%cZ7(|X83~{ zzr|RDXO>XBvF6ugHZ~HPQ`C#-bXT@SP7Z$M>Bx@>$LPnc@*|#7HTl1ILfVYl)~8aT z{kX(IG+TpKa(Fo`SsAKT9@yI|aqTlW zN-6ekwifhBk`Xv%ey?o*eH<_7_*IgVlTDgHB>y}yF@gKJu)f$*n>Zs1P3kxj$H8S8 z$E#a)g%QDMWbpt`oE0rp0K?AaS#LAY;$ok@0se)Fk8oX+I$P|}n&8P+5Z#3I8+F3s zKiq5pOrW*4+~3sNPvqZYhRKZ}k~)B4uHD44GcBJBRivo3w2yN7ywK+%DpCdnVp?-nKueeE9b}FBi)n}ir|^*B08;7bS+nlZv@dv zLfrRa?`#RxQ+NVLH01E>KhY<+3k|~vo~jLuHf}e8Vt_H{UmgIe&f=1QvB_8SCre+U z_l1=NRljKx4(Oj4;fmohj{4Dk7**1B865MdOb~f@4J4OrTwVFt^I(G<2F7sw2-f-# z2t7WRtrY;5^GJii!nG?79z*R*p_Gccp08&bs+kZ+!D`FAZ0X~y8$5G_#ZIaLwCZq{-5)Z zg4nkSa3)E5Rc6^@*8TA>X}o^J4~`~Yl3Sa{677&yw*?w5GLWcu8|Qu^&v;j+L>8E+&4 zyps%nv4 zFAaJe+GlY@f}w|;cXOk)Z?8S*uyND)W-n2qS#Ly3Jvhsh! 
zzE)tl`{h3laAAq#O1ov2A6LVtYcv3B$TSrcAL2=3OY|uJ1-Bu=1$3vFk=P^Wk(0pA zV@i{Y`I~T^A4oL*^(Lq?Y3cD1Uq$)Ol?NHgt$Da^t#xCUaJZ(-ofB{0S4pZNQEO_m z*VNP3TIn{wsx|WWBeFf0Axl#ehUg4LKdJ4i^2z*!za@n)Yh<$pY$P$P!ntX1_VoW6 zvdAB4&27?@2mB}6Sn3_@*WmxHrO0aH+b>?O(v)`}Sp61iS&ftC%U;D z>&>hWkttsnup2b6W_#&MS+(&Y-#}Axm^wCFf&$TXd+07D0Q|bRbxz)( z`S&&Ukcd)+Ez&&Q1H#KIE*3~x<8xJJ(j06qoBcqJFaq)c%{RA+PV8TVu0zys91q{? zm+Wm?>(FbRjDHV!eu7dby+HQ@wrso~g1PaQQZWR={*Rx9pKhbWqYWFKOq%n3@vc5R zKVKir!3V;_Ll{3FHQ`H_BS1bQ#`Sx)=s<*^LtGE*9S~OEEEB)CkVvBCwDslgOLaZY zIDvp5gNA?*fro^EKo^66c=@aU-+t@=(tq>+{^$Q+_{{%5{b7M5g}O;sL&x*-;9Wq- MN-9Z|ix~(1KZItKMF0Q* diff --git a/docs/authors.md b/docs/authors.md index a5349b4b9df..21b0e1a1f5b 100644 --- a/docs/authors.md +++ b/docs/authors.md @@ -1,184 +1,3 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" +```{include} ../AUTHORS.md -# Authors - -Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). - -Maintained with [Carol Willing](mailto:carolcode@willingconsulting.com), -[Carl Meyer](mailto:carl@oddbird.net), -[Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com), -[Mika Naylor](mailto:mail@autophagy.io), -[Zsolt Dollenstein](mailto:zsol.zsol@gmail.com), and -[Cooper Lees](mailto:me@cooperlees.com). - -Multiple contributions by: - -- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) -- [Adam Johnson](mailto:me@adamj.eu) -- [Adam Williamson](mailto:adamw@happyassassin.net) -- [Alexander Huynh](mailto:github@grande.coffee) -- [Alex Vandiver](mailto:github@chmrr.net) -- [Allan Simon](mailto:allan.simon@supinfo.com) -- Anders-Petter Ljungquist -- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) -- [Andrew Zhou](mailto:andrewfzhou@gmail.com) -- [Andrey](mailto:dyuuus@yandex.ru) -- [Andy Freeland](mailto:andy@andyfreeland.net) -- [Anthony Sottile](mailto:asottile@umich.edu) -- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) -- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) -- [Artem Malyshev](mailto:proofit404@gmail.com) -- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) -- [Augie Fackler](mailto:raf@durin42.com) -- [Aviskar KC](mailto:aviskarkc10@gmail.com) -- Batuhan Taşkaya -- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) -- [Benjamin Woodruff](mailto:github@benjam.info) -- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) -- [Brandt Bucher](mailto:brandtbucher@gmail.com) -- [Brett Cannon](mailto:brett@python.org) -- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) -- [Bryan Forbes](mailto:bryan@reigndropsfall.net) -- [Calum Lind](mailto:calumlind@gmail.com) -- [Charles](mailto:peacech@gmail.com) -- Charles Reid -- [Christian Clauss](mailto:cclauss@bluewin.ch) -- [Christian Heimes](mailto:christian@python.org) -- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) -- [Chris Rose](mailto:offline@offby1.net) -- Codey Oxley -- [Cong](mailto:congusbongus@gmail.com) -- [Cooper Ry Lees](mailto:me@cooperlees.com) -- [Dan Davison](mailto:dandavison7@gmail.com) -- [Daniel Hahler](mailto:github@thequod.de) -- [Daniel M. 
Capella](mailto:polycitizen@gmail.com) -- Daniele Esposti -- [David Hotham](mailto:david.hotham@metaswitch.com) -- [David Lukes](mailto:dafydd.lukes@gmail.com) -- [David Szotten](mailto:davidszotten@gmail.com) -- [Denis Laxalde](mailto:denis@laxalde.org) -- [Douglas Thor](mailto:dthor@transphormusa.com) -- dylanjblack -- [Eli Treuherz](mailto:eli@treuherz.com) -- [Emil Hessman](mailto:emil@hessman.se) -- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) -- [Florent Thiery](mailto:fthiery@gmail.com) -- Francisco -- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) -- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) -- [Gregory P. Smith](mailto:greg@krypto.org) -- Gustavo Camargo -- hauntsaninja -- [Heaford](mailto:dan@heaford.com) -- [Hugo Barrera](mailto::hugo@barrera.io) -- Hugo van Kemenade -- [Hynek Schlawack](mailto:hs@ox.cx) -- [Ivan Katanić](mailto:ivan.katanic@gmail.com) -- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net) -- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com) -- [Jan Hnátek](mailto:jan.hnatek@gmail.com) -- [Jason Fried](mailto:me@jasonfried.info) -- [Jason Friedland](mailto:jason@friedland.id.au) -- [jgirardet](mailto:ijkl@netc.fr) -- Jim Brännlund -- [Jimmy Jia](mailto:tesrin@gmail.com) -- [Joe Antonakakis](mailto:jma353@cornell.edu) -- [Jon Dufresne](mailto:jon.dufresne@gmail.com) -- [Jonas Obrist](mailto:ojiidotch@gmail.com) -- [Jonty Wareing](mailto:jonty@jonty.co.uk) -- [Jose Nazario](mailto:jose.monkey.org@gmail.com) -- [Joseph Larson](mailto:larson.joseph@gmail.com) -- [Josh Bode](mailto:joshbode@fastmail.com) -- [Josh Holland](mailto:anowlcalledjosh@gmail.com) -- [José Padilla](mailto:jpadilla@webapplicate.com) -- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) -- [kaiix](mailto:kvn.hou@gmail.com) -- [Katie McLaughlin](mailto:katie@glasnt.com) -- Katrin Leinweber -- [Keith Smiley](mailto:keithbsmiley@gmail.com) -- [Kenyon Ralph](mailto:kenyon@kenyonralph.com) -- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com) -- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com) -- [Kyle Sunden](mailto:sunden@wisc.edu) -- Lawrence Chan -- [Linus Groh](mailto:mail@linusgroh.de) -- [Loren Carvalho](mailto:comradeloren@gmail.com) -- [Luka Sterbic](mailto:luka.sterbic@gmail.com) -- [LukasDrude](mailto:mail@lukas-drude.de) -- Mahmoud Hossam -- Mariatta -- [Matt VanEseltine](mailto:vaneseltine@gmail.com) -- [Matthew Clapp](mailto:itsayellow+dev@gmail.com) -- [Matthew Walster](mailto:matthew@walster.org) -- Max Smolens -- [Michael Aquilina](mailto:michaelaquilina@gmail.com) -- [Michael Flaxman](mailto:michael.flaxman@gmail.com) -- [Michael J. 
Sullivan](mailto:sully@msully.net) -- [Michael McClimon](mailto:michael@mcclimon.org) -- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) -- [Mike](mailto:roshi@fedoraproject.org) -- [mikehoyio](mailto:mikehoy@gmail.com) -- [Min ho Kim](mailto:minho42@gmail.com) -- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) -- MomIsBestFriend -- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) -- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) -- [Neraste](mailto:neraste.herr10@gmail.com) -- [Nikolaus Waxweiler](mailto:madigens@gmail.com) -- [Ofek Lev](mailto:ofekmeister@gmail.com) -- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) -- [otstrel](mailto:otstrel@gmail.com) -- [Pablo Galindo](mailto:Pablogsal@gmail.com) -- [Paul Ganssle](mailto:p.ganssle@gmail.com) -- [Paul Meinhardt](mailto:mnhrdt@gmail.com) -- [Peter Bengtsson](mailto:mail@peterbe.com) -- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) -- pmacosta -- [Quentin Pradet](mailto:quentin@pradet.me) -- [Ralf Schmitt](mailto:ralf@systemexit.de) -- [Ramón Valles](mailto:mroutis@protonmail.com) -- [Richard Fearn](mailto:richardfearn@gmail.com) -- Richard Si -- [Rishikesh Jha](mailto:rishijha424@gmail.com) -- [Rupert Bedford](mailto:rupert@rupertb.com) -- Russell Davis -- [Rémi Verschelde](mailto:rverschelde@gmail.com) -- [Sami Salonen](mailto:sakki@iki.fi) -- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) -- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) -- Sergi -- [Scott Stevenson](mailto:scott@stevenson.io) -- Shantanu -- [shaoran](mailto:shaoran@sakuranohana.org) -- [Shinya Fujino](mailto:shf0811@gmail.com) -- springstan -- [Stavros Korokithakis](mailto:hi@stavros.io) -- [Stephen Rosen](mailto:sirosen@globus.org) -- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com) -- [Sunil Kapil](mailto:snlkapil@gmail.com) -- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) -- [Tal Amuyal](mailto:TalAmuyal@gmail.com) -- [Terrance](mailto:git@terrance.allofti.me) -- [Thom Lu](mailto:thomas.c.lu@gmail.com) -- [Thomas Grainger](mailto:tagrain@gmail.com) -- [Tim Gates](mailto:tim.gates@iress.com) -- [Tim Swast](mailto:swast@google.com) -- [Timo](mailto:timo_tk@hotmail.com) -- Toby Fleming -- [Tom Christie](mailto:tom@tomchristie.com) -- [Tony Narlock](mailto:tony@git-pull.com) -- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) -- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) -- [Tzu-ping Chung](mailto:uranusjr@gmail.com) -- [Utsav Shah](mailto:ukshah2@illinois.edu) -- utsav-dbx -- vezeli -- [Ville Skyttä](mailto:ville.skytta@iki.fi) -- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) -- [Vlad Emelianov](mailto:volshebnyi@gmail.com) -- [williamfzc](mailto:178894043@qq.com) -- [wouter bolsterlee](mailto:wouter@bolsterl.ee) -- Yazdan -- [Yngve Høiseth](mailto:yngve@hoiseth.net) -- [Yurii Karabas](mailto:1998uriyyo@gmail.com) -- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) +``` diff --git a/docs/change_log.md b/docs/change_log.md index 658414bf967..e5f67e755d3 100644 --- a/docs/change_log.md +++ b/docs/change_log.md @@ -1,503 +1,3 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM CHANGES.md" +```{include} ../CHANGES.md -## Change Log - -### 20.8b1 - -#### _Packaging_ - -- explicitly depend on Click 7.1.2 or newer as `Black` no longer works with versions - older than 7.0 - -### 20.8b0 - -#### _Black_ - -- re-implemented support for explicit trailing commas: now it works consistently within - any bracket pair, including nested structures (#1288 and duplicates) - -- `Black` now reindents docstrings when 
reindenting code around it (#1053)
-
-- `Black` now shows colored diffs (#1266)
-
-- `Black` is now packaged using 'py3' tagged wheels (#1388)
-
-- `Black` now supports Python 3.8 code, e.g. star expressions in return statements (#1121)
-
-- `Black` no longer normalizes capital R-string prefixes as those have a community-accepted meaning (#1244)
-
-- `Black` now uses exit code 2 when the specified configuration file doesn't exist (#1361)
-
-- `Black` now works on AWS Lambda (#1141)
-
-- added `--force-exclude` argument (#1032)
-
-- removed deprecated `--py36` option (#1236)
-
-- fixed `--diff` output when EOF is encountered (#526)
-
-- fixed `# fmt: off` handling around decorators (#560)
-
-- fixed unstable formatting with some `# type: ignore` comments (#1113)
-
-- fixed invalid removal of organizing brackets followed by indexing (#1575)
-
-- introduced `black-primer`, a CI tool that allows us to run regression tests against existing open source users of Black (#1402)
-
-- introduced property-based fuzzing to our test suite based on Hypothesis and Hypothesmith (#1566)
-
-- implemented experimental, disabled-by-default long string rewrapping (#1132), hidden under an `--experimental-string-processing` flag while it's being worked on; this is an undocumented and unsupported feature, you lose Internet points for depending on it (#1609)
-
-#### Vim plugin
-
-- prefer virtualenv packages over global packages (#1383)
-
-### 19.10b0
-
-- added support for PEP 572 assignment expressions (#711)
-
-- added support for PEP 570 positional-only arguments (#943)
-
-- added support for async generators (#593)
-
-- added support for pre-splitting collections by putting an explicit trailing comma inside (#826)
-
-- added `black -c` as a way to format code passed from the command line (#761)
-
-- `--safe` now works with Python 2 code (#840)
-
-- fixed grammar selection for Python 2-specific code (#765)
-
-- fixed feature detection for trailing commas in function definitions and call sites (#763)
-
-- `# fmt: off`/`# fmt: on` comment pairs placed multiple times within the same block of code now behave correctly (#1005)
-
-- _Black_ no longer crashes on Windows machines with more than 61 cores (#838)
-
-- _Black_ no longer crashes on standalone comments prepended with a backslash (#767)
-
-- _Black_ no longer crashes on `from` ... `import` blocks with comments (#829)
-
-- _Black_ no longer crashes on Python 3.7 on some platform configurations (#494)
-
-- _Black_ no longer fails on comments in from-imports (#671)
-
-- _Black_ no longer fails when the file starts with a backslash (#922)
-
-- _Black_ no longer merges regular comments with type comments (#1027)
-
-- _Black_ no longer splits long lines that contain type comments (#997)
-
-- removed unnecessary parentheses around `yield` expressions (#834)
-
-- added parentheses around long tuples in unpacking assignments (#832)
-
-- added parentheses around complex powers when they are prefixed by a unary operator (#646)
-
-- fixed bug that led _Black_ to format some code with a line length target of 1 (#762)
-
-- _Black_ no longer introduces quotes in f-string subexpressions on string boundaries (#863)
-
-- if _Black_ puts parentheses around a single expression, it moves comments to the wrapped expression instead of after the brackets (#872)
-
-- `blackd` now returns the version of _Black_ in the response headers (#1013)
-
-- `blackd` can now output the diff of formats on source code when the `X-Diff` header is provided (#969)
-
-### 19.3b0
-
-- new option `--target-version` to control which Python versions _Black_-formatted code should target (#618)
-
-- deprecated `--py36` (use `--target-version=py36` instead) (#724)
-
-- _Black_ no longer normalizes numeric literals to include `_` separators (#696)
-
-- long `del` statements are now split into multiple lines (#698)
-
-- type comments are no longer mangled in function signatures
-
-- improved performance of formatting deeply nested data structures (#509)
-
-- _Black_ now properly formats multiple files in parallel on Windows (#632)
-
-- _Black_ now creates cache files atomically which allows it to be used in parallel pipelines (like `xargs -P8`) (#673)
-
-- _Black_ now correctly indents comments in files that were previously formatted with tabs (#262)
-
-- `blackd` now supports CORS (#622)
-
-### 18.9b0
-
-- numeric literals are now formatted by _Black_ (#452, #461, #464, #469):
-
-  - numeric literals are normalized to include `_` separators on Python 3.6+ code
-
-  - added `--skip-numeric-underscore-normalization` to disable the above behavior and leave numeric underscores as they were in the input
-
-  - code with `_` in numeric literals is recognized as Python 3.6+
-
-  - most letters in numeric literals are lowercased (e.g., in `1e10`, `0x01`)
-
-  - hexadecimal digits are always uppercased (e.g. `0xBADC0DE`)
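To make the 18.9b0 numeric rules concrete, here is a small before/after sketch (the literals are invented for illustration; note that the `_`-separator normalization was later reverted in 19.3b0, listed above):

```python
# Before (as a user might write it):
mask = 0XDEADBEEF
growth = 10E10
budget = 10000000

# After Black 18.9b0 (illustrative):
mask = 0xDEADBEEF  # prefix lowercased, hex digits uppercased
growth = 10e10  # exponent letter lowercased
budget = 10_000_000  # underscore separators added on Python 3.6+ code
```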
-
-- added `blackd`, see [its documentation](#blackd) for more info (#349)
-
-- adjacent string literals are now correctly split into multiple lines (#463)
-
-- trailing comma is now added to single imports that don't fit on a line (#250)
-
-- cache is now populated when `--check` is successful for a file, which speeds up consecutive checks of properly formatted unmodified files (#448)
-
-- whitespace at the beginning of the file is now removed (#399)
-
-- fixed mangling [pweave](http://mpastell.com/pweave/) and [Spyder IDE](https://www.spyder-ide.org/) special comments (#532)
-
-- fixed unstable formatting when unpacking big tuples (#267)
-
-- fixed parsing of `__future__` imports with renames (#389)
-
-- fixed scope of `# fmt: off` when directly preceding `yield` and other nodes (#385)
-
-- fixed formatting of lambda expressions with default arguments (#468)
-
-- fixed `async for` statements: _Black_ no longer breaks them into separate lines (#372)
-
-- note: the Vim plugin stopped registering `,=` as a default chord as it turned out to be a bad idea (#415)
-
-### 18.6b4
-
-- hotfix: don't freeze when multiple comments directly precede `# fmt: off` (#371)
-
-### 18.6b3
-
-- typing stub files (`.pyi`) now have blank lines added after constants (#340)
-
-- `# fmt: off` and `# fmt: on` are now much more dependable:
-
-  - they now work also within bracket pairs (#329)
-
-  - they now correctly work across function/class boundaries (#335)
-
-  - they now work when an indentation block starts with empty lines or misaligned comments (#334)
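As a quick illustration of the feature these entries describe (example code invented here, not taken from the patch), a region fenced by the comment pair is left untouched while everything outside it is formatted as usual:

```python
# fmt: off
identity_matrix = [
    1, 0, 0,
    0, 1, 0,
    0, 0, 1,
]
# fmt: on

# Outside the markers, Black reformats normally.
options = {"check": True, "diff": False}
```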
-
-- made Click not fail on invalid environments; note that Click is right but the likelihood we'll need to access non-ASCII file paths when dealing with Python source code is low (#277)
-
-- fixed improper formatting of f-strings with quotes inside interpolated expressions (#322)
-
-- fixed unnecessary slowdown when long list literals were found in a file
-
-- fixed unnecessary slowdown on AST nodes with very many siblings
-
-- fixed cannibalizing backslashes during string normalization
-
-- fixed a crash due to symbolic links pointing outside of the project directory (#338)
-
-### 18.6b2
-
-- added `--config` (#65)
-
-- added `-h` equivalent to `--help` (#316)
-
-- fixed improper unmodified file caching when `-S` was used
-
-- fixed extra space in string unpacking (#305)
-
-- fixed formatting of empty triple quoted strings (#313)
-
-- fixed unnecessary slowdown in comment placement calculation on lines without comments
-
-### 18.6b1
-
-- hotfix: don't output human-facing information on stdout (#299)
-
-- hotfix: don't output cake emoji on non-zero return code (#300)
-
-### 18.6b0
-
-- added `--include` and `--exclude` (#270)
-
-- added `--skip-string-normalization` (#118)
-
-- added `--verbose` (#283)
-
-- the header output in `--diff` now actually conforms to the unified diff spec
-
-- fixed long trivial assignments being wrapped in unnecessary parentheses (#273)
-
-- fixed unnecessary parentheses when a line contained multiline strings (#232)
-
-- fixed stdin handling not working correctly if an old version of Click was used (#276)
-
-- _Black_ now preserves line endings when formatting a file in place (#258)
-
-### 18.5b1
-
-- added `--pyi` (#249)
-
-- added `--py36` (#249)
-
-- Python grammar pickle caches are stored with the formatting caches, making _Black_ work in environments where site-packages is not user-writable (#192)
-
-- _Black_ now enforces a PEP 257 empty line after a class-level docstring (and/or fields) and the first method
-
-- fixed invalid code produced when standalone comments were present in a trailer that was omitted from line splitting on a large expression (#237)
-
-- fixed optional parentheses being removed within `# fmt: off` sections (#224)
-
-- fixed invalid code produced when stars in very long imports were incorrectly wrapped in optional parentheses (#234)
-
-- fixed unstable formatting when inline comments were moved around in a trailer that was omitted from line splitting on a large expression (#238)
-
-- fixed extra empty line between a class declaration and the first method if no class docstring or fields are present (#219)
-
-- fixed extra empty line between a function signature and an inner function or inner class (#196)
-
-### 18.5b0
-
-- call chains are now formatted according to the [fluent interfaces](https://en.wikipedia.org/wiki/Fluent_interface) style (#67)
-
-- data structure literals (tuples, lists, dictionaries, and sets) are now also always exploded like imports when they don't fit in a single line (#152)
-
-- slices are now formatted according to PEP 8 (#178)
-
-- parentheses are now also managed automatically on the right-hand side of assignments and return statements (#140)
-
-- math operators now use their respective priorities for delimiting multiline expressions (#148)
-
-- optional parentheses are now omitted on expressions that start or end with a bracket and only contain a single operator (#177)
-
-- empty parentheses in a class definition are now removed (#145, #180)
-
-- string prefixes are now standardized to lowercase and `u` is removed on Python 3.6+ only code and Python 2.7+ code with the `unicode_literals` future import (#188, #198, #199)
-
-- typing stub files (`.pyi`) are now formatted in a style that is consistent with PEP 484 (#207, #210)
-
-- progress when reformatting many files is now reported incrementally
-
-- fixed trailers (content with brackets) being unnecessarily exploded into their own lines after a dedented closing bracket (#119)
-
-- fixed an invalid trailing comma sometimes left in imports (#185)
-
-- fixed non-deterministic formatting when multiple pairs of removable parentheses were used (#183)
-
-- fixed multiline strings being unnecessarily wrapped in optional parentheses in long assignments (#215)
-
-- fixed not splitting long from-imports with only a single name
-
-- fixed Python 3.6+ file discovery by also looking at function calls with unpacking. This fixed non-deterministic formatting if trailing commas were used both in function signatures with stars and function calls with stars but the former would be reformatted to a single line.
-
-- fixed crash on dealing with optional parentheses (#193)
-
-- fixed "is", "is not", "in", and "not in" not being considered operators for splitting purposes
-
-- fixed crash when dead symlinks were encountered
-
-### 18.4a4
-
-- don't populate the cache on `--check` (#175)
-
-### 18.4a3
-
-- added a "cache"; files already reformatted that haven't changed on disk won't be reformatted again (#109)
-
-- `--check` and `--diff` are no longer mutually exclusive (#149)
-
-- generalized star expression handling, including double stars; this fixes multiplication making expressions "unsafe" for trailing commas (#132)
-
-- _Black_ no longer enforces putting empty lines behind control flow statements (#90)
-
-- _Black_ now splits imports like "Mode 3 + trailing comma" of isort (#127)
-
-- fixed comment indentation when a standalone comment closes a block (#16, #32)
-
-- fixed standalone comments receiving extra empty lines if immediately preceding a class, def, or decorator (#56, #154)
-
-- fixed `--diff` not showing entire path (#130)
-
-- fixed parsing of complex expressions after star and double stars in function calls (#2)
-
-- fixed invalid splitting on comma in lambda arguments (#133)
-
-- fixed missing splits of ternary expressions (#141)
-
-### 18.4a2
-
-- fixed parsing of unaligned standalone comments (#99, #112)
-
-- fixed placement of dictionary unpacking inside dictionary literals (#111)
-
-- Vim plugin now works on Windows, too
-
-- fixed unstable formatting when encountering unnecessarily escaped quotes in a string (#120)
-
-### 18.4a1
-
-- added `--quiet` (#78)
-
-- added automatic parentheses management (#4)
-
-- added [pre-commit](https://pre-commit.com) integration (#103, #104)
-
-- fixed reporting on `--check` with multiple files (#101, #102)
-
-- fixed removing backslash escapes from raw strings (#100, #105)
-
-### 18.4a0
-
-- added `--diff` (#87)
-
-- add line breaks before all delimiters, except in cases like commas, to better comply with PEP 8 (#73)
-
-- standardize string literals to use double quotes (almost) everywhere (#75)
-
-- fixed handling of standalone comments within nested bracketed expressions; _Black_ will no longer produce super long lines or put all standalone comments at the end of the expression (#22)
-
-- fixed 18.3a4 regression: don't crash and burn on empty lines with trailing whitespace (#80)
-
-- fixed 18.3a4 regression: `# yapf: disable` usage as trailing comment would cause _Black_ to not emit the rest of the file (#95)
-
-- when CTRL+C is pressed while formatting many files, _Black_ no longer freaks out with a flurry of asyncio-related exceptions
-
-- only allow up to two empty lines on module level and only single empty lines within functions (#74)
-
-### 18.3a4
-
-- `# fmt: off` and `# fmt: on` are implemented (#5)
-
-- automatic detection of deprecated Python 2 forms of print statements and exec statements in the formatted file (#49)
-
-- use proper spaces for complex expressions in default values of typed function arguments (#60)
-
-- only return exit code 1 when --check is used (#50)
-
-- don't remove single trailing commas from square bracket indexing (#59)
-
-- don't omit whitespace if the previous factor leaf wasn't a math operator (#55)
-
-- omit extra space in kwarg unpacking if it's the first argument (#46)
-
-- omit extra space in [Sphinx auto-attribute comments](http://www.sphinx-doc.org/en/stable/ext/autodoc.html#directive-autoattribute) (#68)
-
-### 18.3a3
-
-- don't remove single empty lines outside of bracketed expressions (#19)
-
-- added ability to pipe formatting from stdin to stdout (#25)
-
-- restored ability to format code with legacy usage of `async` as a name (#20, #42)
-
-- even better handling of numpy-style array indexing (#33, again)
-
-### 18.3a2
-
-- changed positioning of binary operators to occur at beginning of lines instead of at the end, following [a recent change to PEP 8](https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b) (#21)
-
-- ignore empty bracket pairs while splitting. This avoids very weird-looking formatting (#34, #35)
-
-- remove a trailing comma if there is a single argument to a call
-
-- if top level functions were separated by a comment, don't put four empty lines after the upper function
-
-- fixed unstable formatting of newlines with imports
-
-- fixed unintentional folding of post scriptum standalone comments into last statement if it was a simple statement (#18, #28)
-
-- fixed missing space in numpy-style array indexing (#33)
-
-- fixed spurious space after star-based unary expressions (#31)
-
-### 18.3a1
-
-- added `--check`
-
-- only put trailing commas in function signatures and calls if it's safe to do so. If the file is Python 3.6+ it's always safe, otherwise only safe if there are no `*args` or `**kwargs` used in the signature or call. (#8)
-
-- fixed invalid spacing of dots in relative imports (#6, #13)
-
-- fixed invalid splitting after comma on unpacked variables in for-loops (#23)
-
-- fixed spurious space in parenthesized set expressions (#7)
-
-- fixed spurious space after opening parentheses and in default arguments (#14, #17)
-
-- fixed spurious space after unary operators when the operand was a complex expression (#15)
-
-### 18.3a0
-
-- first published version, Happy 🍰 Day 2018!
- -- alpha quality - -- date-versioned (see: https://calver.org/) +``` diff --git a/docs/compatible_configs/flake8/.flake8 b/docs/compatible_configs/flake8/.flake8 new file mode 100644 index 00000000000..8dd399ab55b --- /dev/null +++ b/docs/compatible_configs/flake8/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 88 +extend-ignore = E203 diff --git a/docs/compatible_configs/flake8/setup.cfg b/docs/compatible_configs/flake8/setup.cfg new file mode 100644 index 00000000000..8dd399ab55b --- /dev/null +++ b/docs/compatible_configs/flake8/setup.cfg @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 88 +extend-ignore = E203 diff --git a/docs/compatible_configs/flake8/tox.ini b/docs/compatible_configs/flake8/tox.ini new file mode 100644 index 00000000000..8dd399ab55b --- /dev/null +++ b/docs/compatible_configs/flake8/tox.ini @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 88 +extend-ignore = E203 diff --git a/docs/compatible_configs/isort/.editorconfig b/docs/compatible_configs/isort/.editorconfig new file mode 100644 index 00000000000..edc849a54a6 --- /dev/null +++ b/docs/compatible_configs/isort/.editorconfig @@ -0,0 +1,2 @@ +[*.py] +profile = black diff --git a/docs/compatible_configs/isort/.isort.cfg b/docs/compatible_configs/isort/.isort.cfg new file mode 100644 index 00000000000..f238bf7ea13 --- /dev/null +++ b/docs/compatible_configs/isort/.isort.cfg @@ -0,0 +1,2 @@ +[settings] +profile = black diff --git a/docs/compatible_configs/isort/pyproject.toml b/docs/compatible_configs/isort/pyproject.toml new file mode 100644 index 00000000000..2dc02c8c50c --- /dev/null +++ b/docs/compatible_configs/isort/pyproject.toml @@ -0,0 +1,2 @@ +[tool.isort] +profile = 'black' diff --git a/docs/compatible_configs/isort/setup.cfg b/docs/compatible_configs/isort/setup.cfg new file mode 100644 index 00000000000..c76db01ff4e --- /dev/null +++ b/docs/compatible_configs/isort/setup.cfg @@ -0,0 +1,2 @@ +[isort] +profile = black diff --git a/docs/compatible_configs/pylint/pylintrc b/docs/compatible_configs/pylint/pylintrc new file mode 100644 index 00000000000..e863488dfbc --- /dev/null +++ b/docs/compatible_configs/pylint/pylintrc @@ -0,0 +1,2 @@ +[format] +max-line-length = 88 diff --git a/docs/compatible_configs/pylint/pyproject.toml b/docs/compatible_configs/pylint/pyproject.toml new file mode 100644 index 00000000000..ef51f98a966 --- /dev/null +++ b/docs/compatible_configs/pylint/pyproject.toml @@ -0,0 +1,2 @@ +[tool.pylint.format] +max-line-length = "88" diff --git a/docs/compatible_configs/pylint/setup.cfg b/docs/compatible_configs/pylint/setup.cfg new file mode 100644 index 00000000000..0b754cdc0f0 --- /dev/null +++ b/docs/compatible_configs/pylint/setup.cfg @@ -0,0 +1,2 @@ +[pylint] +max-line-length = 88 diff --git a/docs/conf.py b/docs/conf.py index 7381c9d6423..7fc4f8f589e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,61 +12,14 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -from pathlib import Path -import re + +import os import string -from typing import Callable, Dict, List, Optional, Pattern, Tuple, Set -from dataclasses import dataclass -import logging +from pathlib import Path from pkg_resources import get_distribution -logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO) - -LOG = logging.getLogger(__name__) - CURRENT_DIR = Path(__file__).parent -README = CURRENT_DIR / ".." 
/ "README.md" -REFERENCE_DIR = CURRENT_DIR / "reference" -STATIC_DIR = CURRENT_DIR / "_static" - - -@dataclass -class SrcRange: - """Tracks which part of a file to get a section's content. - - Data: - start_line: The line where the section starts (i.e. its sub-header) (inclusive). - end_line: The line where the section ends (usually next sub-header) (exclusive). - """ - - start_line: int - end_line: int - - -@dataclass -class DocSection: - """Tracks information about a section of documentation. - - Data: - name: The section's name. This will used to detect duplicate sections. - src: The filepath to get its contents. - processors: The processors to run before writing the section to CURRENT_DIR. - out_filename: The filename to use when writing the section to CURRENT_DIR. - src_range: The line range of SRC to gets its contents. - """ - - name: str - src: Path - src_range: SrcRange = SrcRange(0, 1_000_000) - out_filename: str = "" - processors: Tuple[Callable, ...] = () - - def get_out_filename(self) -> str: - if not self.out_filename: - return self.name + ".md" - else: - return self.out_filename def make_pypi_svg(version: str) -> None: @@ -78,131 +31,14 @@ def make_pypi_svg(version: str) -> None: f.write(svg) -def make_filename(line: str) -> str: - non_letters: Pattern = re.compile(r"[^a-z]+") - filename: str = line[3:].rstrip().lower() - filename = non_letters.sub("_", filename) - if filename.startswith("_"): - filename = filename[1:] - if filename.endswith("_"): - filename = filename[:-1] - return filename + ".md" - - -def get_contents(section: DocSection) -> str: - """Gets the contents for the DocSection.""" - contents: List[str] = [] - src: Path = section.src - start_line: int = section.src_range.start_line - end_line: int = section.src_range.end_line - with open(src, "r", encoding="utf-8") as f: - for lineno, line in enumerate(f, start=1): - if lineno >= start_line and lineno < end_line: - contents.append(line) - result = "".join(contents) - # Let's make Prettier happy with the amount of trailing newlines in the sections. - if result.endswith("\n\n"): - result = result[:-1] - if not result.endswith("\n"): - result = result + "\n" - return result - - -def get_sections_from_readme() -> List[DocSection]: - """Gets the sections from README so they can be processed by process_sections. - - It opens README and goes down line by line looking for sub-header lines which - denotes a section. Once it finds a sub-header line, it will create a DocSection - object with all of the information currently available. Then on every line, it will - track the ending line index of the section. And it repeats this for every sub-header - line it finds. - """ - sections: List[DocSection] = [] - section: Optional[DocSection] = None - with open(README, "r", encoding="utf-8") as f: - for lineno, line in enumerate(f, start=1): - if line.startswith("## "): - filename = make_filename(line) - section_name = filename[:-3] - section = DocSection( - name=str(section_name), - src=README, - src_range=SrcRange(lineno, lineno), - out_filename=filename, - processors=(fix_headers,), - ) - sections.append(section) - if section is not None: - section.src_range.end_line += 1 - return sections - - -def fix_headers(contents: str) -> str: - """Fixes the headers of sections copied from README. - - Removes one octothorpe (#) from all headers since the contents are no longer nested - in a root document (i.e. the README). 
- """ - lines: List[str] = contents.splitlines() - fixed_contents: List[str] = [] - for line in lines: - if line.startswith("##"): - line = line[1:] - fixed_contents.append(line + "\n") # splitlines strips the leading newlines - return "".join(fixed_contents) - - -def process_sections( - custom_sections: List[DocSection], readme_sections: List[DocSection] -) -> None: - """Reads, processes, and writes sections to CURRENT_DIR. - - For each section, the contents will be fetched, processed by processors - required by the section, and written to CURRENT_DIR. If it encounters duplicate - sections (i.e. shares the same name attribute), it will skip processing the - duplicates. - - It processes custom sections before the README generated sections so sections in the - README can be overwritten with custom options. - """ - processed_sections: Dict[str, DocSection] = {} - modified_files: Set[Path] = set() - sections: List[DocSection] = custom_sections - sections.extend(readme_sections) - for section in sections: - if section.name in processed_sections: - LOG.warning( - f"Skipping '{section.name}' from '{section.src}' as it is a duplicate" - f" of a custom section from '{processed_sections[section.name].src}'" - ) - continue - - LOG.info(f"Processing '{section.name}' from '{section.src}'") - target_path: Path = CURRENT_DIR / section.get_out_filename() - if target_path in modified_files: - LOG.warning( - f"{target_path} has been already written to, its contents will be" - " OVERWRITTEN and notices will be duplicated" - ) - contents: str = get_contents(section) - - # processors goes here - if fix_headers in section.processors: - contents = fix_headers(contents) - - with open(target_path, "w", encoding="utf-8") as f: - if section.src.suffix == ".md" and section.src != target_path: - rel = section.src.resolve().relative_to(CURRENT_DIR.parent) - f.write(f'[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM {rel}"\n\n') - f.write(contents) - processed_sections[section.name] = section - modified_files.add(target_path) - +# Necessary so Click doesn't hit an encode error when called by +# sphinxcontrib-programoutput on Windows. +os.putenv("pythonioencoding", "utf-8") # -- Project information ----------------------------------------------------- project = "Black" -copyright = "2020, Łukasz Langa and contributors to Black" +copyright = "2018-Present, Łukasz Langa and contributors to Black" author = "Łukasz Langa and contributors to Black" # Autopopulate version @@ -213,39 +49,13 @@ def process_sections( for sp in "abcfr": version = version.split(sp)[0] -custom_sections = [ - DocSection("the_black_code_style", CURRENT_DIR / "the_black_code_style.md"), - DocSection("editor_integration", CURRENT_DIR / "editor_integration.md"), - DocSection("blackd", CURRENT_DIR / "blackd.md"), - DocSection("black_primer", CURRENT_DIR / "black_primer.md"), - DocSection("contributing_to_black", CURRENT_DIR / ".." / "CONTRIBUTING.md"), - DocSection("change_log", CURRENT_DIR / ".." / "CHANGES.md"), -] - -# Sphinx complains when there is a source file that isn't referenced in any of the docs. -# Since some sections autogenerated from the README are unused warnings will appear. -# -# Sections must be listed to what their name is when passed through make_filename(). 
-blocklisted_sections_from_readme = {
-    "license",
-    "pragmatism",
-    "testimonials",
-    "used_by",
-}
-
 make_pypi_svg(release)
-readme_sections = get_sections_from_readme()
-readme_sections = [
-    x for x in readme_sections if x.name not in blocklisted_sections_from_readme
-]
-
-process_sections(custom_sections, readme_sections)
 
 # -- General configuration ---------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "3.0"
+needs_sphinx = "4.4"
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -254,11 +64,13 @@ def process_sections(
     "sphinx.ext.autodoc",
     "sphinx.ext.intersphinx",
     "sphinx.ext.napoleon",
-    "recommonmark",
+    "myst_parser",
+    "sphinxcontrib.programoutput",
+    "sphinx_copybutton",
 ]
 
 # If you need extensions of a certain version or higher, list them here.
-needs_extensions = {"recommonmark": "0.5"}
+needs_extensions = {"myst_parser": "0.13.7"}
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ["_templates"]
@@ -275,7 +87,7 @@ def process_sections(
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = "en"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
@@ -286,37 +98,29 @@ def process_sections(
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = "sphinx"
 
+# We need headers to be linkable, so we ask MyST-Parser to autogenerate anchor IDs for
+# headers up to and including level 3.
+myst_heading_anchors = 3
+
+# Prettier supports formatting some MyST syntax but not all, so let's disable the
+# unsupported yet still enabled-by-default ones.
+myst_disable_syntax = [
+    "colon_fence",
+    "myst_block_break",
+    "myst_line_comment",
+    "math_block",
+]
+
+# Optional MyST Syntaxes
+myst_enable_extensions = []
 
 # -- Options for HTML output -------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 #
-html_theme = "alabaster"
-
-html_sidebars = {
-    "**": [
-        "about.html",
-        "navigation.html",
-        "relations.html",
-        "sourcelink.html",
-        "searchbox.html",
-    ]
-}
-
-html_theme_options = {
-    "show_related": False,
-    "description": "“Any color you like.”",
-    "github_button": True,
-    "github_user": "psf",
-    "github_repo": "black",
-    "github_type": "star",
-    "show_powered_by": True,
-    "fixed_sidebar": True,
-    "logo": "logo2.png",
-    "travis_button": True,
-}
-
+html_theme = "furo"
+html_logo = "_static/logo2-readme.png"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -342,21 +146,6 @@ def process_sections(
 
 # -- Options for LaTeX output ------------------------------------------------
 
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    #
-    # 'papersize': 'letterpaper',
-    # The font size ('10pt', '11pt' or '12pt').
-    #
-    # 'pointsize': '10pt',
-    # Additional stuff for the LaTeX preamble.
-    #
-    # 'preamble': '',
-    # Latex figure (float) alignment
-    #
-    # 'figure_align': 'htbp',
-}
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 # author, documentclass [howto, manual, or own class]).
diff --git a/docs/contributing/gauging_changes.md b/docs/contributing/gauging_changes.md
new file mode 100644
index 00000000000..8562a83ed0c
--- /dev/null
+++ b/docs/contributing/gauging_changes.md
@@ -0,0 +1,58 @@
+# Gauging changes
+
+A lot of the time, your change will affect formatting and/or performance. Quantifying these changes is hard, so we have tooling to help make it easier.
+
+It's recommended you evaluate the quantifiable changes your _Black_ formatting modification causes before submitting a PR. Think about whether the change seems disruptive enough to cause frustration to projects that are already "black formatted".
+
+## diff-shades
+
+diff-shades is a tool that runs _Black_ across a list of open-source projects, recording the results. The main highlight feature of diff-shades is being able to compare two revisions of _Black_. This is incredibly useful as it allows us to see exactly what changes would occur if, say, a certain PR were merged.
+
+For more information, please see the [diff-shades documentation][diff-shades].
+
+### CI integration
+
+diff-shades is also the tool behind the "diff-shades results comparing ..." / "diff-shades reports zero changes ..." comments on PRs. The project has a GitHub Actions workflow that analyzes and compares two revisions of _Black_ according to these rules:
+
+|                       | Baseline revision       | Target revision              |
+| --------------------- | ----------------------- | ---------------------------- |
+| On PRs                | latest commit on `main` | PR commit with `main` merged |
+| On pushes (main only) | latest PyPI version     | the pushed commit            |
+
+For pushes to main, there's only one analysis job named `preview-changes` where the preview style is used for all projects.
+
+PRs get one more analysis job: `assert-no-changes`. It's similar to `preview-changes` but runs with the stable code style. It will fail if changes were made. This makes sure code won't be reformatted again and again within the same year, in accordance with Black's stability policy.
+
+Additionally for PRs, a PR comment will be posted embedding a summary of the preview changes and links to further information. If there's a pre-existing diff-shades comment, it'll be updated the next time the workflow is triggered on the same PR.
+
+```{note}
+The `preview-changes` job will only fail intentionally if a file fails to format while being analyzed. Otherwise a failure indicates a bug in the workflow.
+```
+
+The workflow uploads several artifacts upon completion:
+
+- The raw analyses (.json)
+- HTML diffs (.html)
+- `.pr-comment.json` (if triggered by a PR)
+
+The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be downloaded locally. The HTML diffs come in handy for push-based runs, where there's no PR to post a comment on. And the raw analyses exist in case you want to do further analysis using the collected data locally.
+
+[diff-shades]: https://github.com/ichard26/diff-shades#readme
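For a rough feel of the comparison workflow described above, here is a sketch only — the `analyze`/`compare` subcommand names come from the diff-shades README, and the exact flags and install method may differ, so treat every detail as an assumption to verify against that documentation:

```sh
# Install diff-shades (assumed to be installable from PyPI or its GitHub repo).
python -m pip install diff-shades

# Record how one revision of Black formats the project corpus...
diff-shades analyze baseline.json

# ...then check out the revision under review, record again, and compare.
diff-shades analyze target.json
diff-shades compare baseline.json target.json
```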
diff --git a/docs/contributing/index.md b/docs/contributing/index.md
new file mode 100644
index 00000000000..f56e57c9e90
--- /dev/null
+++ b/docs/contributing/index.md
@@ -0,0 +1,49 @@
+# Contributing
+
+```{toctree}
+---
+hidden:
+---
+
+the_basics
+gauging_changes
+issue_triage
+release_process
+reference/reference_summary
+```
+
+Welcome! Happy to see you willing to make the project better. Have you read the entire [user documentation](https://black.readthedocs.io/en/latest/) yet?
+
+```{rubric} Bird's eye view
+
+```
+
+In terms of inspiration, _Black_ is about as configurable as _gofmt_ (which is to say, not very). This is deliberate. _Black_ aims to provide a consistent style and take away opportunities for arguing about style.
+
+Bug reports and fixes are always welcome! Please follow the [issue template on GitHub](https://github.com/psf/black/issues/new) for best results.
+
+Before you suggest a new feature or configuration knob, ask yourself why you want it. If it enables better integration with some workflow, fixes an inconsistency, speeds things up, and so on - go for it! On the other hand, if your answer is "because I don't like a particular formatting" then you're not ready to embrace _Black_ yet. Such changes are unlikely to get accepted. You can still try but prepare to be disappointed.
+
+```{rubric} Contents
+
+```
+
+This section covers the following topics:
+
+- {doc}`the_basics`
+- {doc}`gauging_changes`
+- {doc}`release_process`
+- {doc}`reference/reference_summary`
+
+For an overview of contributing to _Black_, please check out {doc}`the_basics`.
+
+If you need a reference of the functions, classes, etc. available to you while developing _Black_, there's the {doc}`reference/reference_summary` docs.
diff --git a/docs/contributing/issue_triage.md b/docs/contributing/issue_triage.md
new file mode 100644
index 00000000000..9b987fb2425
--- /dev/null
+++ b/docs/contributing/issue_triage.md
@@ -0,0 +1,169 @@
+# Issue triage
+
+Currently, _Black_ uses the issue tracker for bugs, feature requests, proposed design modifications, and general user support. Each of these issues has to be triaged so it can eventually be resolved. This document outlines the triaging process and also the current guidelines and recommendations.
+
+```{tip}
+If you're looking for a way to contribute without submitting patches, this might be the area for you. Since _Black_ is a popular project, its issue tracker is quite busy and always needs more attention than is available. While triage isn't the most glamorous or technically challenging form of contribution, it's still important. For example, we would love to know whether that old bug report is still reproducible!
+
+You can easily get started by reading over this document and then responding to issues.
+
+If you contribute enough and have stayed for a long enough time, you may even be given Triage permissions!
+```
+
+## The basics
+
+_Black_ gets a whole bunch of different issues, ranging from bug reports to user support issues. To triage is to identify, organize, and kickstart the issue's journey through its lifecycle to resolution.
+
+More specifically, to triage an issue means to:
+
+- identify what type and categories the issue falls under
+- confirm bugs
+- ask questions / for further information if necessary
+- link related issues
+- provide the first initial feedback / support
+
+Note that triage is typically the first response to an issue, so don't fret if the issue doesn't make much progress after initial triage. The main goal of triaging is to prepare the issue for future, more specific development or discussion, so _eventually_ it will be resolved.
+
+The lifecycle of a bug report or user support issue typically goes something like this:
+
+1. _the issue is waiting for triage_
+2. **identified** - has been marked with a type label and other relevant labels; more details or a functional reproduction may still be needed (and therefore should be marked with `S: needs repro` or `S: awaiting response`)
+3. **confirmed** - the issue can be reproduced and necessary details have been provided
+4. **discussion** - initial triage has been done and now the general details on how the issue should be best resolved are being hashed out
+5. **awaiting fix** - no further discussion on the issue is necessary and a resolving PR is the next step
+6. **closed** - the issue has been resolved, reasons include:
+   - the issue couldn't be reproduced
+   - the issue has been fixed
+   - duplicate of another pre-existing issue or is invalid
+
+For enhancement, documentation, and design issues, the lifecycle looks very similar but the details are different:
+
+1. _the issue is waiting for triage_
+2. **identified** - has been marked with a type label and other relevant labels
+3. **discussion** - the merits of the suggested changes are currently being discussed, a PR would be acceptable but would be at significant risk of being rejected
+4. **accepted & awaiting PR** - it's been determined the suggested changes are OK and a PR would be welcomed (`S: accepted`)
+5. **closed** - the issue has been resolved, reasons include:
+   - the suggested changes were implemented
+   - it was rejected (due to technical concerns, ethos conflicts, etc.)
+   - duplicate of a pre-existing issue or is invalid
+
+**Note**: documentation issues don't use the `S: accepted` label currently since they're less likely to be rejected.
+
+## Labelling
+
+We use labels to organize, track progress, and help effectively divvy up work.
+
+Our labels are divided up into several groups identified by their prefix:
+
+- **T - Type**: the general flavor of issue / PR
+- **C - Category**: areas of concern, ranging from bug types to project maintenance
+- **F - Formatting Area**: like C but for formatting specifically
+- **S - Status**: what stage of resolution is this issue currently in?
+- **R - Resolution**: how / why was the issue / PR resolved?
+
+We also have a few standalone labels:
+
+- **`good first issue`**: issues that are beginner-friendly (and will show up in GitHub banners for first-time visitors to the repository)
+- **`help wanted`**: complex issues that need a fair bit of work to progress (these will also show up in various GitHub pages)
+- **`skip news`**: for PRs that are trivial and don't need a CHANGELOG entry (and skips the CHANGELOG entry check)
+
+```{note}
+We do use labels for PRs, in particular the `skip news` label, but we aren't that rigorous about it. Just follow your judgement on what labels make sense for the specific PR (if any even make sense).
+```
+
+## Projects
+
+For more general and broad goals we use projects to track work. Some may be long-term projects with no true end (e.g. the "Amazing documentation" project) while others may be more focused and have a definite end (like the "Getting to beta" project).
+
+```{note}
+To modify GitHub Projects you need the [Write repository permission level or higher](https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-permission-levels-for-an-organization#repository-access-for-each-permission-level).
+```
+
+## Closing issues
+
+Closing an issue signifies the issue has reached the end of its life, so closing issues should be taken with care.
The following is the general recommendation for each type of issue. Note that these are only guidelines and if your judgement says something else, it's totally cool to go with it instead.
+
+For most issues, closing the issue manually or automatically after a resolving PR is ideal. For bug reports specifically, if the bug has already been fixed, try to check in with the issue opener that their specific case has been resolved before closing. Note that we close issues as soon as they're fixed in the `main` branch. This doesn't necessarily mean they've been released yet.
+
+Design and enhancement issues should also be closed when it's clear the proposed change won't be implemented, whether that has been determined after a lot of discussion or just simply goes against _Black_'s ethos. If such an issue turns heated, closing and locking is acceptable if it's severe enough (although checking in with the core team is probably a good idea).
+
+User support issues are best closed by the author or when it's clear the issue has been resolved in some sort of manner.
+
+Duplicates and invalid issues should always be closed since they serve no purpose and add noise to an already busy issue tracker. Be careful, though, to make sure an issue is truly a duplicate and not just very similar before labelling and closing it as a duplicate.
+
+## Common reports
+
+Some issues are frequently opened, like issues about _Black_ formatted code causing E203 messages. Even though these issues are probably heavily duplicated, they still require triage, sucking up valuable time from other things (although they usually skip most of their lifecycle since they're closed on triage).
+
+Here are some of the most common issues along with pre-made responses you can use:
+
+### "The trailing comma isn't being removed by Black!"
+
+```text
+Black used to remove the trailing comma if the expression fits in a single line, but this was changed by #826 and #1288. Now a trailing comma tells Black to always explode the expression. This change was made mostly for the cases where you _know_ a collection or whatever will grow in the future. Having it always exploded as one element per line reduces diff noise when adding elements. Before the "magic trailing comma" feature, you couldn't anticipate a collection's growth reliably since collections that fitted in one line were ruthlessly collapsed regardless of your intentions. One of Black's goals is reducing diff noise, so this was a good pragmatic change.
+
+So no, this is not a bug, but an intended feature. Anyway, [here's the documentation](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#the-magic-trailing-comma) on the "magic trailing comma", including the ability to skip this functionality with the `--skip-magic-trailing-comma` option. Hopefully that helps solve the possible confusion.
+```
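A minimal illustration of the behaviour this response describes (the snippet is invented for clarity, not part of the canned text):

```python
# No magic trailing comma: Black collapses this to one line.
plugins = {"flake8": True, "isort": True}

# A trailing comma after the last element tells Black to keep the
# collection exploded, one element per line:
plugins = {
    "flake8": True,
    "isort": True,
}
```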
+
+### "Black formatted code is violating Flake8's E203!"
+
+```text
+Hi,
+
+This is expected behaviour, please see the documentation regarding this case (emphasis mine):
+
+> PEP 8 recommends to treat : in slices as a binary operator with the lowest priority, and to leave an equal amount of space on either side, **except if a parameter is omitted (e.g. ham[1 + 1 :])**. It recommends no spaces around : operators for “simple expressions” (ham[lower:upper]), and **extra space for “complex expressions” (ham[lower : upper + offset])**. **Black treats anything more than variable names as “complex” (ham[lower : upper + 1]).** It also states that for extended slices, both : operators have to have the same amount of spacing, except if a parameter is omitted (ham[1 + 1 ::]). Black enforces these rules consistently.
+
+> This behaviour may raise E203 whitespace before ':' warnings in style guide enforcement tools like Flake8. **Since E203 is not PEP 8 compliant, you should tell Flake8 to ignore these warnings**.
+
+https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#slices
+
+Have a good day!
+```
diff --git a/docs/reference/reference_classes.rst b/docs/contributing/reference/reference_classes.rst
similarity index 89%
rename from docs/reference/reference_classes.rst
rename to docs/contributing/reference/reference_classes.rst
index 8a2ded9dfec..fa765961e69 100644
--- a/docs/reference/reference_classes.rst
+++ b/docs/contributing/reference/reference_classes.rst
@@ -8,7 +8,7 @@
 :class:`BracketTracker`
 -----------------------
 
-.. autoclass:: black.BracketTracker
+.. autoclass:: black.brackets.BracketTracker
     :members:
 
 :class:`EmptyLineTracker`
@@ -34,7 +34,7 @@
 :class:`ProtoComment`
 ---------------------
 
-.. autoclass:: black.ProtoComment
+.. autoclass:: black.comments.ProtoComment
     :members:
 
 :class:`Report`
@@ -47,7 +47,7 @@
 :class:`Visitor`
 ----------------
 
-.. autoclass:: black.Visitor
+.. autoclass:: black.nodes.Visitor
    :show-inheritance:
    :members:
 
diff --git a/docs/reference/reference_exceptions.rst b/docs/contributing/reference/reference_exceptions.rst
similarity index 80%
rename from docs/reference/reference_exceptions.rst
rename to docs/contributing/reference/reference_exceptions.rst
index 517249fa4ce..aafe61e5017 100644
--- a/docs/reference/reference_exceptions.rst
+++ b/docs/contributing/reference/reference_exceptions.rst
@@ -5,7 +5,7 @@
 
 .. currentmodule:: black
 
-.. autoexception:: black.CannotSplit
+.. autoexception:: black.linegen.CannotSplit
 
 .. autoexception:: black.NothingChanged
 
diff --git a/docs/contributing/reference/reference_functions.rst b/docs/contributing/reference/reference_functions.rst
new file mode 100644
index 00000000000..3bda5de1774
--- /dev/null
+++ b/docs/contributing/reference/reference_functions.rst
@@ -0,0 +1,180 @@
+*Black* functions
+=================
+
+*Contents are subject to change.*
+
+.. currentmodule:: black
+
+Assertions and checks
+---------------------
+
+.. autofunction:: black.assert_equivalent
+
+.. autofunction:: black.assert_stable
+
+.. autofunction:: black.lines.can_be_split
+
+.. autofunction:: black.lines.can_omit_invisible_parens
+
+.. autofunction:: black.nodes.is_empty_tuple
+
+.. autofunction:: black.nodes.is_import
+
+.. autofunction:: black.lines.is_line_short_enough
+
+.. autofunction:: black.nodes.is_multiline_string
+
+.. autofunction:: black.nodes.is_one_tuple
+
+.. autofunction:: black.brackets.is_split_after_delimiter
+
+.. autofunction:: black.brackets.is_split_before_delimiter
+
+.. autofunction:: black.nodes.is_stub_body
+
+.. autofunction:: black.nodes.is_stub_suite
+
+.. autofunction:: black.nodes.is_vararg
+
+.. autofunction:: black.nodes.is_yield
+
+
+Formatting
+----------
+
+.. autofunction:: black.format_file_contents
+
+.. autofunction:: black.format_file_in_place
+
+.. autofunction:: black.format_stdin_to_stdout
+
+.. autofunction:: black.format_str
+
+.. autofunction:: black.reformat_one
+
+.. autofunction:: black.concurrency.schedule_formatting
+
+File operations
+---------------
+
+.. autofunction:: black.dump_to_file
+
+..
autofunction:: black.find_project_root + +.. autofunction:: black.gen_python_files + +.. autofunction:: black.read_pyproject_toml + +Parsing +------- + +.. autofunction:: black.decode_bytes + +.. autofunction:: black.parsing.lib2to3_parse + +.. autofunction:: black.parsing.lib2to3_unparse + +Split functions +--------------- + +.. autofunction:: black.linegen.bracket_split_build_line + +.. autofunction:: black.linegen.bracket_split_succeeded_or_raise + +.. autofunction:: black.linegen.delimiter_split + +.. autofunction:: black.linegen.left_hand_split + +.. autofunction:: black.linegen.right_hand_split + +.. autofunction:: black.linegen.standalone_comment_split + +.. autofunction:: black.linegen.transform_line + +Caching +------- + +.. autofunction:: black.cache.filter_cached + +.. autofunction:: black.cache.get_cache_dir + +.. autofunction:: black.cache.get_cache_file + +.. autofunction:: black.cache.get_cache_info + +.. autofunction:: black.cache.read_cache + +.. autofunction:: black.cache.write_cache + +Utilities +--------- + +.. py:function:: black.debug.DebugVisitor.show(code: str) -> None + + Pretty-print the lib2to3 AST of a given string of `code`. + +.. autofunction:: black.concurrency.cancel + +.. autofunction:: black.nodes.child_towards + +.. autofunction:: black.nodes.container_of + +.. autofunction:: black.comments.convert_one_fmt_off_pair + +.. autofunction:: black.diff + +.. autofunction:: black.linegen.dont_increase_indentation + +.. autofunction:: black.numerics.format_float_or_int_string + +.. autofunction:: black.nodes.ensure_visible + +.. autofunction:: black.lines.enumerate_reversed + +.. autofunction:: black.comments.generate_comments + +.. autofunction:: black.comments.generate_ignored_nodes + +.. autofunction:: black.comments.is_fmt_on + +.. autofunction:: black.comments.children_contains_fmt_on + +.. autofunction:: black.nodes.first_leaf_of + +.. autofunction:: black.linegen.generate_trailers_to_omit + +.. autofunction:: black.get_future_imports + +.. autofunction:: black.comments.list_comments + +.. autofunction:: black.comments.make_comment + +.. autofunction:: black.linegen.maybe_make_parens_invisible_in_atom + +.. autofunction:: black.brackets.max_delimiter_priority_in_atom + +.. autofunction:: black.normalize_fmt_off + +.. autofunction:: black.numerics.normalize_numeric_literal + +.. autofunction:: black.linegen.normalize_prefix + +.. autofunction:: black.strings.normalize_string_prefix + +.. autofunction:: black.strings.normalize_string_quotes + +.. autofunction:: black.linegen.normalize_invisible_parens + +.. autofunction:: black.patch_click + +.. autofunction:: black.nodes.preceding_leaf + +.. autofunction:: black.re_compile_maybe_verbose + +.. autofunction:: black.linegen.should_split_line + +.. autofunction:: black.concurrency.shutdown + +.. autofunction:: black.strings.sub_twice + +.. autofunction:: black.nodes.whitespace diff --git a/docs/reference/reference_summary.rst b/docs/contributing/reference/reference_summary.rst similarity index 51% rename from docs/reference/reference_summary.rst rename to docs/contributing/reference/reference_summary.rst index 780a4b46ed8..f6ff4681557 100644 --- a/docs/reference/reference_summary.rst +++ b/docs/contributing/reference/reference_summary.rst @@ -1,6 +1,11 @@ Developer reference =================== +.. note:: + + The documentation here is quite outdated and has been neglected. Many objects worthy + of inclusion aren't documented. Contributions are appreciated! + *Contents are subject to change.* .. 
toctree::
diff --git a/docs/contributing/release_process.md b/docs/contributing/release_process.md
new file mode 100644
index 00000000000..be9b08a6c82
--- /dev/null
+++ b/docs/contributing/release_process.md
@@ -0,0 +1,212 @@
+# Release process
+
+_Black_ has had a lot of work put into standardizing and automating its release
+process. This document sets out to explain how everything works and how to release
+_Black_ using said automation.
+
+## Release cadence
+
+**We aim to release whatever is on `main` every 1-2 months.** This ensures merged
+improvements and bugfixes are shipped to users reasonably quickly, while not massively
+fracturing the user base with too many versions. This also keeps the workload on
+maintainers consistent and predictable.
+
+If there's not much new on `main` to justify a release, it's acceptable to skip a
+month's release. Ideally, January releases should not be skipped because, as per our
+[stability policy](labels/stability-policy), the first release in a new calendar year
+may make changes to the _stable_ style. While the policy applies to the first release
+(instead of only January releases), confining changes to the stable style to January
+will keep things predictable (and nicer) for users.
+
+Unless there is a serious regression or bug that requires immediate patching, **there
+should not be more than one release per month**. While version numbers are cheap,
+releases require a maintainer both to commit to doing the actual cutting of the
+release and to be able to deal with the potential fallout post-release. Releasing more
+frequently than monthly nets rapidly diminishing returns.
+
+## Cutting a release
+
+**You must have `write` permissions for the _Black_ repository to cut a release.**
+
+The 10,000-foot view of the release process is that you prepare a release PR and then
+publish a [GitHub Release]. This triggers [release automation](#release-workflows) that
+builds all release artifacts and publishes them to the various platforms we publish to.
+
+To cut a release:
+
+1. Determine the release's version number
+   - **_Black_ follows the [CalVer] versioning standard using the `YY.M.N` format**
+     - So unless there already has been a release during this month, `N` should be `0`
+   - Example: the first release in January, 2022 → `22.1.0`
+1. File a PR editing `CHANGES.md` and the docs to version the latest changes
+   1. Replace the `## Unreleased` header with the version number
+   1. Remove any empty sections for the current release
+   1. (_optional_) Read through and copy-edit the changelog (e.g. by moving entries,
+      fixing typos, or rephrasing entries)
+   1. Add a new empty template for the next release above
+      ([template below](#changelog-template))
+   1. Update references to the latest version in
+      {doc}`/integrations/source_version_control` and
+      {doc}`/usage_and_configuration/the_basics`
+   - Example PR: [GH-3139]
+1. Once the release PR is merged, wait until all CI passes
+   - If CI does not pass, **stop** and investigate the failure(s) as generally we'd want
+     to fix failing CI before cutting a release
+1. [Draft a new GitHub Release][new-release]
+   1. Click `Choose a tag` and type in the version number, then select the
+      `Create new tag: YY.M.N on publish` option that appears
+   1. Verify that the new tag targets the `main` branch
+   1. You can leave the release title blank; GitHub will default to the tag name
+   1. Copy and paste the _raw changelog Markdown_ for the current release into the
      description box
+1. 
Publish the GitHub Release, triggering [release automation](#release-workflows) that + will handle the rest +1. At this point, you're basically done. It's good practice to go and [watch and verify + that all the release workflows pass][black-actions], although you will receive a + GitHub notification should something fail. + - If something fails, don't panic. Please go read the respective workflow's logs and + configuration file to reverse-engineer your way to a fix/solution. + +Congratulations! You've successfully cut a new release of _Black_. Go and stand up and +take a break, you deserve it. + +```{important} +Once the release artifacts reach PyPI, you may see new issues being filed indicating +regressions. While regressions are not great, they don't automatically mean a hotfix +release is warranted. Unless the regressions are serious and impact many users, a hotfix +release is probably unnecessary. + +In the end, use your best judgement and ask other maintainers for their thoughts. +``` + +### Changelog template + +Use the following template for a clean changelog after the release: + +``` +## Unreleased + +### Highlights + + + +### Stable style + + + +### Preview style + + + +### Configuration + + + +### Packaging + + + +### Parser + + + +### Performance + + + +### Output + + + +### _Blackd_ + + + +### Integrations + + + +### Documentation + + +``` + +## Release workflows + +All of _Black_'s release automation uses [GitHub Actions]. All workflows are therefore +configured using YAML files in the `.github/workflows` directory of the _Black_ +repository. + +They are triggered by the publication of a [GitHub Release]. + +Below are descriptions of our release workflows. + +### Publish to PyPI + +This is our main workflow. It builds an [sdist] and [wheels] to upload to PyPI where the +vast majority of users will download Black from. It's divided into three job groups: + +#### sdist + pure wheel + +This single job builds the sdist and pure Python wheel (i.e., a wheel that only contains +Python code) using [build] and then uploads them to PyPI using [twine]. These artifacts +are general-purpose and can be used on basically any platform supported by Python. + +#### mypyc wheels (…) + +We use [mypyc] to compile _Black_ into a CPython C extension for significantly improved +performance. Wheels built with mypyc are platform and Python version specific. +[Supported platforms are documented in the FAQ](labels/mypyc-support). + +These matrix jobs use [cibuildwheel] which handles the complicated task of building C +extensions for many environments for us. Since building these wheels is slow, there are +multiple mypyc wheels jobs (hence the term "matrix") that build for a specific platform +(as noted in the job name in parentheses). + +Like the previous job group, the built wheels are uploaded to PyPI using [twine]. + +#### Update stable branch + +So this job doesn't _really_ belong here, but updating the `stable` branch after the +other PyPI jobs pass (they must pass for this job to start) makes the most sense. This +saves us from remembering to update the branch sometime after cutting the release. + +- _Currently this workflow uses an API token associated with @ambv's PyPI account_ + +### Publish executables + +This workflow builds native executables for multiple platforms using [PyInstaller]. This +allows people to download the executable for their platform and run _Black_ without a +[Python runtime](https://wiki.python.org/moin/PythonImplementations) installed. 
+
+The created binaries are stored on the associated GitHub Release for download over _IPv4
+only_ (GitHub still does not have IPv6 access 😢).
+
+### docker
+
+This workflow uses the QEMU-powered `buildx` feature of Docker to upload an `arm64` and
+`amd64`/`x86_64` build of the official _Black_ Docker image™.
+
+- _Currently this workflow uses an API token associated with @cooperlees' account_
+
+```{note}
+This also runs on each push to `main`.
+```
+
+[black-actions]: https://github.com/psf/black/actions
+[build]: https://pypa-build.readthedocs.io/
+[calver]: https://calver.org
+[cibuildwheel]: https://cibuildwheel.readthedocs.io/
+[gh-3139]: https://github.com/psf/black/pull/3139
+[github actions]: https://github.com/features/actions
+[github release]: https://github.com/psf/black/releases
+[new-release]: https://github.com/psf/black/releases/new
+[mypyc]: https://mypyc.readthedocs.io/
+[mypyc-platform-support]:
+  /faq.html#what-is-compiled-yes-no-all-about-in-the-version-output
+[pyinstaller]: https://www.pyinstaller.org/
+[sdist]:
+  https://packaging.python.org/en/latest/glossary/#term-Source-Distribution-or-sdist
+[twine]: https://github.com/pypa/twine
+[wheels]: https://packaging.python.org/en/latest/glossary/#term-Wheel
diff --git a/docs/contributing/the_basics.md b/docs/contributing/the_basics.md
new file mode 100644
index 00000000000..9325a9e44ed
--- /dev/null
+++ b/docs/contributing/the_basics.md
@@ -0,0 +1,80 @@
+# The basics
+
+An overview of contributing to the _Black_ project.
+
+## Technicalities
+
+Development on the latest version of Python is preferred. As of this writing it's 3.9.
+You can use any operating system.
+
+Install development dependencies inside a virtual environment of your choice, for
+example:
+
+```console
+$ python3 -m venv .venv
+$ source .venv/bin/activate
+(.venv)$ pip install -r test_requirements.txt
+(.venv)$ pip install -e .[d]
+(.venv)$ pre-commit install
+```
+
+Before submitting pull requests, run lints and tests with the following commands from
+the root of the black repo:
+
+```console
+# Linting
+(.venv)$ pre-commit run -a
+
+# Unit tests
+(.venv)$ tox -e py
+
+# Optional Fuzz testing
+(.venv)$ tox -e fuzz
+```
+
+### News / Changelog Requirement
+
+`Black` has CI that will check for an entry corresponding to your PR in `CHANGES.md`. If
+you feel this PR does not require a changelog entry, please state that in a comment and a
+maintainer can add a `skip news` label to make the CI pass. Otherwise, please ensure you
+have a line in the following format:
+
+```md
+- `Black` is now more awesome (#X)
+```
+
+Note that X should be your PR number, not issue number! To work out X, please use
+[Next PR Number](https://ichard26.github.io/next-pr-number/?owner=psf&name=black). This
+is not perfect but saves a lot of release overhead as now the releaser does not need to
+go back and work out what to add to the `CHANGES.md` for each release.
+
+### Style Changes
+
+If a change would affect the advertised code style, please modify the documentation (The
+_Black_ code style) to reflect that change. Patches that fix unintended bugs in
+formatting don't need to be mentioned separately though. If the change is implemented
+with the `--preview` flag, please include the change in the future style document
+instead and write the changelog entry under a dedicated "Preview changes" heading.
+
+### Docs Testing
+
+If you make changes to docs, you can test that they still build locally too.
+ +```console +(.venv)$ pip install -r docs/requirements.txt +(.venv)$ pip install [-e] .[d] +(.venv)$ sphinx-build -a -b html -W docs/ docs/_build/ +``` + +## Hygiene + +If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug, +run it again to confirm it's really fixed. + +If adding a new feature, add a test. In fact, always add a test. But wait, before adding +any large feature, first open an issue for us to discuss the idea first. + +## Finally + +Thanks again for your interest in improving the project! You're taking action when most +people decide to sit and watch. diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 00000000000..8b9ffb0202e --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,138 @@ +# Frequently Asked Questions + +The most common questions and issues users face are aggregated to this FAQ. + +```{contents} +:local: +:backlinks: none +:class: this-will-duplicate-information-and-it-is-still-useful-here +``` + +## Why spaces? I prefer tabs + +PEP 8 recommends spaces over tabs, and they are used by most of the Python community. +_Black_ provides no options to configure the indentation style, and requests for such +options will not be considered. + +However, we recognise that using tabs is an accessibility issue as well. While the +option will never be added to _Black_, visually impaired developers may find conversion +tools such as `expand/unexpand` (for Linux) useful when contributing to Python projects. +A workflow might consist of e.g. setting up appropriate pre-commit and post-merge git +hooks, and scripting `unexpand` to run after applying _Black_. + +## Does Black have an API? + +Not yet. _Black_ is fundamentally a command line tool. Many +[integrations](integrations/index.rst) are provided, but a Python interface is not one +of them. A simple API is being [planned](https://github.com/psf/black/issues/779) +though. + +## Is Black safe to use? + +Yes. _Black_ is strictly about formatting, nothing else. Black strives to ensure that +after formatting the AST is +[checked](the_black_code_style/current_style.md#ast-before-and-after-formatting) with +limited special cases where the code is allowed to differ. If issues are found, an error +is raised and the file is left untouched. Magical comments that influence linters and +other tools, such as `# noqa`, may be moved by _Black_. See below for more details. + +## How stable is Black's style? + +Stable. _Black_ aims to enforce one style and one style only, with some room for +pragmatism. See [The Black Code Style](the_black_code_style/index.rst) for more details. + +Starting in 2022, the formatting output will be stable for the releases made in the same +year (other than unintentional bugs). It is possible to opt-in to the latest formatting +styles, using the `--preview` flag. + +## Why is my file not formatted? + +Most likely because it is ignored in `.gitignore` or excluded with configuration. See +[file collection and discovery](usage_and_configuration/file_collection_and_discovery.md) +for details. + +## Why is my Jupyter Notebook cell not formatted? + +_Black_ is timid about formatting Jupyter Notebooks. Cells containing any of the +following will not be formatted: + +- automagics (e.g. `pip install black`) +- non-Python cell magics (e.g. `%%writeline`). These can be added with the flag + `--python-cell-magics`, e.g. `black --python-cell-magics writeline hello.ipynb`. 
+- multiline magics, e.g.: + + ```python + %timeit f(1, \ + 2, \ + 3) + ``` + +- code which `IPython`'s `TransformerManager` would transform magics into, e.g.: + + ```python + get_ipython().system('ls') + ``` + +- invalid syntax, as it can't be safely distinguished from automagics in the absence of + a running `IPython` kernel. + +## Why are Flake8's E203 and W503 violated? + +Because they go against PEP 8. E203 falsely triggers on list +[slices](the_black_code_style/current_style.md#slices), and adhering to W503 hinders +readability because operators are misaligned. Disable W503 and enable the +disabled-by-default counterpart W504. E203 should be disabled while changes are still +[discussed](https://github.com/PyCQA/pycodestyle/issues/373). + +## Which Python versions does Black support? + +Currently the runtime requires Python 3.7-3.11. Formatting is supported for files +containing syntax from Python 3.3 to 3.11. We promise to support at least all Python +versions that have not reached their end of life. This is the case for both running +_Black_ and formatting code. + +Support for formatting Python 2 code was removed in version 22.0. While we've made no +plans to stop supporting older Python 3 minor versions immediately, their support might +also be removed some time in the future without a deprecation period. + +Runtime support for 3.6 was removed in version 22.9.0. + +## Why does my linter or typechecker complain after I format my code? + +Some linters and other tools use magical comments (e.g., `# noqa`, `# type: ignore`) to +influence their behavior. While Black does its best to recognize such comments and leave +them in the right place, this detection is not and cannot be perfect. Therefore, you'll +sometimes have to manually move these comments to the right place after you format your +codebase with _Black_. + +## Can I run Black with PyPy? + +Yes, there is support for PyPy 3.7 and higher. + +## Why does Black not detect syntax errors in my code? + +_Black_ is an autoformatter, not a Python linter or interpreter. Detecting all syntax +errors is not a goal. It can format all code accepted by CPython (if you find an example +where that doesn't hold, please report a bug!), but it may also format some code that +CPython doesn't accept. + +(labels/mypyc-support)= + +## What is `compiled: yes/no` all about in the version output? + +While _Black_ is indeed a pure Python project, we use [mypyc] to compile _Black_ into a +C Python extension, usually doubling performance. These compiled wheels are available +for 64-bit versions of Windows, Linux (via the manylinux standard), and macOS across all +supported CPython versions. + +Platforms including musl-based and/or ARM Linux distributions, and ARM Windows are +currently **not** supported. These platforms will fall back to the slower pure Python +wheel available on PyPI. + +If you are experiencing exceptionally weird issues or even segfaults, you can try +passing `--no-binary black` to your pip install invocation. This flag excludes all +wheels (including the pure Python wheel), so this command will use the [sdist]. + +[mypyc]: https://mypyc.readthedocs.io/en/latest/ +[sdist]: + https://packaging.python.org/en/latest/glossary/#term-Source-Distribution-or-sdist diff --git a/docs/getting_started.md b/docs/getting_started.md new file mode 100644 index 00000000000..1825f3b5aa3 --- /dev/null +++ b/docs/getting_started.md @@ -0,0 +1,48 @@ +# Getting Started + +New to _Black_? Don't worry, you've found the perfect place to get started! 
+ +## Do you like the _Black_ code style? + +Before using _Black_ on some of your code, it might be a good idea to first understand +how _Black_ will format your code. _Black_ isn't for everyone and you may find something +that is a dealbreaker for you personally, which is okay! The current _Black_ code style +[is described here](./the_black_code_style/current_style.md). + +## Try it out online + +Also, you can try out _Black_ online for minimal fuss on the +[Black Playground](https://black.vercel.app) generously created by José Padilla. + +## Installation + +_Black_ can be installed by running `pip install black`. It requires Python 3.7+ to run. +If you want to format Jupyter Notebooks, install with `pip install 'black[jupyter]'`. + +If you can't wait for the latest _hotness_ and want to install from GitHub, use: + +`pip install git+https://github.com/psf/black` + +## Basic usage + +To get started right away with sensible defaults: + +```sh +black {source_file_or_directory}... +``` + +You can run _Black_ as a package if running it as a script doesn't work: + +```sh +python -m black {source_file_or_directory}... +``` + +## Next steps + +Took a look at [the _Black_ code style](./the_black_code_style/current_style.md) and +tried out _Black_? Fantastic, you're ready for more. Why not explore some more on using +_Black_ by reading +[Usage and Configuration: The basics](./usage_and_configuration/the_basics.md). +Alternatively, you can check out the +[Introducing _Black_ to your project](./guides/introducing_black_to_your_project.md) +guide. diff --git a/docs/guides/index.md b/docs/guides/index.md new file mode 100644 index 00000000000..127279b5e81 --- /dev/null +++ b/docs/guides/index.md @@ -0,0 +1,16 @@ +# Guides + +```{toctree} +--- +hidden: +--- + +introducing_black_to_your_project +using_black_with_other_tools +``` + +Wondering how to do something specific? You've found the right place! Listed below are +topic specific guides available: + +- {doc}`introducing_black_to_your_project` +- {doc}`using_black_with_other_tools` diff --git a/docs/guides/introducing_black_to_your_project.md b/docs/guides/introducing_black_to_your_project.md new file mode 100644 index 00000000000..9ae40a1928e --- /dev/null +++ b/docs/guides/introducing_black_to_your_project.md @@ -0,0 +1,52 @@ +# Introducing _Black_ to your project + +```{note} +This guide is incomplete. Contributions are welcomed and would be deeply +appreciated! +``` + +## Avoiding ruining git blame + +A long-standing argument against moving to automated code formatters like _Black_ is +that the migration will clutter up the output of `git blame`. This was a valid argument, +but since Git version 2.23, Git natively supports +[ignoring revisions in blame](https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revltrevgt) +with the `--ignore-rev` option. You can also pass a file listing the revisions to ignore +using the `--ignore-revs-file` option. The changes made by the revision will be ignored +when assigning blame. Lines modified by an ignored revision will be blamed on the +previous revision that modified those lines. + +So when migrating your project's code style to _Black_, reformat everything and commit +the changes (preferably in one massive commit). Then put the full 40 characters commit +identifier(s) into a file. + +```text +# Migrate code style to Black +5b4ab991dede475d393e9d69ec388fd6bd949699 +``` + +Afterwards, you can pass that file to `git blame` and see clean and meaningful blame +information. 
+ +```console +$ git blame important.py --ignore-revs-file .git-blame-ignore-revs +7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 1) def very_important_function(text, file): +abdfd8b0 (Alice Doe 2019-09-23 11:39:32 -0400 2) text = text.lstrip() +7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 3) with open(file, "r+") as f: +7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 4) f.write(formatted) +``` + +You can even configure `git` to automatically ignore revisions listed in a file on every +call to `git blame`. + +```console +$ git config blame.ignoreRevsFile .git-blame-ignore-revs +``` + +**The one caveat is that some online Git-repositories like GitLab do not yet support +ignoring revisions using their native blame UI.** So blame information will be cluttered +with a reformatting commit on those platforms. (If you'd like this feature, there's an +open issue for [GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423)). This is +however supported by +[GitHub](https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view), +currently in beta. diff --git a/docs/compatible_configs.md b/docs/guides/using_black_with_other_tools.md similarity index 64% rename from docs/compatible_configs.md rename to docs/guides/using_black_with_other_tools.md index 25e959e3281..1d380bdaba7 100644 --- a/docs/compatible_configs.md +++ b/docs/guides/using_black_with_other_tools.md @@ -1,4 +1,6 @@ -# _Black_ compatible configurations +# Using _Black_ with other tools + +## Black compatible configurations All of Black's changes are harmless (or at least, they should be), but a few do conflict against other tools. It is not uncommon to be using other tools alongside _Black_ like @@ -10,13 +12,33 @@ tools out there. (e.g. `pyproject.toml`). The provided examples are to only configure their corresponding tools, using **their** supported file formats. -## isort +Compatible configuration files can be +[found here](https://github.com/psf/black/blob/main/docs/compatible_configs/). + +### isort [isort](https://pypi.org/p/isort/) helps to sort and format imports in your Python code. _Black_ also formats imports, but in a different way from isort's defaults which leads to conflicting changes. -### Configuration +#### Profile + +Since version 5.0.0, isort supports +[profiles](https://pycqa.github.io/isort/docs/configuration/profiles.html) to allow easy +interoperability with common code styles. You can set the black profile in any of the +[config files](https://pycqa.github.io/isort/docs/configuration/config_files.html) +supported by isort. Below, an example for `pyproject.toml`: + +```toml +[tool.isort] +profile = "black" +``` + +#### Custom Configuration + +If you're using an isort version that is older than 5.0.0 or you have some custom +configuration for _Black_, you can tweak your isort configuration to make it compatible +with _Black_. Below, an example for `.isort.cfg`: ``` multi_line_output = 3 @@ -27,12 +49,12 @@ ensure_newline_before_comments = True line_length = 88 ``` -### Why those options above? +#### Why those options above? _Black_ wraps imports that surpass `line-length` by moving identifiers into their own indented line. If that still doesn't fit the bill, it will put all of them in separate lines and put a trailing comma. A more detailed explanation of this behaviour can be -[found here](https://github.com/psf/black#how-black-wraps-lines). +[found here](../the_black_code_style/current_style.md#how-black-wraps-lines). 
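+
+To make the comparison concrete, here is a short illustrative sketch (our own example
+with made-up package and helper names, not taken from either tool's documentation) of
+the import shape that _Black_ produces and that the isort `black` profile agrees with:
+
+```python
+# Before formatting: a from-import that overflows the 88-character limit.
+# from hypothetical_package.utilities import first_helper, second_helper, third_helper, fourth_helper
+
+# After Black (and isort configured with profile = "black"): one identifier
+# per line, a trailing comma after the last one, and a dedented closing
+# parenthesis.
+from hypothetical_package.utilities import (
+    first_helper,
+    second_helper,
+    third_helper,
+    fourth_helper,
+)
+```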
isort's default mode of wrapping imports that extend past the `line_length` limit is "Grid". @@ -69,23 +91,15 @@ works the same as with _Black_. **Please note** `ensure_newline_before_comments = True` only works since isort >= 5 but does not break older versions so you can keep it if you are running previous versions. -If only isort >= 5 is used you can add `profile = black` instead of all the options -since [profiles](https://timothycrosley.github.io/isort/docs/configuration/profiles/) -are available and do the configuring for you. -### Formats +#### Formats

.isort.cfg -```cfg +```ini [settings] -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 +profile = black ```
@@ -93,14 +107,9 @@ line_length = 88
setup.cfg -```cfg +```ini [isort] -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 +profile = black ```
@@ -110,12 +119,7 @@ line_length = 88 ```toml [tool.isort] -multi_line_output = 3 -include_trailing_comma = true -force_grid_wrap = 0 -use_parentheses = true -ensure_newline_before_comments = true -line_length = 88 +profile = 'black' ```
@@ -125,47 +129,43 @@ line_length = 88 ```ini [*.py] -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 +profile = black ```
-## Flake8
+### Flake8

[Flake8](https://pypi.org/p/flake8/) is a code linter. It warns you of syntax errors,
possible bugs, stylistic errors, etc. For the most part, Flake8 follows
[PEP 8](https://www.python.org/dev/peps/pep-0008/) when warning about stylistic errors.
There are a few deviations that cause incompatibilities with _Black_.

-### Configuration
+#### Configuration

```
max-line-length = 88
-extend-ignore = E203, W503
+extend-ignore = E203
```

-### Why those options above?
-
-When breaking a line, _Black_ will break it before a binary operator. This is compliant
-with PEP 8, but this behaviour will cause flake8 to raise
-`W503 line break before binary operator` warnings.
+#### Why those options above?

In some cases, as determined by PEP 8, _Black_ will enforce an equal amount of
whitespace around slice operators. Due to this, Flake8 will raise
-`E203 whitespace before ':'` warnings.
+`E203 whitespace before ':'` warnings. Since this warning is not PEP 8 compliant, Flake8
+should be configured to ignore it via `extend-ignore = E203`.

-Since both of these warnings are not PEP 8 compliant, Flake8 should be configured to
-ignore these warnings via `extend-ignore = E203, W503`.
+When breaking a line, _Black_ will break it before a binary operator. This is compliant
+with PEP 8 as of
+[April 2016](https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b#diff-64ec08cc46db7540f18f2af46037f599).
+There's a disabled-by-default warning in Flake8, `W503 line break before binary
+operator`, which goes against this PEP 8 recommendation. It should not be enabled in
+your configuration.

Also, like with isort, flake8 should be configured to allow lines up to the length
limit of `88`, _Black_'s default. This explains `max-line-length = 88`.

-### Formats
+#### Formats
.flake8 @@ -173,7 +173,7 @@ limit of `88`, _Black_'s default. This explains `max-line-length = 88`. ```ini [flake8] max-line-length = 88 -extend-ignore = E203, W503 +extend-ignore = E203 ```
@@ -181,10 +181,10 @@ extend-ignore = E203, W503
setup.cfg -```cfg +```ini [flake8] max-line-length = 88 -extend-ignore = E203, W503 +extend-ignore = E203 ```
@@ -195,56 +195,38 @@ extend-ignore = E203, W503 ```ini [flake8] max-line-length = 88 -extend-ignore = E203, W503 +extend-ignore = E203 ```
-## Pylint +### Pylint [Pylint](https://pypi.org/p/pylint/) is also a code linter like Flake8. It has the same checks as flake8 and more. In particular, it has more formatting checks regarding style conventions like variable naming. With so many checks, Pylint is bound to have some mixed feelings about _Black_'s formatting style. -### Configuration +#### Configuration ``` -disable = C0330, C0326 max-line-length = 88 ``` -### Why those options above? - -When _Black_ is folding very long expressions, the closing brackets will -[be dedented](https://github.com/psf/black#how-black-wraps-lines). - -```py3 -ImportantClass.important_method( - exc, limit, lookup_lines, capture_locals, callback -) -``` - -Although, this style is PEP 8 compliant, Pylint will raise -`C0330: Wrong hanging indentation before block (add 4 spaces)` warnings. Since _Black_ -isn't configurable on this style, Pylint should be told to ignore these warnings via -`disable = C0330`. +#### Why those options above? -Also, since _Black_ deals with whitespace around operators and brackets, Pylint's -warning `C0326: Bad whitespace` should be disabled using `disable = C0326`. +Pylint should be configured to only complain about lines that surpass `88` characters +via `max-line-length = 88`. -And as usual, Pylint should be configured to only complain about lines that surpass `88` -characters via `max-line-length = 88`. +If using `pylint<2.6.0`, also disable `C0326` and `C0330` as these are incompatible with +_Black_ formatting and have since been removed. -### Formats +#### Formats
pylintrc ```ini -[MESSAGES CONTROL] -disable = C0330, C0326 - [format] max-line-length = 88 ``` @@ -257,9 +239,6 @@ max-line-length = 88 ```cfg [pylint] max-line-length = 88 - -[pylint.messages_control] -disable = C0330, C0326 ```
@@ -268,9 +247,6 @@ disable = C0330, C0326
pyproject.toml

```toml
-[tool.pylint.messages_control]
-disable = "C0330, C0326"
-
[tool.pylint.format]
max-line-length = "88"
```
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 00000000000..9d0db465022
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,139 @@
+
+
+# The uncompromising code formatter
+
+> “Any color you like.”
+
+By using _Black_, you agree to cede control over minutiae of hand-formatting. In return,
+_Black_ gives you speed, determinism, and freedom from `pycodestyle` nagging about
+formatting. You will save time and mental energy for more important matters.
+
+_Black_ makes code review faster by producing the smallest diffs possible. Blackened
+code looks the same regardless of the project you're reading. Formatting becomes
+transparent after a while and you can focus on the content instead.
+
+Try it out now using the [Black Playground](https://black.vercel.app).
+
+```{admonition} Note - Black is now stable!
+*Black* is [successfully used](https://github.com/psf/black#used-by) by
+many projects, small and big. *Black* has a comprehensive test suite, with efficient
+parallel tests, our own auto formatting and parallel Continuous Integration runner.
+Now that we have become stable, you should not expect large changes to the formatting
+in the future. Stylistic changes will mostly be responses to bug reports and support
+for new Python syntax.
+
+Also, as a safety measure which slows down processing, *Black* will check that the
+reformatted code still produces a valid AST that is effectively equivalent to the
+original (see the
+[Pragmatism](./the_black_code_style/current_style.md#pragmatism)
+section for details). If you're feeling confident, use `--fast`.
+```
+
+```{note}
+{doc}`Black is licensed under the MIT license <license>`.
+```
+
+## Testimonials
+
+**Mike Bayer**, author of [SQLAlchemy](https://www.sqlalchemy.org/):
+
+> _I can't think of any single tool in my entire programming career that has given me a
+> bigger productivity increase by its introduction. I can now do refactorings in about
+> 1% of the keystrokes that it would have taken me previously when we had no way for
+> code to format itself._
+
+**Dusty Phillips**,
+[writer](https://smile.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=dusty+phillips):
+
+> _Black is opinionated so you don't have to be._
+
+**Hynek Schlawack**, creator of [attrs](https://www.attrs.org/), core developer of
+Twisted and CPython:
+
+> _An auto-formatter that doesn't suck is all I want for Xmas!_
+
+**Carl Meyer**, [Django](https://www.djangoproject.com/) core developer:
+
+> _At least the name is good._
+
+**Kenneth Reitz**, creator of [requests](http://python-requests.org/) and
+[pipenv](https://docs.pipenv.org/):
+
+> _This vastly improves the formatting of our code. Thanks a ton!_
+
+## Show your style
+
+Use the badge in your project's README.md:
+
+```md
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+```
+
+Using the badge in README.rst:
+
+```rst
+.. 
image:: https://img.shields.io/badge/code%20style-black-000000.svg
+  :target: https://github.com/psf/black
+```
+
+Looks like this:
+
+```{image} https://img.shields.io/badge/code%20style-black-000000.svg
+:target: https://github.com/psf/black
+```
+
+## Contents
+
+```{toctree}
+---
+maxdepth: 3
+includehidden:
+---
+
+the_black_code_style/index
+```
+
+```{toctree}
+---
+maxdepth: 3
+includehidden:
+caption: User Guide
+---
+
+getting_started
+usage_and_configuration/index
+integrations/index
+guides/index
+faq
+```
+
+```{toctree}
+---
+maxdepth: 2
+includehidden:
+caption: Development
+---
+
+contributing/index
+change_log
+authors
+```
+
+```{toctree}
+---
+hidden:
+caption: Project Links
+---
+
+GitHub <https://github.com/psf/black>
+PyPI <https://pypi.org/project/black/>
+Chat <https://discord.gg/RtVdv86PrH>
+```
+
+# Indices and tables
+
+- {ref}`genindex`
+- {ref}`search`
diff --git a/docs/integrations/editors.md b/docs/integrations/editors.md
new file mode 100644
index 00000000000..28c9f48a09f
--- /dev/null
+++ b/docs/integrations/editors.md
@@ -0,0 +1,317 @@
+# Editor integration
+
+## Emacs
+
+Options include the following:
+
+- [wbolster/emacs-python-black](https://github.com/wbolster/emacs-python-black)
+- [proofit404/blacken](https://github.com/pythonic-emacs/blacken)
+- [Elpy](https://github.com/jorgenschaefer/elpy)
+
+## PyCharm/IntelliJ IDEA
+
+1. Install _Black_ with the `d` extra.
+
+   ```console
+   $ pip install 'black[d]'
+   ```
+
+1. Install
+   [BlackConnect IntelliJ IDEs plugin](https://plugins.jetbrains.com/plugin/14321-blackconnect).
+
+1. Open the plugin configuration in PyCharm/IntelliJ IDEA
+
+   On macOS:
+
+   `PyCharm -> Preferences -> Tools -> BlackConnect`
+
+   On Windows / Linux / BSD:
+
+   `File -> Settings -> Tools -> BlackConnect`
+
+1. In the `Local Instance (shared between projects)` section:
+
+   1. Check `Start local blackd instance when plugin loads`.
+   1. Press the `Detect` button near the `Path` input. The plugin should detect the
+      `blackd` executable.
+
+1. In the `Trigger Settings` section, check `Trigger on code reformat` to enable code
+   reformatting with _Black_.
+
+1. Format the currently opened file by selecting `Code -> Reformat Code` or using a
+   shortcut.
+
+1. Optionally, to run _Black_ on every file save:
+
+   - In the `Trigger Settings` section of the plugin configuration, check
+     `Trigger when saving changed files`.
+
+## Wing IDE
+
+Wing IDE supports `black` via **Preference Settings** for system-wide settings and
+**Project Properties** for per-project or workspace-specific settings, as explained in
+the Wing documentation on
+[Auto-Reformatting](https://wingware.com/doc/edit/auto-reformatting). The detailed
+procedure is:
+
+### Prerequisites
+
+- Wing IDE version 8.0+
+
+- Install `black`.
+
+  ```console
+  $ pip install black
+  ```
+
+- Make sure it runs from the command line, e.g.
+
+  ```console
+  $ black --help
+  ```
+
+### Preference Settings
+
+If you want Wing IDE to always reformat with `black` for every project, follow these
+steps:
+
+1. In the menubar navigate to `Edit -> Preferences -> Editor -> Reformatting`.
+
+1. Set **Auto-Reformat** from `disable` (default) to `Line after edit` or
+   `Whole files before save`.
+
+1. Set **Reformatter** from `PEP8` (default) to `Black`.
+
+### Project Properties
+
+If you want to reformat only a specific project without interfering with the Wing IDE
+global settings, follow these steps:
+
+1. In the menubar navigate to `Project -> Project Properties -> Options`.
+
+1. Set **Auto-Reformat** from `Use Preferences setting` (default) to `Line after edit`
+   or `Whole files before save`.
+
+1. 
Set **Reformatter** from `Use Preferences setting` (default) to `Black`.
+
+## Vim
+
+### Official plugin
+
+Commands and shortcuts:
+
+- `:Black` to format the entire file (ranges not supported);
+  - you can optionally pass `target_version=<version>` with the same values as in the
+    command line.
+- `:BlackUpgrade` to upgrade _Black_ inside the virtualenv;
+- `:BlackVersion` to get the current version of _Black_ inside the virtualenv.
+
+Configuration:
+
+- `g:black_fast` (defaults to `0`)
+- `g:black_linelength` (defaults to `88`)
+- `g:black_skip_string_normalization` (defaults to `0`)
+- `g:black_virtualenv` (defaults to `~/.vim/black` or `~/.local/share/nvim/black`)
+- `g:black_quiet` (defaults to `0`)
+- `g:black_preview` (defaults to `0`)
+
+To install with [vim-plug](https://github.com/junegunn/vim-plug):
+
+```
+Plug 'psf/black', { 'branch': 'stable' }
+```
+
+or with [Vundle](https://github.com/VundleVim/Vundle.vim):
+
+```
+Plugin 'psf/black'
+```
+
+and execute the following in a terminal:
+
+```console
+$ cd ~/.vim/bundle/black
+$ git checkout origin/stable -b stable
+```
+
+or you can copy the plugin files from
+[plugin/black.vim](https://github.com/psf/black/blob/stable/plugin/black.vim) and
+[autoload/black.vim](https://github.com/psf/black/blob/stable/autoload/black.vim).
+
+```
+mkdir -p ~/.vim/pack/python/start/black/plugin
+mkdir -p ~/.vim/pack/python/start/black/autoload
+curl https://raw.githubusercontent.com/psf/black/stable/plugin/black.vim -o ~/.vim/pack/python/start/black/plugin/black.vim
+curl https://raw.githubusercontent.com/psf/black/stable/autoload/black.vim -o ~/.vim/pack/python/start/black/autoload/black.vim
+```
+
+Let me know if this requires any changes to work with Vim 8's builtin `packadd`, or
+Pathogen, and so on.
+
+This plugin **requires Vim 7.0+ built with Python 3.7+ support**. It needs Python 3.7 to
+be able to run _Black_ inside the Vim process, which is much faster than calling an
+external command.
+
+On first run, the plugin creates its own virtualenv using the right Python version and
+automatically installs _Black_. You can upgrade it later by calling `:BlackUpgrade` and
+restarting Vim.
+
+If you need to do anything special to make your virtualenv work and install _Black_ (for
+example you want to run a version from main), create a virtualenv manually and point
+`g:black_virtualenv` to it. The plugin will use it.
+
+To run _Black_ on save, add the following lines to `.vimrc` or `init.vim`:
+
+```
+augroup black_on_save
+  autocmd!
+  autocmd BufWritePre *.py Black
+augroup end
+```
+
+To run _Black_ on a key press (e.g. F9 below), add this:
+
+```
+nnoremap <F9> :Black<CR>
+```
+
+**How to get Vim with Python 3.6?** On Ubuntu 17.10 Vim comes with Python 3.6 by
+default. On macOS with Homebrew run: `brew install vim`. When building Vim from source,
+use: `./configure --enable-python3interp=yes`. There are many guides online on how to do
+this.
+
+**I get an import error when using _Black_ from a virtual environment**: If you get an
+error message like this:
+
+```text
+Traceback (most recent call last):
+  File "<string>", line 63, in <module>
+  File "/home/gui/.vim/black/lib/python3.7/site-packages/black.py", line 45, in <module>
+    from typed_ast import ast3, ast27
+  File "/home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/ast3.py", line 40, in <module>
+    from typed_ast import _ast3
+ImportError: /home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/_ast3.cpython-37m-x86_64-linux-gnu.so: undefined symbol: PyExc_KeyboardInterrupt
+```
+
+Then you need to install `typed_ast` directly from the source code. The error happens
+because `pip` will download [Python wheels](https://pythonwheels.com/) if they are
+available. Python wheels are a newer standard of distributing Python packages, and in
+wheels, packages with Cython or C extensions come already compiled, so the installation
+is much faster. The problem here is that somehow the Python environment inside Vim does
+not match those already-compiled C extensions, and these kinds of errors are the result.
+Luckily there is an easy fix: installing the package from the source code.
+
+The package that causes problems is:
+
+- [typed-ast](https://pypi.org/project/typed-ast/)
+
+Now remove that package:
+
+```console
+$ pip uninstall typed-ast -y
+```
+
+And now you can install it with:
+
+```console
+$ pip install --no-binary :all: typed-ast
+```
+
+The C extensions will be compiled and now Vim's Python environment will match. Note that
+you need to have the GCC compiler and the Python development files installed (on
+Ubuntu/Debian do `sudo apt-get install build-essential python3-dev`).
+
+If you later want to update _Black_, you should do it like this:
+
+```console
+$ pip install -U black --no-binary typed-ast
+```
+
+### With ALE
+
+1. Install [`ale`](https://github.com/dense-analysis/ale)
+
+1. Install `black`
+
+1. Add this to your vimrc:
+
+   ```vim
+   let g:ale_fixers = {}
+   let g:ale_fixers.python = ['black']
+   ```
+
+## Gedit
+
+gedit is the default text editor of the GNOME desktop environment on Unix-like operating
+systems. Open gedit with:
+
+```console
+$ gedit <file_name>
+```
+
+1. Go to `Edit > Preferences > Plugins`.
+1. Search for `External Tools` and activate it.
+1. Go to `Tools -> Manage External Tools`.
+1. Add a new tool using the `+` button.
+1. Copy the content below into the code window.
+
+```bash
+#!/bin/bash
+Name=$GEDIT_CURRENT_DOCUMENT_NAME
+black "$Name"
+```
+
+- Set a keyboard shortcut if you like, e.g. `Ctrl-B`.
+- Save: `Nothing`
+- Input: `Nothing`
+- Output: `Display in bottom pane` if you like.
+- Change the name of the tool if you like.
+
+Use your keyboard shortcut or `Tools -> External Tools` to use your new tool. When you
+close and reopen your file, _Black_ will be done with its job.
+
+## Visual Studio Code
+
+- Use the
+  [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python)
+  ([instructions](https://code.visualstudio.com/docs/python/editing#_formatting)).
+
+- Alternatively the pre-release
+  [Black Formatter](https://marketplace.visualstudio.com/items?itemName=ms-python.black-formatter)
+  extension can be used which runs a [Language Server Protocol](https://langserver.org/)
+  server for Black. Formatting is much more responsive using this extension, **but the
+  minimum supported version of Black is 22.3.0**.
+
+## SublimeText 3
+
+Use the [sublack plugin](https://github.com/jgirardet/sublack).
+ +## Python LSP Server + +If your editor supports the [Language Server Protocol](https://langserver.org/) (Atom, +Sublime Text, Visual Studio Code and many more), you can use the +[Python LSP Server](https://github.com/python-lsp/python-lsp-server) with the +[python-lsp-black](https://github.com/python-lsp/python-lsp-black) plugin. + +## Atom/Nuclide + +Use [python-black](https://atom.io/packages/python-black) or +[formatters-python](https://atom.io/packages/formatters-python). + +## Gradle (the build tool) + +Use the [Spotless](https://github.com/diffplug/spotless/tree/main/plugin-gradle) plugin. + +## Kakoune + +Add the following hook to your kakrc, then run _Black_ with `:format`. + +``` +hook global WinSetOption filetype=python %{ + set-option window formatcmd 'black -q -' +} +``` + +## Thonny + +Use [Thonny-black-code-format](https://github.com/Franccisco/thonny-black-code-format). diff --git a/docs/integrations/github_actions.md b/docs/integrations/github_actions.md new file mode 100644 index 00000000000..12bcb21fee6 --- /dev/null +++ b/docs/integrations/github_actions.md @@ -0,0 +1,70 @@ +# GitHub Actions integration + +You can use _Black_ within a GitHub Actions workflow without setting your own Python +environment. Great for enforcing that your code matches the _Black_ code style. + +## Compatibility + +This action is known to support all GitHub-hosted runner OSes. In addition, only +published versions of _Black_ are supported (i.e. whatever is available on PyPI). + +Finally, this action installs _Black_ with the `colorama` extra so the `--color` flag +should work fine. + +## Usage + +Create a file named `.github/workflows/black.yml` inside your repository with: + +```yaml +name: Lint + +on: [push, pull_request] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: psf/black@stable +``` + +We recommend the use of the `@stable` tag, but per version tags also exist if you prefer +that. Note that the action's version you select is independent of the version of _Black_ +the action will use. + +The version of _Black_ the action will use can be configured via `version`. This can be +any +[valid version specifier](https://packaging.python.org/en/latest/glossary/#term-Version-Specifier) +or just the version number if you want an exact version. The action defaults to the +latest release available on PyPI. Only versions available from PyPI are supported, so no +commit SHAs or branch names. + +If you want to include Jupyter Notebooks, _Black_ must be installed with the `jupyter` +extra. Installing the extra and including Jupyter Notebook files can be configured via +`jupyter` (default is `false`). 
+
+You can also configure the arguments passed to _Black_ via `options` (defaults to
+`'--check --diff'`) and `src` (default is `'.'`).
+
+Here's an example configuration:
+
+```yaml
+- uses: psf/black@stable
+  with:
+    options: "--check --verbose"
+    src: "./src"
+    jupyter: true
+    version: "21.5b1"
+```
+
+If you want to match versions covered by Black's
+[stability policy](labels/stability-policy), you can use the compatible release operator
+(`~=`):
+
+```yaml
+- uses: psf/black@stable
+  with:
+    options: "--check --verbose"
+    src: "./src"
+    version: "~= 22.0"
+```
diff --git a/docs/integrations/index.md b/docs/integrations/index.md
new file mode 100644
index 00000000000..33135d08f1a
--- /dev/null
+++ b/docs/integrations/index.md
@@ -0,0 +1,31 @@
+# Integrations
+
+```{toctree}
+---
+hidden:
+---
+
+editors
+github_actions
+source_version_control
+```
+
+_Black_ can be integrated into many environments, providing a better and smoother
+experience. Documentation for integrating _Black_ with a tool can be found for the
+following areas:
+
+- {doc}`Editor / IDE <./editors>`
+- {doc}`GitHub Actions <./github_actions>`
+- {doc}`Source version control <./source_version_control>`
+
+Editors and tools not listed will require external contributions.
+
+Patches welcome! ✨ 🍰 ✨
+
+Any tool can pipe code through _Black_ using its stdio mode (just
+[use `-` as the file name](https://www.tldp.org/LDP/abs/html/special-chars.html#DASHREF2)).
+The formatted code will be returned on stdout (unless `--check` was passed). _Black_
+will still emit messages on stderr, but that shouldn't affect your use case.
+
+This can be used, for example, with PyCharm's or IntelliJ's
+[File Watchers](https://www.jetbrains.com/help/pycharm/file-watchers.html).
diff --git a/docs/integrations/source_version_control.md b/docs/integrations/source_version_control.md
new file mode 100644
index 00000000000..31d0df27273
--- /dev/null
+++ b/docs/integrations/source_version_control.md
@@ -0,0 +1,34 @@
+# Version control integration
+
+Use [pre-commit](https://pre-commit.com/). Once you
+[have it installed](https://pre-commit.com/#install), add this to the
+`.pre-commit-config.yaml` in your repository:
+
+```yaml
+repos:
+  - repo: https://github.com/psf/black
+    rev: 22.8.0
+    hooks:
+      - id: black
+        # It is recommended to specify the latest version of Python
+        # supported by your project here, or alternatively use
+        # pre-commit's default_language_version, see
+        # https://pre-commit.com/#top_level-default_language_version
+        language_version: python3.9
+```
+
+Feel free to switch out the `rev` value to something else, like another
+[tag/version][black-tags] or even a specific commit, although we discourage the use of
+branches or other mutable refs since the hook [won't auto-update as you may
+expect][pre-commit-mutable-rev].
+
+If you want support for Jupyter Notebooks as well, then replace `id: black` with
+`id: black-jupyter`.
+
+```{note}
+The `black-jupyter` hook is only available from version 21.8b0 and onwards.
+``` + +[black-tags]: https://github.com/psf/black/tags +[pre-commit-mutable-rev]: + https://pre-commit.com/#using-the-latest-version-for-a-repository diff --git a/docs/license.md b/docs/license.md new file mode 100644 index 00000000000..132c95bfe2a --- /dev/null +++ b/docs/license.md @@ -0,0 +1,9 @@ +--- +orphan: true +--- + +# License + +```{include} ../LICENSE + +``` diff --git a/docs/requirements.txt b/docs/requirements.txt index 4cad9bc205b..3c4b43511f6 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,9 @@ -recommonmark==0.6.0 -Sphinx==3.2.1 -Pygments==2.6.1 \ No newline at end of file +# Used by ReadTheDocs; pinned requirements for stability. + +myst-parser==0.18.1 +Sphinx==5.2.3 +# Older versions break Sphinx even though they're declared to be supported. +docutils==0.19 +sphinxcontrib-programoutput==0.17 +sphinx_copybutton==0.5.0 +furo==2022.9.29 diff --git a/docs/the_black_code_style.md b/docs/the_black_code_style/current_style.md similarity index 70% rename from docs/the_black_code_style.md rename to docs/the_black_code_style/current_style.md index 09d58307a05..59d79c4cd0e 100644 --- a/docs/the_black_code_style.md +++ b/docs/the_black_code_style/current_style.md @@ -2,18 +2,28 @@ ## Code style -_Black_ reformats entire files in place. It is not configurable. It doesn't take -previous formatting into account. It doesn't reformat blocks that start with -`# fmt: off` and end with `# fmt: on`. `# fmt: on/off` have to be on the same level of -indentation. It also recognizes [YAPF](https://github.com/google/yapf)'s block comments -to the same effect, as a courtesy for straddling code. +_Black_ aims for consistency, generality, readability and reducing git diffs. Similar +language constructs are formatted with similar rules. Style configuration options are +deliberately limited and rarely added. Previous formatting is taken into account as +little as possible, with rare exceptions like the magic trailing comma. The coding style +used by _Black_ can be viewed as a strict subset of PEP 8. + +_Black_ reformats entire files in place. It doesn't reformat lines that end with +`# fmt: skip` or blocks that start with `# fmt: off` and end with `# fmt: on`. +`# fmt: on/off` must be on the same level of indentation and in the same block, meaning +no unindents beyond the initial indentation level between them. It also recognizes +[YAPF](https://github.com/google/yapf)'s block comments to the same effect, as a +courtesy for straddling code. + +The rest of this document describes the current formatting style. If you're interested +in trying out where the style is heading, see [future style](./future_style.md) and try +running `black --preview`. ### How _Black_ wraps lines _Black_ ignores previous formatting and applies uniform horizontal and vertical whitespace to your code. The rules for horizontal whitespace can be summarized as: do -whatever makes `pycodestyle` happy. The coding style used by _Black_ can be viewed as a -strict subset of PEP 8. +whatever makes `pycodestyle` happy. As for vertical whitespace, _Black_ tries to render one full expression or simple statement per line. If this fits the allotted line length, great. @@ -75,6 +85,21 @@ def very_important_function( ... ``` +If a data structure literal (tuple, list, set, dict) or a line of "from" imports cannot +fit in the allotted length, it's always split into one element per line. This minimizes +diffs as well as enables readers of code to find which commit introduced a particular +entry. 
This also makes _Black_ compatible with +[isort](../guides/using_black_with_other_tools.md#isort) with the ready-made `black` +profile or manual configuration. + +You might have noticed that closing brackets are always dedented and that a trailing +comma is always added. Such formatting produces smaller diffs; when you add or remove an +element, it's always just one line. Also, having the closing bracket dedented provides a +clear delimiter between two distinct sections of the code that otherwise share the same +indentation level (like the arguments list and the docstring in the example above). + +(labels/why-no-backslashes)= + _Black_ prefers parentheses over backslashes, and will remove backslashes if found. ```py3 @@ -115,62 +140,6 @@ If you're reaching for backslashes, that's a clear signal that you can do better slightly refactor your code. I hope some of the examples above show you that there are many ways in which you can do it. -However there is one exception: `with` statements using multiple context managers. -Python's grammar does not allow organizing parentheses around the series of context -managers. - -We don't want formatting like: - -```py3 -with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4: - ... # nothing to split on - line too long -``` - -So _Black_ will now format it like this: - -```py3 -with \ - make_context_manager(1) as cm1, \ - make_context_manager(2) as cm2, \ - make_context_manager(3) as cm3, \ - make_context_manager(4) as cm4 \ -: - ... # backslashes and an ugly stranded colon -``` - -You might have noticed that closing brackets are always dedented and that a trailing -comma is always added. Such formatting produces smaller diffs; when you add or remove an -element, it's always just one line. Also, having the closing bracket dedented provides a -clear delimiter between two distinct sections of the code that otherwise share the same -indentation level (like the arguments list and the docstring in the example above). - -If a data structure literal (tuple, list, set, dict) or a line of "from" imports cannot -fit in the allotted length, it's always split into one element per line. This minimizes -diffs as well as enables readers of code to find which commit introduced a particular -entry. This also makes _Black_ compatible with [isort](https://pypi.org/p/isort/) with -the following configuration. - -
-A compatible `.isort.cfg`
-
-```cfg
-[settings]
-multi_line_output = 3
-include_trailing_comma = True
-force_grid_wrap = 0
-use_parentheses = True
-ensure_newline_before_comments = True
-line_length = 88
-```
-
-The equivalent command line is:
-
-```
-$ isort --multi-line=3 --trailing-comma --force-grid-wrap=0 --use-parentheses --line-width=88 [ file.py ]
-```
-
-
 ### Line length
 
 You probably noticed the peculiar default line length. _Black_ defaults to 88 characters
@@ -189,22 +158,22 @@ harder to work with line lengths exceeding 100 characters. It also adversely aff
 side-by-side diff review on typical screen resolutions. Long lines also make it harder
 to present code neatly in documentation or talk slides.
 
-If you're using Flake8, you can bump `max-line-length` to 88 and forget about it.
-Alternatively, use [Bugbear](https://github.com/PyCQA/flake8-bugbear)'s B950 warning
-instead of E501 and keep the max line length at 80 which you are probably already using.
-You'd do it like this:
+If you're using Flake8, you can bump `max-line-length` to 88 and mostly forget about it.
+However, it's better if you use [Bugbear](https://github.com/PyCQA/flake8-bugbear)'s
+B950 warning instead of E501, and bump the max line length to 88 (or the `--line-length`
+you used for black), which will align more with black's _"try to respect
+`--line-length`, but don't become crazy if you can't"_. You'd do it like this:
 
 ```ini
 [flake8]
-max-line-length = 80
+max-line-length = 88
 ...
 select = C,E,F,W,B,B950
-ignore = E203, E501, W503
+extend-ignore = E203, E501
 ```
 
-You'll find _Black_'s own .flake8 config file is configured like this. Explanation of
-why W503 and E203 are disabled can be found further in this documentation. And if you're
-curious about the reasoning behind B950,
+Explanation of why E203 is disabled can be found further in this documentation. And if
+you're curious about the reasoning behind B950,
 [Bugbear's documentation](https://github.com/PyCQA/flake8-bugbear#opinionated-warnings)
 explains it. The tl;dr is "it's like highway speed limits, we won't bother you if you
 overdo it by a few km/h".
@@ -239,21 +208,21 @@ following field or method. This conforms to
 _Black_ won't insert empty lines after function docstrings unless that empty line is
 required due to an inner function starting immediately after.
 
+### Comments
+
+_Black_ does not format comment contents, but it enforces two spaces between code and a
+comment on the same line, and a space before the comment text begins. Some types of
+comments that require specific spacing rules are respected: doc comments (`#: comment`),
+section comments with long runs of hashes, and Spyder cells. Non-breaking spaces after
+hashes are also preserved. Comments may sometimes be moved because of formatting
+changes, which can break tools that assign special meaning to them. See
+[AST before and after formatting](#ast-before-and-after-formatting) for more discussion.
+
 ### Trailing commas
 
 _Black_ will add trailing commas to expressions that are split by comma where each
 element is on its own line. This includes function signatures.
 
-Unnecessary trailing commas are removed if an expression fits in one line. This makes it
-1% more likely that your line won't exceed the allotted line length limit. Moreover, in
-this scenario, if you added another argument to your call, you'd probably fit it in the
-same line anyway. That doesn't make diffs any larger.
-
-One exception to removing trailing commas is tuple expressions with just one element. In
-this case _Black_ won't touch the single trailing comma as this would unexpectedly
-change the underlying data type. Note that this is also the case when commas are used
-while indexing. This is a tuple in disguise: `numpy_array[3, ]`.
-
 One exception to adding trailing commas is function signatures containing `*`, `*args`,
 or `**kwargs`. In this case a trailing comma is only safe to use on Python 3.6. _Black_
 will detect if your file is already 3.6+ only and use trailing commas in this situation.
@@ -262,16 +231,20 @@ in function signatures that have stars in them. In other words, if you'd like a
 comma in this situation and _Black_ didn't recognize it was safe to do so, put it there
 manually and _Black_ will keep it.
 
+A pre-existing trailing comma informs _Black_ to always explode contents of the current
+bracket pair into one item per line. Read more about this in the
+[Pragmatism](#pragmatism) section below.
+
 ### Strings
 
 _Black_ prefers double quotes (`"` and `"""`) over single quotes (`'` and `'''`). It
 will replace the latter with the former as long as it does not result in more backslash
 escapes than before.
 
-_Black_ also standardizes string prefixes, making them always lowercase. On top of that,
-if your code is already Python 3.6+ only or it's using the `unicode_literals` future
-import, _Black_ will remove `u` from the string prefix as it is meaningless in those
-scenarios.
+_Black_ also standardizes string prefixes. Prefix characters are made lowercase with the
+exception of [capital "R" prefixes](#rstrings-and-rstrings), unicode literal markers
+(`u`) are removed because they are meaningless in Python 3, and in the case of multiple
+characters "r" is put first as in spoken language: "raw f-string".
 
 The main reason to standardize on a single form of quotes is aesthetics. Having one kind
 of quotes everywhere reduces reader distraction. It will also enable a future version of
@@ -295,12 +268,18 @@ If you are adopting _Black_ in a large project with pre-existing string conventi
 you can pass `--skip-string-normalization` on the command line. This is meant as an
 adoption helper, avoid using this for new projects.
 
+_Black_ also processes docstrings. Firstly the indentation of docstrings is corrected
+for both quotations and the text within, although relative indentation in the text is
+preserved. Superfluous trailing whitespace on each line and unnecessary newlines at the
+end of the docstring are removed. All leading tabs are converted to spaces, but tabs
+inside text are preserved. Leading and trailing whitespace in one-line docstrings is
+removed.
+
 ### Numeric literals
 
 _Black_ standardizes most numeric literals to use lowercase letters for the syntactic
 parts and uppercase letters for the digits themselves: `0xAB` instead of `0XAB` and
-`1e10` instead of `1E10`. Python 2 long literals are styled as `2L` instead of `2l` to
-avoid confusion between `l` and `1`.
+`1e10` instead of `1E10`.
 
 ### Line breaks & binary operators
 
@@ -309,9 +288,25 @@ multiple lines. This is so that _Black_ is compliant with the recent changes in the
 [PEP 8](https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator)
 style guide, which emphasizes that this approach improves readability.
 
-This behaviour may raise `W503 line break before binary operator` warnings in style
-guide enforcement tools like Flake8. Since `W503` is not PEP 8 compliant, you should
-tell Flake8 to ignore these warnings.
+Almost all operators will be surrounded by single spaces; the only exceptions are unary
+operators (`+`, `-`, and `~`), and power operators when both operands are simple. For
+powers, an operand is considered simple if it's only a NAME, numeric CONSTANT, or
+attribute access (chained attribute access is allowed), with or without a preceding
+unary operator.
+
+```python
+# For example, these won't be surrounded by whitespace
+a = x**y
+b = config.base**5.2
+c = config.base**runtime.config.exponent
+d = 2**5
+e = 2**~5
+
+# ... but these will be surrounded by whitespace
+f = 2 ** get_exponent()
+g = get_x() ** get_y()
+h = config['base'] ** 2
+```
 
 ### Slices
 
@@ -349,7 +344,7 @@ pair of parentheses to form an atom. There are a few interesting cases:
 In those cases, parentheses are removed when the entire statement fits in one line, or
 if the inner expression doesn't have any delimiters to further split on. If there is
 only a single delimiter and the expression starts or ends with a bracket, the
-parenthesis can also be successfully omitted since the existing bracket pair will
+parentheses can also be successfully omitted since the existing bracket pair will
 organize the expression neatly anyway. Otherwise, the parentheses are added.
 
 Please note that _Black_ does not add or remove any additional nested parentheses that
@@ -405,16 +400,16 @@ recommended code style for those files is more terse than PEP 8:
 
 _Black_ enforces the above rules. There are additional guidelines for formatting `.pyi`
 file that are not enforced yet but might be in a future version of the formatter:
 
-- all function bodies should be empty (contain `...` instead of the body);
-- do not use docstrings;
 - prefer `...` over `pass`;
-- for arguments with a default, use `...` instead of the actual default;
 - avoid using string literals in type annotations, stub files support forward
   references natively (like Python 3.7 code with `from __future__ import annotations`);
 - use variable annotations instead of type comments, even for stubs that target older
-  versions of Python;
-- for arguments that default to `None`, use `Optional[]` explicitly;
-- use `float` instead of `Union[int, float]`.
+  versions of Python.
+
+### Line endings
+
+_Black_ will normalize line endings (`\n` or `\r\n`) based on the first line ending of
+the file.
 
 ## Pragmatism
 
@@ -448,6 +443,9 @@ into one item per line. How do you make it stop? Just delete that trailing comma
 and _Black_ will collapse your collection into one line if it fits.
 
+If you must, you can recover the behaviour of early versions of _Black_ with the option
+`--skip-magic-trailing-comma` / `-C`.
+
 ### r"strings" and R"strings"
 
 _Black_ normalizes string quotes as well as string prefixes, making them lowercase. One
@@ -456,3 +454,32 @@ exception to this rule is r-strings. It turns out that the very popular default
 by (among others) GitHub and Visual Studio Code, differentiates between r-strings and
 R-strings. The former are syntax highlighted as regular expressions while the latter
 are treated as true raw strings with no special semantics.
+
+### AST before and after formatting
+
+When run with `--safe`, _Black_ checks that the code before and after is semantically
+equivalent. This check is done by comparing the AST of the source with the AST of the
+target. There are three limited cases in which the AST does differ:
+
+1. _Black_ cleans up leading and trailing whitespace of docstrings, re-indenting them if
+   needed. It's been one of the most popular user-reported features for the formatter to
+   fix whitespace issues with docstrings. While the result is technically an AST
+   difference, due to the various possibilities of forming docstrings, all real-world
+   use of docstrings that we're aware of sanitizes indentation and leading/trailing
+   whitespace anyway.
+
+1. _Black_ manages optional parentheses for some statements. In the case of the `del`
+   statement, presence of wrapping parentheses or lack thereof changes the resulting
+   AST but is semantically equivalent in the interpreter.
+
+1. _Black_ might move comments around, which includes type comments. Those are part of
+   the AST as of Python 3.8. While the tool implements a number of special cases for
+   those comments, there is no guarantee they will remain where they were in the source.
+   Note that this doesn't change runtime behavior of the source code.
+
+To put things in perspective, the code equivalence check is a feature of _Black_ which
+other formatters don't implement at all. It is of crucial importance to us to ensure
+code behaves the way it did before it got reformatted. We treat this as a feature and
+there are no plans to relax this in the future. The exceptions enumerated above stem
+from either user feedback or implementation details of the tool. In each case we did
+our due diligence to ensure that the AST divergence is of no practical consequence.
diff --git a/docs/the_black_code_style/future_style.md b/docs/the_black_code_style/future_style.md
new file mode 100644
index 00000000000..a028a2888ed
--- /dev/null
+++ b/docs/the_black_code_style/future_style.md
@@ -0,0 +1,133 @@
+# The (future of the) Black code style
+
+```{warning}
+Changes to this document often aren't tied to releases of _Black_. It's recommended
+that you read the latest version available.
+```
+
+## Using backslashes for with statements
+
+[Backslashes are bad and should never be used](labels/why-no-backslashes) however there
+is one exception: `with` statements using multiple context managers. Before Python 3.9,
+Python's grammar did not allow organizing parentheses around the series of context
+managers.
+
+We don't want formatting like:
+
+```py3
+with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4:
+    ...  # nothing to split on - line too long
+```
+
+So _Black_ will eventually format it like this:
+
+```py3
+with \
+     make_context_manager1() as cm1, \
+     make_context_manager2() as cm2, \
+     make_context_manager3() as cm3, \
+     make_context_manager4() as cm4 \
+:
+    ...  # backslashes and an ugly stranded colon
+```
+
+When the target version is Python 3.9 or higher, however, _Black_ will use parentheses
+instead, since the grammar allows them there.
+
+An alternative to consider if the backslashes in the above formatting are undesirable is
+to use {external:py:obj}`contextlib.ExitStack` to combine context managers in the
+following way:
+
+```python
+with contextlib.ExitStack() as exit_stack:
+    cm1 = exit_stack.enter_context(make_context_manager1())
+    cm2 = exit_stack.enter_context(make_context_manager2())
+    cm3 = exit_stack.enter_context(make_context_manager3())
+    cm4 = exit_stack.enter_context(make_context_manager4())
+    ...
+```
+
+## Preview style
+
+Experimental, potentially disruptive style changes are gathered under the `--preview`
+CLI flag. At the end of each year, these changes may be adopted into the default style,
+as described in [The Black Code Style](./index.rst). Because the functionality is
+experimental, feedback and issue reports are highly encouraged!
+
+### Improved string processing
+
+_Black_ will split long string literals and merge short ones. Parentheses are used where
+appropriate. When split, parts of f-strings that don't need formatting are converted to
+plain strings. User-made splits are respected when they do not exceed the line length
+limit. Line continuation backslashes are converted into parenthesized strings.
+Unnecessary parentheses are stripped. The stability and status of this feature is
+tracked in [this issue](https://github.com/psf/black/issues/2188).
+
+### Removing newlines at the beginning of code blocks
+
+_Black_ will remove newlines at the beginning of new code blocks, i.e. when the
+indentation level is increased. For example:
+
+```python
+def my_func():
+
+    print("The line above me will be deleted!")
+```
+
+will be changed to:
+
+```python
+def my_func():
+    print("The line above me will be deleted!")
+```
+
+This new feature will be applied to **all code blocks**: `def`, `class`, `if`, `for`,
+`while`, `with`, `case` and `match`.
+
+### Improved parentheses management
+
+_Black_ will format parentheses around return annotations similarly to other sets of
+parentheses. For example:
+
+```python
+def foo() -> (int):
+    ...
+
+def foo() -> looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong:
+    ...
+```
+
+will be changed to:
+
+```python
+def foo() -> int:
+    ...
+
+
+def foo() -> (
+    looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong
+):
+    ...
+```
+
+And, extra parentheses in `await` expressions and `with` statements are removed. For
+example:
+
+```python
+with ((open("bla.txt")) as f, open("x")):
+    ...
+
+async def main():
+    await (asyncio.sleep(1))
+```
+
+will be changed to:
+
+```python
+with open("bla.txt") as f, open("x"):
+    ...
+
+
+async def main():
+    await asyncio.sleep(1)
+```
diff --git a/docs/the_black_code_style/index.md b/docs/the_black_code_style/index.md
new file mode 100644
index 00000000000..e5967be2db4
--- /dev/null
+++ b/docs/the_black_code_style/index.md
@@ -0,0 +1,52 @@
+# The Black Code Style
+
+```{toctree}
+---
+hidden:
+---
+
+Current style <current_style>
+Future style <future_style>
+```
+
+_Black_ is a PEP 8 compliant opinionated formatter with its own style.
+
+While keeping the style unchanged throughout releases has always been a goal, the
+_Black_ code style isn't set in stone. It evolves to accommodate new features in the
+Python language and, occasionally, in response to user feedback. Large-scale style
+preferences presented in {doc}`current_style` are very unlikely to change, but minor
+style aspects and details might change according to the stability policy presented
+below. Ongoing style considerations are tracked on GitHub with the
+[design](https://github.com/psf/black/labels/T%3A%20design) issue label.
+
+(labels/stability-policy)=
+
+## Stability Policy
+
+The following policy applies for the _Black_ code style, in non pre-release versions of
+_Black_:
+
+- If code has been formatted with _Black_, it will remain unchanged when formatted with
+  the same options using any other release in the same calendar year.
+
+  This means projects can safely use `black ~= 22.0` without worrying about formatting
+  changes disrupting their project in 2022. We may still fix bugs where _Black_ crashes
+  on some code, and make other improvements that do not affect formatting.
+
+  In rare cases, we may make changes affecting code that has not been previously
+  formatted with _Black_. For example, we have had bugs where we accidentally removed
+  some comments. Such bugs can be fixed without breaking the stability policy.
+
+- The first release in a new calendar year _may_ contain formatting changes, although
+  these will be minimised as much as possible. This is to allow for improved formatting
+  enabled by newer Python language syntax as well as improvements in the formatting
+  logic.
+
+- The `--preview` flag is exempt from this policy. There are no guarantees around the
+  stability of the output with that flag passed into _Black_. This flag is intended to
+  allow experimentation with the proposed changes to the _Black_ code style.
+
+Documentation for both the current and future styles can be found:
+
+- {doc}`current_style`
+- {doc}`future_style`
diff --git a/docs/blackd.md b/docs/usage_and_configuration/black_as_a_server.md
similarity index 81%
rename from docs/blackd.md
rename to docs/usage_and_configuration/black_as_a_server.md
index c341308e1e4..a2d4252109a 100644
--- a/docs/blackd.md
+++ b/docs/usage_and_configuration/black_as_a_server.md
@@ -1,13 +1,18 @@
-## blackd
+# Black as a server (blackd)
 
 `blackd` is a small HTTP server that exposes _Black_'s functionality over a simple
 protocol. The main benefit of using it is to avoid the cost of starting up a new _Black_
 process every time you want to blacken a file.
 
-### Usage
+```{warning}
+`blackd` should not be run as a publicly accessible server as there are no security
+precautions in place to prevent abuse. **It is intended for local use only**.
+```
+
+## Usage
 
 `blackd` is not packaged alongside _Black_ by default because it has additional
-dependencies. You will need to execute `pip install black[d]` to install it.
+dependencies. You will need to execute `pip install 'black[d]'` to install it.
 
 You can start the server on the default port, binding only to the local interface by
 running `blackd`. You will see a single line mentioning the server's version, and the
@@ -18,14 +23,8 @@ formatting requests.
 
 `blackd` provides even less options than _Black_. You can see them by running
 `blackd --help`:
 
-```text
-Usage: blackd [OPTIONS]
+```{program-output} blackd --help
 
-Options:
-  --bind-host TEXT                Address to bind the server to.
-  --bind-port INTEGER             Port to listen on
-  --version                       Show the version and exit.
-  -h, --help                      Show this message and exit.
 ```
 
-### Protocol
+## Protocol
 
 `blackd` only accepts `POST` requests at the `/` path. The body of the request should
 contain the python source code to be formatted, encoded according to the `charset` field
@@ -54,6 +53,12 @@ The headers controlling how source code is formatted are:
 
 - `X-Skip-String-Normalization`: corresponds to the `--skip-string-normalization`
   command line flag. If present and its value is not the empty string, no string
   normalization will be performed.
+- `X-Skip-Magic-Trailing-Comma`: corresponds to the `--skip-magic-trailing-comma`
+  command line flag. If present and its value is not an empty string, trailing commas
+  will not be used as a reason to split lines.
+- `X-Preview`: corresponds to the `--preview` command line flag. If present and its
+  value is not an empty string, experimental and potentially disruptive style changes
+  will be used.
 - `X-Fast-Or-Safe`: if set to `fast`, `blackd` will act as _Black_ does when passed the
   `--fast` command line flag.
 - `X-Python-Variant`: if set to `pyi`, `blackd` will act as _Black_ does when passed the
diff --git a/docs/usage_and_configuration/black_docker_image.md b/docs/usage_and_configuration/black_docker_image.md
new file mode 100644
index 00000000000..8de566ea270
--- /dev/null
+++ b/docs/usage_and_configuration/black_docker_image.md
@@ -0,0 +1,46 @@
+# Black Docker image
+
+Official _Black_ Docker images are available on
+[Docker Hub](https://hub.docker.com/r/pyfound/black).
+
+_Black_ images with the following tags are available:
+
+- release numbers, e.g. `21.5b2`, `21.6b0`, `21.7b0` etc.\
+  ℹ Recommended for users who want to use a particular version of _Black_.
+- `latest_release` - tag created when a new version of _Black_ is released.\
+  ℹ Recommended for users who want to use released versions of _Black_. It maps to [the latest release](https://github.com/psf/black/releases/latest)
+  of _Black_.
+- `latest` - tag used for the newest image of _Black_.\
+  ℹ Recommended for users who always want to use the latest version of _Black_, even before
+  it is released.
+
+There is one more tag used for _Black_ Docker images - `latest_non_release`. It is
+created for all unreleased
+[commits on the `main` branch](https://github.com/psf/black/commits/main). This tag is
+not meant to be used by external users.
+
+## Usage
+
+A permanent container doesn't have to be created to use _Black_ as a Docker image. It's
+enough to run _Black_ commands against the chosen image, denoted as `:tag`. In the
+below examples, the `latest_release` tag is used. If `:tag` is omitted, the `latest`
+tag will be used.
+
+More about _Black_ usage can be found in
+[Usage and Configuration: The basics](./the_basics.md).
+
+### Check Black version
+
+```console
+$ docker run --rm pyfound/black:latest_release black --version
+```
+
+### Check code
+
+```console
+$ docker run --rm --volume $(pwd):/src --workdir /src pyfound/black:latest_release black --check .
+```
+
+_Remark_: besides [regular _Black_ exit codes](./the_basics.md) returned by the
+`--check` option, [Docker exit codes](https://docs.docker.com/engine/reference/run/#exit-status)
+should also be considered.
diff --git a/docs/usage_and_configuration/file_collection_and_discovery.md b/docs/usage_and_configuration/file_collection_and_discovery.md
new file mode 100644
index 00000000000..de1d5e6c11e
--- /dev/null
+++ b/docs/usage_and_configuration/file_collection_and_discovery.md
@@ -0,0 +1,38 @@
+# File collection and discovery
+
+You can directly pass _Black_ files, but you can also pass directories and _Black_ will
+walk them, collecting files to format. It determines what files to format or skip
+automatically using the inclusion and exclusion regexes, as well as their modification
+time.
+
+## Ignoring unmodified files
+
+_Black_ remembers files it has already formatted, unless the `--diff` flag is used or
+code is passed via standard input. This information is stored per-user. The exact
+location of the file depends on the _Black_ version and the system on which _Black_ is
+run. The file is non-portable. The standard location on common operating systems is:
+
+- Windows:
+  `C:\\Users\<username>\AppData\Local\black\black\Cache\<version>\cache.<line-length>.<file-mode>.pickle`
+- macOS:
+  `/Users/<username>/Library/Caches/black/<version>/cache.<line-length>.<file-mode>.pickle`
+- Linux:
+  `/home/<username>/.cache/black/<version>/cache.<line-length>.<file-mode>.pickle`
+
+`file-mode` is an int flag that determines whether the file was formatted as 3.6+ only,
+as .pyi, and whether string normalization was omitted.
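+
+Under the hood, per-user cache directories like the ones listed above are what
+libraries such as `platformdirs` (one of _Black_'s dependencies) compute. A minimal
+sketch of how such a location could be derived, assuming the `black`/`black` app name
+and author visible in the paths above (an illustration, not _Black_'s exact code):
+
+```python
+from platformdirs import user_cache_dir
+
+# Resolves to the per-user cache directory, e.g. /home/<username>/.cache/black
+# on Linux or ~/Library/Caches/black on macOS.
+print(user_cache_dir("black", "black"))
+```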
+
+To override the location of these files on all systems, set the environment variable
+`BLACK_CACHE_DIR` to the preferred location. Alternatively on macOS and Linux, set
+`XDG_CACHE_HOME` to your preferred location. For example, if you want to put the cache
+in the directory you're running _Black_ from, set `BLACK_CACHE_DIR=.cache/black`.
+_Black_ will then write the above files to `.cache/black`. Note that `BLACK_CACHE_DIR`
+will take precedence over `XDG_CACHE_HOME` if both are set.
+
+## .gitignore
+
+If `--exclude` is not set, _Black_ will automatically ignore files and directories in
+`.gitignore` file(s), if present.
+
+If you want _Black_ to continue using `.gitignore` while also configuring the exclusion
+rules, please use `--extend-exclude`.
diff --git a/docs/usage_and_configuration/index.md b/docs/usage_and_configuration/index.md
new file mode 100644
index 00000000000..1c86a49b686
--- /dev/null
+++ b/docs/usage_and_configuration/index.md
@@ -0,0 +1,28 @@
+# Usage and Configuration
+
+```{toctree}
+---
+hidden:
+---
+
+the_basics
+file_collection_and_discovery
+black_as_a_server
+black_docker_image
+```
+
+Sometimes, running _Black_ with its defaults and passing filepaths to it just won't cut
+it. Passing each file using paths will become burdensome, and maybe you would like
+_Black_ to not touch your files and just output diffs. And yes, you _can_ tweak certain
+parts of _Black_'s style, but please know that configurability in this area is
+purposefully limited.
+
+Using many of these more advanced features of _Black_ will require some configuration.
+Configuration that will either live on the command line or in a TOML configuration file.
+
+This section covers features of _Black_ and configuring _Black_ in detail:
+
+- {doc}`The basics <./the_basics>`
+- {doc}`File collection and discovery <file_collection_and_discovery>`
+- {doc}`Black as a server (blackd) <./black_as_a_server>`
+- {doc}`Black Docker image <./black_docker_image>`
diff --git a/docs/usage_and_configuration/the_basics.md b/docs/usage_and_configuration/the_basics.md
new file mode 100644
index 00000000000..aa176c4ba3f
--- /dev/null
+++ b/docs/usage_and_configuration/the_basics.md
@@ -0,0 +1,297 @@
+# The basics
+
+Foundational knowledge on using and configuring Black.
+
+_Black_ is a well-behaved Unix-style command-line tool:
+
+- it does nothing if it finds no sources to format;
+- it will read from standard input and write to standard output if `-` is used as the
+  filename;
+- it only outputs messages to users on standard error;
+- exits with code 0 unless an internal error occurred or a CLI option prompted it.
+
+## Usage
+
+To get started right away with sensible defaults:
+
+```sh
+black {source_file_or_directory}
+```
+
+You can run _Black_ as a package if running it as a script doesn't work:
+
+```sh
+python -m black {source_file_or_directory}
+```
+
+### Command line options
+
+The CLI options of _Black_ can be displayed by expanding the view below or by running
+`black --help`. While _Black_ has quite a few knobs these days, it is still opinionated
+so style options are deliberately limited and rarely added.
+
+<details>
+
+<summary>CLI reference</summary>
+
+```{program-output} black --help
+
+```
+
+</details>
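+
+If you would rather call _Black_ from Python than from the command line, the same
+formatting is also reachable programmatically. A minimal sketch using the documented
+`format_str` API (output details may vary slightly between versions):
+
+```python
+import black
+
+code = "print ( 'hello, world' )"
+# format_str returns the reformatted source as a string.
+print(black.format_str(code, mode=black.Mode()), end="")
+# print("hello, world")
+```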
+
+### Code input alternatives
+
+#### Standard Input
+
+_Black_ supports formatting code via stdin, with the result being printed to stdout.
+Just let _Black_ know with `-` as the path.
+
+```console
+$ echo "print ( 'hello, world' )" | black -
+print("hello, world")
+reformatted -
+All done! ✨ 🍰 ✨
+1 file reformatted.
+```
+
+**Tip:** if you need _Black_ to treat stdin input as a file passed directly via the CLI,
+use `--stdin-filename`. This is useful for making sure _Black_ will respect the
+`--force-exclude` option in editors that rely on using stdin.
+
+#### As a string
+
+You can also pass code as a string using the `-c` / `--code` option.
+
+```console
+$ black --code "print ( 'hello, world' )"
+print("hello, world")
+```
+
+### Writeback and reporting
+
+By default _Black_ reformats the files given and/or found in place. Sometimes you need
+_Black_ to just tell you what it _would_ do without actually rewriting the Python files.
+
+There are two variations to this mode that are independently enabled by their respective
+flags. Both variations can be enabled at once.
+
+#### Exit code
+
+Passing `--check` will make _Black_ exit with:
+
+- code 0 if nothing would change;
+- code 1 if some files would be reformatted; or
+- code 123 if there was an internal error.
+
+```console
+$ black test.py --check
+All done! ✨ 🍰 ✨
+1 file would be left unchanged.
+$ echo $?
+0
+
+$ black test.py --check
+would reformat test.py
+Oh no! 💥 💔 💥
+1 file would be reformatted.
+$ echo $?
+1
+
+$ black test.py --check
+error: cannot format test.py: INTERNAL ERROR: Black produced code that is not equivalent to the source. Please report a bug on https://github.com/psf/black/issues. This diff might be helpful: /tmp/blk_kjdr1oog.log
+Oh no! 💥 💔 💥
+1 file would fail to reformat.
+$ echo $?
+123
+```
+
+#### Diffs
+
+Passing `--diff` will make _Black_ print out diffs that indicate what changes _Black_
+would've made. They are printed to stdout so capturing them is simple.
+
+If you'd like colored diffs, you can enable them with the `--color` flag.
+
+```console
+$ black test.py --diff
+--- test.py	2021-03-08 22:23:40.848954 +0000
++++ test.py	2021-03-08 22:23:47.126319 +0000
+@@ -1 +1 @@
+-print ( 'hello, world' )
++print("hello, world")
+would reformat test.py
+All done! ✨ 🍰 ✨
+1 file would be reformatted.
+```
+
+### Output verbosity
+
+_Black_ in general tries to produce the right amount of output, balancing between
+usefulness and conciseness. By default, _Black_ emits files modified and error messages,
+plus a short summary.
+
+```console
+$ black src/
+error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio
+reformatted src/black_primer/lib.py
+reformatted src/blackd/__init__.py
+reformatted src/black/__init__.py
+Oh no! 💥 💔 💥
+3 files reformatted, 2 files left unchanged, 1 file failed to reformat.
+```
+
+Passing `-v` / `--verbose` will cause _Black_ to also emit messages about files that
+were not changed or were ignored due to exclusion patterns. If _Black_ is using a
+configuration file, a blue message detailing which one it is using will be emitted.
+
+```console
+$ black src/ -v
+Using configuration from /tmp/pyproject.toml.
+src/blib2to3 ignored: matches the --extend-exclude regular expression
+src/_black_version.py wasn't modified on disk since last run.
+src/black/__main__.py wasn't modified on disk since last run.
+error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio
+reformatted src/black_primer/lib.py
+reformatted src/blackd/__init__.py
+reformatted src/black/__init__.py
+Oh no! 💥 💔 💥
+3 files reformatted, 2 files left unchanged, 1 file failed to reformat
+```
+
+Passing `-q` / `--quiet` will cause _Black_ to stop emitting all non-critical output.
+Error messages will still be emitted (which can be silenced by `2>/dev/null`).
+
+```console
+$ black src/ -q
+error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio
+```
+
+### Versions
+
+You can check the version of _Black_ you have installed using the `--version` flag.
+
+```console
+$ black --version
+black, version 22.8.0
+```
+
+An option to require a specific version to be running is also provided.
+
+```console
+$ black --required-version 21.9b0 -c "format = 'this'"
+format = "this"
+$ black --required-version 31.5b2 -c "still = 'beta?!'"
+Oh no! 💥 💔 💥 The required version does not match the running version!
+```
+
+This is useful for example when running _Black_ in multiple environments that haven't
+necessarily installed the correct version. This option can be set in a configuration
+file for consistent results across environments.
+
+## Configuration via a file
+
+_Black_ is able to read project-specific default values for its command line options
+from a `pyproject.toml` file. This is especially useful for specifying custom
+`--include` and `--exclude`/`--force-exclude`/`--extend-exclude` patterns for your
+project.
+
+**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is
+"No". _Black_ is all about sensible defaults. Applying those defaults will have your
+code in compliance with many other _Black_ formatted projects.
+
+### What on Earth is a `pyproject.toml` file?
+
+[PEP 518](https://www.python.org/dev/peps/pep-0518/) defines `pyproject.toml` as a
+configuration file to store build system requirements for Python projects. With the help
+of tools like [Poetry](https://python-poetry.org/),
+[Flit](https://flit.readthedocs.io/en/latest/), or
+[Hatch](https://hatch.pypa.io/latest/) it can fully replace the need for `setup.py` and
+`setup.cfg` files.
+
+### Where _Black_ looks for the file
+
+By default _Black_ looks for `pyproject.toml` starting from the common base directory of
+all files and directories passed on the command line. If it's not there, it looks in
+parent directories. It stops looking when it finds the file, or a `.git` directory, or a
+`.hg` directory, or the root of the file system, whichever comes first.
+
+If you're formatting standard input, _Black_ will look for configuration starting from
+the current working directory.
+
+You can use a "global" configuration, stored in a specific location in your home
+directory. This will be used as a fallback configuration, that is, it will be used if
+and only if _Black_ doesn't find any configuration as mentioned above. Depending on your
+operating system, this configuration file should be stored as:
+
+- Windows: `~\.black`
+- Unix-like (Linux, MacOS, etc.): `$XDG_CONFIG_HOME/black` (`~/.config/black` if the
+  `XDG_CONFIG_HOME` environment variable is not set)
+
+Note that these are paths to the TOML file itself (meaning that they shouldn't be named
+as `pyproject.toml`), not directories where you store the configuration. Here, `~`
+refers to the path to your home directory. On Windows, this will be something like
+`C:\\Users\UserName`.
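+
+As a rough sketch, the fallback lookup on Unix-like systems amounts to something like
+the following (a hypothetical helper for illustration only, not _Black_'s actual
+implementation):
+
+```python
+import os
+from pathlib import Path
+
+
+def user_config_path() -> Path:
+    # ~/.config/black unless XDG_CONFIG_HOME points somewhere else.
+    xdg = os.environ.get("XDG_CONFIG_HOME", "~/.config")
+    return Path(xdg).expanduser() / "black"
+```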
+
+You can also explicitly specify the path to a particular file that you want with
+`--config`. In this situation _Black_ will not look for any other file.
+
+If you're running with `--verbose`, you will see a blue message if a file was found and
+used.
+
+Please note `blackd` will not use `pyproject.toml` configuration.
+
+### Configuration format
+
+As the file extension suggests, `pyproject.toml` is a
+[TOML](https://github.com/toml-lang/toml) file. It contains separate sections for
+different tools. _Black_ is using the `[tool.black]` section. The option keys are the
+same as long names of options on the command line.
+
+Note that you have to use single-quoted strings in TOML for regular expressions. It's
+the equivalent of r-strings in Python. Multiline strings are treated as verbose regular
+expressions by Black. Use `[ ]` to denote a significant space character.
+
+<details>
+
+<summary>Example pyproject.toml</summary>
+
+```toml
+[tool.black]
+line-length = 88
+target-version = ['py37']
+include = '\.pyi?$'
+# 'extend-exclude' excludes files or directories in addition to the defaults
+extend-exclude = '''
+# A regex preceded with ^/ will apply only to files and directories
+# in the root of the project.
+(
+  ^/foo.py    # exclude a file named foo.py in the root of the project
+  | .*_pb2.py  # exclude autogenerated Protocol Buffer files anywhere in the project
+)
+'''
+```
+
+</details>
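+
+Reading that table yourself takes only a few lines. A minimal sketch, assuming the
+`tomli` library (`tomllib` in the Python 3.11+ standard library) rather than _Black_'s
+internal config handling:
+
+```python
+import tomli
+
+# TOML files must be opened in binary mode for tomli/tomllib.
+with open("pyproject.toml", "rb") as f:
+    config = tomli.load(f).get("tool", {}).get("black", {})
+print(config.get("line-length", 88))  # falls back to Black's default
+```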
+ +### Lookup hierarchy + +Command-line options have defaults that you can see in `--help`. A `pyproject.toml` can +override those defaults. Finally, options provided by the user on the command line +override both. + +_Black_ will only ever use one `pyproject.toml` file during an entire run. It doesn't +look for multiple files, and doesn't compose configuration from different levels of the +file hierarchy. + +## Next steps + +You've probably noted that not all of the options you can pass to _Black_ have been +covered. Don't worry, the rest will be covered in a later section. + +A good next step would be configuring auto-discovery so `black .` is all you need +instead of laborously listing every file or directory. You can get started by heading +over to [File collection and discovery](./file_collection_and_discovery.md). + +Another good choice would be setting up an +[integration with your editor](../integrations/editors.md) of choice or with +[pre-commit for source version control](../integrations/source_version_control.md). diff --git a/gallery/gallery.py b/gallery/gallery.py index 6b42ec3a6d4..38e52e34795 100755 --- a/gallery/gallery.py +++ b/gallery/gallery.py @@ -10,26 +10,16 @@ from concurrent.futures import ThreadPoolExecutor from functools import lru_cache, partial from pathlib import Path -from typing import ( # type: ignore # typing can't see Literal - Generator, - List, - Literal, - NamedTuple, - Optional, - Tuple, - Union, - cast, -) +from typing import Generator, List, NamedTuple, Optional, Tuple, Union, cast from urllib.request import urlopen, urlretrieve PYPI_INSTANCE = "https://pypi.org/pypi" PYPI_TOP_PACKAGES = ( - "https://hugovk.github.io/top-pypi-packages/top-pypi-packages-{days}-days.json" + "https://hugovk.github.io/top-pypi-packages/top-pypi-packages-30-days.min.json" ) INTERNAL_BLACK_REPO = f"{tempfile.gettempdir()}/__black" ArchiveKind = Union[tarfile.TarFile, zipfile.ZipFile] -Days = Union[Literal[30], Literal[365]] subprocess.run = partial(subprocess.run, check=True) # type: ignore # https://github.com/python/mypy/issues/1484 @@ -64,8 +54,8 @@ def get_pypi_download_url(package: str, version: Optional[str]) -> str: return cast(str, source["url"]) -def get_top_packages(days: Days) -> List[str]: - with urlopen(PYPI_TOP_PACKAGES.format(days=days)) as page: +def get_top_packages() -> List[str]: + with urlopen(PYPI_TOP_PACKAGES) as page: result = json.load(page) return [package["project"] for package in result["rows"]] @@ -74,7 +64,7 @@ def get_top_packages(days: Days) -> List[str]: def get_package_source(package: str, version: Optional[str]) -> str: if package == "cpython": if version is None: - version = "master" + version = "main" return f"https://github.com/python/cpython/archive/{version}.zip" elif package == "pypy": if version is None: @@ -128,13 +118,12 @@ def get_package( def download_and_extract_top_packages( directory: Path, - days: Days = 365, workers: int = 8, limit: slice = DEFAULT_SLICE, ) -> Generator[Path, None, None]: with ThreadPoolExecutor(max_workers=workers) as executor: bound_downloader = partial(get_package, version=None, directory=directory) - for package in executor.map(bound_downloader, get_top_packages(days)[limit]): + for package in executor.map(bound_downloader, get_top_packages()[limit]): if package is not None: yield package @@ -248,9 +237,9 @@ def format_repos(repos: Tuple[Path, ...], options: Namespace) -> None: black_version=black_version, input_directory=options.input, ) - git_switch_branch("master", repo=repo) + 
git_switch_branch("main", repo=repo) - git_switch_branch("master", repo=options.black_repo) + git_switch_branch("main", repo=options.black_repo) def main() -> None: @@ -296,7 +285,7 @@ def main() -> None: type=Path, help="Output directory to download and put result artifacts.", ) - parser.add_argument("versions", nargs="*", default=("master",), help="") + parser.add_argument("versions", nargs="*", default=("main",), help="") options = parser.parse_args() repos = init_repos(options) diff --git a/mypy.ini b/mypy.ini index 295bab4e302..4811cc0be76 100644 --- a/mypy.ini +++ b/mypy.ini @@ -3,36 +3,44 @@ # free to run mypy on Windows, Linux, or macOS and get consistent # results. python_version=3.6 -platform=linux -# flake8-mypy expects the two following for sensible formatting +mypy_path=src + show_column_numbers=True +show_error_codes=True -# show error messages from unrelated files -follow_imports=normal +# be strict +strict=True -# suppress errors about unsatisfied imports -ignore_missing_imports=True +# except for... +no_implicit_reexport = False -# be strict -disallow_untyped_calls=True -warn_return_any=True -strict_optional=True -warn_no_return=True -warn_redundant_casts=True -warn_unused_ignores=True -# Until we're not supporting 3.6 primer needs this -disallow_any_generics=False - -# The following are off by default. Flip them on if you feel -# adventurous. -disallow_untyped_defs=True -check_untyped_defs=True - -# No incremental mode -cache_dir=/dev/null - -[mypy-aiohttp.*] -follow_imports=skip -[mypy-_version] -follow_imports=skip +# Unreachable blocks have been an issue when compiling mypyc, let's try +# to avoid 'em in the first place. +warn_unreachable=True + +[mypy-black] +# The following is because of `patch_click()`. Remove when +# we drop Python 3.6 support. +warn_unused_ignores=False + +[mypy-blib2to3.driver.*] +ignore_missing_imports = True + +[mypy-IPython.*] +ignore_missing_imports = True + +[mypy-colorama.*] +ignore_missing_imports = True + +[mypy-pathspec.*] +ignore_missing_imports = True + +[mypy-tokenize_rt.*] +ignore_missing_imports = True + +[mypy-uvloop.*] +ignore_missing_imports = True + +[mypy-_black_version.*] +ignore_missing_imports = True diff --git a/plugin/black.vim b/plugin/black.vim index 4af044e7a68..fb70424b0ef 100644 --- a/plugin/black.vim +++ b/plugin/black.vim @@ -2,7 +2,7 @@ " Author: Łukasz Langa " Created: Mon Mar 26 23:27:53 2018 -0700 " Requires: Vim Ver7.0+ -" Version: 1.1 +" Version: 1.2 " " Documentation: " This plugin formats Python files. @@ -12,6 +12,12 @@ " - initial version " 1.1: " - restore cursor/window position after formatting +" 1.2: +" - use autoload script + +if exists("g:load_black") + finish +endif if v:version < 700 || !has('python3') func! 
__BLACK_MISSING() @@ -23,10 +29,6 @@ if v:version < 700 || !has('python3') finish endif -if exists("g:load_black") - finish -endif - let g:load_black = "py1.0" if !exists("g:black_virtualenv") if has("nvim") @@ -42,167 +44,40 @@ if !exists("g:black_linelength") let g:black_linelength = 88 endif if !exists("g:black_skip_string_normalization") - let g:black_skip_string_normalization = 0 + if exists("g:black_string_normalization") + let g:black_skip_string_normalization = !g:black_string_normalization + else + let g:black_skip_string_normalization = 0 + endif +endif +if !exists("g:black_skip_magic_trailing_comma") + if exists("g:black_magic_trailing_comma") + let g:black_skip_magic_trailing_comma = !g:black_magic_trailing_comma + else + let g:black_skip_magic_trailing_comma = 0 + endif +endif +if !exists("g:black_quiet") + let g:black_quiet = 0 +endif +if !exists("g:black_target_version") + let g:black_target_version = "" +endif +if !exists("g:black_preview") + let g:black_preview = 0 endif -python3 << EndPython3 -import collections -import os -import sys -import vim - - -class Flag(collections.namedtuple("FlagBase", "name, cast")): - @property - def var_name(self): - return self.name.replace("-", "_") - - @property - def vim_rc_name(self): - name = self.var_name - if name == "line_length": - name = name.replace("_", "") - if name == "string_normalization": - name = "skip_" + name - return "g:black_" + name - - -FLAGS = [ - Flag(name="line_length", cast=int), - Flag(name="fast", cast=bool), - Flag(name="string_normalization", cast=bool), -] - - -def _get_python_binary(exec_prefix): - try: - default = vim.eval("g:pymode_python").strip() - except vim.error: - default = "" - if default and os.path.exists(default): - return default - if sys.platform[:3] == "win": - return exec_prefix / 'python.exe' - return exec_prefix / 'bin' / 'python3' - -def _get_pip(venv_path): - if sys.platform[:3] == "win": - return venv_path / 'Scripts' / 'pip.exe' - return venv_path / 'bin' / 'pip' - -def _get_virtualenv_site_packages(venv_path, pyver): - if sys.platform[:3] == "win": - return venv_path / 'Lib' / 'site-packages' - return venv_path / 'lib' / f'python{pyver[0]}.{pyver[1]}' / 'site-packages' - -def _initialize_black_env(upgrade=False): - pyver = sys.version_info[:2] - if pyver < (3, 6): - print("Sorry, Black requires Python 3.6+ to run.") - return False - - from pathlib import Path - import subprocess - import venv - virtualenv_path = Path(vim.eval("g:black_virtualenv")).expanduser() - virtualenv_site_packages = str(_get_virtualenv_site_packages(virtualenv_path, pyver)) - first_install = False - if not virtualenv_path.is_dir(): - print('Please wait, one time setup for Black.') - _executable = sys.executable - _base_executable = getattr(sys, "_base_executable", _executable) - try: - executable = str(_get_python_binary(Path(sys.exec_prefix))) - sys.executable = executable - sys._base_executable = executable - print(f'Creating a virtualenv in {virtualenv_path}...') - print('(this path can be customized in .vimrc by setting g:black_virtualenv)') - venv.create(virtualenv_path, with_pip=True) - except Exception: - print('Encountered exception while creating virtualenv (see traceback below).') - print(f'Removing {virtualenv_path}...') - import shutil - shutil.rmtree(virtualenv_path) - raise - finally: - sys.executable = _executable - sys._base_executable = _base_executable - first_install = True - if first_install: - print('Installing Black with pip...') - if upgrade: - print('Upgrading Black with pip...') - if 
first_install or upgrade: - subprocess.run([str(_get_pip(virtualenv_path)), 'install', '-U', 'black'], stdout=subprocess.PIPE) - print('DONE! You are all set, thanks for waiting ✨ 🍰 ✨') - if first_install: - print('Pro-tip: to upgrade Black in the future, use the :BlackUpgrade command and restart Vim.\n') - if virtualenv_site_packages not in sys.path: - sys.path.insert(0, virtualenv_site_packages) - return True - -if _initialize_black_env(): - import black - import time - -def Black(): - start = time.time() - configs = get_configs() - mode = black.FileMode( - line_length=configs["line_length"], - string_normalization=configs["string_normalization"], - is_pyi=vim.current.buffer.name.endswith('.pyi'), - ) - - buffer_str = '\n'.join(vim.current.buffer) + '\n' - try: - new_buffer_str = black.format_file_contents( - buffer_str, - fast=configs["fast"], - mode=mode, - ) - except black.NothingChanged: - print(f'Already well formatted, good job. (took {time.time() - start:.4f}s)') - except Exception as exc: - print(exc) - else: - current_buffer = vim.current.window.buffer - cursors = [] - for i, tabpage in enumerate(vim.tabpages): - if tabpage.valid: - for j, window in enumerate(tabpage.windows): - if window.valid and window.buffer == current_buffer: - cursors.append((i, j, window.cursor)) - vim.current.buffer[:] = new_buffer_str.split('\n')[:-1] - for i, j, cursor in cursors: - window = vim.tabpages[i].windows[j] - try: - window.cursor = cursor - except vim.error: - window.cursor = (len(window.buffer), 0) - print(f'Reformatted in {time.time() - start:.4f}s.') - -def get_configs(): - path_pyproject_toml = black.find_pyproject_toml(vim.eval("fnamemodify(getcwd(), ':t')")) - if path_pyproject_toml: - toml_config = black.parse_pyproject_toml(path_pyproject_toml) - else: - toml_config = {} - - return { - flag.var_name: flag.cast(toml_config.get(flag.name, vim.eval(flag.vim_rc_name))) - for flag in FLAGS - } - - -def BlackUpgrade(): - _initialize_black_env(upgrade=True) - -def BlackVersion(): - print(f'Black, version {black.__version__} on Python {sys.version}.') - -EndPython3 - -command! Black :py3 Black() -command! BlackUpgrade :py3 BlackUpgrade() -command! BlackVersion :py3 BlackVersion() +function BlackComplete(ArgLead, CmdLine, CursorPos) + return [ +\ 'target_version=py27', +\ 'target_version=py36', +\ 'target_version=py37', +\ 'target_version=py38', +\ 'target_version=py39', +\ 'target_version=py310', +\ ] +endfunction + +command! -nargs=* -complete=customlist,BlackComplete Black :call black#Black() +command! BlackUpgrade :call black#BlackUpgrade() +command! BlackVersion :call black#BlackVersion() diff --git a/pyproject.toml b/pyproject.toml index 9d4da0bf692..554d7d07bf3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,32 +7,210 @@ [tool.black] line-length = 88 -target-version = ['py36', 'py37', 'py38'] +target-version = ['py37', 'py38'] include = '\.pyi?$' -exclude = ''' +extend-exclude = ''' /( - \.eggs - | \.git - | \.hg - | \.mypy_cache - | \.tox - | \.venv - | _build - | buck-out - | build - | dist - # The following are specific to Black, you probably don't want those. | blib2to3 | tests/data | profiling )/ ''' +# We use preview style for formatting Black itself. If you +# want stable formatting across releases, you should keep +# this off. +preview = true - -# Build system information below. +# Build system information and other project-specific configuration below. # NOTE: You don't need this in your own Black configuration. 
[build-system] -requires = ["setuptools>=41.0", "setuptools-scm", "wheel"] -build-backend = "setuptools.build_meta" +requires = ["hatchling>=1.8.0", "hatch-vcs", "hatch-fancy-pypi-readme"] +build-backend = "hatchling.build" + +[project] +name = "black" +description = "The uncompromising code formatter." +license = "MIT" +requires-python = ">=3.7" +authors = [ + { name = "Łukasz Langa", email = "lukasz@langa.pl" }, +] +keywords = [ + "automation", + "autopep8", + "formatter", + "gofmt", + "pyfmt", + "rustfmt", + "yapf", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Software Development :: Quality Assurance", +] +dependencies = [ + "click>=8.0.0", + "mypy_extensions>=0.4.3", + "pathspec>=0.9.0", + "platformdirs>=2", + "tomli>=1.1.0; python_full_version < '3.11.0a7'", + "typed-ast>=1.4.2; python_version < '3.8' and implementation_name == 'cpython'", + "typing_extensions>=3.10.0.0; python_version < '3.10'", +] +dynamic = ["readme", "version"] + +[project.optional-dependencies] +colorama = ["colorama>=0.4.3"] +uvloop = ["uvloop>=0.15.2"] +d = [ + "aiohttp>=3.7.4", +] +jupyter = [ + "ipython>=7.8.0", + "tokenize-rt>=3.2.0", +] + +[project.scripts] +black = "black:patched_main" +blackd = "blackd:patched_main [d]" + +[project.urls] +Changelog = "https://github.com/psf/black/blob/main/CHANGES.md" +Homepage = "https://github.com/psf/black" + +[tool.hatch.metadata.hooks.fancy-pypi-readme] +content-type = "text/markdown" +fragments = [ + { path = "README.md" }, + { path = "CHANGES.md" }, +] + +[tool.hatch.version] +source = "vcs" + +[tool.hatch.build.hooks.vcs] +version-file = "src/_black_version.py" +template = ''' +version = "{version}" +''' + +[tool.hatch.build.targets.sdist] +exclude = ["/profiling"] + +[tool.hatch.build.targets.wheel] +only-include = ["src"] +sources = ["src"] + +[tool.hatch.build.targets.wheel.hooks.mypyc] +enable-by-default = false +dependencies = [ + "hatch-mypyc>=0.13.0", + "mypy==0.971", + # Required stubs to be removed when the packages support PEP 561 themselves + "types-typed-ast>=1.4.2", +] +require-runtime-dependencies = true +exclude = [ + # There's no good reason for blackd to be compiled. 
+ "/src/blackd", + # Not performance sensitive, so save bytes + compilation time: + "/src/blib2to3/__init__.py", + "/src/blib2to3/pgen2/__init__.py", + "/src/black/output.py", + "/src/black/concurrency.py", + "/src/black/files.py", + "/src/black/report.py", + # Breaks the test suite when compiled (and is also useless): + "/src/black/debug.py", + # Compiled modules can't be run directly and that's a problem here: + "/src/black/__main__.py", +] +options = { debug_level = "0" } + +[tool.cibuildwheel] +build-verbosity = 1 +# So these are the environments we target: +# - Python: CPython 3.7+ only +# - Architecture (64-bit only): amd64 / x86_64, universal2, and arm64 +# - OS: Linux (no musl), Windows, and macOS +build = "cp3*-*" +skip = ["*-manylinux_i686", "*-musllinux_*", "*-win32", "pp-*"] +# This is the bare minimum needed to run the test suite. Pulling in the full +# test_requirements.txt would download a bunch of other packages not necessary +# here and would slow down the testing step a fair bit. +test-requires = ["pytest>=6.1.1"] +test-command = 'pytest {project} -k "not incompatible_with_mypyc"' +test-extras = ["d"," jupyter"] +# Skip trying to test arm64 builds on Intel Macs. (so cross-compilation doesn't +# straight up crash) +test-skip = ["*-macosx_arm64", "*-macosx_universal2:arm64"] + +[tool.cibuildwheel.environment] +HATCH_BUILD_HOOKS_ENABLE = "1" +MYPYC_OPT_LEVEL = "3" +MYPYC_DEBUG_LEVEL = "0" +# CPython 3.11 wheels aren't available for aiohttp and building a Cython extension +# from source also doesn't work. +AIOHTTP_NO_EXTENSIONS = "1" + +[tool.cibuildwheel.linux] +before-build = [ + "yum install -y clang gcc", +] + +[tool.cibuildwheel.linux.environment] +HATCH_BUILD_HOOKS_ENABLE = "1" +MYPYC_OPT_LEVEL = "3" +MYPYC_DEBUG_LEVEL = "0" +# Black needs Clang to compile successfully on Linux. +CC = "clang" +AIOHTTP_NO_EXTENSIONS = "1" + +[tool.isort] +atomic = true +profile = "black" +line_length = 88 +skip_gitignore = true +skip_glob = ["src/blib2to3", "tests/data", "profiling"] +known_first_party = ["black", "blib2to3", "blackd", "_black_version"] + +[tool.pytest.ini_options] +# Option below requires `tests/optional.py` +addopts = "--strict-config --strict-markers" +optional-tests = [ + "no_blackd: run when `d` extra NOT installed", + "no_jupyter: run when `jupyter` extra NOT installed", +] +markers = [ + "incompatible_with_mypyc: run when testing mypyc compiled black" +] +xfail_strict = true +filterwarnings = [ + "error", + # this is mitigated by a try/catch in https://github.com/psf/black/pull/2974/ + # this ignore can be removed when support for aiohttp 3.7 is dropped. + '''ignore:Decorator `@unittest_run_loop` is no longer needed in aiohttp 3\.8\+:DeprecationWarning''', + # this is mitigated by a try/catch in https://github.com/psf/black/pull/3198/ + # this ignore can be removed when support for aiohttp 3.x is dropped. + '''ignore:Middleware decorator is deprecated since 4\.0 and its behaviour is default, you can simply remove this decorator:DeprecationWarning''', + # this is mitigated by https://github.com/python/cpython/issues/79071 in python 3.8+ + # this ignore can be removed when support for 3.7 is dropped. 
+ '''ignore:Bare functions are deprecated, use async ones:DeprecationWarning''', + # aiohttp is using deprecated cgi modules - Safe to remove when fixed: + # https://github.com/aio-libs/aiohttp/issues/6905 + '''ignore:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning''', +] diff --git a/readthedocs.yml b/readthedocs.yml deleted file mode 100644 index 15065033d0f..00000000000 --- a/readthedocs.yml +++ /dev/null @@ -1,7 +0,0 @@ -version: 2 -python: - version: 3.8 - install: - - requirements: docs/requirements.txt - - method: setuptools - path: . diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/scripts/check_pre_commit_rev_in_example.py b/scripts/check_pre_commit_rev_in_example.py new file mode 100644 index 00000000000..9560b3b8401 --- /dev/null +++ b/scripts/check_pre_commit_rev_in_example.py @@ -0,0 +1,54 @@ +""" +Check that the rev value in the example pre-commit configuration matches +the latest version of Black. This saves us from forgetting to update that +during the release process. + +Why can't we just use `rev: stable` and call it a day? Well pre-commit +won't auto update the hook as you may expect (and for good reasons, some +technical and some pragmatic). Encouraging bad practice is also just +not ideal. xref: https://github.com/psf/black/issues/420 +""" + +import os +import sys + +import commonmark +import yaml +from bs4 import BeautifulSoup + + +def main(changes: str, source_version_control: str) -> None: + changes_html = commonmark.commonmark(changes) + changes_soup = BeautifulSoup(changes_html, "html.parser") + headers = changes_soup.find_all("h2") + latest_tag, *_ = [ + header.string for header in headers if header.string != "Unreleased" + ] + + source_version_control_html = commonmark.commonmark(source_version_control) + source_version_control_soup = BeautifulSoup( + source_version_control_html, "html.parser" + ) + pre_commit_repos = yaml.safe_load( + source_version_control_soup.find(class_="language-yaml").string + )["repos"] + + for repo in pre_commit_repos: + pre_commit_rev = repo["rev"] + if not pre_commit_rev == latest_tag: + print( + "Please set the rev in ``source_version_control.md`` to be the latest " + f"one.\nExpected {latest_tag}, got {pre_commit_rev}.\n" + ) + sys.exit(1) + + +if __name__ == "__main__": + with open("CHANGES.md", encoding="utf-8") as fd: + changes = fd.read() + with open( + os.path.join("docs", "integrations", "source_version_control.md"), + encoding="utf-8", + ) as fd: + source_version_control = fd.read() + main(changes, source_version_control) diff --git a/scripts/check_version_in_basics_example.py b/scripts/check_version_in_basics_example.py new file mode 100644 index 00000000000..c62780d97ab --- /dev/null +++ b/scripts/check_version_in_basics_example.py @@ -0,0 +1,47 @@ +""" +Check that the rev value in the example from ``the_basics.md`` matches +the latest version of Black. This saves us from forgetting to update that +during the release process. 
+""" + +import os +import sys + +import commonmark +from bs4 import BeautifulSoup + + +def main(changes: str, the_basics: str) -> None: + changes_html = commonmark.commonmark(changes) + changes_soup = BeautifulSoup(changes_html, "html.parser") + headers = changes_soup.find_all("h2") + tags = [header.string for header in headers if header.string != "Unreleased"] + latest_tag = tags[0] + + the_basics_html = commonmark.commonmark(the_basics) + the_basics_soup = BeautifulSoup(the_basics_html, "html.parser") + (version_example,) = [ + code_block.string + for code_block in the_basics_soup.find_all(class_="language-console") + if "$ black --version" in code_block.string + ] + + for tag in tags: + if tag in version_example and tag != latest_tag: + print( + "Please set the version in the ``black --version`` " + "example from ``the_basics.md`` to be the latest one.\n" + f"Expected {latest_tag}, got {tag}.\n" + ) + sys.exit(1) + + +if __name__ == "__main__": + with open("CHANGES.md", encoding="utf-8") as fd: + changes = fd.read() + with open( + os.path.join("docs", "usage_and_configuration", "the_basics.md"), + encoding="utf-8", + ) as fd: + the_basics = fd.read() + main(changes, the_basics) diff --git a/scripts/diff_shades_gha_helper.py b/scripts/diff_shades_gha_helper.py new file mode 100644 index 00000000000..b5fea5a817d --- /dev/null +++ b/scripts/diff_shades_gha_helper.py @@ -0,0 +1,227 @@ +"""Helper script for psf/black's diff-shades Github Actions integration. + +diff-shades is a tool for analyzing what happens when you run Black on +OSS code capturing it for comparisons or other usage. It's used here to +help measure the impact of a change *before* landing it (in particular +posting a comment on completion for PRs). + +This script exists as a more maintainable alternative to using inline +Javascript in the workflow YAML files. The revision configuration and +resolving, caching, and PR comment logic is contained here. 
+ +For more information, please see the developer docs: + +https://black.readthedocs.io/en/latest/contributing/gauging_changes.html#diff-shades +""" + +import json +import os +import platform +import pprint +import subprocess +import sys +import zipfile +from io import BytesIO +from pathlib import Path +from typing import Any + +import click +import urllib3 +from packaging.version import Version + +if sys.version_info >= (3, 8): + from typing import Final, Literal +else: + from typing_extensions import Final, Literal + +COMMENT_FILE: Final = ".pr-comment.json" +DIFF_STEP_NAME: Final = "Generate HTML diff report" +DOCS_URL: Final = ( + "https://black.readthedocs.io/en/latest/" + "contributing/gauging_changes.html#diff-shades" +) +USER_AGENT: Final = f"psf/black diff-shades workflow via urllib3/{urllib3.__version__}" +SHA_LENGTH: Final = 10 +GH_API_TOKEN: Final = os.getenv("GITHUB_TOKEN") +REPO: Final = os.getenv("GITHUB_REPOSITORY", default="psf/black") +http = urllib3.PoolManager() + + +def set_output(name: str, value: str) -> None: + if len(value) < 200: + print(f"[INFO]: setting '{name}' to '{value}'") + else: + print(f"[INFO]: setting '{name}' to [{len(value)} chars]") + print(f"::set-output name={name}::{value}") + + +def http_get(url: str, *, is_json: bool = True, **kwargs: Any) -> Any: + headers = kwargs.get("headers") or {} + headers["User-Agent"] = USER_AGENT + if "github" in url: + if GH_API_TOKEN: + headers["Authorization"] = f"token {GH_API_TOKEN}" + headers["Accept"] = "application/vnd.github.v3+json" + kwargs["headers"] = headers + + r = http.request("GET", url, **kwargs) + if is_json: + data = json.loads(r.data.decode("utf-8")) + else: + data = r.data + print(f"[INFO]: issued GET request for {r.geturl()}") + if not (200 <= r.status < 300): + pprint.pprint(dict(r.info())) + pprint.pprint(data) + raise RuntimeError(f"unexpected status code: {r.status}") + + return data + + +def get_main_revision() -> str: + data = http_get( + f"https://api.github.com/repos/{REPO}/commits", + fields={"per_page": "1", "sha": "main"}, + ) + assert isinstance(data[0]["sha"], str) + return data[0]["sha"] + + +def get_pr_revision(pr: int) -> str: + data = http_get(f"https://api.github.com/repos/{REPO}/pulls/{pr}") + assert isinstance(data["head"]["sha"], str) + return data["head"]["sha"] + + +def get_pypi_version() -> Version: + data = http_get("https://pypi.org/pypi/black/json") + versions = [Version(v) for v in data["releases"]] + sorted_versions = sorted(versions, reverse=True) + return sorted_versions[0] + + +@click.group() +def main() -> None: + pass + + +@main.command("config", help="Acquire run configuration and metadata.") +@click.argument("event", type=click.Choice(["push", "pull_request"])) +def config(event: Literal["push", "pull_request"]) -> None: + import diff_shades + + if event == "push": + jobs = [{"mode": "preview-changes", "force-flag": "--force-preview-style"}] + # Push on main, let's use PyPI Black as the baseline. + baseline_name = str(get_pypi_version()) + baseline_cmd = f"git checkout {baseline_name}" + target_rev = os.getenv("GITHUB_SHA") + assert target_rev is not None + target_name = "main-" + target_rev[:SHA_LENGTH] + target_cmd = f"git checkout {target_rev}" + + elif event == "pull_request": + jobs = [ + {"mode": "preview-changes", "force-flag": "--force-preview-style"}, + {"mode": "assert-no-changes", "force-flag": "--force-stable-style"}, + ] + # PR, let's use main as the baseline. 
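+        # (The push branch above instead uses the latest PyPI release as its baseline.)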
+ baseline_rev = get_main_revision() + baseline_name = "main-" + baseline_rev[:SHA_LENGTH] + baseline_cmd = f"git checkout {baseline_rev}" + pr_ref = os.getenv("GITHUB_REF") + assert pr_ref is not None + pr_num = int(pr_ref[10:-6]) + pr_rev = get_pr_revision(pr_num) + target_name = f"pr-{pr_num}-{pr_rev[:SHA_LENGTH]}" + target_cmd = f"gh pr checkout {pr_num} && git merge origin/main" + + env = f"{platform.system()}-{platform.python_version()}-{diff_shades.__version__}" + for entry in jobs: + entry["baseline-analysis"] = f"{entry['mode']}-{baseline_name}.json" + entry["baseline-setup-cmd"] = baseline_cmd + entry["target-analysis"] = f"{entry['mode']}-{target_name}.json" + entry["target-setup-cmd"] = target_cmd + entry["baseline-cache-key"] = f"{env}-{baseline_name}-{entry['mode']}" + if event == "pull_request": + # These are only needed for the PR comment. + entry["baseline-sha"] = baseline_rev + entry["target-sha"] = pr_rev + + set_output("matrix", json.dumps(jobs, indent=None)) + pprint.pprint(jobs) + + +@main.command("comment-body", help="Generate the body for a summary PR comment.") +@click.argument("baseline", type=click.Path(exists=True, path_type=Path)) +@click.argument("target", type=click.Path(exists=True, path_type=Path)) +@click.argument("baseline-sha") +@click.argument("target-sha") +@click.argument("pr-num", type=int) +def comment_body( + baseline: Path, target: Path, baseline_sha: str, target_sha: str, pr_num: int +) -> None: + # fmt: off + cmd = [ + sys.executable, "-m", "diff_shades", "--no-color", + "compare", str(baseline), str(target), "--quiet", "--check" + ] + # fmt: on + proc = subprocess.run(cmd, stdout=subprocess.PIPE, encoding="utf-8") + if not proc.returncode: + body = ( + f"**diff-shades** reports zero changes comparing this PR ({target_sha}) to" + f" main ({baseline_sha}).\n\n---\n\n" + ) + else: + body = ( + f"**diff-shades** results comparing this PR ({target_sha}) to main" + f" ({baseline_sha}). The full diff is [available in the logs]" + f'($job-diff-url) under the "{DIFF_STEP_NAME}" step.' 
+    )
+    body += "\n```text\n" + proc.stdout.strip() + "\n```\n"
+    body += (
+        f"[**What is this?**]({DOCS_URL}) | [Workflow run]($workflow-run-url) |"
+        " [diff-shades documentation](https://github.com/ichard26/diff-shades#readme)"
+    )
+    print(f"[INFO]: writing comment details to {COMMENT_FILE}")
+    with open(COMMENT_FILE, "w", encoding="utf-8") as f:
+        json.dump({"body": body, "pr-number": pr_num}, f)
+
+
+@main.command("comment-details", help="Get PR comment resources from a workflow run.")
+@click.argument("run-id")
+def comment_details(run_id: str) -> None:
+    data = http_get(f"https://api.github.com/repos/{REPO}/actions/runs/{run_id}")
+    if data["event"] != "pull_request" or data["conclusion"] == "cancelled":
+        set_output("needs-comment", "false")
+        return
+
+    set_output("needs-comment", "true")
+    jobs = http_get(data["jobs_url"])["jobs"]
+    job = next(j for j in jobs if j["name"] == "analysis / preview-changes")
+    diff_step = next(s for s in job["steps"] if s["name"] == DIFF_STEP_NAME)
+    diff_url = job["html_url"] + f"#step:{diff_step['number']}:1"
+
+    artifacts = http_get(data["artifacts_url"])["artifacts"]
+    comment_artifact = next(a for a in artifacts if a["name"] == COMMENT_FILE)
+    comment_url = comment_artifact["archive_download_url"]
+    comment_zip = BytesIO(http_get(comment_url, is_json=False))
+    with zipfile.ZipFile(comment_zip) as zfile:
+        with zfile.open(COMMENT_FILE) as rf:
+            comment_data = json.loads(rf.read().decode("utf-8"))
+
+    set_output("pr-number", str(comment_data["pr-number"]))
+    body = comment_data["body"]
+    # It's more convenient to fill in these fields after the first workflow is done
+    # since this command can access the workflows API (doing it in the main workflow
+    # while it's still in progress seems impossible).
+    body = body.replace("$workflow-run-url", data["html_url"])
+    body = body.replace("$job-diff-url", diff_url)
+    # https://github.community/t/set-output-truncates-multiline-strings/16852/3
+    escaped = body.replace("%", "%25").replace("\n", "%0A").replace("\r", "%0D")
+    set_output("comment-body", escaped)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/fuzz.py b/scripts/fuzz.py
new file mode 100644
index 00000000000..25362c927d4
--- /dev/null
+++ b/scripts/fuzz.py
@@ -0,0 +1,92 @@
+"""Property-based tests for Black.
+
+By Zac Hatfield-Dodds, based on my Hypothesmith tool for source code
+generation. You can run this file with `python`, `pytest`, or (soon)
+a coverage-guided fuzzer I'm working on.
+"""
+
+import re
+
+import hypothesmith
+from hypothesis import HealthCheck, given, settings
+from hypothesis import strategies as st
+
+import black
+from blib2to3.pgen2.tokenize import TokenError
+
+
+# This test uses the Hypothesis and Hypothesmith libraries to generate random
+# syntactically-valid Python source code and run Black in odd modes.
+@settings(
+    max_examples=1000,  # roughly 1k tests/minute, or half that under coverage
+    derandomize=True,  # deterministic mode to avoid CI flakiness
+    deadline=None,  # ignore Hypothesis' health checks; we already know that
+    suppress_health_check=HealthCheck.all(),  # this is slow and filter-heavy.
+)
+@given(
+    # Note that while Hypothesmith might generate code unlike that written by
+    # humans, it's a general test that should pass for any *valid* source code.
+    # (so e.g. running it against code scraped off the internet might also help)
+    src_contents=hypothesmith.from_grammar() | hypothesmith.from_node(),
+    # Using randomly-varied modes helps us to exercise less common code paths.
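The strategy definition continues below with the randomized Mode. As a self-contained taste of the core property this test checks, here is a much smaller idempotence sketch that needs only black and hypothesis, not hypothesmith; the real test above is far more general:

```python
import black
from hypothesis import given, strategies as st

@given(st.integers(min_value=0, max_value=10**9))
def test_formatting_is_idempotent(n: int) -> None:
    src = f"x = {n}\n"
    once = black.format_str(src, mode=black.Mode())
    # Formatting already-formatted code must be a no-op.
    assert black.format_str(once, mode=black.Mode()) == once

test_formatting_is_idempotent()  # Hypothesis runs the property when called
```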
+    mode=st.builds(
+        black.FileMode,
+        line_length=st.just(88) | st.integers(0, 200),
+        string_normalization=st.booleans(),
+        preview=st.booleans(),
+        is_pyi=st.booleans(),
+        magic_trailing_comma=st.booleans(),
+    ),
+)
+def test_idempotent_any_syntactically_valid_python(
+    src_contents: str, mode: black.FileMode
+) -> None:
+    # Before starting, let's confirm that the input string is valid Python:
+    compile(src_contents, "<string>", "exec")  # else the bug is in hypothesmith
+
+    # Then format the code...
+    try:
+        dst_contents = black.format_str(src_contents, mode=mode)
+    except black.InvalidInput:
+        # This is a bug - if it's valid Python code, as above, Black should be
+        # able to cope with it. See issues #970, #1012
+        # TODO: remove this try-except block when issues are resolved.
+        return
+    except TokenError as e:
+        if (  # Special-case logic for backslashes followed by newlines or end-of-input
+            e.args[0] == "EOF in multi-line statement"
+            and re.search(r"\\($|\r?\n)", src_contents) is not None
+        ):
+            # This is a bug - if it's valid Python code, as above, Black should be
+            # able to cope with it. See issue #1012.
+            # TODO: remove this block when the issue is resolved.
+            return
+        raise
+
+    # And check that we got equivalent and stable output.
+    black.assert_equivalent(src_contents, dst_contents)
+    black.assert_stable(src_contents, dst_contents, mode=mode)
+
+    # Future test: check that pure-python and mypyc versions of black
+    # give identical output for identical input?
+
+
+if __name__ == "__main__":
+    # Run tests, including shrinking and reporting any known failures.
+    test_idempotent_any_syntactically_valid_python()
+
+    # If Atheris is available, run coverage-guided fuzzing.
+    # (if you want only bounded fuzzing, just use `pytest fuzz.py`)
+    try:
+        import sys
+
+        import atheris
+    except ImportError:
+        pass
+    else:
+        test = test_idempotent_any_syntactically_valid_python
+        atheris.Setup(
+            sys.argv,
+            test.hypothesis.fuzz_one_input,  # type: ignore[attr-defined]
+        )
+        atheris.Fuzz()
diff --git a/scripts/migrate-black.py b/scripts/migrate-black.py
new file mode 100755
index 00000000000..ff52939460c
--- /dev/null
+++ b/scripts/migrate-black.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+# check out every commit added by the current branch, blackify each one,
+# and generate diffs that reconstruct the original commits, but
+# blackified
+import logging
+import os
+import sys
+from subprocess import PIPE, Popen, check_output, run
+
+
+def git(*args: str) -> str:
+    return check_output(["git"] + list(args)).decode("utf8").strip()
+
+
+def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
+    current_branch = git("branch", "--show-current")
+
+    if not current_branch or base_branch == current_branch:
+        logger.error("You need to check out a feature branch to work on")
+        return 1
+
+    if not os.path.exists(".git"):
+        logger.error("Run me in the root of your repo")
+        return 1
+
+    merge_base = git("merge-base", "HEAD", base_branch)
+    if not merge_base:
+        logger.error(
+            "Could not find a common commit for current head and %s" % base_branch
+        )
+        return 1
+
+    commits = git(
+        "log", "--reverse", "--pretty=format:%H", "%s~1..HEAD" % merge_base
+    ).split()
+    for commit in commits:
+        git("checkout", commit, "-b%s-black" % commit)
+        check_output(black_command, shell=True)
+        git("commit", "-aqm", "blackify")
+
+    git("checkout", base_branch, "-b%s-black" % current_branch)
+
+    for last_commit, commit in zip(commits, commits[1:]):
+        allow_empty = (
+            b"--allow-empty" in run(["git", "apply", "-h"],
stdout=PIPE).stdout + ) + quiet = b"--quiet" in run(["git", "apply", "-h"], stdout=PIPE).stdout + git_diff = Popen( + [ + "git", + "diff", + "--binary", + "--find-copies", + "%s-black..%s-black" % (last_commit, commit), + ], + stdout=PIPE, + ) + git_apply = Popen( + [ + "git", + "apply", + ] + + (["--quiet"] if quiet else []) + + [ + "-3", + "--intent-to-add", + ] + + (["--allow-empty"] if allow_empty else []) + + [ + "-", + ], + stdin=git_diff.stdout, + ) + if git_diff.stdout is not None: + git_diff.stdout.close() + git_apply.communicate() + git("commit", "--allow-empty", "-aqC", commit) + + for commit in commits: + git("branch", "-qD", "%s-black" % commit) + + return 0 + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("base_branch") + parser.add_argument("--black_command", default="black -q .") + parser.add_argument("--logfile", type=argparse.FileType("w"), default=sys.stdout) + args = parser.parse_args() + logger = logging.getLogger(__name__) + logger.addHandler(logging.StreamHandler(args.logfile)) + logger.setLevel(logging.INFO) + sys.exit(blackify(args.base_branch, args.black_command, logger)) diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 55c66add7fb..00000000000 --- a/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[options] -setup_requires = setuptools_scm diff --git a/src/black/__init__.py b/src/black/__init__.py index 1d20bb416ac..5b8c9749119 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -1,143 +1,96 @@ -import ast -import asyncio -from abc import ABC, abstractmethod -from collections import defaultdict -from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor -from contextlib import contextmanager -from datetime import datetime -from enum import Enum -from functools import lru_cache, partial, wraps import io -import itertools -import logging -from multiprocessing import Manager, freeze_support -import os -from pathlib import Path -import pickle -import regex as re -import signal +import json +import platform +import re import sys -import tempfile import tokenize import traceback +from contextlib import contextmanager +from dataclasses import replace +from datetime import datetime +from enum import Enum +from json.decoder import JSONDecodeError +from pathlib import Path from typing import ( Any, - Callable, - Collection, Dict, Generator, - Generic, - Iterable, Iterator, List, + MutableMapping, Optional, Pattern, Sequence, Set, Sized, Tuple, - Type, - TypeVar, Union, - cast, - TYPE_CHECKING, ) -from typing_extensions import Final -from mypy_extensions import mypyc_attr -from appdirs import user_cache_dir -from dataclasses import dataclass, field, replace import click -import toml -from typed_ast import ast3, ast27 -from pathspec import PathSpec - -# lib2to3 fork -from blib2to3.pytree import Node, Leaf, type_repr -from blib2to3 import pygram, pytree -from blib2to3.pgen2 import driver, token -from blib2to3.pgen2.grammar import Grammar -from blib2to3.pgen2.parse import ParseError +from click.core import ParameterSource +from mypy_extensions import mypyc_attr +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError from _black_version import version as __version__ +from black.cache import Cache, get_cache_info, read_cache, write_cache +from black.comments import normalize_fmt_off +from black.const import ( + DEFAULT_EXCLUDES, + DEFAULT_INCLUDES, + DEFAULT_LINE_LENGTH, + STDIN_PLACEHOLDER, +) +from black.files import ( + find_project_root, + 
find_pyproject_toml, + find_user_pyproject_toml, + gen_python_files, + get_gitignore, + normalize_path_maybe_ignore, + parse_pyproject_toml, + wrap_stream_for_windows, +) +from black.handle_ipynb_magics import ( + PYTHON_CELL_MAGICS, + TRANSFORMED_MAGICS, + jupyter_dependencies_are_installed, + mask_cell, + put_trailing_semicolon_back, + remove_trailing_semicolon, + unmask_cell, +) +from black.linegen import LN, LineGenerator, transform_line +from black.lines import EmptyLineTracker, Line +from black.mode import ( + FUTURE_FLAG_TO_FEATURE, + VERSION_TO_FEATURES, + Feature, + Mode, + TargetVersion, + supports_feature, +) +from black.nodes import ( + STARS, + is_number_token, + is_simple_decorator_expression, + is_string_token, + syms, +) +from black.output import color_diff, diff, dump_to_file, err, ipynb_diff, out +from black.parsing import InvalidInput # noqa F401 +from black.parsing import lib2to3_parse, parse_ast, stringify_ast +from black.report import Changed, NothingChanged, Report +from black.trans import iter_fexpr_spans +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node -if TYPE_CHECKING: - import colorama # noqa: F401 - -DEFAULT_LINE_LENGTH = 88 -DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950 -DEFAULT_INCLUDES = r"\.pyi?$" -CACHE_DIR = Path(user_cache_dir("black", version=__version__)) - -STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. - +COMPILED = Path(__file__).suffix in (".pyd", ".so") # types FileContent = str Encoding = str NewLine = str -Depth = int -NodeType = int -ParserState = int -LeafID = int -StringID = int -Priority = int -Index = int -LN = Union[Leaf, Node] -Transformer = Callable[["Line", Collection["Feature"]], Iterator["Line"]] -Timestamp = float -FileSize = int -CacheInfo = Tuple[Timestamp, FileSize] -Cache = Dict[Path, CacheInfo] -out = partial(click.secho, bold=True, err=True) -err = partial(click.secho, fg="red", err=True) - -pygram.initialize(CACHE_DIR) -syms = pygram.python_symbols - - -class NothingChanged(UserWarning): - """Raised when reformatted code is the same as source.""" - - -class CannotTransform(Exception): - """Base class for errors raised by Transformers.""" - - -class CannotSplit(CannotTransform): - """A readable split that fits the allotted line length is impossible.""" - - -class InvalidInput(ValueError): - """Raised when input source code fails all parse attempts.""" - - -T = TypeVar("T") -E = TypeVar("E", bound=Exception) - - -class Ok(Generic[T]): - def __init__(self, value: T) -> None: - self._value = value - - def ok(self) -> T: - return self._value - - -class Err(Generic[E]): - def __init__(self, e: E) -> None: - self._e = e - - def err(self) -> E: - return self._e - - -# The 'Result' return type is used to implement an error-handling model heavily -# influenced by that used by the Rust programming language -# (see https://doc.rust-lang.org/book/ch09-00-error-handling.html). 
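For readers unfamiliar with the pattern that comment describes: Ok/Err/Result let transformer code hand back either a value or a CannotTransform without raising. The types are removed from __init__.py here as part of the split into submodules, not deleted outright. A minimal usage sketch of the same pattern, with a stand-in error type:

```python
from typing import Generic, TypeVar, Union

T = TypeVar("T")
E = TypeVar("E", bound=Exception)

class Ok(Generic[T]):
    def __init__(self, value: T) -> None:
        self._value = value
    def ok(self) -> T:
        return self._value

class Err(Generic[E]):
    def __init__(self, e: E) -> None:
        self._e = e
    def err(self) -> E:
        return self._e

Result = Union[Ok[T], Err[E]]

def parse_int(text: str) -> "Result[int, ValueError]":
    # Return a value or an error, without raising at the call site.
    try:
        return Ok(int(text))
    except ValueError as e:
        return Err(e)

res = parse_int("42")
print(res.ok() if isinstance(res, Ok) else res.err())  # 42
```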
-Result = Union[Ok[T], Err[E]] -TResult = Result[T, CannotTransform] # (T)ransform Result -TMatchResult = TResult[Index] class WriteBack(Enum): @@ -160,132 +113,10 @@ def from_configuration( return cls.DIFF if diff else cls.YES -class Changed(Enum): - NO = 0 - CACHED = 1 - YES = 2 - - -class TargetVersion(Enum): - PY27 = 2 - PY33 = 3 - PY34 = 4 - PY35 = 5 - PY36 = 6 - PY37 = 7 - PY38 = 8 - - def is_python2(self) -> bool: - return self is TargetVersion.PY27 - - -PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38} - - -class Feature(Enum): - # All string literals are unicode - UNICODE_LITERALS = 1 - F_STRINGS = 2 - NUMERIC_UNDERSCORES = 3 - TRAILING_COMMA_IN_CALL = 4 - TRAILING_COMMA_IN_DEF = 5 - # The following two feature-flags are mutually exclusive, and exactly one should be - # set for every version of python. - ASYNC_IDENTIFIERS = 6 - ASYNC_KEYWORDS = 7 - ASSIGNMENT_EXPRESSIONS = 8 - POS_ONLY_ARGUMENTS = 9 - FORCE_OPTIONAL_PARENTHESES = 50 - - -VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { - TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY35: { - Feature.UNICODE_LITERALS, - Feature.TRAILING_COMMA_IN_CALL, - Feature.ASYNC_IDENTIFIERS, - }, - TargetVersion.PY36: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_IDENTIFIERS, - }, - TargetVersion.PY37: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_KEYWORDS, - }, - TargetVersion.PY38: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_KEYWORDS, - Feature.ASSIGNMENT_EXPRESSIONS, - Feature.POS_ONLY_ARGUMENTS, - }, -} - - -@dataclass -class Mode: - target_versions: Set[TargetVersion] = field(default_factory=set) - line_length: int = DEFAULT_LINE_LENGTH - string_normalization: bool = True - experimental_string_processing: bool = False - is_pyi: bool = False - - def get_cache_key(self) -> str: - if self.target_versions: - version_str = ",".join( - str(version.value) - for version in sorted(self.target_versions, key=lambda v: v.value) - ) - else: - version_str = "-" - parts = [ - version_str, - str(self.line_length), - str(int(self.string_normalization)), - str(int(self.is_pyi)), - ] - return ".".join(parts) - - # Legacy name, left for integrations. 
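To make the removed Mode.get_cache_key() concrete (the Mode class itself moves into the new black.mode module, per the imports earlier in this diff): the key is just the sorted target-version values plus the line length and boolean flags, dot-joined. For a typical configuration:

```python
# Reproducing the key layout from the removed method above.
target_versions = {6, 7, 8}  # enum values for PY36, PY37, PY38
version_str = ",".join(str(v) for v in sorted(target_versions))
parts = [
    version_str,
    str(88),          # line_length
    str(int(True)),   # string_normalization
    str(int(False)),  # is_pyi
]
print(".".join(parts))  # -> 6,7,8.88.1.0
```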
FileMode = Mode -def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: - return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) - - -def find_pyproject_toml(path_search_start: Iterable[str]) -> Optional[str]: - """Find the absolute filepath to a pyproject.toml if it exists""" - path_project_root = find_project_root(path_search_start) - path_pyproject_toml = path_project_root / "pyproject.toml" - return str(path_pyproject_toml) if path_pyproject_toml.is_file() else None - - -def parse_pyproject_toml(path_config: str) -> Dict[str, Any]: - """Parse a pyproject toml file, pulling out relevant parts for Black - - If parsing fails, will raise a toml.TomlDecodeError - """ - pyproject_toml = toml.load(path_config) - config = pyproject_toml.get("tool", {}).get("black", {}) - return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} - - def read_pyproject_toml( ctx: click.Context, param: click.Parameter, value: Optional[str] ) -> Optional[str]: @@ -301,10 +132,10 @@ def read_pyproject_toml( try: config = parse_pyproject_toml(value) - except (toml.TomlDecodeError, OSError) as e: + except (OSError, ValueError) as e: raise click.FileError( filename=value, hint=f"Error reading configuration file: {e}" - ) + ) from None if not config: return None @@ -343,7 +174,34 @@ def target_version_option_callback( return [TargetVersion[val.upper()] for val in v] -@click.command(context_settings=dict(help_option_names=["-h", "--help"])) +def re_compile_maybe_verbose(regex: str) -> Pattern[str]: + """Compile a regular expression string in `regex`. + + If it contains newlines, use verbose mode. + """ + if "\n" in regex: + regex = "(?x)" + regex + compiled: Pattern[str] = re.compile(regex) + return compiled + + +def validate_regex( + ctx: click.Context, + param: click.Parameter, + value: Optional[str], +) -> Optional[Pattern[str]]: + try: + return re_compile_maybe_verbose(value) if value is not None else None + except re.error as e: + raise click.BadParameter(f"Not a valid regular expression: {e}") from None + + +@click.command( + context_settings={"help_option_names": ["-h", "--help"]}, + # While Click does set this field automatically using the docstring, mypyc + # (annoyingly) strips 'em so we need to set it here too. + help="The uncompromising code formatter.", +) @click.option("-c", "--code", type=str, help="Format the code passed in as a string.") @click.option( "-l", @@ -372,27 +230,56 @@ def target_version_option_callback( " when piping source on standard input)." ), ) +@click.option( + "--ipynb", + is_flag=True, + help=( + "Format all input files like Jupyter Notebooks regardless of file extension " + "(useful when piping source on standard input)." + ), +) +@click.option( + "--python-cell-magics", + multiple=True, + help=( + "When processing Jupyter Notebooks, add the given magic to the list" + f" of known python-magics ({', '.join(PYTHON_CELL_MAGICS)})." + " Useful for formatting cells with custom python magics." 
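The new re_compile_maybe_verbose()/validate_regex() pair above is what lets multi-line exclude patterns in pyproject.toml work: any newline in the pattern switches on verbose mode. A standalone sketch with a made-up pattern:

```python
import re

def re_compile_maybe_verbose(regex: str):
    # Mirrors the helper above: a newline in the pattern enables verbose mode.
    if "\n" in regex:
        regex = "(?x)" + regex
    return re.compile(regex)

pattern = """
/(
    \\.git
  | build
)/
"""
compiled = re_compile_maybe_verbose(pattern)
print(bool(compiled.search("/build/lib.py")))  # True
print(bool(compiled.search("/src/lib.py")))    # False
```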
+ ), + default=[], +) @click.option( "-S", "--skip-string-normalization", is_flag=True, help="Don't normalize string quotes or prefixes.", ) +@click.option( + "-C", + "--skip-magic-trailing-comma", + is_flag=True, + help="Don't use trailing commas as a reason to split lines.", +) @click.option( "--experimental-string-processing", is_flag=True, hidden=True, + help="(DEPRECATED and now included in --preview) Normalize string literals.", +) +@click.option( + "--preview", + is_flag=True, help=( - "Experimental option that performs more normalization on string literals." - " Currently disabled because it leads to some crashes." + "Enable potentially disruptive style changes that may be added to Black's main" + " functionality in the next major release." ), ) @click.option( "--check", is_flag=True, help=( - "Don't write the files back, just return the status. Return code 0 means" - " nothing would change. Return code 1 means some files would be reformatted." + "Don't write the files back, just return the status. Return code 0 means" + " nothing would change. Return code 1 means some files would be reformatted." " Return code 123 means there was an internal error." ), ) @@ -411,38 +298,75 @@ def target_version_option_callback( is_flag=True, help="If --fast given, skip temporary sanity checks. [default: --safe]", ) +@click.option( + "--required-version", + type=str, + help=( + "Require a specific version of Black to be running (useful for unifying results" + " across many environments e.g. with a pyproject.toml file). It can be" + " either a major version number or an exact version." + ), +) @click.option( "--include", type=str, default=DEFAULT_INCLUDES, + callback=validate_regex, help=( "A regular expression that matches files and directories that should be" - " included on recursive searches. An empty value means all files are included" - " regardless of the name. Use forward slashes for directories on all platforms" - " (Windows, too). Exclusions are calculated first, inclusions later." + " included on recursive searches. An empty value means all files are included" + " regardless of the name. Use forward slashes for directories on all platforms" + " (Windows, too). Exclusions are calculated first, inclusions later." ), show_default=True, ) @click.option( "--exclude", type=str, - default=DEFAULT_EXCLUDES, + callback=validate_regex, help=( "A regular expression that matches files and directories that should be" - " excluded on recursive searches. An empty value means no paths are excluded." - " Use forward slashes for directories on all platforms (Windows, too). " - " Exclusions are calculated first, inclusions later." + " excluded on recursive searches. An empty value means no paths are excluded." + " Use forward slashes for directories on all platforms (Windows, too)." + " Exclusions are calculated first, inclusions later. [default:" + f" {DEFAULT_EXCLUDES}]" + ), + show_default=False, +) +@click.option( + "--extend-exclude", + type=str, + callback=validate_regex, + help=( + "Like --exclude, but adds additional files and directories on top of the" + " excluded ones. (Useful if you simply want to add to the default)" ), - show_default=True, ) @click.option( "--force-exclude", type=str, + callback=validate_regex, help=( "Like --exclude, but files and directories matching this regex will be " - "excluded even when they are passed explicitly as arguments" + "excluded even when they are passed explicitly as arguments." 
), ) +@click.option( + "--stdin-filename", + type=str, + help=( + "The name of the file when passing it through stdin. Useful to make " + "sure Black will respect --force-exclude option on some " + "editors that rely on using stdin." + ), +) +@click.option( + "-W", + "--workers", + type=click.IntRange(min=1), + default=None, + help="Number of parallel workers [default: number of CPUs in the system]", +) @click.option( "-q", "--quiet", @@ -458,10 +382,16 @@ def target_version_option_callback( is_flag=True, help=( "Also emit messages to stderr about files that were not changed or were ignored" - " due to --exclude=." + " due to exclusion patterns." + ), +) +@click.version_option( + version=__version__, + message=( + f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})\n" + f"Python ({platform.python_implementation()}) {platform.python_version()}" ), ) -@click.version_option(version=__version__) @click.argument( "src", nargs=-1, @@ -469,6 +399,7 @@ def target_version_option_callback( exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True ), is_eager=True, + metavar="SRC ...", ) @click.option( "--config", @@ -485,7 +416,7 @@ def target_version_option_callback( help="Read configuration from FILE path.", ) @click.pass_context -def main( +def main( # noqa: C901 ctx: click.Context, code: Optional[str], line_length: int, @@ -495,17 +426,97 @@ def main( color: bool, fast: bool, pyi: bool, + ipynb: bool, + python_cell_magics: Sequence[str], skip_string_normalization: bool, + skip_magic_trailing_comma: bool, experimental_string_processing: bool, + preview: bool, quiet: bool, verbose: bool, - include: str, - exclude: str, - force_exclude: Optional[str], + required_version: Optional[str], + include: Pattern[str], + exclude: Optional[Pattern[str]], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + stdin_filename: Optional[str], + workers: Optional[int], src: Tuple[str, ...], config: Optional[str], ) -> None: """The uncompromising code formatter.""" + ctx.ensure_object(dict) + + if src and code is not None: + out( + main.get_usage(ctx) + + "\n\n'SRC' and 'code' cannot be passed simultaneously." + ) + ctx.exit(1) + if not src and code is None: + out(main.get_usage(ctx) + "\n\nOne of 'SRC' or 'code' is required.") + ctx.exit(1) + + root, method = ( + find_project_root(src, stdin_filename) if code is None else (None, None) + ) + ctx.obj["root"] = root + + if verbose: + if root: + out( + f"Identified `{root}` as project root containing a {method}.", + fg="blue", + ) + + normalized = [ + (source, source) + if source == "-" + else (normalize_path_maybe_ignore(Path(source), root), source) + for source in src + ] + srcs_string = ", ".join( + [ + f'"{_norm}"' + if _norm + else f'\033[31m"{source} (skipping - invalid)"\033[34m' + for _norm, source in normalized + ] + ) + out(f"Sources to be formatted: {srcs_string}", fg="blue") + + if config: + config_source = ctx.get_parameter_source("config") + user_level_config = str(find_user_pyproject_toml()) + if config == user_level_config: + out( + "Using configuration from user-level config at " + f"'{user_level_config}'.", + fg="blue", + ) + elif config_source in ( + ParameterSource.DEFAULT, + ParameterSource.DEFAULT_MAP, + ): + out("Using configuration from project root.", fg="blue") + else: + out(f"Using configuration in '{config}'.", fg="blue") + + error_msg = "Oh no! 
💥 💔 💥" + if ( + required_version + and required_version != __version__ + and required_version != __version__.split(".")[0] + ): + err( + f"{error_msg} The required version `{required_version}` does not match" + f" the running version `{__version__}`!" + ) + ctx.exit(1) + if ipynb and pyi: + err("Cannot pass both `pyi` and `ipynb` flags!") + ctx.exit(1) + write_back = WriteBack.from_configuration(check=check, diff=diff, color=color) if target_version: versions = set(target_version) @@ -516,50 +527,76 @@ def main( target_versions=versions, line_length=line_length, is_pyi=pyi, + is_ipynb=ipynb, string_normalization=not skip_string_normalization, + magic_trailing_comma=not skip_magic_trailing_comma, experimental_string_processing=experimental_string_processing, + preview=preview, + python_cell_magics=set(python_cell_magics), ) - if config and verbose: - out(f"Using configuration from {config}.", bold=False, fg="blue") + if code is not None: - print(format_str(code, mode=mode)) - ctx.exit(0) - report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose) - sources = get_sources( - ctx=ctx, - src=src, - quiet=quiet, - verbose=verbose, - include=include, - exclude=exclude, - force_exclude=force_exclude, - report=report, - ) + # Run in quiet mode by default with -c; the extra output isn't useful. + # You can still pass -v to get verbose output. + quiet = True - path_empty( - sources, - "No Python files are present to be formatted. Nothing to do 😴", - quiet, - verbose, - ctx, - ) + report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose) - if len(sources) == 1: - reformat_one( - src=sources.pop(), - fast=fast, - write_back=write_back, - mode=mode, - report=report, + if code is not None: + reformat_code( + content=code, fast=fast, write_back=write_back, mode=mode, report=report ) else: - reformat_many( - sources=sources, fast=fast, write_back=write_back, mode=mode, report=report + try: + sources = get_sources( + ctx=ctx, + src=src, + quiet=quiet, + verbose=verbose, + include=include, + exclude=exclude, + extend_exclude=extend_exclude, + force_exclude=force_exclude, + report=report, + stdin_filename=stdin_filename, + ) + except GitWildMatchPatternError: + ctx.exit(1) + + path_empty( + sources, + "No Python files are present to be formatted. Nothing to do 😴", + quiet, + verbose, + ctx, ) + if len(sources) == 1: + reformat_one( + src=sources.pop(), + fast=fast, + write_back=write_back, + mode=mode, + report=report, + ) + else: + from black.concurrency import reformat_many + + reformat_many( + sources=sources, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + workers=workers, + ) + if verbose or not quiet: - out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨") - click.secho(str(report), err=True) + if code is None and (verbose or report.change_count or report.failure_count): + out() + out(error_msg if report.return_code else "All done! 
✨ 🍰 ✨") + if code is None: + click.echo(str(report), err=True) ctx.exit(report.return_code) @@ -569,66 +606,75 @@ def get_sources( src: Tuple[str, ...], quiet: bool, verbose: bool, - include: str, - exclude: str, - force_exclude: Optional[str], + include: Pattern[str], + exclude: Optional[Pattern[str]], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], report: "Report", + stdin_filename: Optional[str], ) -> Set[Path]: """Compute the set of files to be formatted.""" - try: - include_regex = re_compile_maybe_verbose(include) - except re.error: - err(f"Invalid regular expression for include given: {include!r}") - ctx.exit(2) - try: - exclude_regex = re_compile_maybe_verbose(exclude) - except re.error: - err(f"Invalid regular expression for exclude given: {exclude!r}") - ctx.exit(2) - try: - force_exclude_regex = ( - re_compile_maybe_verbose(force_exclude) if force_exclude else None - ) - except re.error: - err(f"Invalid regular expression for force_exclude given: {force_exclude!r}") - ctx.exit(2) - - root = find_project_root(src) sources: Set[Path] = set() - path_empty(src, "No Path provided. Nothing to do 😴", quiet, verbose, ctx) - gitignore = get_gitignore(root) + root = ctx.obj["root"] for s in src: - p = Path(s) - if p.is_dir(): - sources.update( - gen_python_files( - p.iterdir(), - root, - include_regex, - exclude_regex, - force_exclude_regex, - report, - gitignore, - ) - ) - elif s == "-": - sources.add(p) - elif p.is_file(): - normalized_path = normalize_path_maybe_ignore(p, root, report) + if s == "-" and stdin_filename: + p = Path(stdin_filename) + is_stdin = True + else: + p = Path(s) + is_stdin = False + + if is_stdin or p.is_file(): + normalized_path = normalize_path_maybe_ignore(p, ctx.obj["root"], report) if normalized_path is None: continue normalized_path = "/" + normalized_path # Hard-exclude any files that matches the `--force-exclude` regex. - if force_exclude_regex: - force_exclude_match = force_exclude_regex.search(normalized_path) + if force_exclude: + force_exclude_match = force_exclude.search(normalized_path) else: force_exclude_match = None if force_exclude_match and force_exclude_match.group(0): report.path_ignored(p, "matches the --force-exclude regular expression") continue + if is_stdin: + p = Path(f"{STDIN_PLACEHOLDER}{str(p)}") + + if p.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + + sources.add(p) + elif p.is_dir(): + if exclude is None: + exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) + gitignore = get_gitignore(root) + p_gitignore = get_gitignore(p) + # No need to use p's gitignore if it is identical to root's gitignore + # (i.e. root and p point to the same directory). + if gitignore != p_gitignore: + gitignore += p_gitignore + else: + gitignore = None + sources.update( + gen_python_files( + p.iterdir(), + ctx.obj["root"], + include, + exclude, + extend_exclude, + force_exclude, + report, + gitignore, + verbose=verbose, + quiet=quiet, + ) + ) + elif s == "-": sources.add(p) else: err(f"invalid path: {s}") @@ -641,12 +687,39 @@ def path_empty( """ Exit if there is no `src` provided for formatting """ - if len(src) == 0: + if not src: if verbose or not quiet: out(msg) - ctx.exit(0) + ctx.exit(0) + + +def reformat_code( + content: str, fast: bool, write_back: WriteBack, mode: Mode, report: Report +) -> None: + """ + Reformat and print out `content` without spawning child processes. + Similar to `reformat_one`, but for string content. 
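Stepping back to get_sources() above, the --stdin-filename handling deserves a note: the fake path is normalized and matched against --force-exclude first, and only then re-prefixed with a placeholder so reformat_one() can recognize it as stdin later. A hedged sketch of that flow (the placeholder value is assumed from black.const; the regex is purely illustrative):

```python
import re
from pathlib import Path

STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__"  # assumed value, per black.const
force_exclude = re.compile(r"/generated/")      # illustrative pattern

def plan_stdin(stdin_filename: str) -> str:
    normalized = "/" + Path(stdin_filename).as_posix()
    if force_exclude.search(normalized):
        return f"ignored: {stdin_filename} matches --force-exclude"
    return f"format as stdin: {STDIN_PLACEHOLDER}{stdin_filename}"

print(plan_stdin("src/generated/models.py"))  # ignored
print(plan_stdin("src/app.py"))               # formatted via the placeholder path
```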
+ + `fast`, `write_back`, and `mode` options are passed to + :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. + """ + path = Path("") + try: + changed = Changed.NO + if format_stdin_to_stdout( + content=content, fast=fast, write_back=write_back, mode=mode + ): + changed = Changed.YES + report.done(path, changed) + except Exception as exc: + if report.verbose: + traceback.print_exc() + report.failed(path, str(exc)) +# diff-shades depends on being to monkeypatch this function to operate. I know it's +# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26 +@mypyc_attr(patchable=True) def reformat_one( src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report" ) -> None: @@ -657,15 +730,31 @@ def reformat_one( """ try: changed = Changed.NO - if not src.is_file() and str(src) == "-": + + if str(src) == "-": + is_stdin = True + elif str(src).startswith(STDIN_PLACEHOLDER): + is_stdin = True + # Use the original name again in case we want to print something + # to the user + src = Path(str(src)[len(STDIN_PLACEHOLDER) :]) + else: + is_stdin = False + + if is_stdin: + if src.suffix == ".pyi": + mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode): changed = Changed.YES else: cache: Cache = {} - if write_back != WriteBack.DIFF: + if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF): cache = read_cache(mode) res_src = src.resolve() - if res_src in cache and cache[res_src] == get_cache_info(res_src): + res_src_s = str(res_src) + if res_src_s in cache and cache[res_src_s] == get_cache_info(res_src): changed = Changed.CACHED if changed is not Changed.CACHED and format_file_in_place( src, fast=fast, write_back=write_back, mode=mode @@ -682,114 +771,6 @@ def reformat_one( report.failed(src, str(exc)) -def reformat_many( - sources: Set[Path], fast: bool, write_back: WriteBack, mode: Mode, report: "Report" -) -> None: - """Reformat multiple files using a ProcessPoolExecutor.""" - executor: Executor - loop = asyncio.get_event_loop() - worker_count = os.cpu_count() - if sys.platform == "win32": - # Work around https://bugs.python.org/issue26903 - worker_count = min(worker_count, 61) - try: - executor = ProcessPoolExecutor(max_workers=worker_count) - except (ImportError, OSError): - # we arrive here if the underlying system does not support multi-processing - # like in AWS Lambda or Termux, in which case we gracefully fallback to - # a ThreadPollExecutor with just a single worker (more workers would not do us - # any good due to the Global Interpreter Lock) - executor = ThreadPoolExecutor(max_workers=1) - - try: - loop.run_until_complete( - schedule_formatting( - sources=sources, - fast=fast, - write_back=write_back, - mode=mode, - report=report, - loop=loop, - executor=executor, - ) - ) - finally: - shutdown(loop) - if executor is not None: - executor.shutdown() - - -async def schedule_formatting( - sources: Set[Path], - fast: bool, - write_back: WriteBack, - mode: Mode, - report: "Report", - loop: asyncio.AbstractEventLoop, - executor: Executor, -) -> None: - """Run formatting of `sources` in parallel using the provided `executor`. - - (Use ProcessPoolExecutors for actual parallelism.) - - `write_back`, `fast`, and `mode` options are passed to - :func:`format_file_in_place`. 
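On the cache lookup in reformat_one() above, note the switch to string keys (res_src_s): each entry is an (mtime, size) pair, and a hit means the file is reported as Changed.CACHED and skipped. The check, reduced to a sketch:

```python
from pathlib import Path
from typing import Dict, Tuple

CacheInfo = Tuple[float, int]  # (st_mtime, st_size), as in black.cache
Cache = Dict[str, CacheInfo]   # string keys, matching the hunk above

def is_cached(cache: Cache, src: Path, info: CacheInfo) -> bool:
    return cache.get(str(src.resolve())) == info

cache: Cache = {str(Path("a.py").resolve()): (1_650_000_000.0, 120)}
print(is_cached(cache, Path("a.py"), (1_650_000_000.0, 120)))  # True -> skip
print(is_cached(cache, Path("a.py"), (1_650_000_500.0, 121)))  # False -> reformat
```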
- """ - cache: Cache = {} - if write_back != WriteBack.DIFF: - cache = read_cache(mode) - sources, cached = filter_cached(cache, sources) - for src in sorted(cached): - report.done(src, Changed.CACHED) - if not sources: - return - - cancelled = [] - sources_to_cache = [] - lock = None - if write_back == WriteBack.DIFF: - # For diff output, we need locks to ensure we don't interleave output - # from different processes. - manager = Manager() - lock = manager.Lock() - tasks = { - asyncio.ensure_future( - loop.run_in_executor( - executor, format_file_in_place, src, fast, mode, write_back, lock - ) - ): src - for src in sorted(sources) - } - pending: Iterable["asyncio.Future[bool]"] = tasks.keys() - try: - loop.add_signal_handler(signal.SIGINT, cancel, pending) - loop.add_signal_handler(signal.SIGTERM, cancel, pending) - except NotImplementedError: - # There are no good alternatives for these on Windows. - pass - while pending: - done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) - for task in done: - src = tasks.pop(task) - if task.cancelled(): - cancelled.append(task) - elif task.exception(): - report.failed(src, str(task.exception())) - else: - changed = Changed.YES if task.result() else Changed.NO - # If the file was written back or was successfully checked as - # well-formatted, store this information in the cache. - if write_back is WriteBack.YES or ( - write_back is WriteBack.CHECK and changed is Changed.NO - ): - sources_to_cache.append(src) - report.done(src, changed) - if cancelled: - await asyncio.gather(*cancelled, loop=loop, return_exceptions=True) - if sources_to_cache: - write_cache(cache, sources_to_cache, mode) - - def format_file_in_place( src: Path, fast: bool, @@ -805,6 +786,8 @@ def format_file_in_place( """ if src.suffix == ".pyi": mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) then = datetime.utcfromtimestamp(src.stat().st_mtime) with open(src, "rb") as buf: @@ -813,6 +796,10 @@ def format_file_in_place( dst_contents = format_file_contents(src_contents, fast=fast, mode=mode) except NothingChanged: return False + except JSONDecodeError: + raise ValueError( + f"File '{src}' cannot be parsed as valid Jupyter notebook." 
+ ) from None if write_back == WriteBack.YES: with open(src, "w", encoding=encoding, newline=newline) as f: @@ -821,9 +808,12 @@ def format_file_in_place( now = datetime.utcnow() src_name = f"{src}\t{then} +0000" dst_name = f"{src}\t{now} +0000" - diff_contents = diff(src_contents, dst_contents, src_name, dst_name) + if mode.is_ipynb: + diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name) + else: + diff_contents = diff(src_contents, dst_contents, src_name, dst_name) - if write_back == write_back.COLOR_DIFF: + if write_back == WriteBack.COLOR_DIFF: diff_contents = color_diff(diff_contents) with lock or nullcontext(): @@ -840,61 +830,28 @@ def format_file_in_place( return True -def color_diff(contents: str) -> str: - """Inject the ANSI color codes to the diff.""" - lines = contents.split("\n") - for i, line in enumerate(lines): - if line.startswith("+++") or line.startswith("---"): - line = "\033[1;37m" + line + "\033[0m" # bold white, reset - if line.startswith("@@"): - line = "\033[36m" + line + "\033[0m" # cyan, reset - if line.startswith("+"): - line = "\033[32m" + line + "\033[0m" # green, reset - elif line.startswith("-"): - line = "\033[31m" + line + "\033[0m" # red, reset - lines[i] = line - return "\n".join(lines) - - -def wrap_stream_for_windows( - f: io.TextIOWrapper, -) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32.AnsiToWin32"]: - """ - Wrap the stream in colorama's wrap_stream so colors are shown on Windows. - - If `colorama` is not found, then no change is made. If `colorama` does - exist, then it handles the logic to determine whether or not to change - things. - """ - try: - from colorama import initialise - - # We set `strip=False` so that we can don't have to modify - # test_express_diff_with_color. - f = initialise.wrap_stream( - f, convert=None, strip=False, autoreset=False, wrap=True - ) - - # wrap_stream returns a `colorama.AnsiToWin32.AnsiToWin32` object - # which does not have a `detach()` method. So we fake one. - f.detach = lambda *args, **kwargs: None # type: ignore - except ImportError: - pass - - return f - - def format_stdin_to_stdout( - fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: Mode + fast: bool, + *, + content: Optional[str] = None, + write_back: WriteBack = WriteBack.NO, + mode: Mode, ) -> bool: """Format file on stdin. Return True if changed. + If content is None, it's read from sys.stdin. + If `write_back` is YES, write reformatted code back to stdout. If it is DIFF, write a diff to stdout. The `mode` argument is passed to :func:`format_file_contents`. """ then = datetime.utcnow() - src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) + + if content is None: + src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) + else: + src, encoding, newline = content, "utf-8", "" + dst = src try: dst = format_file_contents(src, fast=fast, mode=mode) @@ -908,6 +865,9 @@ def format_stdin_to_stdout( sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True ) if write_back == WriteBack.YES: + # Make sure there's a newline after the content + if dst and dst[-1] != "\n": + dst += "\n" f.write(dst) elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): now = datetime.utcnow() @@ -921,34 +881,154 @@ def format_stdin_to_stdout( f.detach() +def check_stability_and_equivalence( + src_contents: str, dst_contents: str, *, mode: Mode +) -> None: + """Perform stability and equivalence checks. 
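One small but user-visible fix in format_stdin_to_stdout() above: when writing back to stdout, the output is now guaranteed to end with a newline. Extracted, the guard is just:

```python
dst = 'print("hi")'  # formatted output that happens to lack a trailing newline
if dst and dst[-1] != "\n":
    dst += "\n"
assert dst == 'print("hi")\n'
```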
+ + Raise AssertionError if source and destination contents are not + equivalent, or if a second pass of the formatter would format the + content differently. + """ + assert_equivalent(src_contents, dst_contents) + assert_stable(src_contents, dst_contents, mode=mode) + + def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: - """Reformat contents a file and return new contents. + """Reformat contents of a file and return new contents. If `fast` is False, additionally confirm that the reformatted code is valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it. `mode` is passed to :func:`format_str`. """ - if src_contents.strip() == "": + if not src_contents.strip(): raise NothingChanged - dst_contents = format_str(src_contents, mode=mode) + if mode.is_ipynb: + dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode) + else: + dst_contents = format_str(src_contents, mode=mode) if src_contents == dst_contents: raise NothingChanged - if not fast: - assert_equivalent(src_contents, dst_contents) - assert_stable(src_contents, dst_contents, mode=mode) + if not fast and not mode.is_ipynb: + # Jupyter notebooks will already have been checked above. + check_stability_and_equivalence(src_contents, dst_contents, mode=mode) return dst_contents -def format_str(src_contents: str, *, mode: Mode) -> FileContent: - """Reformat a string and return new contents. +def validate_cell(src: str, mode: Mode) -> None: + """Check that cell does not already contain TransformerManager transformations, + or non-Python cell magics, which might cause tokenizer_rt to break because of + indentations. + + If a cell contains ``!ls``, then it'll be transformed to + ``get_ipython().system('ls')``. However, if the cell originally contained + ``get_ipython().system('ls')``, then it would get transformed in the same way: + + >>> TransformerManager().transform_cell("get_ipython().system('ls')") + "get_ipython().system('ls')\n" + >>> TransformerManager().transform_cell("!ls") + "get_ipython().system('ls')\n" + + Due to the impossibility of safely roundtripping in such situations, cells + containing transformed magics will be ignored. + """ + if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS): + raise NothingChanged + if ( + src[:2] == "%%" + and src.split()[0][2:] not in PYTHON_CELL_MAGICS | mode.python_cell_magics + ): + raise NothingChanged + + +def format_cell(src: str, *, fast: bool, mode: Mode) -> str: + """Format code in given cell of Jupyter notebook. + + General idea is: + + - if cell has trailing semicolon, remove it; + - if cell has IPython magics, mask them; + - format cell; + - reinstate IPython magics; + - reinstate trailing semicolon (if originally present); + - strip trailing newlines. + + Cells with syntax errors will not be processed, as they + could potentially be automagics or multi-line magics, which + are currently not supported. 
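The magic handling sketched in the docstring above starts with validate_cell()'s guard: a cell opening with %% is only formatted when its magic is known to contain Python. A sketch, assuming a subset of the real PYTHON_CELL_MAGICS set:

```python
PYTHON_CELL_MAGICS = {"time", "timeit", "capture", "python", "python3"}  # assumed subset

def cell_is_formattable(src: str, extra_magics: frozenset = frozenset()) -> bool:
    # Mirrors validate_cell's check: unknown cell magics like %%bash are skipped.
    if src[:2] == "%%" and src.split()[0][2:] not in PYTHON_CELL_MAGICS | extra_magics:
        return False
    return True

print(cell_is_formattable("%%timeit\nsum(range(10))"))  # True
print(cell_is_formattable("%%bash\nls"))                # False
```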
+ """ + validate_cell(src, mode) + src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon( + src + ) + try: + masked_src, replacements = mask_cell(src_without_trailing_semicolon) + except SyntaxError: + raise NothingChanged from None + masked_dst = format_str(masked_src, mode=mode) + if not fast: + check_stability_and_equivalence(masked_src, masked_dst, mode=mode) + dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements) + dst = put_trailing_semicolon_back( + dst_without_trailing_semicolon, has_trailing_semicolon + ) + dst = dst.rstrip("\n") + if dst == src: + raise NothingChanged from None + return dst + + +def validate_metadata(nb: MutableMapping[str, Any]) -> None: + """If notebook is marked as non-Python, don't format it. + + All notebook metadata fields are optional, see + https://nbformat.readthedocs.io/en/latest/format_description.html. So + if a notebook has empty metadata, we will try to parse it anyway. + """ + language = nb.get("metadata", {}).get("language_info", {}).get("name", None) + if language is not None and language != "python": + raise NothingChanged from None + + +def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: + """Format Jupyter notebook. + + Operate cell-by-cell, only on code cells, only for Python notebooks. + If the ``.ipynb`` originally had a trailing newline, it'll be preserved. + """ + trailing_newline = src_contents[-1] == "\n" + modified = False + nb = json.loads(src_contents) + validate_metadata(nb) + for cell in nb["cells"]: + if cell.get("cell_type", None) == "code": + try: + src = "".join(cell["source"]) + dst = format_cell(src, fast=fast, mode=mode) + except NothingChanged: + pass + else: + cell["source"] = dst.splitlines(keepends=True) + modified = True + if modified: + dst_contents = json.dumps(nb, indent=1, ensure_ascii=False) + if trailing_newline: + dst_contents = dst_contents + "\n" + return dst_contents + else: + raise NothingChanged + + +def format_str(src_contents: str, *, mode: Mode) -> str: + """Reformat a string and return new contents. `mode` determines formatting options, such as how many characters per line are allowed. Example: >>> import black - >>> print(black.format_str("def f(arg:str='')->None:...", mode=Mode())) + >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode())) def f(arg: str = "") -> None: ... @@ -971,22 +1051,28 @@ def f( hey """ + dst_contents = _format_str_once(src_contents, mode=mode) + # Forced second pass to work around optional trailing commas (becoming + # forced trailing commas on pass 2) interacting differently with optional + # parentheses. Admittedly ugly. 
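That "admittedly ugly" second pass is observable from the outside: format_str() is a fixed point after a single application, even on inputs where the magic trailing comma and optional parentheses interact. For example:

```python
import black

src = "result = f(a,)\n"  # magic trailing comma: the call must stay exploded
once = black.format_str(src, mode=black.Mode())
print(once)
# result = f(
#     a,
# )
assert black.format_str(once, mode=black.Mode()) == once  # stable on pass two
```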
+ if src_contents != dst_contents: + return _format_str_once(dst_contents, mode=mode) + return dst_contents + + +def _format_str_once(src_contents: str, *, mode: Mode) -> str: src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) dst_contents = [] - future_imports = get_future_imports(src_node) if mode.target_versions: versions = mode.target_versions else: - versions = detect_target_versions(src_node) - normalize_fmt_off(src_node) - lines = LineGenerator( - remove_u_prefix="unicode_literals" in future_imports - or supports_feature(versions, Feature.UNICODE_LITERALS), - is_pyi=mode.is_pyi, - normalize_strings=mode.string_normalization, - ) + future_imports = get_future_imports(src_node) + versions = detect_target_versions(src_node, future_imports=future_imports) + + normalize_fmt_off(src_node, preview=mode.preview) + lines = LineGenerator(mode=mode) elt = EmptyLineTracker(is_pyi=mode.is_pyi) - empty_line = Line() + empty_line = Line(mode=mode) after = 0 split_line_features = { feature @@ -1010,4653 +1096,73 @@ def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]: `newline` is either CRLF or LF but `decoded_contents` is decoded with universal newlines (i.e. only contains LF). """ - srcbuf = io.BytesIO(src) - encoding, lines = tokenize.detect_encoding(srcbuf.readline) - if not lines: - return "", encoding, "\n" - - newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n" - srcbuf.seek(0) - with io.TextIOWrapper(srcbuf, encoding) as tiow: - return tiow.read(), encoding, newline - - -def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: - if not target_versions: - # No target_version specified, so try all grammars. - return [ - # Python 3.7+ - pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords, - # Python 3.0-3.6 - pygram.python_grammar_no_print_statement_no_exec_statement, - # Python 2.7 with future print_function import - pygram.python_grammar_no_print_statement, - # Python 2.7 - pygram.python_grammar, - ] - - if all(version.is_python2() for version in target_versions): - # Python 2-only code, so try Python 2 grammars. - return [ - # Python 2.7 with future print_function import - pygram.python_grammar_no_print_statement, - # Python 2.7 - pygram.python_grammar, - ] - - # Python 3-compatible code, so only try Python 3 grammar. 
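Circling back to the decode_bytes() contract documented above: encoding detection goes through tokenize.detect_encoding, and the returned text is always LF-normalized while `newline` records the original convention. Demonstrated directly:

```python
import io
import tokenize

src = b"x = 1\r\ny = 2\r\n"
srcbuf = io.BytesIO(src)
encoding, first_lines = tokenize.detect_encoding(srcbuf.readline)
newline = "\r\n" if first_lines and first_lines[0].endswith(b"\r\n") else "\n"
srcbuf.seek(0)
with io.TextIOWrapper(srcbuf, encoding) as tiow:
    decoded = tiow.read()  # universal newlines: CRLF becomes LF
print(encoding, repr(newline), repr(decoded))
# utf-8 '\r\n' 'x = 1\ny = 2\n'
```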
- grammars = [] - # If we have to parse both, try to parse async as a keyword first - if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS): - # Python 3.7+ - grammars.append( - pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords - ) - if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): - # Python 3.0-3.6 - grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement) - # At least one of the above branches must have been taken, because every Python - # version has exactly one of the two 'ASYNC_*' flags - return grammars - - -def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: - """Given a string with source, return the lib2to3 Node.""" - if src_txt[-1:] != "\n": - src_txt += "\n" - - for grammar in get_grammars(set(target_versions)): - drv = driver.Driver(grammar, pytree.convert) - try: - result = drv.parse_string(src_txt, True) - break - - except ParseError as pe: - lineno, column = pe.context[1] - lines = src_txt.splitlines() - try: - faulty_line = lines[lineno - 1] - except IndexError: - faulty_line = "" - exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}") - else: - raise exc from None - - if isinstance(result, Leaf): - result = Node(syms.file_input, [result]) - return result - - -def lib2to3_unparse(node: Node) -> str: - """Given a lib2to3 node, return its string representation.""" - code = str(node) - return code - - -class Visitor(Generic[T]): - """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" - - def visit(self, node: LN) -> Iterator[T]: - """Main method to visit `node` and its children. - - It tries to find a `visit_*()` method for the given `node.type`, like - `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. - If no dedicated `visit_*()` method is found, chooses `visit_default()` - instead. - - Then yields objects of type `T` from the selected visitor. - """ - if node.type < 256: - name = token.tok_name[node.type] - else: - name = str(type_repr(node.type)) - # We explicitly branch on whether a visitor exists (instead of - # using self.visit_default as the default arg to getattr) in order - # to save needing to create a bound method object and so mypyc can - # generate a native call to visit_default. - visitf = getattr(self, f"visit_{name}", None) - if visitf: - yield from visitf(node) - else: - yield from self.visit_default(node) - - def visit_default(self, node: LN) -> Iterator[T]: - """Default `visit_*()` implementation. Recurses to children of `node`.""" - if isinstance(node, Node): - for child in node.children: - yield from self.visit(child) - - -@dataclass -class DebugVisitor(Visitor[T]): - tree_depth: int = 0 - - def visit_default(self, node: LN) -> Iterator[T]: - indent = " " * (2 * self.tree_depth) - if isinstance(node, Node): - _type = type_repr(node.type) - out(f"{indent}{_type}", fg="yellow") - self.tree_depth += 1 - for child in node.children: - yield from self.visit(child) - - self.tree_depth -= 1 - out(f"{indent}/{_type}", fg="yellow", bold=False) - else: - _type = token.tok_name.get(node.type, str(node.type)) - out(f"{indent}{_type}", fg="blue", nl=False) - if node.prefix: - # We don't have to handle prefixes for `Node` objects since - # that delegates to the first child anyway. 
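The Visitor machinery being removed above survives elsewhere in the package after this split. Its dispatch is a plain getattr-based lookup, which a tiny framework-free rendition (with hypothetical node types) makes clear:

```python
from typing import Any, Iterator, List

class Visitor:
    # Same dispatch trick as above: look for visit_<typename>, else recurse.
    def visit(self, node: Any) -> Iterator[str]:
        visitf = getattr(self, f"visit_{type(node).__name__}", None)
        if visitf is not None:
            yield from visitf(node)
        else:
            yield from self.visit_default(node)

    def visit_default(self, node: Any) -> Iterator[str]:
        for child in getattr(node, "children", []):
            yield from self.visit(child)

class Tree:
    def __init__(self, *children: Any) -> None:
        self.children: List[Any] = list(children)

class IntCollector(Visitor):
    def visit_int(self, node: int) -> Iterator[str]:
        yield f"int: {node}"

print(list(IntCollector().visit(Tree(1, Tree(2, 3), "skip me"))))
# -> ['int: 1', 'int: 2', 'int: 3']
```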
- out(f" {node.prefix!r}", fg="green", bold=False, nl=False) - out(f" {node.value!r}", fg="blue", bold=False) - - @classmethod - def show(cls, code: Union[str, Leaf, Node]) -> None: - """Pretty-print the lib2to3 AST of a given string of `code`. - - Convenience method for debugging. - """ - v: DebugVisitor[None] = DebugVisitor() - if isinstance(code, str): - code = lib2to3_parse(code) - list(v.visit(code)) - - -WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} -STATEMENT: Final = { - syms.if_stmt, - syms.while_stmt, - syms.for_stmt, - syms.try_stmt, - syms.except_clause, - syms.with_stmt, - syms.funcdef, - syms.classdef, -} -STANDALONE_COMMENT: Final = 153 -token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" -LOGIC_OPERATORS: Final = {"and", "or"} -COMPARATORS: Final = { - token.LESS, - token.GREATER, - token.EQEQUAL, - token.NOTEQUAL, - token.LESSEQUAL, - token.GREATEREQUAL, -} -MATH_OPERATORS: Final = { - token.VBAR, - token.CIRCUMFLEX, - token.AMPER, - token.LEFTSHIFT, - token.RIGHTSHIFT, - token.PLUS, - token.MINUS, - token.STAR, - token.SLASH, - token.DOUBLESLASH, - token.PERCENT, - token.AT, - token.TILDE, - token.DOUBLESTAR, -} -STARS: Final = {token.STAR, token.DOUBLESTAR} -VARARGS_SPECIALS: Final = STARS | {token.SLASH} -VARARGS_PARENTS: Final = { - syms.arglist, - syms.argument, # double star in arglist - syms.trailer, # single argument to call - syms.typedargslist, - syms.varargslist, # lambdas -} -UNPACKING_PARENTS: Final = { - syms.atom, # single element of a list or set literal - syms.dictsetmaker, - syms.listmaker, - syms.testlist_gexp, - syms.testlist_star_expr, -} -TEST_DESCENDANTS: Final = { - syms.test, - syms.lambdef, - syms.or_test, - syms.and_test, - syms.not_test, - syms.comparison, - syms.star_expr, - syms.expr, - syms.xor_expr, - syms.and_expr, - syms.shift_expr, - syms.arith_expr, - syms.trailer, - syms.term, - syms.power, -} -ASSIGNMENTS: Final = { - "=", - "+=", - "-=", - "*=", - "@=", - "/=", - "%=", - "&=", - "|=", - "^=", - "<<=", - ">>=", - "**=", - "//=", -} -COMPREHENSION_PRIORITY: Final = 20 -COMMA_PRIORITY: Final = 18 -TERNARY_PRIORITY: Final = 16 -LOGIC_PRIORITY: Final = 14 -STRING_PRIORITY: Final = 12 -COMPARATOR_PRIORITY: Final = 10 -MATH_PRIORITIES: Final = { - token.VBAR: 9, - token.CIRCUMFLEX: 8, - token.AMPER: 7, - token.LEFTSHIFT: 6, - token.RIGHTSHIFT: 6, - token.PLUS: 5, - token.MINUS: 5, - token.STAR: 4, - token.SLASH: 4, - token.DOUBLESLASH: 4, - token.PERCENT: 4, - token.AT: 4, - token.TILDE: 3, - token.DOUBLESTAR: 2, -} -DOT_PRIORITY: Final = 1 - - -@dataclass -class BracketTracker: - """Keeps track of brackets on a line.""" - - depth: int = 0 - bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) - delimiters: Dict[LeafID, Priority] = field(default_factory=dict) - previous: Optional[Leaf] = None - _for_loop_depths: List[int] = field(default_factory=list) - _lambda_argument_depths: List[int] = field(default_factory=list) - invisible: List[Leaf] = field(default_factory=list) - - def mark(self, leaf: Leaf) -> None: - """Mark `leaf` with bracket-related metadata. Keep track of delimiters. - - All leaves receive an int `bracket_depth` field that stores how deep - within brackets a given leaf is. 0 means there are no enclosing brackets - that started on this line. - - If a leaf is itself a closing bracket, it receives an `opening_bracket` - field that it forms a pair with. This is a one-directional link to - avoid reference cycles. 
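Before mark()'s docstring continues with the delimiter rules below, the depth bookkeeping it describes reduces to a few lines: a closing bracket is recorded after popping, an opening bracket before pushing, so both brackets of a pair sit at their shared depth. A toy rendition:

```python
OPENING, CLOSING = "([{", ")]}"
PAIR = {")": "(", "]": "[", "}": "{"}

def bracket_depths(tokens: str):
    stack, out = [], []
    for tok in tokens:
        if tok in CLOSING:
            assert stack and stack[-1] == PAIR[tok], "unbalanced brackets"
            stack.pop()  # closing bracket sits at its opener's depth
        out.append((tok, len(stack)))
        if tok in OPENING:
            stack.append(tok)  # everything after this is one level deeper
    return out

print(bracket_depths("f([x])"))
# [('f', 0), ('(', 0), ('[', 1), ('x', 2), (']', 1), (')', 0)]
```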
-
- If a leaf is a delimiter (a token on which Black can split the line if
- needed) and it's on depth 0, its `id()` is stored in the tracker's
- `delimiters` field.
- """
- if leaf.type == token.COMMENT:
- return
-
- self.maybe_decrement_after_for_loop_variable(leaf)
- self.maybe_decrement_after_lambda_arguments(leaf)
- if leaf.type in CLOSING_BRACKETS:
- self.depth -= 1
- opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
- leaf.opening_bracket = opening_bracket
- if not leaf.value:
- self.invisible.append(leaf)
- leaf.bracket_depth = self.depth
- if self.depth == 0:
- delim = is_split_before_delimiter(leaf, self.previous)
- if delim and self.previous is not None:
- self.delimiters[id(self.previous)] = delim
- else:
- delim = is_split_after_delimiter(leaf, self.previous)
- if delim:
- self.delimiters[id(leaf)] = delim
- if leaf.type in OPENING_BRACKETS:
- self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
- self.depth += 1
- if not leaf.value:
- self.invisible.append(leaf)
- self.previous = leaf
- self.maybe_increment_lambda_arguments(leaf)
- self.maybe_increment_for_loop_variable(leaf)
-
- def any_open_brackets(self) -> bool:
- """Return True if there is a yet unmatched open bracket on the line."""
- return bool(self.bracket_match)
-
- def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
- """Return the highest priority of a delimiter found on the line.
-
- Values are consistent with what `is_split_*_delimiter()` return.
- Raises ValueError on no delimiters.
- """
- return max(v for k, v in self.delimiters.items() if k not in exclude)
-
- def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
- """Return the number of delimiters with the given `priority`.
-
- If no `priority` is passed, defaults to max priority on the line.
- """
- if not self.delimiters:
- return 0
-
- priority = priority or self.max_delimiter_priority()
- return sum(1 for p in self.delimiters.values() if p == priority)
-
- def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
- """In a for loop or comprehension, the variables are often unpacked.
-
- To avoid splitting on the comma in this situation, increase the depth of
- tokens between `for` and `in`.
- """
- if leaf.type == token.NAME and leaf.value == "for":
- self.depth += 1
- self._for_loop_depths.append(self.depth)
- return True
-
- return False
-
- def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
- """See `maybe_increment_for_loop_variable` above for explanation."""
- if (
- self._for_loop_depths
- and self._for_loop_depths[-1] == self.depth
- and leaf.type == token.NAME
- and leaf.value == "in"
- ):
- self.depth -= 1
- self._for_loop_depths.pop()
- return True
-
- return False
-
- def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
- """In a lambda expression, there might be more than one argument.
-
- To avoid splitting on the comma in this situation, increase the depth of
- tokens between `lambda` and `:`.
- """ - if leaf.type == token.NAME and leaf.value == "lambda": - self.depth += 1 - self._lambda_argument_depths.append(self.depth) - return True - - return False - - def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: - """See `maybe_increment_lambda_arguments` above for explanation.""" - if ( - self._lambda_argument_depths - and self._lambda_argument_depths[-1] == self.depth - and leaf.type == token.COLON - ): - self.depth -= 1 - self._lambda_argument_depths.pop() - return True - - return False - - def get_open_lsqb(self) -> Optional[Leaf]: - """Return the most recent opening square bracket (if any).""" - return self.bracket_match.get((self.depth - 1, token.RSQB)) - - -@dataclass -class Line: - """Holds leaves and comments. Can be printed with `str(line)`.""" - - depth: int = 0 - leaves: List[Leaf] = field(default_factory=list) - # keys ordered like `leaves` - comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict) - bracket_tracker: BracketTracker = field(default_factory=BracketTracker) - inside_brackets: bool = False - should_explode: bool = False - - def append(self, leaf: Leaf, preformatted: bool = False) -> None: - """Add a new `leaf` to the end of the line. - - Unless `preformatted` is True, the `leaf` will receive a new consistent - whitespace prefix and metadata applied by :class:`BracketTracker`. - Trailing commas are maybe removed, unpacked for loop variables are - demoted from being delimiters. - - Inline comments are put aside. - """ - has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) - if not has_value: - return - - if token.COLON == leaf.type and self.is_class_paren_empty: - del self.leaves[-2:] - if self.leaves and not preformatted: - # Note: at this point leaf.prefix should be empty except for - # imports, for which we only preserve newlines. - leaf.prefix += whitespace( - leaf, complex_subscript=self.is_complex_subscript(leaf) - ) - if self.inside_brackets or not preformatted: - self.bracket_tracker.mark(leaf) - if self.maybe_should_explode(leaf): - self.should_explode = True - if not self.append_comment(leaf): - self.leaves.append(leaf) - - def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: - """Like :func:`append()` but disallow invalid standalone comment structure. - - Raises ValueError when any `leaf` is appended after a standalone comment - or when a standalone comment is not the first leaf on the line. 
- """ - if self.bracket_tracker.depth == 0: - if self.is_comment: - raise ValueError("cannot append to standalone comments") - - if self.leaves and leaf.type == STANDALONE_COMMENT: - raise ValueError( - "cannot append standalone comments to a populated line" - ) - - self.append(leaf, preformatted=preformatted) - - @property - def is_comment(self) -> bool: - """Is this line a standalone comment?""" - return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT - - @property - def is_decorator(self) -> bool: - """Is this line a decorator?""" - return bool(self) and self.leaves[0].type == token.AT - - @property - def is_import(self) -> bool: - """Is this an import line?""" - return bool(self) and is_import(self.leaves[0]) - - @property - def is_class(self) -> bool: - """Is this line a class definition?""" - return ( - bool(self) - and self.leaves[0].type == token.NAME - and self.leaves[0].value == "class" - ) - - @property - def is_stub_class(self) -> bool: - """Is this line a class definition with a body consisting only of "..."?""" - return self.is_class and self.leaves[-3:] == [ - Leaf(token.DOT, ".") for _ in range(3) - ] - - @property - def is_def(self) -> bool: - """Is this a function definition? (Also returns True for async defs.)""" - try: - first_leaf = self.leaves[0] - except IndexError: - return False - - try: - second_leaf: Optional[Leaf] = self.leaves[1] - except IndexError: - second_leaf = None - return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( - first_leaf.type == token.ASYNC - and second_leaf is not None - and second_leaf.type == token.NAME - and second_leaf.value == "def" - ) - - @property - def is_class_paren_empty(self) -> bool: - """Is this a class with no base classes but using parentheses? - - Those are unnecessary and should be removed. - """ - return ( - bool(self) - and len(self.leaves) == 4 - and self.is_class - and self.leaves[2].type == token.LPAR - and self.leaves[2].value == "(" - and self.leaves[3].type == token.RPAR - and self.leaves[3].value == ")" - ) - - @property - def is_triple_quoted_string(self) -> bool: - """Is the line a triple quoted string?""" - return ( - bool(self) - and self.leaves[0].type == token.STRING - and self.leaves[0].value.startswith(('"""', "'''")) - ) - - def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: - """If so, needs to be split before emitting.""" - for leaf in self.leaves: - if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: - return True - - return False - - def contains_uncollapsable_type_comments(self) -> bool: - ignored_ids = set() - try: - last_leaf = self.leaves[-1] - ignored_ids.add(id(last_leaf)) - if last_leaf.type == token.COMMA or ( - last_leaf.type == token.RPAR and not last_leaf.value - ): - # When trailing commas or optional parens are inserted by Black for - # consistency, comments after the previous last element are not moved - # (they don't have to, rendering will still be correct). So we ignore - # trailing commas and invisible. - last_leaf = self.leaves[-2] - ignored_ids.add(id(last_leaf)) - except IndexError: - return False - - # A type comment is uncollapsable if it is attached to a leaf - # that isn't at the end of the line (since that could cause it - # to get associated to a different argument) or if there are - # comments before it (since that could cause it to get hidden - # behind a comment. 
- comment_seen = False - for leaf_id, comments in self.comments.items(): - for comment in comments: - if is_type_comment(comment): - if comment_seen or ( - not is_type_comment(comment, " ignore") - and leaf_id not in ignored_ids - ): - return True - - comment_seen = True - - return False - - def contains_unsplittable_type_ignore(self) -> bool: - if not self.leaves: - return False - - # If a 'type: ignore' is attached to the end of a line, we - # can't split the line, because we can't know which of the - # subexpressions the ignore was meant to apply to. - # - # We only want this to apply to actual physical lines from the - # original source, though: we don't want the presence of a - # 'type: ignore' at the end of a multiline expression to - # justify pushing it all onto one line. Thus we - # (unfortunately) need to check the actual source lines and - # only report an unsplittable 'type: ignore' if this line was - # one line in the original code. - - # Grab the first and last line numbers, skipping generated leaves - first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0) - last_line = next( - (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0 - ) - - if first_line == last_line: - # We look at the last two leaves since a comma or an - # invisible paren could have been added at the end of the - # line. - for node in self.leaves[-2:]: - for comment in self.comments.get(id(node), []): - if is_type_comment(comment, " ignore"): - return True - - return False - - def contains_multiline_strings(self) -> bool: - return any(is_multiline_string(leaf) for leaf in self.leaves) - - def maybe_should_explode(self, closing: Leaf) -> bool: - """Return True if this line should explode (always be split), that is when: - - there's a trailing comma here; and - - it's not a one-tuple. - """ - if not ( - closing.type in CLOSING_BRACKETS - and self.leaves - and self.leaves[-1].type == token.COMMA - ): - return False - - if closing.type in {token.RBRACE, token.RSQB}: - return True - - if self.is_import: - return True - - if not is_one_tuple_between(closing.opening_bracket, closing, self.leaves): - return True - - return False - - def append_comment(self, comment: Leaf) -> bool: - """Add an inline or standalone comment to the line.""" - if ( - comment.type == STANDALONE_COMMENT - and self.bracket_tracker.any_open_brackets() - ): - comment.prefix = "" - return False - - if comment.type != token.COMMENT: - return False - - if not self.leaves: - comment.type = STANDALONE_COMMENT - comment.prefix = "" - return False - - last_leaf = self.leaves[-1] - if ( - last_leaf.type == token.RPAR - and not last_leaf.value - and last_leaf.parent - and len(list(last_leaf.parent.leaves())) <= 3 - and not is_type_comment(comment) - ): - # Comments on an optional parens wrapping a single leaf should belong to - # the wrapped node except if it's a type comment. Pinning the comment like - # this avoids unstable formatting caused by comment migration. 
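
# The maybe_should_explode() rule above, shown as formatted output
# (illustrative): a pre-existing "magic" trailing comma keeps the bracket
# pair exploded, one element per line.
xs = [1, 2, 3]  # no trailing comma before the ']': may stay on one line
ys = [
    1,
    2,
    3,
]  # magic trailing comma: Black keeps the pair exploded
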
- if len(self.leaves) < 2: - comment.type = STANDALONE_COMMENT - comment.prefix = "" - return False - - last_leaf = self.leaves[-2] - self.comments.setdefault(id(last_leaf), []).append(comment) - return True - - def comments_after(self, leaf: Leaf) -> List[Leaf]: - """Generate comments that should appear directly after `leaf`.""" - return self.comments.get(id(leaf), []) - - def remove_trailing_comma(self) -> None: - """Remove the trailing comma and moves the comments attached to it.""" - trailing_comma = self.leaves.pop() - trailing_comma_comments = self.comments.pop(id(trailing_comma), []) - self.comments.setdefault(id(self.leaves[-1]), []).extend( - trailing_comma_comments - ) - - def is_complex_subscript(self, leaf: Leaf) -> bool: - """Return True iff `leaf` is part of a slice with non-trivial exprs.""" - open_lsqb = self.bracket_tracker.get_open_lsqb() - if open_lsqb is None: - return False - - subscript_start = open_lsqb.next_sibling - - if isinstance(subscript_start, Node): - if subscript_start.type == syms.listmaker: - return False - - if subscript_start.type == syms.subscriptlist: - subscript_start = child_towards(subscript_start, leaf) - return subscript_start is not None and any( - n.type in TEST_DESCENDANTS for n in subscript_start.pre_order() - ) - - def clone(self) -> "Line": - return Line( - depth=self.depth, - inside_brackets=self.inside_brackets, - should_explode=self.should_explode, - ) - - def __str__(self) -> str: - """Render the line.""" - if not self: - return "\n" - - indent = " " * self.depth - leaves = iter(self.leaves) - first = next(leaves) - res = f"{first.prefix}{indent}{first.value}" - for leaf in leaves: - res += str(leaf) - for comment in itertools.chain.from_iterable(self.comments.values()): - res += str(comment) - - return res + "\n" - - def __bool__(self) -> bool: - """Return True if the line has leaves or comments.""" - return bool(self.leaves or self.comments) - - -@dataclass -class EmptyLineTracker: - """Provides a stateful method that returns the number of potential extra - empty lines needed before and after the currently processed line. - - Note: this tracker works on lines that haven't been split yet. It assumes - the prefix of the first leaf consists of optional newlines. Those newlines - are consumed by `maybe_empty_lines()` and included in the computation. - """ - - is_pyi: bool = False - previous_line: Optional[Line] = None - previous_after: int = 0 - previous_defs: List[int] = field(default_factory=list) - - def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: - """Return the number of extra empty lines before and after the `current_line`. - - This is for separating `def`, `async def` and `class` with extra empty - lines (two on module-level). - """ - before, after = self._maybe_empty_lines(current_line) - before = ( - # Black should not insert empty lines at the beginning - # of the file - 0 - if self.previous_line is None - else before - self.previous_after - ) - self.previous_after = after - self.previous_line = current_line - return before, after - - def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: - max_allowed = 1 - if current_line.depth == 0: - max_allowed = 1 if self.is_pyi else 2 - if current_line.leaves: - # Consume the first leaf's extra newlines. 
- first_leaf = current_line.leaves[0] - before = first_leaf.prefix.count("\n") - before = min(before, max_allowed) - first_leaf.prefix = "" - else: - before = 0 - depth = current_line.depth - while self.previous_defs and self.previous_defs[-1] >= depth: - self.previous_defs.pop() - if self.is_pyi: - before = 0 if depth else 1 - else: - before = 1 if depth else 2 - if current_line.is_decorator or current_line.is_def or current_line.is_class: - return self._maybe_empty_lines_for_class_or_def(current_line, before) - - if ( - self.previous_line - and self.previous_line.is_import - and not current_line.is_import - and depth == self.previous_line.depth - ): - return (before or 1), 0 - - if ( - self.previous_line - and self.previous_line.is_class - and current_line.is_triple_quoted_string - ): - return before, 1 - - return before, 0 - - def _maybe_empty_lines_for_class_or_def( - self, current_line: Line, before: int - ) -> Tuple[int, int]: - if not current_line.is_decorator: - self.previous_defs.append(current_line.depth) - if self.previous_line is None: - # Don't insert empty lines before the first line in the file. - return 0, 0 - - if self.previous_line.is_decorator: - return 0, 0 - - if self.previous_line.depth < current_line.depth and ( - self.previous_line.is_class or self.previous_line.is_def - ): - return 0, 0 - - if ( - self.previous_line.is_comment - and self.previous_line.depth == current_line.depth - and before == 0 - ): - return 0, 0 - - if self.is_pyi: - if self.previous_line.depth > current_line.depth: - newlines = 1 - elif current_line.is_class or self.previous_line.is_class: - if current_line.is_stub_class and self.previous_line.is_stub_class: - # No blank line between classes with an empty body - newlines = 0 - else: - newlines = 1 - elif current_line.is_def and not self.previous_line.is_def: - # Blank line between a block of functions and a block of non-functions - newlines = 1 - else: - newlines = 0 - else: - newlines = 2 - if current_line.depth and newlines: - newlines -= 1 - return newlines, 0 - - -@dataclass -class LineGenerator(Visitor[Line]): - """Generates reformatted Line objects. Empty lines are not emitted. - - Note: destroys the tree it's visiting by mutating prefixes of its leaves - in ways that will no longer stringify to valid Python code on the tree. - """ - - is_pyi: bool = False - normalize_strings: bool = True - current_line: Line = field(default_factory=Line) - remove_u_prefix: bool = False - - def line(self, indent: int = 0) -> Iterator[Line]: - """Generate a line. - - If the line is empty, only emit if it makes sense. - If the line is too long, split it first and then generate. - - If any lines were generated, set up a new current_line. - """ - if not self.current_line: - self.current_line.depth += indent - return # Line is empty, don't emit. Creating a new one unnecessary. - - complete_line = self.current_line - self.current_line = Line(depth=complete_line.depth + indent) - yield complete_line - - def visit_default(self, node: LN) -> Iterator[Line]: - """Default `visit_*()` implementation. 
Recurses to children of `node`."""
- if isinstance(node, Leaf):
- any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
- for comment in generate_comments(node):
- if any_open_brackets:
- # any comment within brackets is subject to splitting
- self.current_line.append(comment)
- elif comment.type == token.COMMENT:
- # regular trailing comment
- self.current_line.append(comment)
- yield from self.line()
-
- else:
- # regular standalone comment
- yield from self.line()
-
- self.current_line.append(comment)
- yield from self.line()
-
- normalize_prefix(node, inside_brackets=any_open_brackets)
- if self.normalize_strings and node.type == token.STRING:
- normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
- normalize_string_quotes(node)
- if node.type == token.NUMBER:
- normalize_numeric_literal(node)
- if node.type not in WHITESPACE:
- self.current_line.append(node)
- yield from super().visit_default(node)
-
- def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
- """Increase indentation level, maybe yield a line."""
- # In blib2to3 INDENT never holds comments.
- yield from self.line(+1)
- yield from self.visit_default(node)
-
- def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
- """Decrease indentation level, maybe yield a line."""
- # The current line might still wait for trailing comments. At DEDENT time
- # there won't be any (they would be prefixes on the preceding NEWLINE).
- # Emit the line then.
- yield from self.line()
-
- # While DEDENT has no value, its prefix may contain standalone comments
- # that belong to the current indentation level. Get 'em.
- yield from self.visit_default(node)
-
- # Finally, emit the dedent.
- yield from self.line(-1)
-
- def visit_stmt(
- self, node: Node, keywords: Set[str], parens: Set[str]
- ) -> Iterator[Line]:
- """Visit a statement.
-
- This implementation is shared for `if`, `while`, `for`, `try`, `except`,
- `def`, `with`, `class`, `assert` and assignments.
-
- The relevant Python language `keywords` for a given statement will be
- NAME leaves within it. This method puts those on a separate line.
-
- `parens` holds a set of string leaf values immediately after which
- invisible parens should be put.
- """ - normalize_invisible_parens(node, parens_after=parens) - for child in node.children: - if child.type == token.NAME and child.value in keywords: # type: ignore - yield from self.line() - - yield from self.visit(child) - - def visit_suite(self, node: Node) -> Iterator[Line]: - """Visit a suite.""" - if self.is_pyi and is_stub_suite(node): - yield from self.visit(node.children[2]) - else: - yield from self.visit_default(node) - - def visit_simple_stmt(self, node: Node) -> Iterator[Line]: - """Visit a statement without nested statements.""" - is_suite_like = node.parent and node.parent.type in STATEMENT - if is_suite_like: - if self.is_pyi and is_stub_body(node): - yield from self.visit_default(node) - else: - yield from self.line(+1) - yield from self.visit_default(node) - yield from self.line(-1) - - else: - if not self.is_pyi or not node.parent or not is_stub_suite(node.parent): - yield from self.line() - yield from self.visit_default(node) - - def visit_async_stmt(self, node: Node) -> Iterator[Line]: - """Visit `async def`, `async for`, `async with`.""" - yield from self.line() - - children = iter(node.children) - for child in children: - yield from self.visit(child) - - if child.type == token.ASYNC: - break - - internal_stmt = next(children) - for child in internal_stmt.children: - yield from self.visit(child) - - def visit_decorators(self, node: Node) -> Iterator[Line]: - """Visit decorators.""" - for child in node.children: - yield from self.line() - yield from self.visit(child) - - def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: - """Remove a semicolon and put the other statement on a separate line.""" - yield from self.line() - - def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: - """End of file. Process outstanding comments and end with a newline.""" - yield from self.visit_default(leaf) - yield from self.line() - - def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: - if not self.current_line.bracket_tracker.any_open_brackets(): - yield from self.line() - yield from self.visit_default(leaf) - - def visit_factor(self, node: Node) -> Iterator[Line]: - """Force parentheses between a unary op and a binary power: - - -2 ** 8 -> -(2 ** 8) - """ - _operator, operand = node.children - if ( - operand.type == syms.power - and len(operand.children) == 3 - and operand.children[1].type == token.DOUBLESTAR - ): - lpar = Leaf(token.LPAR, "(") - rpar = Leaf(token.RPAR, ")") - index = operand.remove() or 0 - node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) - yield from self.visit_default(node) - - def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: - if is_docstring(leaf) and "\\\n" not in leaf.value: - # We're ignoring docstrings with backslash newline escapes because changing - # indentation of those changes the AST representation of the code. 
- prefix = get_string_prefix(leaf.value) - lead_len = len(prefix) + 3 - tail_len = -3 - indent = " " * 4 * self.current_line.depth - docstring = fix_docstring(leaf.value[lead_len:tail_len], indent) - if docstring: - if leaf.value[lead_len - 1] == docstring[0]: - docstring = " " + docstring - if leaf.value[tail_len + 1] == docstring[-1]: - docstring = docstring + " " - leaf.value = leaf.value[0:lead_len] + docstring + leaf.value[tail_len:] - normalize_string_quotes(leaf) - - yield from self.visit_default(leaf) - - def __post_init__(self) -> None: - """You are in a twisty little maze of passages.""" - v = self.visit_stmt - Ø: Set[str] = set() - self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) - self.visit_if_stmt = partial( - v, keywords={"if", "else", "elif"}, parens={"if", "elif"} - ) - self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) - self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) - self.visit_try_stmt = partial( - v, keywords={"try", "except", "else", "finally"}, parens=Ø - ) - self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) - self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) - self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø) - self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) - self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) - self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) - self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) - self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) - self.visit_async_funcdef = self.visit_async_stmt - self.visit_decorated = self.visit_decorators - - -IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist} -BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE} -OPENING_BRACKETS = set(BRACKET.keys()) -CLOSING_BRACKETS = set(BRACKET.values()) -BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS -ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} - - -def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 - """Return whitespace prefix if needed for the given `leaf`. - - `complex_subscript` signals whether the given leaf is part of a subscription - which has non-trivial arguments, like arithmetic expressions or function calls. - """ - NO = "" - SPACE = " " - DOUBLESPACE = " " - t = leaf.type - p = leaf.parent - v = leaf.value - if t in ALWAYS_NO_SPACE: - return NO - - if t == token.COMMENT: - return DOUBLESPACE - - assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" - if t == token.COLON and p.type not in { - syms.subscript, - syms.subscriptlist, - syms.sliceop, - }: - return NO - - prev = leaf.prev_sibling - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type in OPENING_BRACKETS: - return NO - - if t == token.COLON: - if prevp.type == token.COLON: - return NO - - elif prevp.type != token.COMMA and not complex_subscript: - return NO - - return SPACE - - if prevp.type == token.EQUAL: - if prevp.parent: - if prevp.parent.type in { - syms.arglist, - syms.argument, - syms.parameters, - syms.varargslist, - }: - return NO - - elif prevp.parent.type == syms.typedargslist: - # A bit hacky: if the equal sign has whitespace, it means we - # previously found it's a typed argument. So, we're using - # that, too. 
- return prevp.prefix - - elif prevp.type in VARARGS_SPECIALS: - if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): - return NO - - elif prevp.type == token.COLON: - if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: - return SPACE if complex_subscript else NO - - elif ( - prevp.parent - and prevp.parent.type == syms.factor - and prevp.type in MATH_OPERATORS - ): - return NO - - elif ( - prevp.type == token.RIGHTSHIFT - and prevp.parent - and prevp.parent.type == syms.shift_expr - and prevp.prev_sibling - and prevp.prev_sibling.type == token.NAME - and prevp.prev_sibling.value == "print" # type: ignore - ): - # Python 2 print chevron - return NO - - elif prev.type in OPENING_BRACKETS: - return NO - - if p.type in {syms.parameters, syms.arglist}: - # untyped function signatures or calls - if not prev or prev.type != token.COMMA: - return NO - - elif p.type == syms.varargslist: - # lambdas - if prev and prev.type != token.COMMA: - return NO - - elif p.type == syms.typedargslist: - # typed function signatures - if not prev: - return NO - - if t == token.EQUAL: - if prev.type != syms.tname: - return NO - - elif prev.type == token.EQUAL: - # A bit hacky: if the equal sign has whitespace, it means we - # previously found it's a typed argument. So, we're using that, too. - return prev.prefix - - elif prev.type != token.COMMA: - return NO - - elif p.type == syms.tname: - # type names - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type != token.COMMA: - return NO - - elif p.type == syms.trailer: - # attributes and calls - if t == token.LPAR or t == token.RPAR: - return NO - - if not prev: - if t == token.DOT: - prevp = preceding_leaf(p) - if not prevp or prevp.type != token.NUMBER: - return NO - - elif t == token.LSQB: - return NO - - elif prev.type != token.COMMA: - return NO - - elif p.type == syms.argument: - # single argument - if t == token.EQUAL: - return NO - - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type == token.LPAR: - return NO - - elif prev.type in {token.EQUAL} | VARARGS_SPECIALS: - return NO - - elif p.type == syms.decorator: - # decorators - return NO - - elif p.type == syms.dotted_name: - if prev: - return NO - - prevp = preceding_leaf(p) - if not prevp or prevp.type == token.AT or prevp.type == token.DOT: - return NO - - elif p.type == syms.classdef: - if t == token.LPAR: - return NO - - if prev and prev.type == token.LPAR: - return NO - - elif p.type in {syms.subscript, syms.sliceop}: - # indexing - if not prev: - assert p.parent is not None, "subscripts are always parented" - if p.parent.type == syms.subscriptlist: - return SPACE - - return NO - - elif not complex_subscript: - return NO - - elif p.type == syms.atom: - if prev and t == token.DOT: - # dots, but not the first one. 
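
# Two of the whitespace() decisions above, shown as formatted output
# (illustrative): '=' gets spaces only next to an annotated parameter
# (the typedargslist/tname branches); keyword arguments stay tight.
def g(a, b=1, *, c: int = 2):  # annotated default: spaces around '='
    return (a, b, c)


g(1, b=2, c=3)  # call site: no spaces around '='
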
- return NO - - elif p.type == syms.dictsetmaker: - # dict unpacking - if prev and prev.type == token.DOUBLESTAR: - return NO - - elif p.type in {syms.factor, syms.star_expr}: - # unary ops - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type in OPENING_BRACKETS: - return NO - - prevp_parent = prevp.parent - assert prevp_parent is not None - if prevp.type == token.COLON and prevp_parent.type in { - syms.subscript, - syms.sliceop, - }: - return NO - - elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument: - return NO - - elif t in {token.NAME, token.NUMBER, token.STRING}: - return NO - - elif p.type == syms.import_from: - if t == token.DOT: - if prev and prev.type == token.DOT: - return NO - - elif t == token.NAME: - if v == "import": - return SPACE - - if prev and prev.type == token.DOT: - return NO - - elif p.type == syms.sliceop: - return NO - - return SPACE - - -def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]: - """Return the first leaf that precedes `node`, if any.""" - while node: - res = node.prev_sibling - if res: - if isinstance(res, Leaf): - return res - - try: - return list(res.leaves())[-1] - - except IndexError: - return None - - node = node.parent - return None - - -def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool: - """Return if the `node` and its previous siblings match types against the provided - list of tokens; the provided `node`has its type matched against the last element in - the list. `None` can be used as the first element to declare that the start of the - list is anchored at the start of its parent's children.""" - if not tokens: - return True - if tokens[-1] is None: - return node is None - if not node: - return False - if node.type != tokens[-1]: - return False - return prev_siblings_are(node.prev_sibling, tokens[:-1]) - - -def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]: - """Return the child of `ancestor` that contains `descendant`.""" - node: Optional[LN] = descendant - while node and node.parent != ancestor: - node = node.parent - return node - - -def container_of(leaf: Leaf) -> LN: - """Return `leaf` or one of its ancestors that is the topmost container of it. - - By "container" we mean a node where `leaf` is the very first child. - """ - same_prefix = leaf.prefix - container: LN = leaf - while container: - parent = container.parent - if parent is None: - break - - if parent.children[0].prefix != same_prefix: - break - - if parent.type == syms.file_input: - break - - if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: - break - - container = parent - return container - - -def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: - """Return the priority of the `leaf` delimiter, given a line break after it. - - The delimiter priorities returned here are from those delimiters that would - cause a line break after themselves. - - Higher numbers are higher priority. - """ - if leaf.type == token.COMMA: - return COMMA_PRIORITY - - return 0 - - -def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: - """Return the priority of the `leaf` delimiter, given a line break before it. - - The delimiter priorities returned here are from those delimiters that would - cause a line break before themselves. - - Higher numbers are higher priority. - """ - if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): - # * and ** might also be MATH_OPERATORS but in this case they are not. 
- # Don't treat them as a delimiter. - return 0 - - if ( - leaf.type == token.DOT - and leaf.parent - and leaf.parent.type not in {syms.import_from, syms.dotted_name} - and (previous is None or previous.type in CLOSING_BRACKETS) - ): - return DOT_PRIORITY - - if ( - leaf.type in MATH_OPERATORS - and leaf.parent - and leaf.parent.type not in {syms.factor, syms.star_expr} - ): - return MATH_PRIORITIES[leaf.type] - - if leaf.type in COMPARATORS: - return COMPARATOR_PRIORITY - - if ( - leaf.type == token.STRING - and previous is not None - and previous.type == token.STRING - ): - return STRING_PRIORITY - - if leaf.type not in {token.NAME, token.ASYNC}: - return 0 - - if ( - leaf.value == "for" - and leaf.parent - and leaf.parent.type in {syms.comp_for, syms.old_comp_for} - or leaf.type == token.ASYNC - ): - if ( - not isinstance(leaf.prev_sibling, Leaf) - or leaf.prev_sibling.value != "async" - ): - return COMPREHENSION_PRIORITY - - if ( - leaf.value == "if" - and leaf.parent - and leaf.parent.type in {syms.comp_if, syms.old_comp_if} - ): - return COMPREHENSION_PRIORITY - - if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: - return TERNARY_PRIORITY - - if leaf.value == "is": - return COMPARATOR_PRIORITY - - if ( - leaf.value == "in" - and leaf.parent - and leaf.parent.type in {syms.comp_op, syms.comparison} - and not ( - previous is not None - and previous.type == token.NAME - and previous.value == "not" - ) - ): - return COMPARATOR_PRIORITY - - if ( - leaf.value == "not" - and leaf.parent - and leaf.parent.type == syms.comp_op - and not ( - previous is not None - and previous.type == token.NAME - and previous.value == "is" - ) - ): - return COMPARATOR_PRIORITY - - if leaf.value in LOGIC_OPERATORS and leaf.parent: - return LOGIC_PRIORITY - - return 0 - - -FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"} -FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"} - - -def generate_comments(leaf: LN) -> Iterator[Leaf]: - """Clean the prefix of the `leaf` and generate comments from it, if any. - - Comments in lib2to3 are shoved into the whitespace prefix. This happens - in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation - move because it does away with modifying the grammar to include all the - possible places in which comments can be placed. - - The sad consequence for us though is that comments don't "belong" anywhere. - This is why this function generates simple parentless Leaf objects for - comments. We simply don't know what the correct parent should be. - - No matter though, we can live without this. We really only need to - differentiate between inline and standalone comments. The latter don't - share the line with any code. - - Inline comments are emitted as regular token.COMMENT leaves. Standalone - are emitted with a fake STANDALONE_COMMENT token identifier. - """ - for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER): - yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) - - -@dataclass -class ProtoComment: - """Describes a piece of syntax that is a comment. - - It's not a :class:`blib2to3.pytree.Leaf` so that: - - * it can be cached (`Leaf` objects should not be reused more than once as - they store their lineno, column, prefix, and parent information); - * `newlines` and `consumed` fields are kept separate from the `value`. This - simplifies handling of special marker comments like ``# fmt: off/on``. 
- """ - - type: int # token.COMMENT or STANDALONE_COMMENT - value: str # content of the comment - newlines: int # how many newlines before the comment - consumed: int # how many characters of the original leaf's prefix did we consume - - -@lru_cache(maxsize=4096) -def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: - """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" - result: List[ProtoComment] = [] - if not prefix or "#" not in prefix: - return result - - consumed = 0 - nlines = 0 - ignored_lines = 0 - for index, line in enumerate(prefix.split("\n")): - consumed += len(line) + 1 # adding the length of the split '\n' - line = line.lstrip() - if not line: - nlines += 1 - if not line.startswith("#"): - # Escaped newlines outside of a comment are not really newlines at - # all. We treat a single-line comment following an escaped newline - # as a simple trailing comment. - if line.endswith("\\"): - ignored_lines += 1 - continue - - if index == ignored_lines and not is_endmarker: - comment_type = token.COMMENT # simple trailing comment - else: - comment_type = STANDALONE_COMMENT - comment = make_comment(line) - result.append( - ProtoComment( - type=comment_type, value=comment, newlines=nlines, consumed=consumed - ) - ) - nlines = 0 - return result - - -def make_comment(content: str) -> str: - """Return a consistently formatted comment from the given `content` string. - - All comments (except for "##", "#!", "#:", '#'", "#%%") should have a single - space between the hash sign and the content. - - If `content` didn't start with a hash sign, one is provided. - """ - content = content.rstrip() - if not content: - return "#" - - if content[0] == "#": - content = content[1:] - if content and content[0] not in " !:#'%": - content = " " + content - return "#" + content - - -def transform_line( - line: Line, mode: Mode, features: Collection[Feature] = () -) -> Iterator[Line]: - """Transform a `line`, potentially splitting it into many lines. - - They should fit in the allotted `line_length` but might not be able to. - - `features` are syntactical features that may be used in the output. - """ - if line.is_comment: - yield line - return - - line_str = line_to_string(line) - - def init_st(ST: Type[StringTransformer]) -> StringTransformer: - """Initialize StringTransformer""" - return ST(mode.line_length, mode.string_normalization) - - string_merge = init_st(StringMerger) - string_paren_strip = init_st(StringParenStripper) - string_split = init_st(StringSplitter) - string_paren_wrap = init_st(StringParenWrapper) - - transformers: List[Transformer] - if ( - not line.contains_uncollapsable_type_comments() - and not line.should_explode - and ( - is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) - or line.contains_unsplittable_type_ignore() - ) - and not (line.inside_brackets and line.contains_standalone_comments()) - ): - # Only apply basic string preprocessing, since lines shouldn't be split here. - if mode.experimental_string_processing: - transformers = [string_merge, string_paren_strip] - else: - transformers = [] - elif line.is_def: - transformers = [left_hand_split] - else: - - def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]: - """Wraps calls to `right_hand_split`. - - The calls increasingly `omit` right-hand trailers (bracket pairs with - content), meaning the trailers get glued together to split on another - bracket pair instead. 
- """ - for omit in generate_trailers_to_omit(line, mode.line_length): - lines = list( - right_hand_split(line, mode.line_length, features, omit=omit) - ) - # Note: this check is only able to figure out if the first line of the - # *current* transformation fits in the line length. This is true only - # for simple cases. All others require running more transforms via - # `transform_line()`. This check doesn't know if those would succeed. - if is_line_short_enough(lines[0], line_length=mode.line_length): - yield from lines - return - - # All splits failed, best effort split with no omits. - # This mostly happens to multiline strings that are by definition - # reported as not fitting a single line, as well as lines that contain - # trailing commas (those have to be exploded). - yield from right_hand_split( - line, line_length=mode.line_length, features=features - ) - - if mode.experimental_string_processing: - if line.inside_brackets: - transformers = [ - string_merge, - string_paren_strip, - delimiter_split, - standalone_comment_split, - string_split, - string_paren_wrap, - rhs, - ] - else: - transformers = [ - string_merge, - string_paren_strip, - string_split, - string_paren_wrap, - rhs, - ] - else: - if line.inside_brackets: - transformers = [delimiter_split, standalone_comment_split, rhs] - else: - transformers = [rhs] - - for transform in transformers: - # We are accumulating lines in `result` because we might want to abort - # mission and return the original line in the end, or attempt a different - # split altogether. - try: - result = run_transformer(line, transform, mode, features, line_str=line_str) - except CannotTransform: - continue - else: - yield from result - break - - else: - yield line - - -@dataclass # type: ignore -class StringTransformer(ABC): - """ - An implementation of the Transformer protocol that relies on its - subclasses overriding the template methods `do_match(...)` and - `do_transform(...)`. - - This Transformer works exclusively on strings (for example, by merging - or splitting them). - - The following sections can be found among the docstrings of each concrete - StringTransformer subclass. - - Requirements: - Which requirements must be met of the given Line for this - StringTransformer to be applied? - - Transformations: - If the given Line meets all of the above requirements, which string - transformations can you expect to be applied to it by this - StringTransformer? - - Collaborations: - What contractual agreements does this StringTransformer have with other - StringTransfomers? Such collaborations should be eliminated/minimized - as much as possible. - """ - - line_length: int - normalize_strings: bool - __name__ = "StringTransformer" - - @abstractmethod - def do_match(self, line: Line) -> TMatchResult: - """ - Returns: - * Ok(string_idx) such that `line.leaves[string_idx]` is our target - string, if a match was able to be made. - OR - * Err(CannotTransform), if a match was not able to be made. - """ - - @abstractmethod - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - """ - Yields: - * Ok(new_line) where new_line is the new transformed line. - OR - * Err(CannotTransform) if the transformation failed for some reason. The - `do_match(...)` template method should usually be used to reject - the form of the given Line, but in some cases it is difficult to - know whether or not a Line meets the StringTransformer's - requirements until the transformation is already midway. 
- - Side Effects: - This method should NOT mutate @line directly, but it MAY mutate the - Line's underlying Node structure. (WARNING: If the underlying Node - structure IS altered, then this method should NOT be allowed to - yield an CannotTransform after that point.) - """ - - def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]: - """ - StringTransformer instances have a call signature that mirrors that of - the Transformer type. - - Raises: - CannotTransform(...) if the concrete StringTransformer class is unable - to transform @line. - """ - # Optimization to avoid calling `self.do_match(...)` when the line does - # not contain any string. - if not any(leaf.type == token.STRING for leaf in line.leaves): - raise CannotTransform("There are no strings in this line.") - - match_result = self.do_match(line) - - if isinstance(match_result, Err): - cant_transform = match_result.err() - raise CannotTransform( - f"The string transformer {self.__class__.__name__} does not recognize" - " this line as one that it can transform." - ) from cant_transform - - string_idx = match_result.ok() - - for line_result in self.do_transform(line, string_idx): - if isinstance(line_result, Err): - cant_transform = line_result.err() - raise CannotTransform( - "StringTransformer failed while attempting to transform string." - ) from cant_transform - line = line_result.ok() - yield line - - -@dataclass -class CustomSplit: - """A custom (i.e. manual) string split. - - A single CustomSplit instance represents a single substring. - - Examples: - Consider the following string: - ``` - "Hi there friend." - " This is a custom" - f" string {split}." - ``` - - This string will correspond to the following three CustomSplit instances: - ``` - CustomSplit(False, 16) - CustomSplit(False, 17) - CustomSplit(True, 16) - ``` - """ - - has_prefix: bool - break_idx: int - - -class CustomSplitMapMixin: - """ - This mixin class is used to map merged strings to a sequence of - CustomSplits, which will then be used to re-split the strings iff none of - the resultant substrings go over the configured max line length. - """ - - _Key = Tuple[StringID, str] - _CUSTOM_SPLIT_MAP: Dict[_Key, Tuple[CustomSplit, ...]] = defaultdict(tuple) - - @staticmethod - def _get_key(string: str) -> "CustomSplitMapMixin._Key": - """ - Returns: - A unique identifier that is used internally to map @string to a - group of custom splits. - """ - return (id(string), string) - - def add_custom_splits( - self, string: str, custom_splits: Iterable[CustomSplit] - ) -> None: - """Custom Split Map Setter Method - - Side Effects: - Adds a mapping from @string to the custom splits @custom_splits. - """ - key = self._get_key(string) - self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits) - - def pop_custom_splits(self, string: str) -> List[CustomSplit]: - """Custom Split Map Getter Method - - Returns: - * A list of the custom splits that are mapped to @string, if any - exist. - OR - * [], otherwise. - - Side Effects: - Deletes the mapping between @string and its associated custom - splits (which are returned to the caller). - """ - key = self._get_key(string) - - custom_splits = self._CUSTOM_SPLIT_MAP[key] - del self._CUSTOM_SPLIT_MAP[key] - - return list(custom_splits) - - def has_custom_splits(self, string: str) -> bool: - """ - Returns: - True iff @string is associated with a set of custom splits. 
- """ - key = self._get_key(string) - return key in self._CUSTOM_SPLIT_MAP - - -class StringMerger(CustomSplitMapMixin, StringTransformer): - """StringTransformer that merges strings together. - - Requirements: - (A) The line contains adjacent strings such that at most one substring - has inline comments AND none of those inline comments are pragmas AND - the set of all substring prefixes is either of length 1 or equal to - {"", "f"} AND none of the substrings are raw strings (i.e. are prefixed - with 'r'). - OR - (B) The line contains a string which uses line continuation backslashes. - - Transformations: - Depending on which of the two requirements above where met, either: - - (A) The string group associated with the target string is merged. - OR - (B) All line-continuation backslashes are removed from the target string. - - Collaborations: - StringMerger provides custom split information to StringSplitter. - """ - - def do_match(self, line: Line) -> TMatchResult: - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - if ( - leaf.type == token.STRING - and is_valid_index(i + 1) - and LL[i + 1].type == token.STRING - ): - return Ok(i) - - if leaf.type == token.STRING and "\\\n" in leaf.value: - return Ok(i) - - return TErr("This line has no strings that need merging.") - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - new_line = line - rblc_result = self.__remove_backslash_line_continuation_chars( - new_line, string_idx - ) - if isinstance(rblc_result, Ok): - new_line = rblc_result.ok() - - msg_result = self.__merge_string_group(new_line, string_idx) - if isinstance(msg_result, Ok): - new_line = msg_result.ok() - - if isinstance(rblc_result, Err) and isinstance(msg_result, Err): - msg_cant_transform = msg_result.err() - rblc_cant_transform = rblc_result.err() - cant_transform = CannotTransform( - "StringMerger failed to merge any strings in this line." - ) - - # Chain the errors together using `__cause__`. - msg_cant_transform.__cause__ = rblc_cant_transform - cant_transform.__cause__ = msg_cant_transform - - yield Err(cant_transform) - else: - yield Ok(new_line) - - @staticmethod - def __remove_backslash_line_continuation_chars( - line: Line, string_idx: int - ) -> TResult[Line]: - """ - Merge strings that were split across multiple lines using - line-continuation backslashes. - - Returns: - Ok(new_line), if @line contains backslash line-continuation - characters. - OR - Err(CannotTransform), otherwise. - """ - LL = line.leaves - - string_leaf = LL[string_idx] - if not ( - string_leaf.type == token.STRING - and "\\\n" in string_leaf.value - and not has_triple_quotes(string_leaf.value) - ): - return TErr( - f"String leaf {string_leaf} does not contain any backslash line" - " continuation characters." - ) - - new_line = line.clone() - new_line.comments = line.comments.copy() - append_leaves(new_line, line, LL) - - new_string_leaf = new_line.leaves[string_idx] - new_string_leaf.value = new_string_leaf.value.replace("\\\n", "") - - return Ok(new_line) - - def __merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]: - """ - Merges string group (i.e. set of adjacent strings) where the first - string in the group is `line.leaves[string_idx]`. - - Returns: - Ok(new_line), if ALL of the validation checks found in - __validate_msg(...) pass. - OR - Err(CannotTransform), otherwise. 
- """ - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - - vresult = self.__validate_msg(line, string_idx) - if isinstance(vresult, Err): - return vresult - - # If the string group is wrapped inside an Atom node, we must make sure - # to later replace that Atom with our new (merged) string leaf. - atom_node = LL[string_idx].parent - - # We will place BREAK_MARK in between every two substrings that we - # merge. We will then later go through our final result and use the - # various instances of BREAK_MARK we find to add the right values to - # the custom split map. - BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@" - - QUOTE = LL[string_idx].value[-1] - - def make_naked(string: str, string_prefix: str) -> str: - """Strip @string (i.e. make it a "naked" string) - - Pre-conditions: - * assert_is_leaf_string(@string) - - Returns: - A string that is identical to @string except that - @string_prefix has been stripped, the surrounding QUOTE - characters have been removed, and any remaining QUOTE - characters have been escaped. - """ - assert_is_leaf_string(string) - - RE_EVEN_BACKSLASHES = r"(?:(?= 0 - ), "Logic error while filling the custom string breakpoint cache." - - temp_string = temp_string[mark_idx + len(BREAK_MARK) :] - breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1 - custom_splits.append(CustomSplit(has_prefix, breakpoint_idx)) - - string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, "")) - - if atom_node is not None: - replace_child(atom_node, string_leaf) - - # Build the final line ('new_line') that this method will later return. - new_line = line.clone() - for (i, leaf) in enumerate(LL): - if i == string_idx: - new_line.append(string_leaf) - - if string_idx <= i < string_idx + num_of_strings: - for comment_leaf in line.comments_after(LL[i]): - new_line.append(comment_leaf, preformatted=True) - continue - - append_leaves(new_line, line, [leaf]) - - self.add_custom_splits(string_leaf.value, custom_splits) - return Ok(new_line) - - @staticmethod - def __validate_msg(line: Line, string_idx: int) -> TResult[None]: - """Validate (M)erge (S)tring (G)roup - - Transform-time string validation logic for __merge_string_group(...). - - Returns: - * Ok(None), if ALL validation checks (listed below) pass. - OR - * Err(CannotTransform), if any of the following are true: - - The target string is not in a string group (i.e. it has no - adjacent strings). - - The string group has more than one inline comment. - - The string group has an inline comment that appears to be a pragma. - - The set of all string prefixes in the string group is of - length greater than one and is not equal to {"", "f"}. - - The string group consists of raw strings. - """ - num_of_inline_string_comments = 0 - set_of_prefixes = set() - num_of_strings = 0 - for leaf in line.leaves[string_idx:]: - if leaf.type != token.STRING: - # If the string group is trailed by a comma, we count the - # comments trailing the comma to be one of the string group's - # comments. 
- if leaf.type == token.COMMA and id(leaf) in line.comments:
- num_of_inline_string_comments += 1
- break
-
- if has_triple_quotes(leaf.value):
- return TErr("StringMerger does NOT merge multiline strings.")
-
- num_of_strings += 1
- prefix = get_string_prefix(leaf.value)
- if "r" in prefix:
- return TErr("StringMerger does NOT merge raw strings.")
-
- set_of_prefixes.add(prefix)
-
- if id(leaf) in line.comments:
- num_of_inline_string_comments += 1
- if contains_pragma_comment(line.comments[id(leaf)]):
- return TErr("Cannot merge strings which have pragma comments.")
-
- if num_of_strings < 2:
- return TErr(
- f"Not enough strings to merge (num_of_strings={num_of_strings})."
- )
-
- if num_of_inline_string_comments > 1:
- return TErr(
- f"Too many inline string comments ({num_of_inline_string_comments})."
- )
-
- if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
- return TErr(f"Too many different prefixes ({set_of_prefixes}).")
-
- return Ok(None)
-
-
-class StringParenStripper(StringTransformer):
- """StringTransformer that strips surrounding parentheses from strings.
-
- Requirements:
- The line contains a string which is surrounded by parentheses and:
- - The target string is NOT the only argument to a function call.
- - If the target string contains a PERCENT, the brackets are not
- preceded or followed by an operator with higher precedence than
- PERCENT.
-
- Transformations:
- The parentheses mentioned in the 'Requirements' section are stripped.
-
- Collaborations:
- StringParenStripper has its own inherent usefulness, but it is also
- relied on to clean up the parentheses created by StringParenWrapper (in
- the event that they are no longer needed).
- """
-
- def do_match(self, line: Line) -> TMatchResult:
- LL = line.leaves
-
- is_valid_index = is_valid_index_factory(LL)
-
- for (idx, leaf) in enumerate(LL):
- # Should be a string...
- if leaf.type != token.STRING:
- continue
-
- # Should be preceded by a non-empty LPAR...
- if (
- not is_valid_index(idx - 1)
- or LL[idx - 1].type != token.LPAR
- or is_empty_lpar(LL[idx - 1])
- ):
- continue
-
- # That LPAR should NOT be preceded by a function name or a closing
- # bracket (which could be a function which returns a function or a
- # list/dictionary that contains a function)...
- if is_valid_index(idx - 2) and (
- LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
- ):
- continue
-
- string_idx = idx
-
- # Skip the string trailer, if one exists.
- string_parser = StringParser()
- next_idx = string_parser.parse(LL, string_idx)
-
- # if the leaves in the parsed string include a PERCENT, we need to
- # make sure the initial LPAR is NOT preceded by an operator with
- # higher or equal precedence to PERCENT
- if is_valid_index(idx - 2):
- # mypy can't quite follow unless we name this
- before_lpar = LL[idx - 2]
- if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
- (
- before_lpar.type
- in {
- token.STAR,
- token.AT,
- token.SLASH,
- token.DOUBLESLASH,
- token.PERCENT,
- token.TILDE,
- token.DOUBLESTAR,
- token.AWAIT,
- token.LSQB,
- token.LPAR,
- }
- )
- or (
- # only unary PLUS/MINUS
- before_lpar.parent
- and before_lpar.parent.type == syms.factor
- and (before_lpar.type in {token.PLUS, token.MINUS})
- )
- ):
- continue
-
- # Should be followed by a non-empty RPAR...
- if ( - is_valid_index(next_idx) - and LL[next_idx].type == token.RPAR - and not is_empty_rpar(LL[next_idx]) - ): - # That RPAR should NOT be followed by anything with higher - # precedence than PERCENT - if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in { - token.DOUBLESTAR, - token.LSQB, - token.LPAR, - token.DOT, - }: - continue - - return Ok(string_idx) - - return TErr("This line has no strings wrapped in parens.") - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - LL = line.leaves - - string_parser = StringParser() - rpar_idx = string_parser.parse(LL, string_idx) - - for leaf in (LL[string_idx - 1], LL[rpar_idx]): - if line.comments_after(leaf): - yield TErr( - "Will not strip parentheses which have comments attached to them." - ) - - new_line = line.clone() - new_line.comments = line.comments.copy() - append_leaves(new_line, line, LL[: string_idx - 1]) - - string_leaf = Leaf(token.STRING, LL[string_idx].value) - LL[string_idx - 1].remove() - replace_child(LL[string_idx], string_leaf) - new_line.append(string_leaf) - - append_leaves( - new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :] - ) - - LL[rpar_idx].remove() - - yield Ok(new_line) - - -class BaseStringSplitter(StringTransformer): - """ - Abstract class for StringTransformers which transform a Line's strings by splitting - them or placing them on their own lines where necessary to avoid going over - the configured line length. - - Requirements: - * The target string value is responsible for the line going over the - line length limit. It follows that after all of black's other line - split methods have been exhausted, this line (or one of the resulting - lines after all line splits are performed) would still be over the - line_length limit unless we split this string. - AND - * The target string is NOT a "pointless" string (i.e. a string that has - no parent or siblings). - AND - * The target string is not followed by an inline comment that appears - to be a pragma. - AND - * The target string is not a multiline (i.e. triple-quote) string. - """ - - @abstractmethod - def do_splitter_match(self, line: Line) -> TMatchResult: - """ - BaseStringSplitter asks its clients to override this method instead of - `StringTransformer.do_match(...)`. - - Follows the same protocol as `StringTransformer.do_match(...)`. - - Refer to `help(StringTransformer.do_match)` for more information. - """ - - def do_match(self, line: Line) -> TMatchResult: - match_result = self.do_splitter_match(line) - if isinstance(match_result, Err): - return match_result - - string_idx = match_result.ok() - vresult = self.__validate(line, string_idx) - if isinstance(vresult, Err): - return vresult - - return match_result - - def __validate(self, line: Line, string_idx: int) -> TResult[None]: - """ - Checks that @line meets all of the requirements listed in this classes' - docstring. Refer to `help(BaseStringSplitter)` for a detailed - description of those requirements. - - Returns: - * Ok(None), if ALL of the requirements are met. - OR - * Err(CannotTransform), if ANY of the requirements are NOT met. - """ - LL = line.leaves - - string_leaf = LL[string_idx] - - max_string_length = self.__get_max_string_length(line, string_idx) - if len(string_leaf.value) <= max_string_length: - return TErr( - "The string itself is not what is causing this line to be too long." 
- ) - - if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [ - token.STRING, - token.NEWLINE, - ]: - return TErr( - f"This string ({string_leaf.value}) appears to be pointless (i.e. has" - " no parent)." - ) - - if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment( - line.comments[id(line.leaves[string_idx])] - ): - return TErr( - "Line appears to end with an inline pragma comment. Splitting the line" - " could modify the pragma's behavior." - ) - - if has_triple_quotes(string_leaf.value): - return TErr("We cannot split multiline strings.") - - return Ok(None) - - def __get_max_string_length(self, line: Line, string_idx: int) -> int: - """ - Calculates the max string length used when attempting to determine - whether or not the target string is responsible for causing the line to - go over the line length limit. - - WARNING: This method is tightly coupled to both StringSplitter and - (especially) StringParenWrapper. There is probably a better way to - accomplish what is being done here. - - Returns: - max_string_length: such that `line.leaves[string_idx].value > - max_string_length` implies that the target string IS responsible - for causing this line to exceed the line length limit. - """ - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - - # We use the shorthand "WMA4" in comments to abbreviate "We must - # account for". When giving examples, we use STRING to mean some/any - # valid string. - # - # Finally, we use the following convenience variables: - # - # P: The leaf that is before the target string leaf. - # N: The leaf that is after the target string leaf. - # NN: The leaf that is after N. - - # WMA4 the whitespace at the beginning of the line. - offset = line.depth * 4 - - if is_valid_index(string_idx - 1): - p_idx = string_idx - 1 - if ( - LL[string_idx - 1].type == token.LPAR - and LL[string_idx - 1].value == "" - and string_idx >= 2 - ): - # If the previous leaf is an empty LPAR placeholder, we should skip it. - p_idx -= 1 - - P = LL[p_idx] - if P.type == token.PLUS: - # WMA4 a space and a '+' character (e.g. `+ STRING`). - offset += 2 - - if P.type == token.COMMA: - # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`]. - offset += 3 - - if P.type in [token.COLON, token.EQUAL, token.NAME]: - # This conditional branch is meant to handle dictionary keys, - # variable assignments, 'return STRING' statement lines, and - # 'else STRING' ternary expression lines. - - # WMA4 a single space. - offset += 1 - - # WMA4 the lengths of any leaves that came before that space. - for leaf in LL[: p_idx + 1]: - offset += len(str(leaf)) - - if is_valid_index(string_idx + 1): - N = LL[string_idx + 1] - if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2: - # If the next leaf is an empty RPAR placeholder, we should skip it. - N = LL[string_idx + 2] - - if N.type == token.COMMA: - # WMA4 a single comma at the end of the string (e.g `STRING,`). - offset += 1 - - if is_valid_index(string_idx + 2): - NN = LL[string_idx + 2] - - if N.type == token.DOT and NN.type == token.NAME: - # This conditional branch is meant to handle method calls invoked - # off of a string literal up to and including the LPAR character. - - # WMA4 the '.' character. - offset += 1 - - if ( - is_valid_index(string_idx + 3) - and LL[string_idx + 3].type == token.LPAR - ): - # WMA4 the left parenthesis character. - offset += 1 - - # WMA4 the length of the method's name. 
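To make the "WMA4" bookkeeping above concrete, here is a hypothetical worked example (the name `var` and the assumption that the first leaf carries no prefix are mine, not the patch's):

```
# For the line `    var = "..."` at depth 1 with the default line length:
line_length = 88
offset = 1 * 4        # WMA4 the leading whitespace (depth * 4)
offset += 1           # P is an EQUAL, so WMA4 the space before the string
offset += len("var")  # WMA4 the leaves that came before that space...
offset += len(" =")   # ...where str(leaf) includes each leaf's prefix
max_string_length = line_length - offset
assert max_string_length == 78  # i.e. 88 - len('    var = ')
```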
- offset += len(NN.value) - - has_comments = False - for comment_leaf in line.comments_after(LL[string_idx]): - if not has_comments: - has_comments = True - # WMA4 two spaces before the '#' character. - offset += 2 - - # WMA4 the length of the inline comment. - offset += len(comment_leaf.value) - - max_string_length = self.line_length - offset - return max_string_length - - -class StringSplitter(CustomSplitMapMixin, BaseStringSplitter): - """ - StringTransformer that splits "atom" strings (i.e. strings which exist on - lines by themselves). - - Requirements: - * The line consists ONLY of a single string (with the exception of a - '+' symbol which MAY exist at the start of the line), MAYBE a string - trailer, and MAYBE a trailing comma. - AND - * All of the requirements listed in BaseStringSplitter's docstring. - - Transformations: - The string mentioned in the 'Requirements' section is split into as - many substrings as necessary to adhere to the configured line length. - - In the final set of substrings, no substring should be smaller than - MIN_SUBSTR_SIZE characters. - - The string will ONLY be split on spaces (i.e. each new substring should - start with a space). - - If the string is an f-string, it will NOT be split in the middle of an - f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x - else bar()} is an f-expression). - - If the string that is being split has an associated set of custom split - records and those custom splits will NOT result in any line going over - the configured line length, those custom splits are used. Otherwise the - string is split as late as possible (from left-to-right) while still - adhering to the transformation rules listed above. - - Collaborations: - StringSplitter relies on StringMerger to construct the appropriate - CustomSplit objects and add them to the custom split map. - """ - - MIN_SUBSTR_SIZE = 6 - # Matches an "f-expression" (e.g. {var}) that might be found in an f-string. - RE_FEXPR = r""" - (? TMatchResult: - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - - idx = 0 - - # The first leaf MAY be a '+' symbol... - if is_valid_index(idx) and LL[idx].type == token.PLUS: - idx += 1 - - # The next/first leaf MAY be an empty LPAR... - if is_valid_index(idx) and is_empty_lpar(LL[idx]): - idx += 1 - - # The next/first leaf MUST be a string... - if not is_valid_index(idx) or LL[idx].type != token.STRING: - return TErr("Line does not start with a string.") - - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # That string MAY be followed by an empty RPAR... - if is_valid_index(idx) and is_empty_rpar(LL[idx]): - idx += 1 - - # That string / empty RPAR leaf MAY be followed by a comma... - if is_valid_index(idx) and LL[idx].type == token.COMMA: - idx += 1 - - # But no more leaves are allowed... - if is_valid_index(idx): - return TErr("This line does not end with a string.") - - return Ok(string_idx) - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - LL = line.leaves - - QUOTE = LL[string_idx].value[-1] - - is_valid_index = is_valid_index_factory(LL) - insert_str_child = insert_str_child_factory(LL[string_idx]) - - prefix = get_string_prefix(LL[string_idx].value) - - # We MAY choose to drop the 'f' prefix from substrings that don't - # contain any f-expressions, but ONLY if the original f-string - # contains at least one f-expression. Otherwise, we will alter the AST - # of the program. 
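The comment above is the crux of the `f`-prefix handling, and it can be demonstrated with plain string literals (an illustrative snippet, not patch content):

```
# A substring piece with no f-expression may drop its 'f' prefix, but only
# if doubled braces are collapsed at the same time; otherwise the runtime
# value changes:
piece = f"score: {{pending}}"  # evaluates to 'score: {pending}'
plain = "score: {pending}"     # the same value, no prefix needed
assert piece == plain
```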
- drop_pointless_f_prefix = ("f" in prefix) and re.search( - self.RE_FEXPR, LL[string_idx].value, re.VERBOSE - ) - - first_string_line = True - starts_with_plus = LL[0].type == token.PLUS - - def line_needs_plus() -> bool: - return first_string_line and starts_with_plus - - def maybe_append_plus(new_line: Line) -> None: - """ - Side Effects: - If @line starts with a plus and this is the first line we are - constructing, this function appends a PLUS leaf to @new_line - and replaces the old PLUS leaf in the node structure. Otherwise - this function does nothing. - """ - if line_needs_plus(): - plus_leaf = Leaf(token.PLUS, "+") - replace_child(LL[0], plus_leaf) - new_line.append(plus_leaf) - - ends_with_comma = ( - is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA - ) - - def max_last_string() -> int: - """ - Returns: - The max allowed length of the string value used for the last - line we will construct. - """ - result = self.line_length - result -= line.depth * 4 - result -= 1 if ends_with_comma else 0 - result -= 2 if line_needs_plus() else 0 - return result - - # --- Calculate Max Break Index (for string value) - # We start with the line length limit - max_break_idx = self.line_length - # The last index of a string of length N is N-1. - max_break_idx -= 1 - # Leading whitespace is not present in the string value (e.g. Leaf.value). - max_break_idx -= line.depth * 4 - if max_break_idx < 0: - yield TErr( - f"Unable to split {LL[string_idx].value} at such high of a line depth:" - f" {line.depth}" - ) - return - - # Check if StringMerger registered any custom splits. - custom_splits = self.pop_custom_splits(LL[string_idx].value) - # We use them ONLY if none of them would produce lines that exceed the - # line limit. - use_custom_breakpoints = bool( - custom_splits - and all(csplit.break_idx <= max_break_idx for csplit in custom_splits) - ) - - # Temporary storage for the remaining chunk of the string line that - # can't fit onto the line currently being constructed. - rest_value = LL[string_idx].value - - def more_splits_should_be_made() -> bool: - """ - Returns: - True iff `rest_value` (the remaining string value from the last - split), should be split again. - """ - if use_custom_breakpoints: - return len(custom_splits) > 1 - else: - return len(rest_value) > max_last_string() - - string_line_results: List[Ok[Line]] = [] - while more_splits_should_be_made(): - if use_custom_breakpoints: - # Custom User Split (manual) - csplit = custom_splits.pop(0) - break_idx = csplit.break_idx - else: - # Algorithmic Split (automatic) - max_bidx = max_break_idx - 2 if line_needs_plus() else max_break_idx - maybe_break_idx = self.__get_break_idx(rest_value, max_bidx) - if maybe_break_idx is None: - # If we are unable to algorithmically determine a good split - # and this string has custom splits registered to it, we - # fall back to using them--which means we have to start - # over from the beginning. - if custom_splits: - rest_value = LL[string_idx].value - string_line_results = [] - first_string_line = True - use_custom_breakpoints = True - continue - - # Otherwise, we stop splitting here. - break - - break_idx = maybe_break_idx - - # --- Construct `next_value` - next_value = rest_value[:break_idx] + QUOTE - if ( - # Are we allowed to try to drop a pointless 'f' prefix? - drop_pointless_f_prefix - # If we are, will we be successful? 
- and next_value != self.__normalize_f_string(next_value, prefix) - ): - # If the current custom split did NOT originally use a prefix, - # then `csplit.break_idx` will be off by one after removing - # the 'f' prefix. - break_idx = ( - break_idx + 1 - if use_custom_breakpoints and not csplit.has_prefix - else break_idx - ) - next_value = rest_value[:break_idx] + QUOTE - next_value = self.__normalize_f_string(next_value, prefix) - - # --- Construct `next_leaf` - next_leaf = Leaf(token.STRING, next_value) - insert_str_child(next_leaf) - self.__maybe_normalize_string_quotes(next_leaf) - - # --- Construct `next_line` - next_line = line.clone() - maybe_append_plus(next_line) - next_line.append(next_leaf) - string_line_results.append(Ok(next_line)) - - rest_value = prefix + QUOTE + rest_value[break_idx:] - first_string_line = False - - yield from string_line_results - - if drop_pointless_f_prefix: - rest_value = self.__normalize_f_string(rest_value, prefix) - - rest_leaf = Leaf(token.STRING, rest_value) - insert_str_child(rest_leaf) - - # NOTE: I could not find a test case that verifies that the following - # line is actually necessary, but it seems to be. Otherwise we risk - # not normalizing the last substring, right? - self.__maybe_normalize_string_quotes(rest_leaf) - - last_line = line.clone() - maybe_append_plus(last_line) - - # If there are any leaves to the right of the target string... - if is_valid_index(string_idx + 1): - # We use `temp_value` here to determine how long the last line - # would be if we were to append all the leaves to the right of the - # target string to the last string line. - temp_value = rest_value - for leaf in LL[string_idx + 1 :]: - temp_value += str(leaf) - if leaf.type == token.LPAR: - break - - # Try to fit them all on the same line with the last substring... - if ( - len(temp_value) <= max_last_string() - or LL[string_idx + 1].type == token.COMMA - ): - last_line.append(rest_leaf) - append_leaves(last_line, line, LL[string_idx + 1 :]) - yield Ok(last_line) - # Otherwise, place the last substring on one line and everything - # else on a line below that... - else: - last_line.append(rest_leaf) - yield Ok(last_line) - - non_string_line = line.clone() - append_leaves(non_string_line, line, LL[string_idx + 1 :]) - yield Ok(non_string_line) - # Else the target string was the last leaf... - else: - last_line.append(rest_leaf) - last_line.comments = line.comments.copy() - yield Ok(last_line) - - def __get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]: - """ - This method contains the algorithm that StringSplitter uses to - determine which character to split each string at. - - Args: - @string: The substring that we are attempting to split. - @max_break_idx: The ideal break index. We will return this value if it - meets all the necessary conditions. In the likely event that it - doesn't we will try to find the closest index BELOW @max_break_idx - that does. If that fails, we will expand our search by also - considering all valid indices ABOVE @max_break_idx. - - Pre-Conditions: - * assert_is_leaf_string(@string) - * 0 <= @max_break_idx < len(@string) - - Returns: - break_idx, if an index is able to be found that meets all of the - conditions listed in the 'Transformations' section of this classes' - docstring. - OR - None, otherwise. 
- """ - is_valid_index = is_valid_index_factory(string) - - assert is_valid_index(max_break_idx) - assert_is_leaf_string(string) - - _fexpr_slices: Optional[List[Tuple[Index, Index]]] = None - - def fexpr_slices() -> Iterator[Tuple[Index, Index]]: - """ - Yields: - All ranges of @string which, if @string were to be split there, - would result in the splitting of an f-expression (which is NOT - allowed). - """ - nonlocal _fexpr_slices - - if _fexpr_slices is None: - _fexpr_slices = [] - for match in re.finditer(self.RE_FEXPR, string, re.VERBOSE): - _fexpr_slices.append(match.span()) - - yield from _fexpr_slices - - is_fstring = "f" in get_string_prefix(string) - - def breaks_fstring_expression(i: Index) -> bool: - """ - Returns: - True iff returning @i would result in the splitting of an - f-expression (which is NOT allowed). - """ - if not is_fstring: - return False - - for (start, end) in fexpr_slices(): - if start <= i < end: - return True - - return False - - def passes_all_checks(i: Index) -> bool: - """ - Returns: - True iff ALL of the conditions listed in the 'Transformations' - section of this classes' docstring would be be met by returning @i. - """ - is_space = string[i] == " " - is_big_enough = ( - len(string[i:]) >= self.MIN_SUBSTR_SIZE - and len(string[:i]) >= self.MIN_SUBSTR_SIZE - ) - return is_space and is_big_enough and not breaks_fstring_expression(i) - - # First, we check all indices BELOW @max_break_idx. - break_idx = max_break_idx - while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx): - break_idx -= 1 - - if not passes_all_checks(break_idx): - # If that fails, we check all indices ABOVE @max_break_idx. - # - # If we are able to find a valid index here, the next line is going - # to be longer than the specified line length, but it's probably - # better than doing nothing at all. - break_idx = max_break_idx + 1 - while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx): - break_idx += 1 - - if not is_valid_index(break_idx) or not passes_all_checks(break_idx): - return None - - return break_idx - - def __maybe_normalize_string_quotes(self, leaf: Leaf) -> None: - if self.normalize_strings: - normalize_string_quotes(leaf) - - def __normalize_f_string(self, string: str, prefix: str) -> str: - """ - Pre-Conditions: - * assert_is_leaf_string(@string) - - Returns: - * If @string is an f-string that contains no f-expressions, we - return a string identical to @string except that the 'f' prefix - has been stripped and all double braces (i.e. '{{' or '}}') have - been normalized (i.e. turned into '{' or '}'). - OR - * Otherwise, we return @string. - """ - assert_is_leaf_string(string) - - if "f" in prefix and not re.search(self.RE_FEXPR, string, re.VERBOSE): - new_prefix = prefix.replace("f", "") - - temp = string[len(prefix) :] - temp = re.sub(r"\{\{", "{", temp) - temp = re.sub(r"\}\}", "}", temp) - new_string = temp - - return f"{new_prefix}{new_string}" - else: - return string - - -class StringParenWrapper(CustomSplitMapMixin, BaseStringSplitter): - """ - StringTransformer that splits non-"atom" strings (i.e. strings that do not - exist on lines by themselves). - - Requirements: - All of the requirements listed in BaseStringSplitter's docstring in - addition to the requirements listed below: - - * The line is a return/yield statement, which returns/yields a string. - OR - * The line is part of a ternary expression (e.g. `x = y if cond else - z`) such that the line starts with `else `, where is - some string. 
-        OR
-        * The line is an assert statement, which ends with a string.
-        OR
-        * The line is an assignment statement (e.g. `x = <string>` or `x +=
-          <string>`) such that the variable is being assigned the value of some
-          string.
-        OR
-        * The line is a dictionary key assignment where some valid key is being
-          assigned the value of some string.
-
-    Transformations:
-        The chosen string is wrapped in parentheses and then split at the LPAR.
-
-        We then have one line which ends with an LPAR and another line that
-        starts with the chosen string. The latter line is then split again at
-        the RPAR. This results in the RPAR (and possibly a trailing comma)
-        being placed on its own line.
-
-        NOTE: If any leaves exist to the right of the chosen string (except
-        for a trailing comma, which would be placed after the RPAR), those
-        leaves are placed inside the parentheses. In effect, the chosen
-        string is not necessarily being "wrapped" by parentheses. We can,
-        however, count on the LPAR being placed directly before the chosen
-        string.
-
-        In other words, StringParenWrapper creates "atom" strings. These
-        can then be split again by StringSplitter, if necessary.
-
-    Collaborations:
-        In the event that a string line split by StringParenWrapper is
-        changed such that it no longer needs to be given its own line,
-        StringParenWrapper relies on StringParenStripper to clean up the
-        parentheses it created.
-    """
-
-    def do_splitter_match(self, line: Line) -> TMatchResult:
-        LL = line.leaves
-
-        string_idx = None
-        string_idx = string_idx or self._return_match(LL)
-        string_idx = string_idx or self._else_match(LL)
-        string_idx = string_idx or self._assert_match(LL)
-        string_idx = string_idx or self._assign_match(LL)
-        string_idx = string_idx or self._dict_match(LL)
-
-        if string_idx is not None:
-            string_value = line.leaves[string_idx].value
-            # If the string has no spaces...
-            if " " not in string_value:
-                # And will still violate the line length limit when split...
-                max_string_length = self.line_length - ((line.depth + 1) * 4)
-                if len(string_value) > max_string_length:
-                    # And has no associated custom splits...
-                    if not self.has_custom_splits(string_value):
-                        # Then we should NOT put this string on its own line.
-                        return TErr(
-                            "We do not wrap long strings in parentheses when the"
-                            " resultant line would still be over the specified line"
-                            " length and can't be split further by StringSplitter."
-                        )
-            return Ok(string_idx)
-
-        return TErr("This line does not contain any non-atomic strings.")
-
-    @staticmethod
-    def _return_match(LL: List[Leaf]) -> Optional[int]:
-        """
-        Returns:
-            string_idx such that @LL[string_idx] is equal to our target (i.e.
-            matched) string, if this line matches the return/yield statement
-            requirements listed in the 'Requirements' section of this class's
-            docstring.
-            OR
-            None, otherwise.
-        """
-        # If this line is a part of a return/yield statement and the first leaf
-        # contains either the "return" or "yield" keywords...
-        if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
-            0
-        ].value in ["return", "yield"]:
-            is_valid_index = is_valid_index_factory(LL)
-
-            idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
-            # The next visible leaf MUST contain a string...
-            if is_valid_index(idx) and LL[idx].type == token.STRING:
-                return idx
-
-        return None
-
-    @staticmethod
-    def _else_match(LL: List[Leaf]) -> Optional[int]:
-        """
-        Returns:
-            string_idx such that @LL[string_idx] is equal to our target (i.e.
- matched) string, if this line matches the ternary expression - requirements listed in the 'Requirements' section of this classes' - docstring. - OR - None, otherwise. - """ - # If this line is apart of a ternary expression and the first leaf - # contains the "else" keyword... - if ( - parent_type(LL[0]) == syms.test - and LL[0].type == token.NAME - and LL[0].value == "else" - ): - is_valid_index = is_valid_index_factory(LL) - - idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 - # The next visible leaf MUST contain a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - return idx - - return None - - @staticmethod - def _assert_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the assert statement - requirements listed in the 'Requirements' section of this classes' - docstring. - OR - None, otherwise. - """ - # If this line is apart of an assert statement and the first leaf - # contains the "assert" keyword... - if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert": - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - # We MUST find a comma... - if leaf.type == token.COMMA: - idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 - - # That comma MUST be followed by a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # But no more leaves are allowed... - if not is_valid_index(idx): - return string_idx - - return None - - @staticmethod - def _assign_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the assignment statement - requirements listed in the 'Requirements' section of this classes' - docstring. - OR - None, otherwise. - """ - # If this line is apart of an expression statement or is a function - # argument AND the first leaf contains a variable name... - if ( - parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power] - and LL[0].type == token.NAME - ): - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - # We MUST find either an '=' or '+=' symbol... - if leaf.type in [token.EQUAL, token.PLUSEQUAL]: - idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 - - # That symbol MUST be followed by a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # The next leaf MAY be a comma iff this line is apart - # of a function argument... - if ( - parent_type(LL[0]) == syms.argument - and is_valid_index(idx) - and LL[idx].type == token.COMMA - ): - idx += 1 - - # But no more leaves are allowed... - if not is_valid_index(idx): - return string_idx - - return None - - @staticmethod - def _dict_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the dictionary key assignment - statement requirements listed in the 'Requirements' section of this - classes' docstring. - OR - None, otherwise. - """ - # If this line is apart of a dictionary key assignment... 
- if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]: - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - # We MUST find a colon... - if leaf.type == token.COLON: - idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 - - # That colon MUST be followed by a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # That string MAY be followed by a comma... - if is_valid_index(idx) and LL[idx].type == token.COMMA: - idx += 1 - - # But no more leaves are allowed... - if not is_valid_index(idx): - return string_idx - - return None - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - insert_str_child = insert_str_child_factory(LL[string_idx]) - - comma_idx = len(LL) - 1 - ends_with_comma = False - if LL[comma_idx].type == token.COMMA: - ends_with_comma = True - - leaves_to_steal_comments_from = [LL[string_idx]] - if ends_with_comma: - leaves_to_steal_comments_from.append(LL[comma_idx]) - - # --- First Line - first_line = line.clone() - left_leaves = LL[:string_idx] - - # We have to remember to account for (possibly invisible) LPAR and RPAR - # leaves that already wrapped the target string. If these leaves do - # exist, we will replace them with our own LPAR and RPAR leaves. - old_parens_exist = False - if left_leaves and left_leaves[-1].type == token.LPAR: - old_parens_exist = True - leaves_to_steal_comments_from.append(left_leaves[-1]) - left_leaves.pop() - - append_leaves(first_line, line, left_leaves) - - lpar_leaf = Leaf(token.LPAR, "(") - if old_parens_exist: - replace_child(LL[string_idx - 1], lpar_leaf) - else: - insert_str_child(lpar_leaf) - first_line.append(lpar_leaf) - - # We throw inline comments that were originally to the right of the - # target string to the top line. They will now be shown to the right of - # the LPAR. - for leaf in leaves_to_steal_comments_from: - for comment_leaf in line.comments_after(leaf): - first_line.append(comment_leaf, preformatted=True) - - yield Ok(first_line) - - # --- Middle (String) Line - # We only need to yield one (possibly too long) string line, since the - # `StringSplitter` will break it down further if necessary. - string_value = LL[string_idx].value - string_line = Line( - depth=line.depth + 1, - inside_brackets=True, - should_explode=line.should_explode, - ) - string_leaf = Leaf(token.STRING, string_value) - insert_str_child(string_leaf) - string_line.append(string_leaf) - - old_rpar_leaf = None - if is_valid_index(string_idx + 1): - right_leaves = LL[string_idx + 1 :] - if ends_with_comma: - right_leaves.pop() - - if old_parens_exist: - assert ( - right_leaves and right_leaves[-1].type == token.RPAR - ), "Apparently, old parentheses do NOT exist?!" - old_rpar_leaf = right_leaves.pop() - - append_leaves(string_line, line, right_leaves) - - yield Ok(string_line) - - # --- Last Line - last_line = line.clone() - last_line.bracket_tracker = first_line.bracket_tracker - - new_rpar_leaf = Leaf(token.RPAR, ")") - if old_rpar_leaf is not None: - replace_child(old_rpar_leaf, new_rpar_leaf) - else: - insert_str_child(new_rpar_leaf) - last_line.append(new_rpar_leaf) - - # If the target string ended with a comma, we place this comma to the - # right of the RPAR on the last line. 
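For readers skimming the hunk, the net effect of this transformer is easiest to see as a small before/after sketch. The shape below is inferred from the docstring and comments above, not copied from the patch's test suite:

```
# Before: a long string attached to a return statement.
return "some very long message that pushes this line over the limit"

# After StringParenWrapper: the string is now an "atom" on its own line,
# which StringSplitter may split further if it is still too long.
return (
    "some very long message that pushes this line over the limit"
)
```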
- if ends_with_comma: - comma_leaf = Leaf(token.COMMA, ",") - replace_child(LL[comma_idx], comma_leaf) - last_line.append(comma_leaf) - - yield Ok(last_line) - - -class StringParser: - """ - A state machine that aids in parsing a string's "trailer", which can be - either non-existent, an old-style formatting sequence (e.g. `% varX` or `% - (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX, - varY)`). - - NOTE: A new StringParser object MUST be instantiated for each string - trailer we need to parse. - - Examples: - We shall assume that `line` equals the `Line` object that corresponds - to the following line of python code: - ``` - x = "Some {}.".format("String") + some_other_string - ``` - - Furthermore, we will assume that `string_idx` is some index such that: - ``` - assert line.leaves[string_idx].value == "Some {}." - ``` - - The following code snippet then holds: - ``` - string_parser = StringParser() - idx = string_parser.parse(line.leaves, string_idx) - assert line.leaves[idx].type == token.PLUS - ``` - """ - - DEFAULT_TOKEN = -1 - - # String Parser States - START = 1 - DOT = 2 - NAME = 3 - PERCENT = 4 - SINGLE_FMT_ARG = 5 - LPAR = 6 - RPAR = 7 - DONE = 8 - - # Lookup Table for Next State - _goto: Dict[Tuple[ParserState, NodeType], ParserState] = { - # A string trailer may start with '.' OR '%'. - (START, token.DOT): DOT, - (START, token.PERCENT): PERCENT, - (START, DEFAULT_TOKEN): DONE, - # A '.' MUST be followed by an attribute or method name. - (DOT, token.NAME): NAME, - # A method name MUST be followed by an '(', whereas an attribute name - # is the last symbol in the string trailer. - (NAME, token.LPAR): LPAR, - (NAME, DEFAULT_TOKEN): DONE, - # A '%' symbol can be followed by an '(' or a single argument (e.g. a - # string or variable name). - (PERCENT, token.LPAR): LPAR, - (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG, - # If a '%' symbol is followed by a single argument, that argument is - # the last leaf in the string trailer. - (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE, - # If present, a ')' symbol is the last symbol in a string trailer. - # (NOTE: LPARS and nested RPARS are not included in this lookup table, - # since they are treated as a special case by the parsing logic in this - # classes' implementation.) - (RPAR, DEFAULT_TOKEN): DONE, - } - - def __init__(self) -> None: - self._state = self.START - self._unmatched_lpars = 0 - - def parse(self, leaves: List[Leaf], string_idx: int) -> int: - """ - Pre-conditions: - * @leaves[@string_idx].type == token.STRING - - Returns: - The index directly after the last leaf which is apart of the string - trailer, if a "trailer" exists. - OR - @string_idx + 1, if no string "trailer" exists. - """ - assert leaves[string_idx].type == token.STRING - - idx = string_idx + 1 - while idx < len(leaves) and self._next_state(leaves[idx]): - idx += 1 - return idx - - def _next_state(self, leaf: Leaf) -> bool: - """ - Pre-conditions: - * On the first call to this function, @leaf MUST be the leaf that - was directly after the string leaf in question (e.g. if our target - string is `line.leaves[i]` then the first call to this method must - be `line.leaves[i + 1]`). - * On the next call to this function, the leaf parameter passed in - MUST be the leaf directly following @leaf. - - Returns: - True iff @leaf is apart of the string's trailer. - """ - # We ignore empty LPAR or RPAR leaves. 
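Tracing the docstring's own example through the `_goto` table may help; the walk-through below is an illustrative aside, not additional patch content:

```
# Trailer of: x = "Some {}.".format("String") + some_other_string
#
#   leaf        token    transition
#   '.'         DOT      START -> DOT
#   'format'    NAME     DOT -> NAME
#   '('         LPAR     NAME -> LPAR
#   '"String"'  STRING   stays in LPAR (waiting for the matching RPAR)
#   ')'         RPAR     LPAR -> RPAR (unmatched LPAR count back to zero)
#   '+'         PLUS     RPAR -> DONE; parse() returns this leaf's index
```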
- if is_empty_par(leaf): - return True - - next_token = leaf.type - if next_token == token.LPAR: - self._unmatched_lpars += 1 - - current_state = self._state - - # The LPAR parser state is a special case. We will return True until we - # find the matching RPAR token. - if current_state == self.LPAR: - if next_token == token.RPAR: - self._unmatched_lpars -= 1 - if self._unmatched_lpars == 0: - self._state = self.RPAR - # Otherwise, we use a lookup table to determine the next state. - else: - # If the lookup table matches the current state to the next - # token, we use the lookup table. - if (current_state, next_token) in self._goto: - self._state = self._goto[current_state, next_token] - else: - # Otherwise, we check if a the current state was assigned a - # default. - if (current_state, self.DEFAULT_TOKEN) in self._goto: - self._state = self._goto[current_state, self.DEFAULT_TOKEN] - # If no default has been assigned, then this parser has a logic - # error. - else: - raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") - - if self._state == self.DONE: - return False - - return True - - -def TErr(err_msg: str) -> Err[CannotTransform]: - """(T)ransform Err - - Convenience function used when working with the TResult type. - """ - cant_transform = CannotTransform(err_msg) - return Err(cant_transform) - - -def contains_pragma_comment(comment_list: List[Leaf]) -> bool: - """ - Returns: - True iff one of the comments in @comment_list is a pragma used by one - of the more common static analysis tools for python (e.g. mypy, flake8, - pylint). - """ - for comment in comment_list: - if comment.value.startswith(("# type:", "# noqa", "# pylint:")): - return True - - return False - - -def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]: - """ - Factory for a convenience function that is used to orphan @string_leaf - and then insert multiple new leaves into the same part of the node - structure that @string_leaf had originally occupied. - - Examples: - Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N = - string_leaf.parent`. Assume the node `N` has the following - original structure: - - Node( - expr_stmt, [ - Leaf(NAME, 'x'), - Leaf(EQUAL, '='), - Leaf(STRING, '"foo"'), - ] - ) - - We then run the code snippet shown below. - ``` - insert_str_child = insert_str_child_factory(string_leaf) - - lpar = Leaf(token.LPAR, '(') - insert_str_child(lpar) - - bar = Leaf(token.STRING, '"bar"') - insert_str_child(bar) - - rpar = Leaf(token.RPAR, ')') - insert_str_child(rpar) - ``` - - After which point, it follows that `string_leaf.parent is None` and - the node `N` now has the following structure: - - Node( - expr_stmt, [ - Leaf(NAME, 'x'), - Leaf(EQUAL, '='), - Leaf(LPAR, '('), - Leaf(STRING, '"bar"'), - Leaf(RPAR, ')'), - ] - ) - """ - string_parent = string_leaf.parent - string_child_idx = string_leaf.remove() - - def insert_str_child(child: LN) -> None: - nonlocal string_child_idx - - assert string_parent is not None - assert string_child_idx is not None - - string_parent.insert_child(string_child_idx, child) - string_child_idx += 1 - - return insert_str_child - - -def has_triple_quotes(string: str) -> bool: - """ - Returns: - True iff @string starts with three quotation characters. - """ - raw_string = string.lstrip(STRING_PREFIX_CHARS) - return raw_string[:3] in {'"""', "'''"} - - -def parent_type(node: Optional[LN]) -> Optional[NodeType]: - """ - Returns: - @node.parent.type, if @node is not None and has a parent. - OR - None, otherwise. 
- """ - if node is None or node.parent is None: - return None - - return node.parent.type - - -def is_empty_par(leaf: Leaf) -> bool: - return is_empty_lpar(leaf) or is_empty_rpar(leaf) - - -def is_empty_lpar(leaf: Leaf) -> bool: - return leaf.type == token.LPAR and leaf.value == "" - - -def is_empty_rpar(leaf: Leaf) -> bool: - return leaf.type == token.RPAR and leaf.value == "" - - -def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]: - """ - Examples: - ``` - my_list = [1, 2, 3] - - is_valid_index = is_valid_index_factory(my_list) - - assert is_valid_index(0) - assert is_valid_index(2) - - assert not is_valid_index(3) - assert not is_valid_index(-1) - ``` - """ - - def is_valid_index(idx: int) -> bool: - """ - Returns: - True iff @idx is positive AND seq[@idx] does NOT raise an - IndexError. - """ - return 0 <= idx < len(seq) - - return is_valid_index - - -def line_to_string(line: Line) -> str: - """Returns the string representation of @line. - - WARNING: This is known to be computationally expensive. - """ - return str(line).strip("\n") - - -def append_leaves(new_line: Line, old_line: Line, leaves: List[Leaf]) -> None: - """ - Append leaves (taken from @old_line) to @new_line, making sure to fix the - underlying Node structure where appropriate. - - All of the leaves in @leaves are duplicated. The duplicates are then - appended to @new_line and used to replace their originals in the underlying - Node structure. Any comments attached to the old leaves are reattached to - the new leaves. - - Pre-conditions: - set(@leaves) is a subset of set(@old_line.leaves). - """ - for old_leaf in leaves: - new_leaf = Leaf(old_leaf.type, old_leaf.value) - replace_child(old_leaf, new_leaf) - new_line.append(new_leaf) - - for comment_leaf in old_line.comments_after(old_leaf): - new_line.append(comment_leaf, preformatted=True) - - -def replace_child(old_child: LN, new_child: LN) -> None: - """ - Side Effects: - * If @old_child.parent is set, replace @old_child with @new_child in - @old_child's underlying Node structure. - OR - * Otherwise, this function does nothing. - """ - parent = old_child.parent - if not parent: - return - - child_idx = old_child.remove() - if child_idx is not None: - parent.insert_child(child_idx, new_child) - - -def get_string_prefix(string: str) -> str: - """ - Pre-conditions: - * assert_is_leaf_string(@string) - - Returns: - @string's prefix (e.g. '', 'r', 'f', or 'rf'). - """ - assert_is_leaf_string(string) - - prefix = "" - prefix_idx = 0 - while string[prefix_idx] in STRING_PREFIX_CHARS: - prefix += string[prefix_idx].lower() - prefix_idx += 1 - - return prefix - - -def assert_is_leaf_string(string: str) -> None: - """ - Checks the pre-condition that @string has the format that you would expect - of `leaf.value` where `leaf` is some Leaf such that `leaf.type == - token.STRING`. A more precise description of the pre-conditions that are - checked are listed below. - - Pre-conditions: - * @string starts with either ', ", ', or " where - `set()` is some subset of `set(STRING_PREFIX_CHARS)`. - * @string ends with a quote character (' or "). - - Raises: - AssertionError(...) if the pre-conditions listed above are not - satisfied. - """ - dquote_idx = string.find('"') - squote_idx = string.find("'") - if -1 in [dquote_idx, squote_idx]: - quote_idx = max(dquote_idx, squote_idx) - else: - quote_idx = min(squote_idx, dquote_idx) - - assert ( - 0 <= quote_idx < len(string) - 1 - ), f"{string!r} is missing a starting quote character (' or \")." 
- assert string[-1] in ( - "'", - '"', - ), f"{string!r} is missing an ending quote character (' or \")." - assert set(string[:quote_idx]).issubset( - set(STRING_PREFIX_CHARS) - ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}." - - -def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]: - """Split line into many lines, starting with the first matching bracket pair. - - Note: this usually looks weird, only use this for function definitions. - Prefer RHS otherwise. This is why this function is not symmetrical with - :func:`right_hand_split` which also handles optional parentheses. - """ - tail_leaves: List[Leaf] = [] - body_leaves: List[Leaf] = [] - head_leaves: List[Leaf] = [] - current_leaves = head_leaves - matching_bracket: Optional[Leaf] = None - for leaf in line.leaves: - if ( - current_leaves is body_leaves - and leaf.type in CLOSING_BRACKETS - and leaf.opening_bracket is matching_bracket - ): - current_leaves = tail_leaves if body_leaves else head_leaves - current_leaves.append(leaf) - if current_leaves is head_leaves: - if leaf.type in OPENING_BRACKETS: - matching_bracket = leaf - current_leaves = body_leaves - if not matching_bracket: - raise CannotSplit("No brackets found") - - head = bracket_split_build_line(head_leaves, line, matching_bracket) - body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True) - tail = bracket_split_build_line(tail_leaves, line, matching_bracket) - bracket_split_succeeded_or_raise(head, body, tail) - for result in (head, body, tail): - if result: - yield result - - -def right_hand_split( - line: Line, - line_length: int, - features: Collection[Feature] = (), - omit: Collection[LeafID] = (), -) -> Iterator[Line]: - """Split line into many lines, starting with the last matching bracket pair. - - If the split was by optional parentheses, attempt splitting without them, too. - `omit` is a collection of closing bracket IDs that shouldn't be considered for - this split. - - Note: running this function modifies `bracket_depth` on the leaves of `line`. - """ - tail_leaves: List[Leaf] = [] - body_leaves: List[Leaf] = [] - head_leaves: List[Leaf] = [] - current_leaves = tail_leaves - opening_bracket: Optional[Leaf] = None - closing_bracket: Optional[Leaf] = None - for leaf in reversed(line.leaves): - if current_leaves is body_leaves: - if leaf is opening_bracket: - current_leaves = head_leaves if body_leaves else tail_leaves - current_leaves.append(leaf) - if current_leaves is tail_leaves: - if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: - opening_bracket = leaf.opening_bracket - closing_bracket = leaf - current_leaves = body_leaves - if not (opening_bracket and closing_bracket and head_leaves): - # If there is no opening or closing_bracket that means the split failed and - # all content is in the tail. Otherwise, if `head_leaves` are empty, it means - # the matching `opening_bracket` wasn't available on `line` anymore. 
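A hedged sketch of what a successful right-hand split produces (identifiers invented here for illustration):

```
# Before: one line over the limit.
result = some_function(argument_one, argument_two, argument_three)

# After splitting at the last bracket pair. The head is
# `result = some_function(`, the body is the arguments one indent
# deeper, and the tail starts with the closing bracket:
result = some_function(
    argument_one, argument_two, argument_three
)
```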
- raise CannotSplit("No brackets found") - - tail_leaves.reverse() - body_leaves.reverse() - head_leaves.reverse() - head = bracket_split_build_line(head_leaves, line, opening_bracket) - body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True) - tail = bracket_split_build_line(tail_leaves, line, opening_bracket) - bracket_split_succeeded_or_raise(head, body, tail) - if ( - Feature.FORCE_OPTIONAL_PARENTHESES not in features - # the opening bracket is an optional paren - and opening_bracket.type == token.LPAR - and not opening_bracket.value - # the closing bracket is an optional paren - and closing_bracket.type == token.RPAR - and not closing_bracket.value - # it's not an import (optional parens are the only thing we can split on - # in this case; attempting a split without them is a waste of time) - and not line.is_import - # there are no standalone comments in the body - and not body.contains_standalone_comments(0) - # and we can actually remove the parens - and can_omit_invisible_parens(body, line_length, omit_on_explode=omit) - ): - omit = {id(closing_bracket), *omit} - try: - yield from right_hand_split(line, line_length, features=features, omit=omit) - return - - except CannotSplit: - if not ( - can_be_split(body) - or is_line_short_enough(body, line_length=line_length) - ): - raise CannotSplit( - "Splitting failed, body is still too long and can't be split." - ) - - elif head.contains_multiline_strings() or tail.contains_multiline_strings(): - raise CannotSplit( - "The current optional pair of parentheses is bound to fail to" - " satisfy the splitting algorithm because the head or the tail" - " contains multiline strings which by definition never fit one" - " line." - ) - - ensure_visible(opening_bracket) - ensure_visible(closing_bracket) - for result in (head, body, tail): - if result: - yield result - - -def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: - """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. - - Do nothing otherwise. - - A left- or right-hand split is based on a pair of brackets. Content before - (and including) the opening bracket is left on one line, content inside the - brackets is put on a separate line, and finally content starting with and - following the closing bracket is put on a separate line. - - Those are called `head`, `body`, and `tail`, respectively. If the split - produced the same line (all content in `head`) or ended up with an empty `body` - and the `tail` is just the closing bracket, then it's considered failed. - """ - tail_len = len(str(tail).strip()) - if not body: - if tail_len == 0: - raise CannotSplit("Splitting brackets produced the same line") - - elif tail_len < 3: - raise CannotSplit( - f"Splitting brackets on an empty body to save {tail_len} characters is" - " not worth it" - ) - - -def bracket_split_build_line( - leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False -) -> Line: - """Return a new line with given `leaves` and respective comments from `original`. - - If `is_body` is True, the result line is one-indented inside brackets and as such - has its first leaf's prefix normalized and a trailing comma added when expected. - """ - result = Line(depth=original.depth) - if is_body: - result.inside_brackets = True - result.depth += 1 - if leaves: - # Since body is a new indent level, remove spurious leading whitespace. 
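As a concrete illustration of the trailing-comma branch described above (module and names hypothetical):

```
# When an import is split across brackets, a trailing comma is appended to
# the body (unless one is already present), keeping the result exploded:
from package import (
    first_name,
    second_name,  # <- comma appended by bracket_split_build_line
)
```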
- normalize_prefix(leaves[0], inside_brackets=True) - # Ensure a trailing comma for imports and standalone function arguments, but - # be careful not to add one after any comments or within type annotations. - no_commas = ( - original.is_def - and opening_bracket.value == "(" - and not any(leaf.type == token.COMMA for leaf in leaves) - ) - - if original.is_import or no_commas: - for i in range(len(leaves) - 1, -1, -1): - if leaves[i].type == STANDALONE_COMMENT: - continue - - if leaves[i].type != token.COMMA: - new_comma = Leaf(token.COMMA, ",") - leaves.insert(i + 1, new_comma) - break - - # Populate the line - for leaf in leaves: - result.append(leaf, preformatted=True) - for comment_after in original.comments_after(leaf): - result.append(comment_after, preformatted=True) - if is_body and should_split_body_explode(result, opening_bracket): - result.should_explode = True - return result - - -def dont_increase_indentation(split_func: Transformer) -> Transformer: - """Normalize prefix of the first leaf in every line returned by `split_func`. - - This is a decorator over relevant split functions. - """ - - @wraps(split_func) - def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - for line in split_func(line, features): - normalize_prefix(line.leaves[0], inside_brackets=True) - yield line - - return split_wrapper - - -@dont_increase_indentation -def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - """Split according to delimiters of the highest priority. - - If the appropriate Features are given, the split will add trailing commas - also in function signatures and calls that contain `*` and `**`. - """ - try: - last_leaf = line.leaves[-1] - except IndexError: - raise CannotSplit("Line empty") - - bt = line.bracket_tracker - try: - delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) - except ValueError: - raise CannotSplit("No delimiters found") - - if delimiter_priority == DOT_PRIORITY: - if bt.delimiter_count_with_priority(delimiter_priority) == 1: - raise CannotSplit("Splitting a single attribute from its owner looks wrong") - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - lowest_depth = sys.maxsize - trailing_comma_safe = True - - def append_to_line(leaf: Leaf) -> Iterator[Line]: - """Append `leaf` to current line or to new line if appending impossible.""" - nonlocal current_line - try: - current_line.append_safe(leaf, preformatted=True) - except ValueError: - yield current_line - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - current_line.append(leaf) - - for leaf in line.leaves: - yield from append_to_line(leaf) - - for comment_after in line.comments_after(leaf): - yield from append_to_line(comment_after) - - lowest_depth = min(lowest_depth, leaf.bracket_depth) - if leaf.bracket_depth == lowest_depth: - if is_vararg(leaf, within={syms.typedargslist}): - trailing_comma_safe = ( - trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features - ) - elif is_vararg(leaf, within={syms.arglist, syms.argument}): - trailing_comma_safe = ( - trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features - ) - - leaf_priority = bt.delimiters.get(id(leaf)) - if leaf_priority == delimiter_priority: - yield current_line - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - if current_line: - if ( - trailing_comma_safe - and delimiter_priority == COMMA_PRIORITY - and current_line.leaves[-1].type != token.COMMA - 
and current_line.leaves[-1].type != STANDALONE_COMMENT - ): - new_comma = Leaf(token.COMMA, ",") - current_line.append(new_comma) - yield current_line - - -@dont_increase_indentation -def standalone_comment_split( - line: Line, features: Collection[Feature] = () -) -> Iterator[Line]: - """Split standalone comments from the rest of the line.""" - if not line.contains_standalone_comments(0): - raise CannotSplit("Line does not have any standalone comments") - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - - def append_to_line(leaf: Leaf) -> Iterator[Line]: - """Append `leaf` to current line or to new line if appending impossible.""" - nonlocal current_line - try: - current_line.append_safe(leaf, preformatted=True) - except ValueError: - yield current_line - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - current_line.append(leaf) - - for leaf in line.leaves: - yield from append_to_line(leaf) - - for comment_after in line.comments_after(leaf): - yield from append_to_line(comment_after) - - if current_line: - yield current_line - - -def is_import(leaf: Leaf) -> bool: - """Return True if the given leaf starts an import statement.""" - p = leaf.parent - t = leaf.type - v = leaf.value - return bool( - t == token.NAME - and ( - (v == "import" and p and p.type == syms.import_name) - or (v == "from" and p and p.type == syms.import_from) - ) - ) - - -def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: - """Return True if the given leaf is a special comment. - Only returns true for type comments for now.""" - t = leaf.type - v = leaf.value - return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) - - -def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: - """Leave existing extra newlines if not `inside_brackets`. Remove everything - else. - - Note: don't use backslashes for formatting or you'll lose your voting rights. - """ - if not inside_brackets: - spl = leaf.prefix.split("#") - if "\\" not in spl[0]: - nl_count = spl[-1].count("\n") - if len(spl) > 1: - nl_count -= 1 - leaf.prefix = "\n" * nl_count - return - - leaf.prefix = "" - - -def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None: - """Make all string prefixes lowercase. - - If remove_u_prefix is given, also removes any u prefix from the string. - - Note: Mutates its argument. - """ - match = re.match(r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", leaf.value, re.DOTALL) - assert match is not None, f"failed to match string {leaf.value!r}" - orig_prefix = match.group(1) - new_prefix = orig_prefix.replace("F", "f").replace("B", "b").replace("U", "u") - if remove_u_prefix: - new_prefix = new_prefix.replace("u", "") - leaf.value = f"{new_prefix}{match.group(2)}" - - -def normalize_string_quotes(leaf: Leaf) -> None: - """Prefer double quotes but only if it doesn't cause more escaping. - - Adds or removes backslashes as appropriate. Doesn't parse and fix - strings nested in f-strings (yet). - - Note: Mutates its argument. 
- """ - value = leaf.value.lstrip(STRING_PREFIX_CHARS) - if value[:3] == '"""': - return - - elif value[:3] == "'''": - orig_quote = "'''" - new_quote = '"""' - elif value[0] == '"': - orig_quote = '"' - new_quote = "'" - else: - orig_quote = "'" - new_quote = '"' - first_quote_pos = leaf.value.find(orig_quote) - if first_quote_pos == -1: - return # There's an internal error - - prefix = leaf.value[:first_quote_pos] - unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") - escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") - escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") - body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)] - if "r" in prefix.casefold(): - if unescaped_new_quote.search(body): - # There's at least one unescaped new_quote in this raw string - # so converting is impossible - return - - # Do not introduce or remove backslashes in raw strings - new_body = body - else: - # remove unnecessary escapes - new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) - if body != new_body: - # Consider the string without unnecessary escapes as the original - body = new_body - leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}" - new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) - new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) - if "f" in prefix.casefold(): - matches = re.findall( - r""" - (?:[^{]|^)\{ # start of the string or a non-{ followed by a single { - ([^{].*?) # contents of the brackets except if begins with {{ - \}(?:[^}]|$) # A } followed by end of the string or a non-} - """, - new_body, - re.VERBOSE, - ) - for m in matches: - if "\\" in str(m): - # Do not introduce backslashes in interpolated expressions - return - - if new_quote == '"""' and new_body[-1:] == '"': - # edge case: - new_body = new_body[:-1] + '\\"' - orig_escape_count = body.count("\\") - new_escape_count = new_body.count("\\") - if new_escape_count > orig_escape_count: - return # Do not introduce more escaping - - if new_escape_count == orig_escape_count and orig_quote == '"': - return # Prefer double quotes - - leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}" - - -def normalize_numeric_literal(leaf: Leaf) -> None: - """Normalizes numeric (float, int, and complex) literals. - - All letters used in the representation are normalized to lowercase (except - in Python 2 long literals). - """ - text = leaf.value.lower() - if text.startswith(("0o", "0b")): - # Leave octal and binary literals alone. - pass - elif text.startswith("0x"): - # Change hex literals to upper case. - before, after = text[:2], text[2:] - text = f"{before}{after.upper()}" - elif "e" in text: - before, after = text.split("e") - sign = "" - if after.startswith("-"): - after = after[1:] - sign = "-" - elif after.startswith("+"): - after = after[1:] - before = format_float_or_int_string(before) - text = f"{before}e{sign}{after}" - elif text.endswith(("j", "l")): - number = text[:-1] - suffix = text[-1] - # Capitalize in "2L" because "l" looks too similar to "1". - if suffix == "l": - suffix = "L" - text = f"{format_float_or_int_string(number)}{suffix}" - else: - text = format_float_or_int_string(text) - leaf.value = text - - -def format_float_or_int_string(text: str) -> str: - """Formats a float string like "1.0".""" - if "." 
not in text: - return text - - before, after = text.split(".") - return f"{before or 0}.{after or 0}" - - -def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: - """Make existing optional parentheses invisible or create new ones. - - `parens_after` is a set of string leaf values immediately after which parens - should be put. - - Standardizes on visible parentheses for single-element tuples, and keeps - existing visible parentheses for other tuples and generator expressions. - """ - for pc in list_comments(node.prefix, is_endmarker=False): - if pc.value in FMT_OFF: - # This `node` has a prefix with `# fmt: off`, don't mess with parens. - return - check_lpar = False - for index, child in enumerate(list(node.children)): - # Fixes a bug where invisible parens are not properly stripped from - # assignment statements that contain type annotations. - if isinstance(child, Node) and child.type == syms.annassign: - normalize_invisible_parens(child, parens_after=parens_after) - - # Add parentheses around long tuple unpacking in assignments. - if ( - index == 0 - and isinstance(child, Node) - and child.type == syms.testlist_star_expr - ): - check_lpar = True - - if check_lpar: - if is_walrus_assignment(child): - continue - - if child.type == syms.atom: - if maybe_make_parens_invisible_in_atom(child, parent=node): - wrap_in_parentheses(node, child, visible=False) - elif is_one_tuple(child): - wrap_in_parentheses(node, child, visible=True) - elif node.type == syms.import_from: - # "import from" nodes store parentheses directly as part of - # the statement - if child.type == token.LPAR: - # make parentheses invisible - child.value = "" # type: ignore - node.children[-1].value = "" # type: ignore - elif child.type != token.STAR: - # insert invisible parentheses - node.insert_child(index, Leaf(token.LPAR, "")) - node.append_child(Leaf(token.RPAR, "")) - break - - elif not (isinstance(child, Leaf) and is_multiline_string(child)): - wrap_in_parentheses(node, child, visible=False) - - check_lpar = isinstance(child, Leaf) and child.value in parens_after - - -def normalize_fmt_off(node: Node) -> None: - """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" - try_again = True - while try_again: - try_again = convert_one_fmt_off_pair(node) - - -def convert_one_fmt_off_pair(node: Node) -> bool: - """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. - - Returns True if a pair was converted. - """ - for leaf in node.leaves(): - previous_consumed = 0 - for comment in list_comments(leaf.prefix, is_endmarker=False): - if comment.value in FMT_OFF: - # We only want standalone comments. If there's no previous leaf or - # the previous leaf is indentation, it's a standalone comment in - # disguise. - if comment.type != STANDALONE_COMMENT: - prev = preceding_leaf(leaf) - if prev and prev.type not in WHITESPACE: - continue - - ignored_nodes = list(generate_ignored_nodes(leaf)) - if not ignored_nodes: - continue - - first = ignored_nodes[0] # Can be a container node with the `leaf`. - parent = first.parent - prefix = first.prefix - first.prefix = prefix[comment.consumed :] - hidden_value = ( - comment.value + "\n" + "".join(str(n) for n in ignored_nodes) - ) - if hidden_value.endswith("\n"): - # That happens when one of the `ignored_nodes` ended with a NEWLINE - # leaf (possibly followed by a DEDENT). 
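The effect of this machinery, sketched on a toy input (illustrative, not taken from the patch's tests):

```
# Everything between the markers is glued into a single STANDALONE_COMMENT
# leaf and reproduced verbatim on output:
# fmt: off
ID_MATRIX = [
    1, 0,
    0, 1,
]
# fmt: on
```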
- hidden_value = hidden_value[:-1] - first_idx: Optional[int] = None - for ignored in ignored_nodes: - index = ignored.remove() - if first_idx is None: - first_idx = index - assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" - assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" - parent.insert_child( - first_idx, - Leaf( - STANDALONE_COMMENT, - hidden_value, - prefix=prefix[:previous_consumed] + "\n" * comment.newlines, - ), - ) - return True - - previous_consumed = comment.consumed - - return False - - -def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]: - """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. - - Stops at the end of the block. - """ - container: Optional[LN] = container_of(leaf) - while container is not None and container.type != token.ENDMARKER: - if is_fmt_on(container): - return - - # fix for fmt: on in children - if contains_fmt_on_at_column(container, leaf.column): - for child in container.children: - if contains_fmt_on_at_column(child, leaf.column): - return - yield child - else: - yield container - container = container.next_sibling - - -def is_fmt_on(container: LN) -> bool: - """Determine whether formatting is switched on within a container. - Determined by whether the last `# fmt:` comment is `on` or `off`. - """ - fmt_on = False - for comment in list_comments(container.prefix, is_endmarker=False): - if comment.value in FMT_ON: - fmt_on = True - elif comment.value in FMT_OFF: - fmt_on = False - return fmt_on - - -def contains_fmt_on_at_column(container: LN, column: int) -> bool: - """Determine if children at a given column have formatting switched on.""" - for child in container.children: - if ( - isinstance(child, Node) - and first_leaf_column(child) == column - or isinstance(child, Leaf) - and child.column == column - ): - if is_fmt_on(child): - return True - - return False - - -def first_leaf_column(node: Node) -> Optional[int]: - """Returns the column of the first leaf child of a node.""" - for child in node.children: - if isinstance(child, Leaf): - return child.column - return None - - -def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: - """If it's safe, make the parens in the atom `node` invisible, recursively. - Additionally, remove repeated, adjacent invisible parens from the atom `node` - as they are redundant. - - Returns whether the node should itself be wrapped in invisible parentheses. - - """ - if ( - node.type != syms.atom - or is_empty_tuple(node) - or is_one_tuple(node) - or (is_yield(node) and parent.type != syms.expr_stmt) - or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY - ): - return False - - first = node.children[0] - last = node.children[-1] - if first.type == token.LPAR and last.type == token.RPAR: - middle = node.children[1] - # make parentheses invisible - first.value = "" # type: ignore - last.value = "" # type: ignore - maybe_make_parens_invisible_in_atom(middle, parent=parent) - - if is_atom_with_invisible_parens(middle): - # Strip the invisible parens from `middle` by replacing - # it with the child in-between the invisible parens - middle.replace(middle.children[1]) - - return False - - return True - - -def is_atom_with_invisible_parens(node: LN) -> bool: - """Given a `LN`, determines whether it's an atom `node` with invisible - parens. Useful in dedupe-ing and normalizing parens. 
- """ - if isinstance(node, Leaf) or node.type != syms.atom: - return False - - first, last = node.children[0], node.children[-1] - return ( - isinstance(first, Leaf) - and first.type == token.LPAR - and first.value == "" - and isinstance(last, Leaf) - and last.type == token.RPAR - and last.value == "" - ) - - -def is_empty_tuple(node: LN) -> bool: - """Return True if `node` holds an empty tuple.""" - return ( - node.type == syms.atom - and len(node.children) == 2 - and node.children[0].type == token.LPAR - and node.children[1].type == token.RPAR - ) - - -def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: - """Returns `wrapped` if `node` is of the shape ( wrapped ). - - Parenthesis can be optional. Returns None otherwise""" - if len(node.children) != 3: - return None - - lpar, wrapped, rpar = node.children - if not (lpar.type == token.LPAR and rpar.type == token.RPAR): - return None - - return wrapped - - -def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: - """Wrap `child` in parentheses. - - This replaces `child` with an atom holding the parentheses and the old - child. That requires moving the prefix. - - If `visible` is False, the leaves will be valueless (and thus invisible). - """ - lpar = Leaf(token.LPAR, "(" if visible else "") - rpar = Leaf(token.RPAR, ")" if visible else "") - prefix = child.prefix - child.prefix = "" - index = child.remove() or 0 - new_child = Node(syms.atom, [lpar, child, rpar]) - new_child.prefix = prefix - parent.insert_child(index, new_child) - - -def is_one_tuple(node: LN) -> bool: - """Return True if `node` holds a tuple with one element, with or without parens.""" - if node.type == syms.atom: - gexp = unwrap_singleton_parenthesis(node) - if gexp is None or gexp.type != syms.testlist_gexp: - return False - - return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA - - return ( - node.type in IMPLICIT_TUPLE - and len(node.children) == 2 - and node.children[1].type == token.COMMA - ) - - -def is_walrus_assignment(node: LN) -> bool: - """Return True iff `node` is of the shape ( test := test )""" - inner = unwrap_singleton_parenthesis(node) - return inner is not None and inner.type == syms.namedexpr_test - - -def is_yield(node: LN) -> bool: - """Return True if `node` holds a `yield` or `yield from` expression.""" - if node.type == syms.yield_expr: - return True - - if node.type == token.NAME and node.value == "yield": # type: ignore - return True - - if node.type != syms.atom: - return False - - if len(node.children) != 3: - return False - - lpar, expr, rpar = node.children - if lpar.type == token.LPAR and rpar.type == token.RPAR: - return is_yield(expr) - - return False - - -def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: - """Return True if `leaf` is a star or double star in a vararg or kwarg. - - If `within` includes VARARGS_PARENTS, this applies to function signatures. - If `within` includes UNPACKING_PARENTS, it applies to right hand-side - extended iterable unpacking (PEP 3132) and additional unpacking - generalizations (PEP 448). - """ - if leaf.type not in VARARGS_SPECIALS or not leaf.parent: - return False - - p = leaf.parent - if p.type == syms.star_expr: - # Star expressions are also used as assignment targets in extended - # iterable unpacking (PEP 3132). See what its parent is instead. 
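The "invisible parentheses" these helpers juggle are ordinary LPAR/RPAR leaves whose value is the empty string, so they stringify to nothing. A two-assertion demonstration, assuming Black is installed (it vendors blib2to3 and imports it exactly as the code above does):

    from blib2to3.pgen2 import token
    from blib2to3.pytree import Leaf

    visible = Leaf(token.LPAR, "(")
    invisible = Leaf(token.LPAR, "")  # what the invisible-parens helpers produce

    assert str(visible) == "("
    assert str(invisible) == ""  # contributes nothing to the rendered source
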
- if not p.parent: - return False - - p = p.parent - - return p.type in within - - -def is_multiline_string(leaf: Leaf) -> bool: - """Return True if `leaf` is a multiline string that actually spans many lines.""" - return has_triple_quotes(leaf.value) and "\n" in leaf.value - - -def is_stub_suite(node: Node) -> bool: - """Return True if `node` is a suite with a stub body.""" - if ( - len(node.children) != 4 - or node.children[0].type != token.NEWLINE - or node.children[1].type != token.INDENT - or node.children[3].type != token.DEDENT - ): - return False - - return is_stub_body(node.children[2]) - - -def is_stub_body(node: LN) -> bool: - """Return True if `node` is a simple statement containing an ellipsis.""" - if not isinstance(node, Node) or node.type != syms.simple_stmt: - return False - - if len(node.children) != 2: - return False - - child = node.children[0] - return ( - child.type == syms.atom - and len(child.children) == 3 - and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) - ) - - -def max_delimiter_priority_in_atom(node: LN) -> Priority: - """Return maximum delimiter priority inside `node`. - - This is specific to atoms with contents contained in a pair of parentheses. - If `node` isn't an atom or there are no enclosing parentheses, returns 0. - """ - if node.type != syms.atom: - return 0 - - first = node.children[0] - last = node.children[-1] - if not (first.type == token.LPAR and last.type == token.RPAR): - return 0 - - bt = BracketTracker() - for c in node.children[1:-1]: - if isinstance(c, Leaf): - bt.mark(c) - else: - for leaf in c.leaves(): - bt.mark(leaf) - try: - return bt.max_delimiter_priority() - - except ValueError: - return 0 - - -def ensure_visible(leaf: Leaf) -> None: - """Make sure parentheses are visible. - - They could be invisible as part of some statements (see - :func:`normalize_invisible_parens` and :func:`visit_import_from`). - """ - if leaf.type == token.LPAR: - leaf.value = "(" - elif leaf.type == token.RPAR: - leaf.value = ")" - - -def should_split_body_explode(line: Line, opening_bracket: Leaf) -> bool: - """Should `line` be immediately split with `delimiter_split()` after RHS?""" - - if not (opening_bracket.parent and opening_bracket.value in "[{("): - return False - - # We're essentially checking if the body is delimited by commas and there's more - # than one of them (we're excluding the trailing comma and if the delimiter priority - # is still commas, that means there's more). 
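`should_split_body_explode` is the engine behind the magic trailing comma described in the commit message: a pre-existing trailing comma inside a bracket pair forces that pair onto one line per element. Its user-visible effect, as a sketch assuming Black 20.8b1 or newer is importable:

    import black

    # The trailing comma makes the call explode:
    print(black.format_str("foo(a, b,)\n", mode=black.Mode()))
    # foo(
    #     a,
    #     b,
    # )

    # Without it, the short call stays on one line:
    print(black.format_str("foo(a, b)\n", mode=black.Mode()))
    # foo(a, b)
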
- exclude = set() - trailing_comma = False - try: - last_leaf = line.leaves[-1] - if last_leaf.type == token.COMMA: - trailing_comma = True - exclude.add(id(last_leaf)) - max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) - except (IndexError, ValueError): - return False - - return max_priority == COMMA_PRIORITY and ( - trailing_comma - # always explode imports - or opening_bracket.parent.type in {syms.atom, syms.import_from} - ) - - -def is_one_tuple_between(opening: Leaf, closing: Leaf, leaves: List[Leaf]) -> bool: - """Return True if content between `opening` and `closing` looks like a one-tuple.""" - if opening.type != token.LPAR and closing.type != token.RPAR: - return False - - depth = closing.bracket_depth + 1 - for _opening_index, leaf in enumerate(leaves): - if leaf is opening: - break - - else: - raise LookupError("Opening paren not found in `leaves`") - - commas = 0 - _opening_index += 1 - for leaf in leaves[_opening_index:]: - if leaf is closing: - break - - bracket_depth = leaf.bracket_depth - if bracket_depth == depth and leaf.type == token.COMMA: - commas += 1 - if leaf.parent and leaf.parent.type in { - syms.arglist, - syms.typedargslist, - }: - commas += 1 - break + srcbuf = io.BytesIO(src) + encoding, lines = tokenize.detect_encoding(srcbuf.readline) + if not lines: + return "", encoding, "\n" - return commas < 2 + newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n" + srcbuf.seek(0) + with io.TextIOWrapper(srcbuf, encoding) as tiow: + return tiow.read(), encoding, newline -def get_features_used(node: Node) -> Set[Feature]: +def get_features_used( # noqa: C901 + node: Node, *, future_imports: Optional[Set[str]] = None +) -> Set[Feature]: """Return a set of (relatively) new Python features used in this file. 
Currently looking for: - f-strings; + - self-documenting expressions in f-strings (f"{x=}"); - underscores in numeric literals; - trailing commas after * or ** in function signatures and calls; - positional only arguments in function signatures and lambdas; + - assignment expression; + - relaxed decorator syntax; + - usage of __future__ flags (annotations); + - print / exec statements; """ features: Set[Feature] = set() + if future_imports: + features |= { + FUTURE_FLAG_TO_FEATURE[future_import] + for future_import in future_imports + if future_import in FUTURE_FLAG_TO_FEATURE + } + for n in node.pre_order(): - if n.type == token.STRING: - value_head = n.value[:2] # type: ignore + if is_string_token(n): + value_head = n.value[:2] if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}: features.add(Feature.F_STRINGS) - - elif n.type == token.NUMBER: - if "_" in n.value: # type: ignore + if Feature.DEBUG_F_STRINGS not in features: + for span_beg, span_end in iter_fexpr_spans(n.value): + if n.value[span_beg : span_end - 1].rstrip().endswith("="): + features.add(Feature.DEBUG_F_STRINGS) + break + + elif is_number_token(n): + if "_" in n.value: features.add(Feature.NUMERIC_UNDERSCORES) elif n.type == token.SLASH: - if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}: + if n.parent and n.parent.type in { + syms.typedargslist, + syms.arglist, + syms.varargslist, + }: features.add(Feature.POS_ONLY_ARGUMENTS) elif n.type == token.COLONEQUAL: features.add(Feature.ASSIGNMENT_EXPRESSIONS) + elif n.type == syms.decorator: + if len(n.children) > 1 and not is_simple_decorator_expression( + n.children[1] + ): + features.add(Feature.RELAXED_DECORATORS) + elif ( n.type in {syms.typedargslist, syms.arglist} and n.children @@ -5676,91 +1182,51 @@ def get_features_used(node: Node) -> Set[Feature]: if argch.type in STARS: features.add(feature) - return features - - -def detect_target_versions(node: Node) -> Set[TargetVersion]: - """Detect the version to target based on the nodes used.""" - features = get_features_used(node) - return { - version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] - } - - -def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: - """Generate sets of closing bracket IDs that should be omitted in a RHS. - - Brackets can be omitted if the entire trailer up to and including - a preceding closing bracket fits in one line. + elif ( + n.type in {syms.return_stmt, syms.yield_expr} + and len(n.children) >= 2 + and n.children[1].type == syms.testlist_star_expr + and any(child.type == syms.star_expr for child in n.children[1].children) + ): + features.add(Feature.UNPACKING_ON_FLOW) - Yielded sets are cumulative (contain results of previous yields, too). First - set is empty, unless the line should explode, in which case bracket pairs until - the one that needs to explode are omitted. 
- """ + elif ( + n.type == syms.annassign + and len(n.children) >= 4 + and n.children[3].type == syms.testlist_star_expr + ): + features.add(Feature.ANN_ASSIGN_EXTENDED_RHS) - omit: Set[LeafID] = set() - if not line.should_explode: - yield omit - - length = 4 * line.depth - opening_bracket: Optional[Leaf] = None - closing_bracket: Optional[Leaf] = None - inner_brackets: Set[LeafID] = set() - for index, leaf, leaf_length in enumerate_with_length(line, reversed=True): - length += leaf_length - if length > line_length: - break + elif ( + n.type == syms.except_clause + and len(n.children) >= 2 + and n.children[1].type == token.STAR + ): + features.add(Feature.EXCEPT_STAR) - has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) - if leaf.type == STANDALONE_COMMENT or has_inline_comment: - break + elif n.type in {syms.subscriptlist, syms.trailer} and any( + child.type == syms.star_expr for child in n.children + ): + features.add(Feature.VARIADIC_GENERICS) - if opening_bracket: - if leaf is opening_bracket: - opening_bracket = None - elif leaf.type in CLOSING_BRACKETS: - prev = line.leaves[index - 1] if index > 0 else None - if ( - line.should_explode - and prev - and prev.type == token.COMMA - and not is_one_tuple_between( - leaf.opening_bracket, leaf, line.leaves - ) - ): - # Never omit bracket pairs with trailing commas. - # We need to explode on those. - break - - inner_brackets.add(id(leaf)) - elif leaf.type in CLOSING_BRACKETS: - prev = line.leaves[index - 1] if index > 0 else None - if prev and prev.type in OPENING_BRACKETS: - # Empty brackets would fail a split so treat them as "inner" - # brackets (e.g. only add them to the `omit` set if another - # pair of brackets was good enough. - inner_brackets.add(id(leaf)) - continue + elif ( + n.type == syms.tname_star + and len(n.children) == 3 + and n.children[2].type == syms.star_expr + ): + features.add(Feature.VARIADIC_GENERICS) - if closing_bracket: - omit.add(id(closing_bracket)) - omit.update(inner_brackets) - inner_brackets.clear() - yield omit + return features - if ( - line.should_explode - and prev - and prev.type == token.COMMA - and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves) - ): - # Never omit bracket pairs with trailing commas. - # We need to explode on those. - break - if leaf.value: - opening_bracket = leaf.opening_bracket - closing_bracket = leaf +def detect_target_versions( + node: Node, *, future_imports: Optional[Set[str]] = None +) -> Set[TargetVersion]: + """Detect the version to target based on the nodes used.""" + features = get_features_used(node, future_imports=future_imports) + return { + version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] + } def get_future_imports(node: Node) -> Set[str]: @@ -5813,354 +1279,45 @@ def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]: return imports -@lru_cache() -def get_gitignore(root: Path) -> PathSpec: - """ Return a PathSpec matching gitignore content if present.""" - gitignore = root / ".gitignore" - lines: List[str] = [] - if gitignore.is_file(): - with gitignore.open() as gf: - lines = gf.readlines() - return PathSpec.from_lines("gitwildmatch", lines) - - -def normalize_path_maybe_ignore( - path: Path, root: Path, report: "Report" -) -> Optional[str]: - """Normalize `path`. May return `None` if `path` was ignored. - - `report` is where "path ignored" output goes. 
- """ - try: - normalized_path = path.resolve().relative_to(root).as_posix() - except OSError as e: - report.path_ignored(path, f"cannot be read because {e}") - return None - - except ValueError: - if path.is_symlink(): - report.path_ignored(path, f"is a symbolic link that points outside {root}") - return None - - raise - - return normalized_path - - -def gen_python_files( - paths: Iterable[Path], - root: Path, - include: Optional[Pattern[str]], - exclude: Pattern[str], - force_exclude: Optional[Pattern[str]], - report: "Report", - gitignore: PathSpec, -) -> Iterator[Path]: - """Generate all files under `path` whose paths are not excluded by the - `exclude_regex` or `force_exclude` regexes, but are included by the `include` regex. - - Symbolic links pointing outside of the `root` directory are ignored. - - `report` is where output about exclusions goes. - """ - assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" - for child in paths: - normalized_path = normalize_path_maybe_ignore(child, root, report) - if normalized_path is None: - continue - - # First ignore files matching .gitignore - if gitignore.match_file(normalized_path): - report.path_ignored(child, "matches the .gitignore file content") - continue - - # Then ignore with `--exclude` and `--force-exclude` options. - normalized_path = "/" + normalized_path - if child.is_dir(): - normalized_path += "/" - - exclude_match = exclude.search(normalized_path) if exclude else None - if exclude_match and exclude_match.group(0): - report.path_ignored(child, "matches the --exclude regular expression") - continue - - force_exclude_match = ( - force_exclude.search(normalized_path) if force_exclude else None - ) - if force_exclude_match and force_exclude_match.group(0): - report.path_ignored(child, "matches the --force-exclude regular expression") - continue - - if child.is_dir(): - yield from gen_python_files( - child.iterdir(), - root, - include, - exclude, - force_exclude, - report, - gitignore, - ) - - elif child.is_file(): - include_match = include.search(normalized_path) if include else True - if include_match: - yield child - - -@lru_cache() -def find_project_root(srcs: Iterable[str]) -> Path: - """Return a directory containing .git, .hg, or pyproject.toml. - - That directory will be a common parent of all files and directories - passed in `srcs`. - - If no directory in the tree contains a marker that would specify it's the - project root, the root of the file system is returned. - """ - if not srcs: - return Path("/").resolve() - - path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] - - # A list of lists of parents for each 'src'. 'src' is included as a - # "parent" of itself if it is a directory - src_parents = [ - list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs - ] - - common_base = max( - set.intersection(*(set(parents) for parents in src_parents)), - key=lambda path: path.parts, - ) - - for directory in (common_base, *common_base.parents): - if (directory / ".git").exists(): - return directory - - if (directory / ".hg").is_dir(): - return directory - - if (directory / "pyproject.toml").is_file(): - return directory - - return directory - - -@dataclass -class Report: - """Provides a reformatting counter. 
Can be rendered with `str(report)`.""" - - check: bool = False - diff: bool = False - quiet: bool = False - verbose: bool = False - change_count: int = 0 - same_count: int = 0 - failure_count: int = 0 - - def done(self, src: Path, changed: Changed) -> None: - """Increment the counter for successful reformatting. Write out a message.""" - if changed is Changed.YES: - reformatted = "would reformat" if self.check or self.diff else "reformatted" - if self.verbose or not self.quiet: - out(f"{reformatted} {src}") - self.change_count += 1 - else: - if self.verbose: - if changed is Changed.NO: - msg = f"{src} already well formatted, good job." - else: - msg = f"{src} wasn't modified on disk since last run." - out(msg, bold=False) - self.same_count += 1 - - def failed(self, src: Path, message: str) -> None: - """Increment the counter for failed reformatting. Write out a message.""" - err(f"error: cannot format {src}: {message}") - self.failure_count += 1 - - def path_ignored(self, path: Path, message: str) -> None: - if self.verbose: - out(f"{path} ignored: {message}", bold=False) - - @property - def return_code(self) -> int: - """Return the exit code that the app should use. - - This considers the current state of changed files and failures: - - if there were any failures, return 123; - - if any files were changed and --check is being used, return 1; - - otherwise return 0. - """ - # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with - # 126 we have special return codes reserved by the shell. - if self.failure_count: - return 123 - - elif self.change_count and self.check: - return 1 - - return 0 - - def __str__(self) -> str: - """Render a color report of the current state. - - Use `click.unstyle` to remove colors. - """ - if self.check or self.diff: - reformatted = "would be reformatted" - unchanged = "would be left unchanged" - failed = "would fail to reformat" - else: - reformatted = "reformatted" - unchanged = "left unchanged" - failed = "failed to reformat" - report = [] - if self.change_count: - s = "s" if self.change_count > 1 else "" - report.append( - click.style(f"{self.change_count} file{s} {reformatted}", bold=True) - ) - if self.same_count: - s = "s" if self.same_count > 1 else "" - report.append(f"{self.same_count} file{s} {unchanged}") - if self.failure_count: - s = "s" if self.failure_count > 1 else "" - report.append( - click.style(f"{self.failure_count} file{s} {failed}", fg="red") - ) - return ", ".join(report) + "." 
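The exit-code policy in `Report.return_code` is worth pinning down when scripting around Black. Reduced to a pure function mirroring the property above (a sketch; the real method reads the dataclass fields directly):

    def return_code(failure_count: int, change_count: int, check: bool) -> int:
        # Exit codes 126 and up are reserved by the shell, so failures cap at 123.
        if failure_count:
            return 123
        # Under --check, "would reformat" surfaces as exit code 1.
        if change_count and check:
            return 1
        return 0

    assert return_code(failure_count=1, change_count=5, check=True) == 123
    assert return_code(failure_count=0, change_count=2, check=True) == 1
    assert return_code(failure_count=0, change_count=2, check=False) == 0
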
- - -def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]: - filename = "" - if sys.version_info >= (3, 8): - # TODO: support Python 4+ ;) - for minor_version in range(sys.version_info[1], 4, -1): - try: - return ast.parse(src, filename, feature_version=(3, minor_version)) - except SyntaxError: - continue - else: - for feature_version in (7, 6): - try: - return ast3.parse(src, filename, feature_version=feature_version) - except SyntaxError: - continue - - return ast27.parse(src) - - -def _fixup_ast_constants( - node: Union[ast.AST, ast3.AST, ast27.AST] -) -> Union[ast.AST, ast3.AST, ast27.AST]: - """Map ast nodes deprecated in 3.8 to Constant.""" - if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)): - return ast.Constant(value=node.s) - - if isinstance(node, (ast.Num, ast3.Num, ast27.Num)): - return ast.Constant(value=node.n) - - if isinstance(node, (ast.NameConstant, ast3.NameConstant)): - return ast.Constant(value=node.value) - - return node - - -def _stringify_ast( - node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0 -) -> Iterator[str]: - """Simple visitor generating strings to compare ASTs by content.""" - - node = _fixup_ast_constants(node) - - yield f"{' ' * depth}{node.__class__.__name__}(" - - for field in sorted(node._fields): # noqa: F402 - # TypeIgnore has only one field 'lineno' which breaks this comparison - type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore) - if sys.version_info >= (3, 8): - type_ignore_classes += (ast.TypeIgnore,) - if isinstance(node, type_ignore_classes): - break - - try: - value = getattr(node, field) - except AttributeError: - continue - - yield f"{' ' * (depth+1)}{field}=" - - if isinstance(value, list): - for item in value: - # Ignore nested tuples within del statements, because we may insert - # parentheses and they change the AST. - if ( - field == "targets" - and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete)) - and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple)) - ): - for item in item.elts: - yield from _stringify_ast(item, depth + 2) - - elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)): - yield from _stringify_ast(item, depth + 2) - - elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)): - yield from _stringify_ast(value, depth + 2) - - else: - # Constant strings may be indented across newlines, if they are - # docstrings; fold spaces after newlines when comparing. Similarly, - # trailing and leading space may be removed. - if ( - isinstance(node, ast.Constant) - and field == "value" - and isinstance(value, str) - ): - normalized = re.sub(r" *\n[ \t]*", "\n", value).strip() - else: - normalized = value - yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" - - yield f"{' ' * depth}) # /{node.__class__.__name__}" - - def assert_equivalent(src: str, dst: str) -> None: """Raise AssertionError if `src` and `dst` aren't equivalent.""" try: src_ast = parse_ast(src) except Exception as exc: raise AssertionError( - "cannot use --safe with this file; failed to parse source file. AST" - f" error message: {exc}" - ) + "cannot use --safe with this file; failed to parse source file AST: " + f"{exc}\n" + "This could be caused by running Black with an older Python version " + "that does not support new syntax used in your source file." + ) from exc try: dst_ast = parse_ast(dst) except Exception as exc: log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) raise AssertionError( - f"INTERNAL ERROR: Black produced invalid code: {exc}. 
Please report a bug" - " on https://github.com/psf/black/issues. This invalid output might be" - f" helpful: {log}" + f"INTERNAL ERROR: Black produced invalid code: {exc}. " + "Please report a bug on https://github.com/psf/black/issues. " + f"This invalid output might be helpful: {log}" ) from None - src_ast_str = "\n".join(_stringify_ast(src_ast)) - dst_ast_str = "\n".join(_stringify_ast(dst_ast)) + src_ast_str = "\n".join(stringify_ast(src_ast)) + dst_ast_str = "\n".join(stringify_ast(dst_ast)) if src_ast_str != dst_ast_str: log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) raise AssertionError( "INTERNAL ERROR: Black produced code that is not equivalent to the" - " source. Please report a bug on https://github.com/psf/black/issues. " - f" This diff might be helpful: {log}" + " source. Please report a bug on " + f"https://github.com/psf/black/issues. This diff might be helpful: {log}" ) from None def assert_stable(src: str, dst: str, mode: Mode) -> None: """Raise AssertionError if `dst` reformats differently the second time.""" - newdst = format_str(dst, mode=mode) + # We shouldn't call format_str() here, because that formats the string + # twice and may hide a bug where we bounce back and forth between two + # versions. + newdst = _format_str_once(dst, mode=mode) if dst != newdst: log = dump_to_file( str(mode), @@ -6174,19 +1331,6 @@ def assert_stable(src: str, dst: str, mode: Mode) -> None: ) from None -@mypyc_attr(patchable=True) -def dump_to_file(*output: str) -> str: - """Dump `output` to a temporary file. Return path to the file.""" - with tempfile.NamedTemporaryFile( - mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" - ) as f: - for lines in output: - f.write(lines) - if lines and lines[-1] != "\n": - f.write("\n") - return f.name - - @contextmanager def nullcontext() -> Iterator[None]: """Return an empty context manager. @@ -6196,391 +1340,8 @@ def nullcontext() -> Iterator[None]: yield -def diff(a: str, b: str, a_name: str, b_name: str) -> str: - """Return a unified diff string between strings `a` and `b`.""" - import difflib - - a_lines = [line + "\n" for line in a.splitlines()] - b_lines = [line + "\n" for line in b.splitlines()] - return "".join( - difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5) - ) - - -def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: - """asyncio signal handler that cancels all `tasks` and reports to stderr.""" - err("Aborted!") - for task in tasks: - task.cancel() - - -def shutdown(loop: asyncio.AbstractEventLoop) -> None: - """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" - try: - if sys.version_info[:2] >= (3, 7): - all_tasks = asyncio.all_tasks - else: - all_tasks = asyncio.Task.all_tasks - # This part is borrowed from asyncio/runners.py in Python 3.7b2. - to_cancel = [task for task in all_tasks(loop) if not task.done()] - if not to_cancel: - return - - for task in to_cancel: - task.cancel() - loop.run_until_complete( - asyncio.gather(*to_cancel, loop=loop, return_exceptions=True) - ) - finally: - # `concurrent.futures.Future` objects cannot be cancelled once they - # are already running. There might be some when the `shutdown()` happened. - # Silence their logger's spew about the event loop being closed. - cf_logger = logging.getLogger("concurrent.futures") - cf_logger.setLevel(logging.CRITICAL) - loop.close() - - -def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: - """Replace `regex` with `replacement` twice on `original`. 
- - This is used by string normalization to perform replaces on - overlapping matches. - """ - return regex.sub(replacement, regex.sub(replacement, original)) - - -def re_compile_maybe_verbose(regex: str) -> Pattern[str]: - """Compile a regular expression string in `regex`. - - If it contains newlines, use verbose mode. - """ - if "\n" in regex: - regex = "(?x)" + regex - compiled: Pattern[str] = re.compile(regex) - return compiled - - -def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: - """Like `reversed(enumerate(sequence))` if that were possible.""" - index = len(sequence) - 1 - for element in reversed(sequence): - yield (index, element) - index -= 1 - - -def enumerate_with_length( - line: Line, reversed: bool = False -) -> Iterator[Tuple[Index, Leaf, int]]: - """Return an enumeration of leaves with their length. - - Stops prematurely on multiline strings and standalone comments. - """ - op = cast( - Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], - enumerate_reversed if reversed else enumerate, - ) - for index, leaf in op(line.leaves): - length = len(leaf.prefix) + len(leaf.value) - if "\n" in leaf.value: - return # Multiline strings, we can't continue. - - for comment in line.comments_after(leaf): - length += len(comment.value) - - yield index, leaf, length - - -def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: - """Return True if `line` is no longer than `line_length`. - - Uses the provided `line_str` rendering, if any, otherwise computes a new one. - """ - if not line_str: - line_str = line_to_string(line) - return ( - len(line_str) <= line_length - and "\n" not in line_str # multiline strings - and not line.contains_standalone_comments() - ) - - -def can_be_split(line: Line) -> bool: - """Return False if the line cannot be split *for sure*. - - This is not an exhaustive search but a cheap heuristic that we can use to - avoid some unfortunate formattings (mostly around wrapping unsplittable code - in unnecessary parentheses). - """ - leaves = line.leaves - if len(leaves) < 2: - return False - - if leaves[0].type == token.STRING and leaves[1].type == token.DOT: - call_count = 0 - dot_count = 0 - next = leaves[-1] - for leaf in leaves[-2::-1]: - if leaf.type in OPENING_BRACKETS: - if next.type not in CLOSING_BRACKETS: - return False - - call_count += 1 - elif leaf.type == token.DOT: - dot_count += 1 - elif leaf.type == token.NAME: - if not (next.type == token.DOT or next.type in OPENING_BRACKETS): - return False - - elif leaf.type not in CLOSING_BRACKETS: - return False - - if dot_count > 1 and call_count > 1: - return False - - return True - - -def can_omit_invisible_parens( - line: Line, - line_length: int, - omit_on_explode: Collection[LeafID] = (), -) -> bool: - """Does `line` have a shape safe to reformat without optional parens around it? - - Returns True for only a subset of potentially nice looking formattings but - the point is to not return false positives that end up producing lines that - are too long. - """ - bt = line.bracket_tracker - if not bt.delimiters: - # Without delimiters the optional parentheses are useless. - return True - - max_priority = bt.max_delimiter_priority() - if bt.delimiter_count_with_priority(max_priority) > 1: - # With more than one delimiter of a kind the optional parentheses read better. - return False - - if max_priority == DOT_PRIORITY: - # A single stranded method call doesn't require optional parentheses. 
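`sub_twice` exists because `re.sub` never revisits text it has already consumed, so adjacent escape sequences that overlap by one character need a second pass. A self-contained demonstration of the failure mode it works around, using a pattern of the same shape as `escaped_orig_quote` earlier in this patch:

    import re

    # Rewrite escaped quotes \' as bare quotes. In \'\' the two matches share
    # the character between them, so a single pass misses the second escape.
    pattern = re.compile(r"([^\\]|^)\\((?:\\\\)*)'")

    def sub_twice(regex, replacement, original):
        return regex.sub(replacement, regex.sub(replacement, original))

    body = r"\'\'"
    assert pattern.sub(r"\1\2'", body) == r"'\'"       # one pass: one escape left
    assert sub_twice(pattern, r"\1\2'", body) == "''"  # two passes: both rewritten
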
- return True - - assert len(line.leaves) >= 2, "Stranded delimiter" - - # With a single delimiter, omit if the expression starts or ends with - # a bracket. - first = line.leaves[0] - second = line.leaves[1] - if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: - if _can_omit_opening_paren(line, first=first, line_length=line_length): - return True - - # Note: we are not returning False here because a line might have *both* - # a leading opening bracket and a trailing closing bracket. If the - # opening bracket doesn't match our rule, maybe the closing will. - - penultimate = line.leaves[-2] - last = line.leaves[-1] - if line.should_explode: - try: - penultimate, last = last_two_except(line.leaves, omit=omit_on_explode) - except LookupError: - # Turns out we'd omit everything. We cannot skip the optional parentheses. - return False - - if ( - last.type == token.RPAR - or last.type == token.RBRACE - or ( - # don't use indexing for omitting optional parentheses; - # it looks weird - last.type == token.RSQB - and last.parent - and last.parent.type != syms.trailer - ) - ): - if penultimate.type in OPENING_BRACKETS: - # Empty brackets don't help. - return False - - if is_multiline_string(first): - # Additional wrapping of a multiline string in this situation is - # unnecessary. - return True - - if line.should_explode and penultimate.type == token.COMMA: - # The rightmost non-omitted bracket pair is the one we want to explode on. - return True - - if _can_omit_closing_paren(line, last=last, line_length=line_length): - return True - - return False - - -def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: - """See `can_omit_invisible_parens`.""" - remainder = False - length = 4 * line.depth - _index = -1 - for _index, leaf, leaf_length in enumerate_with_length(line): - if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: - remainder = True - if remainder: - length += leaf_length - if length > line_length: - break - - if leaf.type in OPENING_BRACKETS: - # There are brackets we can further split on. - remainder = False - - else: - # checked the entire string and line length wasn't exceeded - if len(line.leaves) == _index + 1: - return True - - return False - - -def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: - """See `can_omit_invisible_parens`.""" - length = 4 * line.depth - seen_other_brackets = False - for _index, leaf, leaf_length in enumerate_with_length(line): - length += leaf_length - if leaf is last.opening_bracket: - if seen_other_brackets or length <= line_length: - return True - - elif leaf.type in OPENING_BRACKETS: - # There are brackets we can further split on. 
- seen_other_brackets = True - - return False - - -def last_two_except(leaves: List[Leaf], omit: Collection[LeafID]) -> Tuple[Leaf, Leaf]: - """Return (penultimate, last) leaves skipping brackets in `omit` and contents.""" - stop_after = None - last = None - for leaf in reversed(leaves): - if stop_after: - if leaf is stop_after: - stop_after = None - continue - - if last: - return leaf, last - - if id(leaf) in omit: - stop_after = leaf.opening_bracket - else: - last = leaf - else: - raise LookupError("Last two leaves were also skipped") - - -def run_transformer( - line: Line, - transform: Transformer, - mode: Mode, - features: Collection[Feature], - *, - line_str: str = "", -) -> List[Line]: - if not line_str: - line_str = line_to_string(line) - result: List[Line] = [] - for transformed_line in transform(line, features): - if str(transformed_line).strip("\n") == line_str: - raise CannotTransform("Line transformer returned an unchanged result") - - result.extend(transform_line(transformed_line, mode=mode, features=features)) - - if not ( - transform.__name__ == "rhs" - and line.bracket_tracker.invisible - and not any(bracket.value for bracket in line.bracket_tracker.invisible) - and not line.contains_multiline_strings() - and not result[0].contains_uncollapsable_type_comments() - and not result[0].contains_unsplittable_type_ignore() - and not is_line_short_enough(result[0], line_length=mode.line_length) - ): - return result - - line_copy = line.clone() - append_leaves(line_copy, line, line.leaves) - features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES} - second_opinion = run_transformer( - line_copy, transform, mode, features_fop, line_str=line_str - ) - if all( - is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion - ): - result = second_opinion - return result - - -def get_cache_file(mode: Mode) -> Path: - return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" - - -def read_cache(mode: Mode) -> Cache: - """Read the cache if it exists and is well formed. - - If it is not well formed, the call to write_cache later should resolve the issue. - """ - cache_file = get_cache_file(mode) - if not cache_file.exists(): - return {} - - with cache_file.open("rb") as fobj: - try: - cache: Cache = pickle.load(fobj) - except (pickle.UnpicklingError, ValueError): - return {} - - return cache - - -def get_cache_info(path: Path) -> CacheInfo: - """Return the information used to check if a file is already formatted or not.""" - stat = path.stat() - return stat.st_mtime, stat.st_size - - -def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: - """Split an iterable of paths in `sources` into two sets. - - The first contains paths of files that modified on disk or are not in the - cache. The other contains paths to non-modified files. - """ - todo, done = set(), set() - for src in sources: - src = src.resolve() - if cache.get(src) != get_cache_info(src): - todo.add(src) - else: - done.add(src) - return todo, done - - -def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None: - """Update the cache file.""" - cache_file = get_cache_file(mode) - try: - CACHE_DIR.mkdir(parents=True, exist_ok=True) - new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}} - with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: - pickle.dump(new_cache, f, protocol=4) - os.replace(f.name, cache_file) - except OSError: - pass - - def patch_click() -> None: - """Make Click not crash. 
+ """Make Click not crash on Python 3.6 with LANG=C. On certain misconfigured environments, Python 3 selects the ASCII encoding as the default which restricts paths that it can access during the lifetime of the @@ -6590,67 +1351,39 @@ def patch_click() -> None: file paths is minimal since it's Python source code. Moreover, this crash was spurious on Python 3.7 thanks to PEP 538 and PEP 540. """ + modules: List[Any] = [] try: from click import core + except ImportError: + pass + else: + modules.append(core) + try: + # Removed in Click 8.1.0 and newer; we keep this around for users who have + # older versions installed. from click import _unicodefun # type: ignore - except ModuleNotFoundError: - return + except ImportError: + pass + else: + modules.append(_unicodefun) - for module in (core, _unicodefun): + for module in modules: if hasattr(module, "_verify_python3_env"): - module._verify_python3_env = lambda: None + module._verify_python3_env = lambda: None # type: ignore + if hasattr(module, "_verify_python_env"): + module._verify_python_env = lambda: None # type: ignore def patched_main() -> None: - freeze_support() - patch_click() - main() - - -def is_docstring(leaf: Leaf) -> bool: - if not is_multiline_string(leaf): - # For the purposes of docstring re-indentation, we don't need to do anything - # with single-line docstrings. - return False - - if prev_siblings_are( - leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] - ): - return True + # PyInstaller patches multiprocessing to need freeze_support() even in non-Windows + # environments so just assume we always need to call it if frozen. + if getattr(sys, "frozen", False): + from multiprocessing import freeze_support - # Multiline docstring on the same line as the `def`. - if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]): - # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python - # grammar. We're safe to return True without further checks. 
- return True + freeze_support() - return False - - -def fix_docstring(docstring: str, prefix: str) -> str: - # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation - if not docstring: - return "" - # Convert tabs to spaces (following the normal Python rules) - # and split into a list of lines: - lines = docstring.expandtabs().splitlines() - # Determine minimum indentation (first line doesn't count): - indent = sys.maxsize - for line in lines[1:]: - stripped = line.lstrip() - if stripped: - indent = min(indent, len(line) - len(stripped)) - # Remove indentation (first line is special): - trimmed = [lines[0].strip()] - if indent < sys.maxsize: - last_line_idx = len(lines) - 2 - for i, line in enumerate(lines[1:]): - stripped_line = line[indent:].rstrip() - if stripped_line or i == last_line_idx: - trimmed.append(prefix + stripped_line) - else: - trimmed.append("") - return "\n".join(trimmed) + patch_click() + main() if __name__ == "__main__": diff --git a/src/black/brackets.py b/src/black/brackets.py new file mode 100644 index 00000000000..3566f5b6c37 --- /dev/null +++ b/src/black/brackets.py @@ -0,0 +1,342 @@ +"""Builds on top of nodes.py to track brackets.""" + +import sys +from dataclasses import dataclass, field +from typing import Dict, Iterable, List, Optional, Tuple, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.nodes import ( + BRACKET, + CLOSING_BRACKETS, + COMPARATORS, + LOGIC_OPERATORS, + MATH_OPERATORS, + OPENING_BRACKETS, + UNPACKING_PARENTS, + VARARGS_PARENTS, + is_vararg, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] +Depth = int +LeafID = int +NodeType = int +Priority = int + + +COMPREHENSION_PRIORITY: Final = 20 +COMMA_PRIORITY: Final = 18 +TERNARY_PRIORITY: Final = 16 +LOGIC_PRIORITY: Final = 14 +STRING_PRIORITY: Final = 12 +COMPARATOR_PRIORITY: Final = 10 +MATH_PRIORITIES: Final = { + token.VBAR: 9, + token.CIRCUMFLEX: 8, + token.AMPER: 7, + token.LEFTSHIFT: 6, + token.RIGHTSHIFT: 6, + token.PLUS: 5, + token.MINUS: 5, + token.STAR: 4, + token.SLASH: 4, + token.DOUBLESLASH: 4, + token.PERCENT: 4, + token.AT: 4, + token.TILDE: 3, + token.DOUBLESTAR: 2, +} +DOT_PRIORITY: Final = 1 + + +class BracketMatchError(Exception): + """Raised when an opening bracket is unable to be matched to a closing bracket.""" + + +@dataclass +class BracketTracker: + """Keeps track of brackets on a line.""" + + depth: int = 0 + bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) + delimiters: Dict[LeafID, Priority] = field(default_factory=dict) + previous: Optional[Leaf] = None + _for_loop_depths: List[int] = field(default_factory=list) + _lambda_argument_depths: List[int] = field(default_factory=list) + invisible: List[Leaf] = field(default_factory=list) + + def mark(self, leaf: Leaf) -> None: + """Mark `leaf` with bracket-related metadata. Keep track of delimiters. + + All leaves receive an int `bracket_depth` field that stores how deep + within brackets a given leaf is. 0 means there are no enclosing brackets + that started on this line. + + If a leaf is itself a closing bracket, it receives an `opening_bracket` + field that it forms a pair with. This is a one-directional link to + avoid reference cycles. + + If a leaf is a delimiter (a token on which Black can split the line if + needed) and it's on depth 0, its `id()` is stored in the tracker's + `delimiters` field. 
+ """ + if leaf.type == token.COMMENT: + return + + self.maybe_decrement_after_for_loop_variable(leaf) + self.maybe_decrement_after_lambda_arguments(leaf) + if leaf.type in CLOSING_BRACKETS: + self.depth -= 1 + try: + opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) + except KeyError as e: + raise BracketMatchError( + "Unable to match a closing bracket to the following opening" + f" bracket: {leaf}" + ) from e + leaf.opening_bracket = opening_bracket + if not leaf.value: + self.invisible.append(leaf) + leaf.bracket_depth = self.depth + if self.depth == 0: + delim = is_split_before_delimiter(leaf, self.previous) + if delim and self.previous is not None: + self.delimiters[id(self.previous)] = delim + else: + delim = is_split_after_delimiter(leaf, self.previous) + if delim: + self.delimiters[id(leaf)] = delim + if leaf.type in OPENING_BRACKETS: + self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf + self.depth += 1 + if not leaf.value: + self.invisible.append(leaf) + self.previous = leaf + self.maybe_increment_lambda_arguments(leaf) + self.maybe_increment_for_loop_variable(leaf) + + def any_open_brackets(self) -> bool: + """Return True if there is an yet unmatched open bracket on the line.""" + return bool(self.bracket_match) + + def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: + """Return the highest priority of a delimiter found on the line. + + Values are consistent with what `is_split_*_delimiter()` return. + Raises ValueError on no delimiters. + """ + return max(v for k, v in self.delimiters.items() if k not in exclude) + + def delimiter_count_with_priority(self, priority: Priority = 0) -> int: + """Return the number of delimiters with the given `priority`. + + If no `priority` is passed, defaults to max priority on the line. + """ + if not self.delimiters: + return 0 + + priority = priority or self.max_delimiter_priority() + return sum(1 for p in self.delimiters.values() if p == priority) + + def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: + """In a for loop, or comprehension, the variables are often unpacks. + + To avoid splitting on the comma in this situation, increase the depth of + tokens between `for` and `in`. + """ + if leaf.type == token.NAME and leaf.value == "for": + self.depth += 1 + self._for_loop_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: + """See `maybe_increment_for_loop_variable` above for explanation.""" + if ( + self._for_loop_depths + and self._for_loop_depths[-1] == self.depth + and leaf.type == token.NAME + and leaf.value == "in" + ): + self.depth -= 1 + self._for_loop_depths.pop() + return True + + return False + + def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: + """In a lambda expression, there might be more than one argument. + + To avoid splitting on the comma in this situation, increase the depth of + tokens between `lambda` and `:`. 
+ """ + if leaf.type == token.NAME and leaf.value == "lambda": + self.depth += 1 + self._lambda_argument_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: + """See `maybe_increment_lambda_arguments` above for explanation.""" + if ( + self._lambda_argument_depths + and self._lambda_argument_depths[-1] == self.depth + and leaf.type == token.COLON + ): + self.depth -= 1 + self._lambda_argument_depths.pop() + return True + + return False + + def get_open_lsqb(self) -> Optional[Leaf]: + """Return the most recent opening square bracket (if any).""" + return self.bracket_match.get((self.depth - 1, token.RSQB)) + + +def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break after it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break after themselves. + + Higher numbers are higher priority. + """ + if leaf.type == token.COMMA: + return COMMA_PRIORITY + + return 0 + + +def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break before it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break before themselves. + + Higher numbers are higher priority. + """ + if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): + # * and ** might also be MATH_OPERATORS but in this case they are not. + # Don't treat them as a delimiter. + return 0 + + if ( + leaf.type == token.DOT + and leaf.parent + and leaf.parent.type not in {syms.import_from, syms.dotted_name} + and (previous is None or previous.type in CLOSING_BRACKETS) + ): + return DOT_PRIORITY + + if ( + leaf.type in MATH_OPERATORS + and leaf.parent + and leaf.parent.type not in {syms.factor, syms.star_expr} + ): + return MATH_PRIORITIES[leaf.type] + + if leaf.type in COMPARATORS: + return COMPARATOR_PRIORITY + + if ( + leaf.type == token.STRING + and previous is not None + and previous.type == token.STRING + ): + return STRING_PRIORITY + + if leaf.type not in {token.NAME, token.ASYNC}: + return 0 + + if ( + leaf.value == "for" + and leaf.parent + and leaf.parent.type in {syms.comp_for, syms.old_comp_for} + or leaf.type == token.ASYNC + ): + if ( + not isinstance(leaf.prev_sibling, Leaf) + or leaf.prev_sibling.value != "async" + ): + return COMPREHENSION_PRIORITY + + if ( + leaf.value == "if" + and leaf.parent + and leaf.parent.type in {syms.comp_if, syms.old_comp_if} + ): + return COMPREHENSION_PRIORITY + + if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: + return TERNARY_PRIORITY + + if leaf.value == "is": + return COMPARATOR_PRIORITY + + if ( + leaf.value == "in" + and leaf.parent + and leaf.parent.type in {syms.comp_op, syms.comparison} + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "not" + ) + ): + return COMPARATOR_PRIORITY + + if ( + leaf.value == "not" + and leaf.parent + and leaf.parent.type == syms.comp_op + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "is" + ) + ): + return COMPARATOR_PRIORITY + + if leaf.value in LOGIC_OPERATORS and leaf.parent: + return LOGIC_PRIORITY + + return 0 + + +def max_delimiter_priority_in_atom(node: LN) -> Priority: + """Return maximum delimiter priority inside `node`. 
+ + This is specific to atoms with contents contained in a pair of parentheses. + If `node` isn't an atom or there are no enclosing parentheses, returns 0. + """ + if node.type != syms.atom: + return 0 + + first = node.children[0] + last = node.children[-1] + if not (first.type == token.LPAR and last.type == token.RPAR): + return 0 + + bt = BracketTracker() + for c in node.children[1:-1]: + if isinstance(c, Leaf): + bt.mark(c) + else: + for leaf in c.leaves(): + bt.mark(leaf) + try: + return bt.max_delimiter_priority() + + except ValueError: + return 0 diff --git a/src/black/cache.py b/src/black/cache.py new file mode 100644 index 00000000000..9455ff44772 --- /dev/null +++ b/src/black/cache.py @@ -0,0 +1,97 @@ +"""Caching of formatted files with feature-based invalidation.""" + +import os +import pickle +import tempfile +from pathlib import Path +from typing import Dict, Iterable, Set, Tuple + +from platformdirs import user_cache_dir + +from _black_version import version as __version__ +from black.mode import Mode + +# types +Timestamp = float +FileSize = int +CacheInfo = Tuple[Timestamp, FileSize] +Cache = Dict[str, CacheInfo] + + +def get_cache_dir() -> Path: + """Get the cache directory used by black. + + Users can customize this directory on all systems using `BLACK_CACHE_DIR` + environment variable. By default, the cache directory is the user cache directory + under the black application. + + This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid + repeated calls. + """ + # NOTE: Function mostly exists as a clean way to test getting the cache directory. + default_cache_dir = user_cache_dir("black", version=__version__) + cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir)) + return cache_dir + + +CACHE_DIR = get_cache_dir() + + +def read_cache(mode: Mode) -> Cache: + """Read the cache if it exists and is well formed. + + If it is not well formed, the call to write_cache later should resolve the issue. + """ + cache_file = get_cache_file(mode) + if not cache_file.exists(): + return {} + + with cache_file.open("rb") as fobj: + try: + cache: Cache = pickle.load(fobj) + except (pickle.UnpicklingError, ValueError, IndexError): + return {} + + return cache + + +def get_cache_file(mode: Mode) -> Path: + return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" + + +def get_cache_info(path: Path) -> CacheInfo: + """Return the information used to check if a file is already formatted or not.""" + stat = path.stat() + return stat.st_mtime, stat.st_size + + +def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: + """Split an iterable of paths in `sources` into two sets. + + The first contains paths of files that modified on disk or are not in the + cache. The other contains paths to non-modified files. 
+ """ + todo, done = set(), set() + for src in sources: + res_src = src.resolve() + if cache.get(str(res_src)) != get_cache_info(res_src): + todo.add(src) + else: + done.add(src) + return todo, done + + +def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None: + """Update the cache file.""" + cache_file = get_cache_file(mode) + try: + CACHE_DIR.mkdir(parents=True, exist_ok=True) + new_cache = { + **cache, + **{str(src.resolve()): get_cache_info(src) for src in sources}, + } + with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: + pickle.dump(new_cache, f, protocol=4) + os.replace(f.name, cache_file) + except OSError: + pass diff --git a/src/black/comments.py b/src/black/comments.py new file mode 100644 index 00000000000..dce83abf1bb --- /dev/null +++ b/src/black/comments.py @@ -0,0 +1,332 @@ +import re +import sys +from dataclasses import dataclass +from functools import lru_cache +from typing import Iterator, List, Optional, Union + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +from black.nodes import ( + CLOSING_BRACKETS, + STANDALONE_COMMENT, + WHITESPACE, + container_of, + first_leaf_of, + preceding_leaf, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] + +FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"} +FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"} +FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP} +FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"} + +COMMENT_EXCEPTIONS = {True: " !:#'", False: " !:#'%"} + + +@dataclass +class ProtoComment: + """Describes a piece of syntax that is a comment. + + It's not a :class:`blib2to3.pytree.Leaf` so that: + + * it can be cached (`Leaf` objects should not be reused more than once as + they store their lineno, column, prefix, and parent information); + * `newlines` and `consumed` fields are kept separate from the `value`. This + simplifies handling of special marker comments like ``# fmt: off/on``. + """ + + type: int # token.COMMENT or STANDALONE_COMMENT + value: str # content of the comment + newlines: int # how many newlines before the comment + consumed: int # how many characters of the original leaf's prefix did we consume + + +def generate_comments(leaf: LN, *, preview: bool) -> Iterator[Leaf]: + """Clean the prefix of the `leaf` and generate comments from it, if any. + + Comments in lib2to3 are shoved into the whitespace prefix. This happens + in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation + move because it does away with modifying the grammar to include all the + possible places in which comments can be placed. + + The sad consequence for us though is that comments don't "belong" anywhere. + This is why this function generates simple parentless Leaf objects for + comments. We simply don't know what the correct parent should be. + + No matter though, we can live without this. We really only need to + differentiate between inline and standalone comments. The latter don't + share the line with any code. + + Inline comments are emitted as regular token.COMMENT leaves. Standalone + are emitted with a fake STANDALONE_COMMENT token identifier. 
+ """ + for pc in list_comments( + leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, preview=preview + ): + yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) + + +@lru_cache(maxsize=4096) +def list_comments( + prefix: str, *, is_endmarker: bool, preview: bool +) -> List[ProtoComment]: + """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" + result: List[ProtoComment] = [] + if not prefix or "#" not in prefix: + return result + + consumed = 0 + nlines = 0 + ignored_lines = 0 + for index, line in enumerate(re.split("\r?\n", prefix)): + consumed += len(line) + 1 # adding the length of the split '\n' + line = line.lstrip() + if not line: + nlines += 1 + if not line.startswith("#"): + # Escaped newlines outside of a comment are not really newlines at + # all. We treat a single-line comment following an escaped newline + # as a simple trailing comment. + if line.endswith("\\"): + ignored_lines += 1 + continue + + if index == ignored_lines and not is_endmarker: + comment_type = token.COMMENT # simple trailing comment + else: + comment_type = STANDALONE_COMMENT + comment = make_comment(line, preview=preview) + result.append( + ProtoComment( + type=comment_type, value=comment, newlines=nlines, consumed=consumed + ) + ) + nlines = 0 + return result + + +def make_comment(content: str, *, preview: bool) -> str: + """Return a consistently formatted comment from the given `content` string. + + All comments (except for "##", "#!", "#:", '#'") should have a single + space between the hash sign and the content. + + If `content` didn't start with a hash sign, one is provided. + """ + content = content.rstrip() + if not content: + return "#" + + if content[0] == "#": + content = content[1:] + NON_BREAKING_SPACE = " " + if ( + content + and content[0] == NON_BREAKING_SPACE + and not content.lstrip().startswith("type:") + ): + content = " " + content[1:] # Replace NBSP by a simple space + if content and content[0] not in COMMENT_EXCEPTIONS[preview]: + content = " " + content + return "#" + content + + +def normalize_fmt_off(node: Node, *, preview: bool) -> None: + """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" + try_again = True + while try_again: + try_again = convert_one_fmt_off_pair(node, preview=preview) + + +def convert_one_fmt_off_pair(node: Node, *, preview: bool) -> bool: + """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. + + Returns True if a pair was converted. + """ + for leaf in node.leaves(): + previous_consumed = 0 + for comment in list_comments(leaf.prefix, is_endmarker=False, preview=preview): + if comment.value not in FMT_PASS: + previous_consumed = comment.consumed + continue + # We only want standalone comments. If there's no previous leaf or + # the previous leaf is indentation, it's a standalone comment in + # disguise. + if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT: + prev = preceding_leaf(leaf) + if prev: + if comment.value in FMT_OFF and prev.type not in WHITESPACE: + continue + if comment.value in FMT_SKIP and prev.type in WHITESPACE: + continue + + ignored_nodes = list(generate_ignored_nodes(leaf, comment, preview=preview)) + if not ignored_nodes: + continue + + first = ignored_nodes[0] # Can be a container node with the `leaf`. 
+ parent = first.parent + prefix = first.prefix + if comment.value in FMT_OFF: + first.prefix = prefix[comment.consumed :] + if comment.value in FMT_SKIP: + first.prefix = "" + standalone_comment_prefix = prefix + else: + standalone_comment_prefix = ( + prefix[:previous_consumed] + "\n" * comment.newlines + ) + hidden_value = "".join(str(n) for n in ignored_nodes) + if comment.value in FMT_OFF: + hidden_value = comment.value + "\n" + hidden_value + if comment.value in FMT_SKIP: + hidden_value += " " + comment.value + if hidden_value.endswith("\n"): + # That happens when one of the `ignored_nodes` ended with a NEWLINE + # leaf (possibly followed by a DEDENT). + hidden_value = hidden_value[:-1] + first_idx: Optional[int] = None + for ignored in ignored_nodes: + index = ignored.remove() + if first_idx is None: + first_idx = index + assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" + assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" + parent.insert_child( + first_idx, + Leaf( + STANDALONE_COMMENT, + hidden_value, + prefix=standalone_comment_prefix, + ), + ) + return True + + return False + + +def generate_ignored_nodes( + leaf: Leaf, comment: ProtoComment, *, preview: bool +) -> Iterator[LN]: + """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. + + If comment is skip, returns leaf only. + Stops at the end of the block. + """ + if comment.value in FMT_SKIP: + yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, preview=preview) + return + container: Optional[LN] = container_of(leaf) + while container is not None and container.type != token.ENDMARKER: + if is_fmt_on(container, preview=preview): + return + + # fix for fmt: on in children + if children_contains_fmt_on(container, preview=preview): + for child in container.children: + if isinstance(child, Leaf) and is_fmt_on(child, preview=preview): + if child.type in CLOSING_BRACKETS: + # This means `# fmt: on` is placed at a different bracket level + # than `# fmt: off`. This is an invalid use, but as a courtesy, + # we include this closing bracket in the ignored nodes. + # The alternative is to fail the formatting. + yield child + return + if children_contains_fmt_on(child, preview=preview): + return + yield child + else: + if container.type == token.DEDENT and container.next_sibling is None: + # This can happen when there is no matching `# fmt: on` comment at the + # same level as `# fmt: on`. We need to keep this DEDENT. + return + yield container + container = container.next_sibling + + +def _generate_ignored_nodes_from_fmt_skip( + leaf: Leaf, comment: ProtoComment, *, preview: bool +) -> Iterator[LN]: + """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`.""" + prev_sibling = leaf.prev_sibling + parent = leaf.parent + # Need to properly format the leaf prefix to compare it to comment.value, + # which is also formatted + comments = list_comments(leaf.prefix, is_endmarker=False, preview=preview) + if not comments or comment.value != comments[0].value: + return + if prev_sibling is not None: + leaf.prefix = "" + siblings = [prev_sibling] + while "\n" not in prev_sibling.prefix and prev_sibling.prev_sibling is not None: + prev_sibling = prev_sibling.prev_sibling + siblings.insert(0, prev_sibling) + yield from siblings + elif ( + parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE + ): + # The `# fmt: skip` is on the colon line of the if/while/def/class/... + # statements. 
The ignored nodes should be previous siblings of the + # parent suite node. + leaf.prefix = "" + ignored_nodes: List[LN] = [] + parent_sibling = parent.prev_sibling + while parent_sibling is not None and parent_sibling.type != syms.suite: + ignored_nodes.insert(0, parent_sibling) + parent_sibling = parent_sibling.prev_sibling + # Special case for `async_stmt` where the ASYNC token is on the + # grandparent node. + grandparent = parent.parent + if ( + grandparent is not None + and grandparent.prev_sibling is not None + and grandparent.prev_sibling.type == token.ASYNC + ): + ignored_nodes.insert(0, grandparent.prev_sibling) + yield from iter(ignored_nodes) + + +def is_fmt_on(container: LN, preview: bool) -> bool: + """Determine whether formatting is switched on within a container. + Determined by whether the last `# fmt:` comment is `on` or `off`. + """ + fmt_on = False + for comment in list_comments(container.prefix, is_endmarker=False, preview=preview): + if comment.value in FMT_ON: + fmt_on = True + elif comment.value in FMT_OFF: + fmt_on = False + return fmt_on + + +def children_contains_fmt_on(container: LN, *, preview: bool) -> bool: + """Determine if children have formatting switched on.""" + for child in container.children: + leaf = first_leaf_of(child) + if leaf is not None and is_fmt_on(leaf, preview=preview): + return True + + return False + + +def contains_pragma_comment(comment_list: List[Leaf]) -> bool: + """ + Returns: + True iff one of the comments in @comment_list is a pragma used by one + of the more common static analysis tools for python (e.g. mypy, flake8, + pylint). + """ + for comment in comment_list: + if comment.value.startswith(("# type:", "# noqa", "# pylint:")): + return True + + return False diff --git a/src/black/concurrency.py b/src/black/concurrency.py new file mode 100644 index 00000000000..10e288f4f93 --- /dev/null +++ b/src/black/concurrency.py @@ -0,0 +1,191 @@ +""" +Formatting many files at once via multiprocessing. Contains entrypoint and utilities. + +NOTE: this module is only imported if we need to format several files at once. +""" + +import asyncio +import logging +import os +import signal +import sys +from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor +from multiprocessing import Manager +from pathlib import Path +from typing import Any, Iterable, Optional, Set + +from mypy_extensions import mypyc_attr + +from black import WriteBack, format_file_in_place +from black.cache import Cache, filter_cached, read_cache, write_cache +from black.mode import Mode +from black.output import err +from black.report import Changed, Report + + +def maybe_install_uvloop() -> None: + """If our environment has uvloop installed we use it. + + This is called only from command-line entry points to avoid + interfering with the parent process if Black is used as a library. + """ + try: + import uvloop + + uvloop.install() + except ImportError: + pass + + +def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: + """asyncio signal handler that cancels all `tasks` and reports to stderr.""" + err("Aborted!") + for task in tasks: + task.cancel() + + +def shutdown(loop: asyncio.AbstractEventLoop) -> None: + """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" + try: + if sys.version_info[:2] >= (3, 7): + all_tasks = asyncio.all_tasks + else: + all_tasks = asyncio.Task.all_tasks + # This part is borrowed from asyncio/runners.py in Python 3.7b2. 
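+        # Gather everything still pending, cancel it, and let the tasks
+        # process their cancellation before the loop is closed below.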
+        to_cancel = [task for task in all_tasks(loop) if not task.done()]
+        if not to_cancel:
+            return
+
+        for task in to_cancel:
+            task.cancel()
+        loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
+    finally:
+        # `concurrent.futures.Future` objects cannot be cancelled once they
+        # are already running. There might be some when the `shutdown()` happened.
+        # Silence their logger's spew about the event loop being closed.
+        cf_logger = logging.getLogger("concurrent.futures")
+        cf_logger.setLevel(logging.CRITICAL)
+        loop.close()
+
+
+# diff-shades depends on being able to monkeypatch this function to operate. I
+# know it's not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
+@mypyc_attr(patchable=True)
+def reformat_many(
+    sources: Set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: Mode,
+    report: Report,
+    workers: Optional[int],
+) -> None:
+    """Reformat multiple files using a ProcessPoolExecutor."""
+    maybe_install_uvloop()
+
+    executor: Executor
+    if workers is None:
+        workers = os.cpu_count() or 1
+    if sys.platform == "win32":
+        # Work around https://bugs.python.org/issue26903
+        workers = min(workers, 60)
+    try:
+        executor = ProcessPoolExecutor(max_workers=workers)
+    except (ImportError, NotImplementedError, OSError):
+        # we arrive here if the underlying system does not support multi-processing
+        # like in AWS Lambda or Termux, in which case we gracefully fall back to
+        # a ThreadPoolExecutor with just a single worker (more workers would not do
+        # us any good due to the Global Interpreter Lock)
+        executor = ThreadPoolExecutor(max_workers=1)
+
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    try:
+        loop.run_until_complete(
+            schedule_formatting(
+                sources=sources,
+                fast=fast,
+                write_back=write_back,
+                mode=mode,
+                report=report,
+                loop=loop,
+                executor=executor,
+            )
+        )
+    finally:
+        try:
+            shutdown(loop)
+        finally:
+            asyncio.set_event_loop(None)
+        if executor is not None:
+            executor.shutdown()
+
+
+async def schedule_formatting(
+    sources: Set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: Mode,
+    report: "Report",
+    loop: asyncio.AbstractEventLoop,
+    executor: "Executor",
+) -> None:
+    """Run formatting of `sources` in parallel using the provided `executor`.
+
+    (Use ProcessPoolExecutors for actual parallelism.)
+
+    `write_back`, `fast`, and `mode` options are passed to
+    :func:`format_file_in_place`.
+    """
+    cache: Cache = {}
+    if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
+        cache = read_cache(mode)
+        sources, cached = filter_cached(cache, sources)
+        for src in sorted(cached):
+            report.done(src, Changed.CACHED)
+    if not sources:
+        return
+
+    cancelled = []
+    sources_to_cache = []
+    lock = None
+    if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
+        # For diff output, we need locks to ensure we don't interleave output
+        # from different processes.
+        manager = Manager()
+        lock = manager.Lock()
+    tasks = {
+        asyncio.ensure_future(
+            loop.run_in_executor(
+                executor, format_file_in_place, src, fast, mode, write_back, lock
+            )
+        ): src
+        for src in sorted(sources)
+    }
+    pending = tasks.keys()
+    try:
+        loop.add_signal_handler(signal.SIGINT, cancel, pending)
+        loop.add_signal_handler(signal.SIGTERM, cancel, pending)
+    except NotImplementedError:
+        # There are no good alternatives for these on Windows.
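+        # (asyncio implements add_signal_handler only for Unix event loops.)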
+ pass + while pending: + done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + for task in done: + src = tasks.pop(task) + if task.cancelled(): + cancelled.append(task) + elif task.exception(): + report.failed(src, str(task.exception())) + else: + changed = Changed.YES if task.result() else Changed.NO + # If the file was written back or was successfully checked as + # well-formatted, store this information in the cache. + if write_back is WriteBack.YES or ( + write_back is WriteBack.CHECK and changed is Changed.NO + ): + sources_to_cache.append(src) + report.done(src, changed) + if cancelled: + await asyncio.gather(*cancelled, return_exceptions=True) + if sources_to_cache: + write_cache(cache, sources_to_cache, mode) diff --git a/src/black/const.py b/src/black/const.py new file mode 100644 index 00000000000..0e13f31517d --- /dev/null +++ b/src/black/const.py @@ -0,0 +1,4 @@ +DEFAULT_LINE_LENGTH = 88 +DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|\.ipynb_checkpoints|_build|buck-out|build|dist|__pypackages__)/" # noqa: B950 +DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$" +STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__" diff --git a/src/black/debug.py b/src/black/debug.py new file mode 100644 index 00000000000..150b44842dd --- /dev/null +++ b/src/black/debug.py @@ -0,0 +1,47 @@ +from dataclasses import dataclass +from typing import Iterator, TypeVar, Union + +from black.nodes import Visitor +from black.output import out +from black.parsing import lib2to3_parse +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node, type_repr + +LN = Union[Leaf, Node] +T = TypeVar("T") + + +@dataclass +class DebugVisitor(Visitor[T]): + tree_depth: int = 0 + + def visit_default(self, node: LN) -> Iterator[T]: + indent = " " * (2 * self.tree_depth) + if isinstance(node, Node): + _type = type_repr(node.type) + out(f"{indent}{_type}", fg="yellow") + self.tree_depth += 1 + for child in node.children: + yield from self.visit(child) + + self.tree_depth -= 1 + out(f"{indent}/{_type}", fg="yellow", bold=False) + else: + _type = token.tok_name.get(node.type, str(node.type)) + out(f"{indent}{_type}", fg="blue", nl=False) + if node.prefix: + # We don't have to handle prefixes for `Node` objects since + # that delegates to the first child anyway. + out(f" {node.prefix!r}", fg="green", bold=False, nl=False) + out(f" {node.value!r}", fg="blue", bold=False) + + @classmethod + def show(cls, code: Union[str, Leaf, Node]) -> None: + """Pretty-print the lib2to3 AST of a given string of `code`. + + Convenience method for debugging. 
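As a usage sketch, not part of the patch, dumping the tree for a tiny assignment:

from black.debug import DebugVisitor

# Prints an indented, colorized rendition of the blib2to3 tree, roughly:
#   file_input
#     simple_stmt
#       expr_stmt
#         NAME 'x'
#         EQUAL '='
#         NUMBER '1'
DebugVisitor.show("x = 1\n")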
+ """ + v: DebugVisitor[None] = DebugVisitor() + if isinstance(code, str): + code = lib2to3_parse(code) + list(v.visit(code)) diff --git a/src/black/files.py b/src/black/files.py new file mode 100644 index 00000000000..ed503f5fec7 --- /dev/null +++ b/src/black/files.py @@ -0,0 +1,287 @@ +import io +import os +import sys +from functools import lru_cache +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Sequence, + Tuple, + Union, +) + +from mypy_extensions import mypyc_attr +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError + +if sys.version_info >= (3, 11): + try: + import tomllib + except ImportError: + # Help users on older alphas + if not TYPE_CHECKING: + import tomli as tomllib +else: + import tomli as tomllib + +from black.handle_ipynb_magics import jupyter_dependencies_are_installed +from black.output import err +from black.report import Report + +if TYPE_CHECKING: + import colorama # noqa: F401 + + +@lru_cache() +def find_project_root( + srcs: Sequence[str], stdin_filename: Optional[str] = None +) -> Tuple[Path, str]: + """Return a directory containing .git, .hg, or pyproject.toml. + + That directory will be a common parent of all files and directories + passed in `srcs`. + + If no directory in the tree contains a marker that would specify it's the + project root, the root of the file system is returned. + + Returns a two-tuple with the first element as the project root path and + the second element as a string describing the method by which the + project root was discovered. + """ + if stdin_filename is not None: + srcs = tuple(stdin_filename if s == "-" else s for s in srcs) + if not srcs: + srcs = [str(Path.cwd().resolve())] + + path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] + + # A list of lists of parents for each 'src'. 'src' is included as a + # "parent" of itself if it is a directory + src_parents = [ + list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs + ] + + common_base = max( + set.intersection(*(set(parents) for parents in src_parents)), + key=lambda path: path.parts, + ) + + for directory in (common_base, *common_base.parents): + if (directory / ".git").exists(): + return directory, ".git directory" + + if (directory / ".hg").is_dir(): + return directory, ".hg directory" + + if (directory / "pyproject.toml").is_file(): + return directory, "pyproject.toml" + + return directory, "file system root" + + +def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]: + """Find the absolute filepath to a pyproject.toml if it exists""" + path_project_root, _ = find_project_root(path_search_start) + path_pyproject_toml = path_project_root / "pyproject.toml" + if path_pyproject_toml.is_file(): + return str(path_pyproject_toml) + + try: + path_user_pyproject_toml = find_user_pyproject_toml() + return ( + str(path_user_pyproject_toml) + if path_user_pyproject_toml.is_file() + else None + ) + except (PermissionError, RuntimeError) as e: + # We do not have access to the user-level config directory, so ignore it. 
+ err(f"Ignoring user configuration directory due to {e!r}") + return None + + +@mypyc_attr(patchable=True) +def parse_pyproject_toml(path_config: str) -> Dict[str, Any]: + """Parse a pyproject toml file, pulling out relevant parts for Black + + If parsing fails, will raise a tomllib.TOMLDecodeError + """ + with open(path_config, "rb") as f: + pyproject_toml = tomllib.load(f) + config = pyproject_toml.get("tool", {}).get("black", {}) + return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} + + +@lru_cache() +def find_user_pyproject_toml() -> Path: + r"""Return the path to the top-level user configuration for black. + + This looks for ~\.black on Windows and ~/.config/black on Linux and other + Unix systems. + + May raise: + - RuntimeError: if the current user has no homedir + - PermissionError: if the current process cannot access the user's homedir + """ + if sys.platform == "win32": + # Windows + user_config_path = Path.home() / ".black" + else: + config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config") + user_config_path = Path(config_root).expanduser() / "black" + return user_config_path.resolve() + + +@lru_cache() +def get_gitignore(root: Path) -> PathSpec: + """Return a PathSpec matching gitignore content if present.""" + gitignore = root / ".gitignore" + lines: List[str] = [] + if gitignore.is_file(): + with gitignore.open(encoding="utf-8") as gf: + lines = gf.readlines() + try: + return PathSpec.from_lines("gitwildmatch", lines) + except GitWildMatchPatternError as e: + err(f"Could not parse {gitignore}: {e}") + raise + + +def normalize_path_maybe_ignore( + path: Path, + root: Path, + report: Optional[Report] = None, +) -> Optional[str]: + """Normalize `path`. May return `None` if `path` was ignored. + + `report` is where "path ignored" output goes. + """ + try: + abspath = path if path.is_absolute() else Path.cwd() / path + normalized_path = abspath.resolve() + try: + root_relative_path = normalized_path.relative_to(root).as_posix() + except ValueError: + if report: + report.path_ignored( + path, f"is a symbolic link that points outside {root}" + ) + return None + + except OSError as e: + if report: + report.path_ignored(path, f"cannot be read because {e}") + return None + + return root_relative_path + + +def path_is_excluded( + normalized_path: str, + pattern: Optional[Pattern[str]], +) -> bool: + match = pattern.search(normalized_path) if pattern else None + return bool(match and match.group(0)) + + +def gen_python_files( + paths: Iterable[Path], + root: Path, + include: Pattern[str], + exclude: Pattern[str], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + report: Report, + gitignore: Optional[PathSpec], + *, + verbose: bool, + quiet: bool, +) -> Iterator[Path]: + """Generate all files under `path` whose paths are not excluded by the + `exclude_regex`, `extend_exclude`, or `force_exclude` regexes, + but are included by the `include` regex. + + Symbolic links pointing outside of the `root` directory are ignored. + + `report` is where output about exclusions goes. 
+ """ + assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" + for child in paths: + normalized_path = normalize_path_maybe_ignore(child, root, report) + if normalized_path is None: + continue + + # First ignore files matching .gitignore, if passed + if gitignore is not None and gitignore.match_file(normalized_path): + report.path_ignored(child, "matches the .gitignore file content") + continue + + # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options. + normalized_path = "/" + normalized_path + if child.is_dir(): + normalized_path += "/" + + if path_is_excluded(normalized_path, exclude): + report.path_ignored(child, "matches the --exclude regular expression") + continue + + if path_is_excluded(normalized_path, extend_exclude): + report.path_ignored( + child, "matches the --extend-exclude regular expression" + ) + continue + + if path_is_excluded(normalized_path, force_exclude): + report.path_ignored(child, "matches the --force-exclude regular expression") + continue + + if child.is_dir(): + # If gitignore is None, gitignore usage is disabled, while a Falsey + # gitignore is when the directory doesn't have a .gitignore file. + yield from gen_python_files( + child.iterdir(), + root, + include, + exclude, + extend_exclude, + force_exclude, + report, + gitignore + get_gitignore(child) if gitignore is not None else None, + verbose=verbose, + quiet=quiet, + ) + + elif child.is_file(): + if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + include_match = include.search(normalized_path) if include else True + if include_match: + yield child + + +def wrap_stream_for_windows( + f: io.TextIOWrapper, +) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]: + """ + Wrap stream with colorama's wrap_stream so colors are shown on Windows. + + If `colorama` is unavailable, the original stream is returned unmodified. + Otherwise, the `wrap_stream()` function determines whether the stream needs + to be wrapped for a Windows environment and will accordingly either return + an `AnsiToWin32` wrapper or the original stream. + """ + try: + from colorama.initialise import wrap_stream + except ImportError: + return f + else: + # Set `strip=False` to avoid needing to modify test_express_diff_with_color. 
+ return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True) diff --git a/src/black/handle_ipynb_magics.py b/src/black/handle_ipynb_magics.py new file mode 100644 index 00000000000..693f1a68bd4 --- /dev/null +++ b/src/black/handle_ipynb_magics.py @@ -0,0 +1,459 @@ +"""Functions to process IPython magics with.""" + +import ast +import collections +import dataclasses +import secrets +import sys +from functools import lru_cache +from typing import Dict, List, Optional, Tuple + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from black.output import out +from black.report import NothingChanged + +TRANSFORMED_MAGICS = frozenset( + ( + "get_ipython().run_cell_magic", + "get_ipython().system", + "get_ipython().getoutput", + "get_ipython().run_line_magic", + ) +) +TOKENS_TO_IGNORE = frozenset( + ( + "ENDMARKER", + "NL", + "NEWLINE", + "COMMENT", + "DEDENT", + "UNIMPORTANT_WS", + "ESCAPED_NL", + ) +) +PYTHON_CELL_MAGICS = frozenset( + ( + "capture", + "prun", + "pypy", + "python", + "python3", + "time", + "timeit", + ) +) +TOKEN_HEX = secrets.token_hex + + +@dataclasses.dataclass(frozen=True) +class Replacement: + mask: str + src: str + + +@lru_cache() +def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool: + try: + import IPython # noqa:F401 + import tokenize_rt # noqa:F401 + except ModuleNotFoundError: + if verbose or not quiet: + msg = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + "You can fix this by running ``pip install black[jupyter]``" + ) + out(msg) + return False + else: + return True + + +def remove_trailing_semicolon(src: str) -> Tuple[str, bool]: + """Remove trailing semicolon from Jupyter notebook cell. + + For example, + + fig, ax = plt.subplots() + ax.plot(x_data, y_data); # plot data + + would become + + fig, ax = plt.subplots() + ax.plot(x_data, y_data) # plot data + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + trailing_semicolon = False + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + if token.name == "OP" and token.src == ";": + del tokens[idx] + trailing_semicolon = True + break + if not trailing_semicolon: + return src, False + return tokens_to_src(tokens), True + + +def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str: + """Put trailing semicolon back if cell originally had it. + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + if not has_trailing_semicolon: + return src + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + tokens[idx] = token._replace(src=token.src + ";") + break + else: # pragma: nocover + raise AssertionError( + "INTERNAL ERROR: Was not able to reinstate trailing semicolon. " + "Please report a bug on https://github.com/psf/black/issues. " + ) from None + return str(tokens_to_src(tokens)) + + +def mask_cell(src: str) -> Tuple[str, List[Replacement]]: + """Mask IPython magics so content becomes parseable Python code. 
+ + For example, + + %matplotlib inline + 'foo' + + becomes + + "25716f358c32750e" + 'foo' + + The replacements are returned, along with the transformed code. + """ + replacements: List[Replacement] = [] + try: + ast.parse(src) + except SyntaxError: + # Might have IPython magics, will process below. + pass + else: + # Syntax is fine, nothing to mask, early return. + return src, replacements + + from IPython.core.inputtransformer2 import TransformerManager + + transformer_manager = TransformerManager() + transformed = transformer_manager.transform_cell(src) + transformed, cell_magic_replacements = replace_cell_magics(transformed) + replacements += cell_magic_replacements + transformed = transformer_manager.transform_cell(transformed) + transformed, magic_replacements = replace_magics(transformed) + if len(transformed.splitlines()) != len(src.splitlines()): + # Multi-line magic, not supported. + raise NothingChanged + replacements += magic_replacements + return transformed, replacements + + +def get_token(src: str, magic: str) -> str: + """Return randomly generated token to mask IPython magic with. + + For example, if 'magic' was `%matplotlib inline`, then a possible + token to mask it with would be `"43fdd17f7e5ddc83"`. The token + will be the same length as the magic, and we make sure that it was + not already present anywhere else in the cell. + """ + assert magic + nbytes = max(len(magic) // 2 - 1, 1) + token = TOKEN_HEX(nbytes) + counter = 0 + while token in src: + token = TOKEN_HEX(nbytes) + counter += 1 + if counter > 100: + raise AssertionError( + "INTERNAL ERROR: Black was not able to replace IPython magic. " + "Please report a bug on https://github.com/psf/black/issues. " + f"The magic might be helpful: {magic}" + ) from None + if len(token) + 2 < len(magic): + token = f"{token}." + return f'"{token}"' + + +def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace cell magic with token. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, + + get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n') + + becomes + + "a794." + ls =!ls + + The replacement, along with the transformed code, is returned. + """ + replacements: List[Replacement] = [] + + tree = ast.parse(src) + + cell_magic_finder = CellMagicFinder() + cell_magic_finder.visit(tree) + if cell_magic_finder.cell_magic is None: + return src, replacements + header = cell_magic_finder.cell_magic.header + mask = get_token(src, header) + replacements.append(Replacement(mask=mask, src=header)) + return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements + + +def replace_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace magics within body of cell. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, this + + get_ipython().run_line_magic('matplotlib', 'inline') + 'foo' + + becomes + + "5e67db56d490fd39" + 'foo' + + The replacement, along with the transformed code, are returned. + """ + replacements = [] + magic_finder = MagicFinder() + magic_finder.visit(ast.parse(src)) + new_srcs = [] + for i, line in enumerate(src.splitlines(), start=1): + if i in magic_finder.magics: + offsets_and_magics = magic_finder.magics[i] + if len(offsets_and_magics) != 1: # pragma: nocover + raise AssertionError( + f"Expecting one magic per line, got: {offsets_and_magics}\n" + "Please report a bug on https://github.com/psf/black/issues." 
+ ) + col_offset, magic = ( + offsets_and_magics[0].col_offset, + offsets_and_magics[0].magic, + ) + mask = get_token(src, magic) + replacements.append(Replacement(mask=mask, src=magic)) + line = line[:col_offset] + mask + new_srcs.append(line) + return "\n".join(new_srcs), replacements + + +def unmask_cell(src: str, replacements: List[Replacement]) -> str: + """Remove replacements from cell. + + For example + + "9b20" + foo = bar + + becomes + + %%time + foo = bar + """ + for replacement in replacements: + src = src.replace(replacement.mask, replacement.src) + return src + + +def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]: + """Check if attribute is IPython magic. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + """ + return ( + isinstance(node, ast.Attribute) + and isinstance(node.value, ast.Call) + and isinstance(node.value.func, ast.Name) + and node.value.func.id == "get_ipython" + ) + + +def _get_str_args(args: List[ast.expr]) -> List[str]: + str_args = [] + for arg in args: + assert isinstance(arg, ast.Str) + str_args.append(arg.s) + return str_args + + +@dataclasses.dataclass(frozen=True) +class CellMagic: + name: str + params: Optional[str] + body: str + + @property + def header(self) -> str: + if self.params: + return f"%%{self.name} {self.params}" + return f"%%{self.name}" + + +# ast.NodeVisitor + dataclass = breakage under mypyc. +class CellMagicFinder(ast.NodeVisitor): + """Find cell magics. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %%time\nfoo() + + would have been transformed to + + get_ipython().run_cell_magic('time', '', 'foo()\\n') + + and we look for instances of the latter. + """ + + def __init__(self, cell_magic: Optional[CellMagic] = None) -> None: + self.cell_magic = cell_magic + + def visit_Expr(self, node: ast.Expr) -> None: + """Find cell magic, extract header and body.""" + if ( + isinstance(node.value, ast.Call) + and _is_ipython_magic(node.value.func) + and node.value.func.attr == "run_cell_magic" + ): + args = _get_str_args(node.value.args) + self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2]) + self.generic_visit(node) + + +@dataclasses.dataclass(frozen=True) +class OffsetAndMagic: + col_offset: int + magic: str + + +# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here +# as mypyc will generate broken code. +class MagicFinder(ast.NodeVisitor): + """Visit cell to look for get_ipython calls. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %matplotlib inline + + would have been transformed to + + get_ipython().run_line_magic('matplotlib', 'inline') + + and we look for instances of the latter (and likewise for other + types of magics). + """ + + def __init__(self) -> None: + self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list) + + def visit_Assign(self, node: ast.Assign) -> None: + """Look for system assign magics. + + For example, + + black_version = !black --version + env = %env var + + would have been (respectively) transformed to + + black_version = get_ipython().getoutput('black --version') + env = get_ipython().run_line_magic('env', 'var') + + and we look for instances of any of the latter. 
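A small illustrative run, not part of the patch, feeding the visitor source that IPython has already transformed:

import ast

from black.handle_ipynb_magics import MagicFinder

finder = MagicFinder()
finder.visit(ast.parse("env = get_ipython().run_line_magic('env', 'var')\n"))
# Maps line numbers to the magics found there, roughly:
#   {1: [OffsetAndMagic(col_offset=6, magic='%env var')]}
print(finder.magics)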
+ """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "getoutput": + src = f"!{args[0]}" + elif node.value.func.attr == "run_line_magic": + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + else: + raise AssertionError( + f"Unexpected IPython magic {node.value.func.attr!r} found. " + "Please report a bug on https://github.com/psf/black/issues." + ) from None + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) + + def visit_Expr(self, node: ast.Expr) -> None: + """Look for magics in body of cell. + + For examples, + + !ls + !!ls + ?ls + ??ls + + would (respectively) get transformed to + + get_ipython().system('ls') + get_ipython().getoutput('ls') + get_ipython().run_line_magic('pinfo', 'ls') + get_ipython().run_line_magic('pinfo2', 'ls') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "run_line_magic": + if args[0] == "pinfo": + src = f"?{args[1]}" + elif args[0] == "pinfo2": + src = f"??{args[1]}" + else: + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + elif node.value.func.attr == "system": + src = f"!{args[0]}" + elif node.value.func.attr == "getoutput": + src = f"!!{args[0]}" + else: + raise NothingChanged # unsupported magic. + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) diff --git a/src/black/linegen.py b/src/black/linegen.py new file mode 100644 index 00000000000..a2e41bf5912 --- /dev/null +++ b/src/black/linegen.py @@ -0,0 +1,1294 @@ +""" +Generating lines of code. +""" +import sys +from functools import partial, wraps +from typing import Collection, Iterator, List, Optional, Set, Union, cast + +from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, max_delimiter_priority_in_atom +from black.comments import FMT_OFF, generate_comments, list_comments +from black.lines import ( + Line, + append_leaves, + can_be_split, + can_omit_invisible_parens, + is_line_short_enough, + line_to_string, +) +from black.mode import Feature, Mode, Preview +from black.nodes import ( + ASSIGNMENTS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + RARROW, + STANDALONE_COMMENT, + STATEMENT, + WHITESPACE, + Visitor, + ensure_visible, + is_arith_like, + is_atom_with_invisible_parens, + is_docstring, + is_empty_tuple, + is_lpar_token, + is_multiline_string, + is_name_token, + is_one_sequence_between, + is_one_tuple, + is_rpar_token, + is_stub_body, + is_stub_suite, + is_vararg, + is_walrus_assignment, + is_yield, + syms, + wrap_in_parentheses, +) +from black.numerics import normalize_numeric_literal +from black.strings import ( + fix_docstring, + get_string_prefix, + normalize_string_prefix, + normalize_string_quotes, +) +from black.trans import ( + CannotTransform, + StringMerger, + StringParenStripper, + StringParenWrapper, + StringSplitter, + Transformer, + hug_power_op, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LeafID = int +LN = Union[Leaf, Node] + + +class CannotSplit(CannotTransform): + """A readable split that fits the allotted line length is impossible.""" + + +# This isn't a dataclass because @dataclass + Generic breaks mypyc. +# See also https://github.com/mypyc/mypyc/issues/827. 
+class LineGenerator(Visitor[Line]): + """Generates reformatted Line objects. Empty lines are not emitted. + + Note: destroys the tree it's visiting by mutating prefixes of its leaves + in ways that will no longer stringify to valid Python code on the tree. + """ + + def __init__(self, mode: Mode) -> None: + self.mode = mode + self.current_line: Line + self.__post_init__() + + def line(self, indent: int = 0) -> Iterator[Line]: + """Generate a line. + + If the line is empty, only emit if it makes sense. + If the line is too long, split it first and then generate. + + If any lines were generated, set up a new current_line. + """ + if not self.current_line: + self.current_line.depth += indent + return # Line is empty, don't emit. Creating a new one unnecessary. + + complete_line = self.current_line + self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent) + yield complete_line + + def visit_default(self, node: LN) -> Iterator[Line]: + """Default `visit_*()` implementation. Recurses to children of `node`.""" + if isinstance(node, Leaf): + any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() + for comment in generate_comments(node, preview=self.mode.preview): + if any_open_brackets: + # any comment within brackets is subject to splitting + self.current_line.append(comment) + elif comment.type == token.COMMENT: + # regular trailing comment + self.current_line.append(comment) + yield from self.line() + + else: + # regular standalone comment + yield from self.line() + + self.current_line.append(comment) + yield from self.line() + + normalize_prefix(node, inside_brackets=any_open_brackets) + if self.mode.string_normalization and node.type == token.STRING: + node.value = normalize_string_prefix(node.value) + node.value = normalize_string_quotes(node.value) + if node.type == token.NUMBER: + normalize_numeric_literal(node) + if node.type not in WHITESPACE: + self.current_line.append(node) + yield from super().visit_default(node) + + def visit_INDENT(self, node: Leaf) -> Iterator[Line]: + """Increase indentation level, maybe yield a line.""" + # In blib2to3 INDENT never holds comments. + yield from self.line(+1) + yield from self.visit_default(node) + + def visit_DEDENT(self, node: Leaf) -> Iterator[Line]: + """Decrease indentation level, maybe yield a line.""" + # The current line might still wait for trailing comments. At DEDENT time + # there won't be any (they would be prefixes on the preceding NEWLINE). + # Emit the line then. + yield from self.line() + + # While DEDENT has no value, its prefix may contain standalone comments + # that belong to the current indentation level. Get 'em. + yield from self.visit_default(node) + + # Finally, emit the dedent. + yield from self.line(-1) + + def visit_stmt( + self, node: Node, keywords: Set[str], parens: Set[str] + ) -> Iterator[Line]: + """Visit a statement. + + This implementation is shared for `if`, `while`, `for`, `try`, `except`, + `def`, `with`, `class`, `assert`, and assignments. + + The relevant Python language `keywords` for a given statement will be + NAME leaves within it. This methods puts those on a separate line. + + `parens` holds a set of string leaf values immediately after which + invisible parens should be put. 
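The effect of those invisible parentheses is easiest to see on a long `return`; a sketch, not from the patch, with output as the default 88-column mode would roughly produce it:

import black

src = (
    "def f():\n"
    "    return this_name_is_long and another_long_name"
    " or yet_another_name and a_final_long_name\n"
)
print(black.format_str(src, mode=black.Mode()))
# def f():
#     return (
#         this_name_is_long
#         and another_long_name
#         or yet_another_name
#         and a_final_long_name
#     )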
+ """ + normalize_invisible_parens(node, parens_after=parens, preview=self.mode.preview) + for child in node.children: + if is_name_token(child) and child.value in keywords: + yield from self.line() + + yield from self.visit(child) + + def visit_funcdef(self, node: Node) -> Iterator[Line]: + """Visit function definition.""" + if Preview.annotation_parens not in self.mode: + yield from self.visit_stmt(node, keywords={"def"}, parens=set()) + else: + yield from self.line() + + # Remove redundant brackets around return type annotation. + is_return_annotation = False + for child in node.children: + if child.type == token.RARROW: + is_return_annotation = True + elif is_return_annotation: + if child.type == syms.atom and child.children[0].type == token.LPAR: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + remove_brackets_around_comma=False, + ): + wrap_in_parentheses(node, child, visible=False) + else: + wrap_in_parentheses(node, child, visible=False) + is_return_annotation = False + + for child in node.children: + yield from self.visit(child) + + def visit_match_case(self, node: Node) -> Iterator[Line]: + """Visit either a match or case statement.""" + normalize_invisible_parens(node, parens_after=set(), preview=self.mode.preview) + + yield from self.line() + for child in node.children: + yield from self.visit(child) + + def visit_suite(self, node: Node) -> Iterator[Line]: + """Visit a suite.""" + if self.mode.is_pyi and is_stub_suite(node): + yield from self.visit(node.children[2]) + else: + yield from self.visit_default(node) + + def visit_simple_stmt(self, node: Node) -> Iterator[Line]: + """Visit a statement without nested statements.""" + prev_type: Optional[int] = None + for child in node.children: + if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child): + wrap_in_parentheses(node, child, visible=False) + prev_type = child.type + + is_suite_like = node.parent and node.parent.type in STATEMENT + if is_suite_like: + if self.mode.is_pyi and is_stub_body(node): + yield from self.visit_default(node) + else: + yield from self.line(+1) + yield from self.visit_default(node) + yield from self.line(-1) + + else: + if ( + not self.mode.is_pyi + or not node.parent + or not is_stub_suite(node.parent) + ): + yield from self.line() + yield from self.visit_default(node) + + def visit_async_stmt(self, node: Node) -> Iterator[Line]: + """Visit `async def`, `async for`, `async with`.""" + yield from self.line() + + children = iter(node.children) + for child in children: + yield from self.visit(child) + + if child.type == token.ASYNC or child.type == STANDALONE_COMMENT: + # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async + # line. 
+ break + + internal_stmt = next(children) + for child in internal_stmt.children: + yield from self.visit(child) + + def visit_decorators(self, node: Node) -> Iterator[Line]: + """Visit decorators.""" + for child in node.children: + yield from self.line() + yield from self.visit(child) + + def visit_power(self, node: Node) -> Iterator[Line]: + for idx, leaf in enumerate(node.children[:-1]): + next_leaf = node.children[idx + 1] + + if not isinstance(leaf, Leaf): + continue + + value = leaf.value.lower() + if ( + leaf.type == token.NUMBER + and next_leaf.type == syms.trailer + # Ensure that we are in an attribute trailer + and next_leaf.children[0].type == token.DOT + # It shouldn't wrap hexadecimal, binary and octal literals + and not value.startswith(("0x", "0b", "0o")) + # It shouldn't wrap complex literals + and "j" not in value + ): + wrap_in_parentheses(node, leaf) + + if Preview.remove_redundant_parens in self.mode: + remove_await_parens(node) + + yield from self.visit_default(node) + + def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: + """Remove a semicolon and put the other statement on a separate line.""" + yield from self.line() + + def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: + """End of file. Process outstanding comments and end with a newline.""" + yield from self.visit_default(leaf) + yield from self.line() + + def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: + if not self.current_line.bracket_tracker.any_open_brackets(): + yield from self.line() + yield from self.visit_default(leaf) + + def visit_factor(self, node: Node) -> Iterator[Line]: + """Force parentheses between a unary op and a binary power: + + -2 ** 8 -> -(2 ** 8) + """ + _operator, operand = node.children + if ( + operand.type == syms.power + and len(operand.children) == 3 + and operand.children[1].type == token.DOUBLESTAR + ): + lpar = Leaf(token.LPAR, "(") + rpar = Leaf(token.RPAR, ")") + index = operand.remove() or 0 + node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) + yield from self.visit_default(node) + + def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: + if is_docstring(leaf) and "\\\n" not in leaf.value: + # We're ignoring docstrings with backslash newline escapes because changing + # indentation of those changes the AST representation of the code. + if Preview.normalize_docstring_quotes_and_prefixes_properly in self.mode: + # There was a bug where --skip-string-normalization wouldn't stop us + # from normalizing docstring prefixes. To maintain stability, we can + # only address this buggy behaviour while the preview style is enabled. + if self.mode.string_normalization: + docstring = normalize_string_prefix(leaf.value) + # visit_default() does handle string normalization for us, but + # since this method acts differently depending on quote style (ex. + # see padding logic below), there's a possibility for unstable + # formatting as visit_default() is called *after*. To avoid a + # situation where this function formats a docstring differently on + # the second pass, normalize it early. + docstring = normalize_string_quotes(docstring) + else: + docstring = leaf.value + else: + # ... otherwise, we'll keep the buggy behaviour >.< + docstring = normalize_string_prefix(leaf.value) + prefix = get_string_prefix(docstring) + docstring = docstring[len(prefix) :] # Remove the prefix + quote_char = docstring[0] + # A natural way to remove the outer quotes is to do: + # docstring = docstring.strip(quote_char) + # but that breaks on """""x""" (which is '""x'). 
+ # So we actually need to remove the first character and the next two + # characters but only if they are the same as the first. + quote_len = 1 if docstring[1] != quote_char else 3 + docstring = docstring[quote_len:-quote_len] + docstring_started_empty = not docstring + indent = " " * 4 * self.current_line.depth + + if is_multiline_string(leaf): + docstring = fix_docstring(docstring, indent) + else: + docstring = docstring.strip() + + if docstring: + # Add some padding if the docstring starts / ends with a quote mark. + if docstring[0] == quote_char: + docstring = " " + docstring + if docstring[-1] == quote_char: + docstring += " " + if docstring[-1] == "\\": + backslash_count = len(docstring) - len(docstring.rstrip("\\")) + if backslash_count % 2: + # Odd number of tailing backslashes, add some padding to + # avoid escaping the closing string quote. + docstring += " " + elif not docstring_started_empty: + docstring = " " + + # We could enforce triple quotes at this point. + quote = quote_char * quote_len + + # It's invalid to put closing single-character quotes on a new line. + if Preview.long_docstring_quotes_on_newline in self.mode and quote_len == 3: + # We need to find the length of the last line of the docstring + # to find if we can add the closing quotes to the line without + # exceeding the maximum line length. + # If docstring is one line, then we need to add the length + # of the indent, prefix, and starting quotes. Ending quotes are + # handled later. + lines = docstring.splitlines() + last_line_length = len(lines[-1]) if docstring else 0 + + if len(lines) == 1: + last_line_length += len(indent) + len(prefix) + quote_len + + # If adding closing quotes would cause the last line to exceed + # the maximum line length then put a line break before the + # closing quotes + if last_line_length + quote_len > self.mode.line_length: + leaf.value = prefix + quote + docstring + "\n" + indent + quote + else: + leaf.value = prefix + quote + docstring + quote + else: + leaf.value = prefix + quote + docstring + quote + + yield from self.visit_default(leaf) + + def __post_init__(self) -> None: + """You are in a twisty little maze of passages.""" + self.current_line = Line(mode=self.mode) + + v = self.visit_stmt + Ø: Set[str] = set() + self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) + self.visit_if_stmt = partial( + v, keywords={"if", "else", "elif"}, parens={"if", "elif"} + ) + self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) + self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) + self.visit_try_stmt = partial( + v, keywords={"try", "except", "else", "finally"}, parens=Ø + ) + if self.mode.preview: + self.visit_except_clause = partial( + v, keywords={"except"}, parens={"except"} + ) + self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"}) + else: + self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) + self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) + self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) + self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) + self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) + self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) + self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) + self.visit_async_funcdef = self.visit_async_stmt + self.visit_decorated = self.visit_decorators + + # PEP 634 + self.visit_match_stmt = self.visit_match_case + 
self.visit_case_block = self.visit_match_case + + +def transform_line( + line: Line, mode: Mode, features: Collection[Feature] = () +) -> Iterator[Line]: + """Transform a `line`, potentially splitting it into many lines. + + They should fit in the allotted `line_length` but might not be able to. + + `features` are syntactical features that may be used in the output. + """ + if line.is_comment: + yield line + return + + line_str = line_to_string(line) + + ll = mode.line_length + sn = mode.string_normalization + string_merge = StringMerger(ll, sn) + string_paren_strip = StringParenStripper(ll, sn) + string_split = StringSplitter(ll, sn) + string_paren_wrap = StringParenWrapper(ll, sn) + + transformers: List[Transformer] + if ( + not line.contains_uncollapsable_type_comments() + and not line.should_split_rhs + and not line.magic_trailing_comma + and ( + is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) + or line.contains_unsplittable_type_ignore() + ) + and not (line.inside_brackets and line.contains_standalone_comments()) + ): + # Only apply basic string preprocessing, since lines shouldn't be split here. + if Preview.string_processing in mode: + transformers = [string_merge, string_paren_strip] + else: + transformers = [] + elif line.is_def: + transformers = [left_hand_split] + else: + + def _rhs( + self: object, line: Line, features: Collection[Feature] + ) -> Iterator[Line]: + """Wraps calls to `right_hand_split`. + + The calls increasingly `omit` right-hand trailers (bracket pairs with + content), meaning the trailers get glued together to split on another + bracket pair instead. + """ + for omit in generate_trailers_to_omit(line, mode.line_length): + lines = list( + right_hand_split(line, mode.line_length, features, omit=omit) + ) + # Note: this check is only able to figure out if the first line of the + # *current* transformation fits in the line length. This is true only + # for simple cases. All others require running more transforms via + # `transform_line()`. This check doesn't know if those would succeed. + if is_line_short_enough(lines[0], line_length=mode.line_length): + yield from lines + return + + # All splits failed, best effort split with no omits. + # This mostly happens to multiline strings that are by definition + # reported as not fitting a single line, as well as lines that contain + # trailing commas (those have to be exploded). + yield from right_hand_split( + line, line_length=mode.line_length, features=features + ) + + # HACK: nested functions (like _rhs) compiled by mypyc don't retain their + # __name__ attribute which is needed in `run_transformer` further down. + # Unfortunately a nested class breaks mypyc too. So a class must be created + # via type ... https://github.com/mypyc/mypyc/issues/884 + rhs = type("rhs", (), {"__call__": _rhs})() + + if Preview.string_processing in mode: + if line.inside_brackets: + transformers = [ + string_merge, + string_paren_strip, + string_split, + delimiter_split, + standalone_comment_split, + string_paren_wrap, + rhs, + ] + else: + transformers = [ + string_merge, + string_paren_strip, + string_split, + string_paren_wrap, + rhs, + ] + else: + if line.inside_brackets: + transformers = [delimiter_split, standalone_comment_split, rhs] + else: + transformers = [rhs] + # It's always safe to attempt hugging of power operations and pretty much every line + # could match. 
+ transformers.append(hug_power_op) + + for transform in transformers: + # We are accumulating lines in `result` because we might want to abort + # mission and return the original line in the end, or attempt a different + # split altogether. + try: + result = run_transformer(line, transform, mode, features, line_str=line_str) + except CannotTransform: + continue + else: + yield from result + break + + else: + yield line + + +def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]: + """Split line into many lines, starting with the first matching bracket pair. + + Note: this usually looks weird, only use this for function definitions. + Prefer RHS otherwise. This is why this function is not symmetrical with + :func:`right_hand_split` which also handles optional parentheses. + """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = head_leaves + matching_bracket: Optional[Leaf] = None + for leaf in line.leaves: + if ( + current_leaves is body_leaves + and leaf.type in CLOSING_BRACKETS + and leaf.opening_bracket is matching_bracket + and isinstance(matching_bracket, Leaf) + ): + ensure_visible(leaf) + ensure_visible(matching_bracket) + current_leaves = tail_leaves if body_leaves else head_leaves + current_leaves.append(leaf) + if current_leaves is head_leaves: + if leaf.type in OPENING_BRACKETS: + matching_bracket = leaf + current_leaves = body_leaves + if not matching_bracket: + raise CannotSplit("No brackets found") + + head = bracket_split_build_line(head_leaves, line, matching_bracket) + body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True) + tail = bracket_split_build_line(tail_leaves, line, matching_bracket) + bracket_split_succeeded_or_raise(head, body, tail) + for result in (head, body, tail): + if result: + yield result + + +def right_hand_split( + line: Line, + line_length: int, + features: Collection[Feature] = (), + omit: Collection[LeafID] = (), +) -> Iterator[Line]: + """Split line into many lines, starting with the last matching bracket pair. + + If the split was by optional parentheses, attempt splitting without them, too. + `omit` is a collection of closing bracket IDs that shouldn't be considered for + this split. + + Note: running this function modifies `bracket_depth` on the leaves of `line`. + """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = tail_leaves + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + for leaf in reversed(line.leaves): + if current_leaves is body_leaves: + if leaf is opening_bracket: + current_leaves = head_leaves if body_leaves else tail_leaves + current_leaves.append(leaf) + if current_leaves is tail_leaves: + if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + current_leaves = body_leaves + if not (opening_bracket and closing_bracket and head_leaves): + # If there is no opening or closing_bracket that means the split failed and + # all content is in the tail. Otherwise, if `head_leaves` are empty, it means + # the matching `opening_bracket` wasn't available on `line` anymore. 
+ raise CannotSplit("No brackets found") + + tail_leaves.reverse() + body_leaves.reverse() + head_leaves.reverse() + head = bracket_split_build_line(head_leaves, line, opening_bracket) + body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True) + tail = bracket_split_build_line(tail_leaves, line, opening_bracket) + bracket_split_succeeded_or_raise(head, body, tail) + if ( + Feature.FORCE_OPTIONAL_PARENTHESES not in features + # the opening bracket is an optional paren + and opening_bracket.type == token.LPAR + and not opening_bracket.value + # the closing bracket is an optional paren + and closing_bracket.type == token.RPAR + and not closing_bracket.value + # it's not an import (optional parens are the only thing we can split on + # in this case; attempting a split without them is a waste of time) + and not line.is_import + # there are no standalone comments in the body + and not body.contains_standalone_comments(0) + # and we can actually remove the parens + and can_omit_invisible_parens(body, line_length) + ): + omit = {id(closing_bracket), *omit} + try: + yield from right_hand_split(line, line_length, features=features, omit=omit) + return + + except CannotSplit as e: + if not ( + can_be_split(body) + or is_line_short_enough(body, line_length=line_length) + ): + raise CannotSplit( + "Splitting failed, body is still too long and can't be split." + ) from e + + elif head.contains_multiline_strings() or tail.contains_multiline_strings(): + raise CannotSplit( + "The current optional pair of parentheses is bound to fail to" + " satisfy the splitting algorithm because the head or the tail" + " contains multiline strings which by definition never fit one" + " line." + ) from e + + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + for result in (head, body, tail): + if result: + yield result + + +def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: + """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. + + Do nothing otherwise. + + A left- or right-hand split is based on a pair of brackets. Content before + (and including) the opening bracket is left on one line, content inside the + brackets is put on a separate line, and finally content starting with and + following the closing bracket is put on a separate line. + + Those are called `head`, `body`, and `tail`, respectively. If the split + produced the same line (all content in `head`) or ended up with an empty `body` + and the `tail` is just the closing bracket, then it's considered failed. + """ + tail_len = len(str(tail).strip()) + if not body: + if tail_len == 0: + raise CannotSplit("Splitting brackets produced the same line") + + elif tail_len < 3: + raise CannotSplit( + f"Splitting brackets on an empty body to save {tail_len} characters is" + " not worth it" + ) + + +def bracket_split_build_line( + leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False +) -> Line: + """Return a new line with given `leaves` and respective comments from `original`. + + If `is_body` is True, the result line is one-indented inside brackets and as such + has its first leaf's prefix normalized and a trailing comma added when expected. + """ + result = Line(mode=original.mode, depth=original.depth) + if is_body: + result.inside_brackets = True + result.depth += 1 + if leaves: + # Since body is a new indent level, remove spurious leading whitespace. 
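+            # For example, a first leaf carrying a prefix such as "\n    " has
+            # it cleared here; indentation is re-rendered from `result.depth`.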
+            normalize_prefix(leaves[0], inside_brackets=True)
+            # Ensure a trailing comma for imports and standalone function arguments, but
+            # be careful not to add one after any comments or within type annotations.
+            no_commas = (
+                original.is_def
+                and opening_bracket.value == "("
+                and not any(leaf.type == token.COMMA for leaf in leaves)
+                # In particular, don't add one within a parenthesized return annotation.
+                # Unfortunately the indicator we're in a return annotation (RARROW) may
+                # be defined directly in the parent node, the parent of the parent ...
+                # and so on depending on how complex the return annotation is.
+                # This isn't perfect and there are some false negatives, but they are in
+                # contexts where a comma is actually fine.
+                and not any(
+                    node.prev_sibling.type == RARROW
+                    for node in (
+                        leaves[0].parent,
+                        getattr(leaves[0].parent, "parent", None),
+                    )
+                    if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf)
+                )
+            )
+
+            if original.is_import or no_commas:
+                for i in range(len(leaves) - 1, -1, -1):
+                    if leaves[i].type == STANDALONE_COMMENT:
+                        continue
+
+                    if leaves[i].type != token.COMMA:
+                        new_comma = Leaf(token.COMMA, ",")
+                        leaves.insert(i + 1, new_comma)
+                    break
+
+    # Populate the line
+    for leaf in leaves:
+        result.append(leaf, preformatted=True)
+        for comment_after in original.comments_after(leaf):
+            result.append(comment_after, preformatted=True)
+    if is_body and should_split_line(result, opening_bracket):
+        result.should_split_rhs = True
+    return result
+
+
+def dont_increase_indentation(split_func: Transformer) -> Transformer:
+    """Normalize prefix of the first leaf in every line returned by `split_func`.
+
+    This is a decorator over relevant split functions.
+    """
+
+    @wraps(split_func)
+    def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+        for split_line in split_func(line, features):
+            normalize_prefix(split_line.leaves[0], inside_brackets=True)
+            yield split_line
+
+    return split_wrapper
+
+
+@dont_increase_indentation
+def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+    """Split according to delimiters of the highest priority.
+
+    If the appropriate Features are given, the split will also add trailing
+    commas in function signatures and calls that contain `*` and `**`.
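+
+    For example, when COMMA is the highest-priority delimiter, a call like
+    `f(a, b, c)` is split so that each argument ends up on its own line
+    (an illustrative sketch of the common case).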
+ """ + try: + last_leaf = line.leaves[-1] + except IndexError: + raise CannotSplit("Line empty") from None + + bt = line.bracket_tracker + try: + delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) + except ValueError: + raise CannotSplit("No delimiters found") from None + + if delimiter_priority == DOT_PRIORITY: + if bt.delimiter_count_with_priority(delimiter_priority) == 1: + raise CannotSplit("Splitting a single attribute from its owner looks wrong") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + lowest_depth = sys.maxsize + trailing_comma_safe = True + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + lowest_depth = min(lowest_depth, leaf.bracket_depth) + if leaf.bracket_depth == lowest_depth: + if is_vararg(leaf, within={syms.typedargslist}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features + ) + elif is_vararg(leaf, within={syms.arglist, syms.argument}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features + ) + + leaf_priority = bt.delimiters.get(id(leaf)) + if leaf_priority == delimiter_priority: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + if current_line: + if ( + trailing_comma_safe + and delimiter_priority == COMMA_PRIORITY + and current_line.leaves[-1].type != token.COMMA + and current_line.leaves[-1].type != STANDALONE_COMMENT + ): + new_comma = Leaf(token.COMMA, ",") + current_line.append(new_comma) + yield current_line + + +@dont_increase_indentation +def standalone_comment_split( + line: Line, features: Collection[Feature] = () +) -> Iterator[Line]: + """Split standalone comments from the rest of the line.""" + if not line.contains_standalone_comments(0): + raise CannotSplit("Line does not have any standalone comments") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + if current_line: + yield current_line + + +def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: + """Leave existing extra newlines if not `inside_brackets`. Remove everything + else. + + Note: don't use backslashes for formatting or you'll lose your voting rights. 
+ """ + if not inside_brackets: + spl = leaf.prefix.split("#") + if "\\" not in spl[0]: + nl_count = spl[-1].count("\n") + if len(spl) > 1: + nl_count -= 1 + leaf.prefix = "\n" * nl_count + return + + leaf.prefix = "" + + +def normalize_invisible_parens( + node: Node, parens_after: Set[str], *, preview: bool +) -> None: + """Make existing optional parentheses invisible or create new ones. + + `parens_after` is a set of string leaf values immediately after which parens + should be put. + + Standardizes on visible parentheses for single-element tuples, and keeps + existing visible parentheses for other tuples and generator expressions. + """ + for pc in list_comments(node.prefix, is_endmarker=False, preview=preview): + if pc.value in FMT_OFF: + # This `node` has a prefix with `# fmt: off`, don't mess with parens. + return + check_lpar = False + for index, child in enumerate(list(node.children)): + # Fixes a bug where invisible parens are not properly stripped from + # assignment statements that contain type annotations. + if isinstance(child, Node) and child.type == syms.annassign: + normalize_invisible_parens( + child, parens_after=parens_after, preview=preview + ) + + # Add parentheses around long tuple unpacking in assignments. + if ( + index == 0 + and isinstance(child, Node) + and child.type == syms.testlist_star_expr + ): + check_lpar = True + + if check_lpar: + if ( + preview + and child.type == syms.atom + and node.type == syms.for_stmt + and isinstance(child.prev_sibling, Leaf) + and child.prev_sibling.type == token.NAME + and child.prev_sibling.value == "for" + ): + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, child, visible=False) + elif preview and isinstance(child, Node) and node.type == syms.with_stmt: + remove_with_parens(child, node) + elif child.type == syms.atom: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + ): + wrap_in_parentheses(node, child, visible=False) + elif is_one_tuple(child): + wrap_in_parentheses(node, child, visible=True) + elif node.type == syms.import_from: + # "import from" nodes store parentheses directly as part of + # the statement + if is_lpar_token(child): + assert is_rpar_token(node.children[-1]) + # make parentheses invisible + child.value = "" + node.children[-1].value = "" + elif child.type != token.STAR: + # insert invisible parentheses + node.insert_child(index, Leaf(token.LPAR, "")) + node.append_child(Leaf(token.RPAR, "")) + break + elif ( + index == 1 + and child.type == token.STAR + and node.type == syms.except_clause + ): + # In except* (PEP 654), the star is actually part of + # of the keyword. So we need to skip the insertion of + # invisible parentheses to work more precisely. 
+ continue + + elif not (isinstance(child, Leaf) and is_multiline_string(child)): + wrap_in_parentheses(node, child, visible=False) + + comma_check = child.type == token.COMMA if preview else False + + check_lpar = isinstance(child, Leaf) and ( + child.value in parens_after or comma_check + ) + + +def remove_await_parens(node: Node) -> None: + if node.children[0].type == token.AWAIT and len(node.children) > 1: + if ( + node.children[1].type == syms.atom + and node.children[1].children[0].type == token.LPAR + ): + if maybe_make_parens_invisible_in_atom( + node.children[1], + parent=node, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, node.children[1], visible=False) + + # Since await is an expression we shouldn't remove + # brackets in cases where this would change + # the AST due to operator precedence. + # Therefore we only aim to remove brackets around + # power nodes that aren't also await expressions themselves. + # https://peps.python.org/pep-0492/#updated-operator-precedence-table + # N.B. We've still removed any redundant nested brackets though :) + opening_bracket = cast(Leaf, node.children[1].children[0]) + closing_bracket = cast(Leaf, node.children[1].children[-1]) + bracket_contents = cast(Node, node.children[1].children[1]) + if bracket_contents.type != syms.power: + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + elif ( + bracket_contents.type == syms.power + and bracket_contents.children[0].type == token.AWAIT + ): + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + # If we are in a nested await then recurse down. + remove_await_parens(bracket_contents) + + +def remove_with_parens(node: Node, parent: Node) -> None: + """Recursively hide optional parens in `with` statements.""" + # Removing all unnecessary parentheses in with statements in one pass is a tad + # complex as different variations of bracketed statements result in pretty + # different parse trees: + # + # with (open("file")) as f: # this is an asexpr_test + # ... + # + # with (open("file") as f): # this is an atom containing an + # ... # asexpr_test + # + # with (open("file")) as f, (open("file")) as f: # this is asexpr_test, COMMA, + # ... # asexpr_test + # + # with (open("file") as f, open("file") as f): # an atom containing a + # ... # testlist_gexp which then + # # contains multiple asexpr_test(s) + if node.type == syms.atom: + if maybe_make_parens_invisible_in_atom( + node, + parent=parent, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(parent, node, visible=False) + if isinstance(node.children[1], Node): + remove_with_parens(node.children[1], node) + elif node.type == syms.testlist_gexp: + for child in node.children: + if isinstance(child, Node): + remove_with_parens(child, node) + elif node.type == syms.asexpr_test and not any( + leaf.type == token.COLONEQUAL for leaf in node.leaves() + ): + if maybe_make_parens_invisible_in_atom( + node.children[0], + parent=node, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, node.children[0], visible=False) + + +def maybe_make_parens_invisible_in_atom( + node: LN, + parent: LN, + remove_brackets_around_comma: bool = False, +) -> bool: + """If it's safe, make the parens in the atom `node` invisible, recursively. + Additionally, remove repeated, adjacent invisible parens from the atom `node` + as they are redundant. + + Returns whether the node should itself be wrapped in invisible parentheses. 
+ """ + if ( + node.type != syms.atom + or is_empty_tuple(node) + or is_one_tuple(node) + or (is_yield(node) and parent.type != syms.expr_stmt) + or ( + # This condition tries to prevent removing non-optional brackets + # around a tuple, however, can be a bit overzealous so we provide + # and option to skip this check for `for` and `with` statements. + not remove_brackets_around_comma + and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY + ) + ): + return False + + if is_walrus_assignment(node): + if parent.type in [ + syms.annassign, + syms.expr_stmt, + syms.assert_stmt, + syms.return_stmt, + # these ones aren't useful to end users, but they do please fuzzers + syms.for_stmt, + syms.del_stmt, + ]: + return False + + first = node.children[0] + last = node.children[-1] + if is_lpar_token(first) and is_rpar_token(last): + middle = node.children[1] + # make parentheses invisible + first.value = "" + last.value = "" + maybe_make_parens_invisible_in_atom( + middle, + parent=parent, + remove_brackets_around_comma=remove_brackets_around_comma, + ) + + if is_atom_with_invisible_parens(middle): + # Strip the invisible parens from `middle` by replacing + # it with the child in-between the invisible parens + middle.replace(middle.children[1]) + + return False + + return True + + +def should_split_line(line: Line, opening_bracket: Leaf) -> bool: + """Should `line` be immediately split with `delimiter_split()` after RHS?""" + + if not (opening_bracket.parent and opening_bracket.value in "[{("): + return False + + # We're essentially checking if the body is delimited by commas and there's more + # than one of them (we're excluding the trailing comma and if the delimiter priority + # is still commas, that means there's more). + exclude = set() + trailing_comma = False + try: + last_leaf = line.leaves[-1] + if last_leaf.type == token.COMMA: + trailing_comma = True + exclude.add(id(last_leaf)) + max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) + except (IndexError, ValueError): + return False + + return max_priority == COMMA_PRIORITY and ( + (line.mode.magic_trailing_comma and trailing_comma) + # always explode imports + or opening_bracket.parent.type in {syms.atom, syms.import_from} + ) + + +def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: + """Generate sets of closing bracket IDs that should be omitted in a RHS. + + Brackets can be omitted if the entire trailer up to and including + a preceding closing bracket fits in one line. + + Yielded sets are cumulative (contain results of previous yields, too). First + set is empty, unless the line should explode, in which case bracket pairs until + the one that needs to explode are omitted. 
+ """ + + omit: Set[LeafID] = set() + if not line.magic_trailing_comma: + yield omit + + length = 4 * line.depth + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + inner_brackets: Set[LeafID] = set() + for index, leaf, leaf_length in line.enumerate_with_length(reversed=True): + length += leaf_length + if length > line_length: + break + + has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) + if leaf.type == STANDALONE_COMMENT or has_inline_comment: + break + + if opening_bracket: + if leaf is opening_bracket: + opening_bracket = None + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if ( + prev + and prev.type == token.COMMA + and leaf.opening_bracket is not None + and not is_one_sequence_between( + leaf.opening_bracket, leaf, line.leaves + ) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. + break + + inner_brackets.add(id(leaf)) + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if prev and prev.type in OPENING_BRACKETS: + # Empty brackets would fail a split so treat them as "inner" + # brackets (e.g. only add them to the `omit` set if another + # pair of brackets was good enough. + inner_brackets.add(id(leaf)) + continue + + if closing_bracket: + omit.add(id(closing_bracket)) + omit.update(inner_brackets) + inner_brackets.clear() + yield omit + + if ( + prev + and prev.type == token.COMMA + and leaf.opening_bracket is not None + and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. + break + + if leaf.value: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + + +def run_transformer( + line: Line, + transform: Transformer, + mode: Mode, + features: Collection[Feature], + *, + line_str: str = "", +) -> List[Line]: + if not line_str: + line_str = line_to_string(line) + result: List[Line] = [] + for transformed_line in transform(line, features): + if str(transformed_line).strip("\n") == line_str: + raise CannotTransform("Line transformer returned an unchanged result") + + result.extend(transform_line(transformed_line, mode=mode, features=features)) + + if ( + transform.__class__.__name__ != "rhs" + or not line.bracket_tracker.invisible + or any(bracket.value for bracket in line.bracket_tracker.invisible) + or line.contains_multiline_strings() + or result[0].contains_uncollapsable_type_comments() + or result[0].contains_unsplittable_type_ignore() + or is_line_short_enough(result[0], line_length=mode.line_length) + # If any leaves have no parents (which _can_ occur since + # `transform(line)` potentially destroys the line's underlying node + # structure), then we can't proceed. Doing so would cause the below + # call to `append_leaves()` to fail. 
+ or any(leaf.parent is None for leaf in line.leaves) + ): + return result + + line_copy = line.clone() + append_leaves(line_copy, line, line.leaves) + features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES} + second_opinion = run_transformer( + line_copy, transform, mode, features_fop, line_str=line_str + ) + if all( + is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion + ): + result = second_opinion + return result diff --git a/src/black/lines.py b/src/black/lines.py new file mode 100644 index 00000000000..dd11dde7b55 --- /dev/null +++ b/src/black/lines.py @@ -0,0 +1,802 @@ +import itertools +import sys +from dataclasses import dataclass, field +from typing import Callable, Dict, Iterator, List, Optional, Sequence, Tuple, TypeVar, cast + +from black.brackets import DOT_PRIORITY, BracketTracker +from black.mode import Mode, Preview +from black.nodes import ( + BRACKETS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + TEST_DESCENDANTS, + child_towards, + is_import, + is_multiline_string, + is_one_sequence_between, + is_type_comment, + replace_child, + syms, + whitespace, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + + +# types +T = TypeVar("T") +Index = int +LeafID = int + + +@dataclass +class Line: + """Holds leaves and comments. Can be printed with `str(line)`.""" + + mode: Mode + depth: int = 0 + leaves: List[Leaf] = field(default_factory=list) + # keys ordered like `leaves` + comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict) + bracket_tracker: BracketTracker = field(default_factory=BracketTracker) + inside_brackets: bool = False + should_split_rhs: bool = False + magic_trailing_comma: Optional[Leaf] = None + + def append(self, leaf: Leaf, preformatted: bool = False) -> None: + """Add a new `leaf` to the end of the line. + + Unless `preformatted` is True, the `leaf` will receive a new consistent + whitespace prefix and metadata applied by :class:`BracketTracker`. + Trailing commas are maybe removed, unpacked for loop variables are + demoted from being delimiters. + + Inline comments are put aside. + """ + has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) + if not has_value: + return + + if token.COLON == leaf.type and self.is_class_paren_empty: + del self.leaves[-2:] + if self.leaves and not preformatted: + # Note: at this point leaf.prefix should be empty except for + # imports, for which we only preserve newlines. + leaf.prefix += whitespace( + leaf, complex_subscript=self.is_complex_subscript(leaf) + ) + if self.inside_brackets or not preformatted: + self.bracket_tracker.mark(leaf) + if self.mode.magic_trailing_comma: + if self.has_magic_trailing_comma(leaf): + self.magic_trailing_comma = leaf + elif self.has_magic_trailing_comma(leaf, ensure_removable=True): + self.remove_trailing_comma() + if not self.append_comment(leaf): + self.leaves.append(leaf) + + def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: + """Like :func:`append()` but disallow invalid standalone comment structure. + + Raises ValueError when any `leaf` is appended after a standalone comment + or when a standalone comment is not the first leaf on the line. 
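+
+        For example, appending a NAME leaf to a line that already consists of
+        a lone standalone comment raises ValueError (at bracket depth 0).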
+ """ + if self.bracket_tracker.depth == 0: + if self.is_comment: + raise ValueError("cannot append to standalone comments") + + if self.leaves and leaf.type == STANDALONE_COMMENT: + raise ValueError( + "cannot append standalone comments to a populated line" + ) + + self.append(leaf, preformatted=preformatted) + + @property + def is_comment(self) -> bool: + """Is this line a standalone comment?""" + return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT + + @property + def is_decorator(self) -> bool: + """Is this line a decorator?""" + return bool(self) and self.leaves[0].type == token.AT + + @property + def is_import(self) -> bool: + """Is this an import line?""" + return bool(self) and is_import(self.leaves[0]) + + @property + def is_class(self) -> bool: + """Is this line a class definition?""" + return ( + bool(self) + and self.leaves[0].type == token.NAME + and self.leaves[0].value == "class" + ) + + @property + def is_stub_class(self) -> bool: + """Is this line a class definition with a body consisting only of "..."?""" + return self.is_class and self.leaves[-3:] == [ + Leaf(token.DOT, ".") for _ in range(3) + ] + + @property + def is_def(self) -> bool: + """Is this a function definition? (Also returns True for async defs.)""" + try: + first_leaf = self.leaves[0] + except IndexError: + return False + + try: + second_leaf: Optional[Leaf] = self.leaves[1] + except IndexError: + second_leaf = None + return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( + first_leaf.type == token.ASYNC + and second_leaf is not None + and second_leaf.type == token.NAME + and second_leaf.value == "def" + ) + + @property + def is_class_paren_empty(self) -> bool: + """Is this a class with no base classes but using parentheses? + + Those are unnecessary and should be removed. + """ + return ( + bool(self) + and len(self.leaves) == 4 + and self.is_class + and self.leaves[2].type == token.LPAR + and self.leaves[2].value == "(" + and self.leaves[3].type == token.RPAR + and self.leaves[3].value == ")" + ) + + @property + def is_triple_quoted_string(self) -> bool: + """Is the line a triple quoted string?""" + return ( + bool(self) + and self.leaves[0].type == token.STRING + and self.leaves[0].value.startswith(('"""', "'''")) + ) + + @property + def opens_block(self) -> bool: + """Does this line open a new level of indentation.""" + if len(self.leaves) == 0: + return False + return self.leaves[-1].type == token.COLON + + def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: + """If so, needs to be split before emitting.""" + for leaf in self.leaves: + if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: + return True + + return False + + def contains_uncollapsable_type_comments(self) -> bool: + ignored_ids = set() + try: + last_leaf = self.leaves[-1] + ignored_ids.add(id(last_leaf)) + if last_leaf.type == token.COMMA or ( + last_leaf.type == token.RPAR and not last_leaf.value + ): + # When trailing commas or optional parens are inserted by Black for + # consistency, comments after the previous last element are not moved + # (they don't have to, rendering will still be correct). So we ignore + # trailing commas and invisible. 
+                last_leaf = self.leaves[-2]
+                ignored_ids.add(id(last_leaf))
+        except IndexError:
+            return False
+
+        # A type comment is uncollapsable if it is attached to a leaf
+        # that isn't at the end of the line (since that could cause it
+        # to get associated to a different argument) or if there are
+        # comments before it (since that could cause it to get hidden
+        # behind a comment).
+        comment_seen = False
+        for leaf_id, comments in self.comments.items():
+            for comment in comments:
+                if is_type_comment(comment):
+                    if comment_seen or (
+                        not is_type_comment(comment, " ignore")
+                        and leaf_id not in ignored_ids
+                    ):
+                        return True
+
+                comment_seen = True
+
+        return False
+
+    def contains_unsplittable_type_ignore(self) -> bool:
+        if not self.leaves:
+            return False
+
+        # If a 'type: ignore' is attached to the end of a line, we
+        # can't split the line, because we can't know which of the
+        # subexpressions the ignore was meant to apply to.
+        #
+        # We only want this to apply to actual physical lines from the
+        # original source, though: we don't want the presence of a
+        # 'type: ignore' at the end of a multiline expression to
+        # justify pushing it all onto one line. Thus we
+        # (unfortunately) need to check the actual source lines and
+        # only report an unsplittable 'type: ignore' if this line was
+        # one line in the original code.
+
+        # Grab the first and last line numbers, skipping generated leaves
+        first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)
+        last_line = next(
+            (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0
+        )
+
+        if first_line == last_line:
+            # We look at the last two leaves since a comma or an
+            # invisible paren could have been added at the end of the
+            # line.
+            for node in self.leaves[-2:]:
+                for comment in self.comments.get(id(node), []):
+                    if is_type_comment(comment, " ignore"):
+                        return True
+
+        return False
+
+    def contains_multiline_strings(self) -> bool:
+        return any(is_multiline_string(leaf) for leaf in self.leaves)
+
+    def has_magic_trailing_comma(
+        self, closing: Leaf, ensure_removable: bool = False
+    ) -> bool:
+        """Return True if we have a magic trailing comma, that is when:
+        - there's a trailing comma here
+        - it's not a one-tuple
+        - it's not a single-element subscript
+        Additionally, if ensure_removable:
+        - it's not from square bracket indexing
+        (specifically, single-element square bracket indexing with
+        Preview.skip_magic_trailing_comma_in_subscript)
+        """
+        if not (
+            closing.type in CLOSING_BRACKETS
+            and self.leaves
+            and self.leaves[-1].type == token.COMMA
+        ):
+            return False
+
+        if closing.type == token.RBRACE:
+            return True
+
+        if closing.type == token.RSQB:
+            if (
+                Preview.one_element_subscript in self.mode
+                and closing.parent
+                and closing.parent.type == syms.trailer
+                and closing.opening_bracket
+                and is_one_sequence_between(
+                    closing.opening_bracket,
+                    closing,
+                    self.leaves,
+                    brackets=(token.LSQB, token.RSQB),
+                )
+            ):
+                return False
+
+            if not ensure_removable:
+                return True
+
+            comma = self.leaves[-1]
+            if comma.parent is None:
+                return False
+            if Preview.skip_magic_trailing_comma_in_subscript in self.mode:
+                return (
+                    comma.parent.type != syms.subscriptlist
+                    or closing.opening_bracket is None
+                    or not is_one_sequence_between(
+                        closing.opening_bracket,
+                        closing,
+                        self.leaves,
+                        brackets=(token.LSQB, token.RSQB),
+                    )
+                )
+            return comma.parent.type == syms.listmaker
+
+        if self.is_import:
+            return True
+
+        if closing.opening_bracket is not None and not is_one_sequence_between(
+            closing.opening_bracket, closing, self.leaves
+        ):
+            return True
+
+        return False
+
+    def append_comment(self, comment: Leaf) -> bool:
+        """Add an inline or standalone comment to the line."""
+        if (
+            comment.type == STANDALONE_COMMENT
+            and self.bracket_tracker.any_open_brackets()
+        ):
+            comment.prefix = ""
+            return False
+
+        if comment.type != token.COMMENT:
+            return False
+
+        if not self.leaves:
+            comment.type = STANDALONE_COMMENT
+            comment.prefix = ""
+            return False
+
+        last_leaf = self.leaves[-1]
+        if (
+            last_leaf.type == token.RPAR
+            and not last_leaf.value
+            and last_leaf.parent
+            and len(list(last_leaf.parent.leaves())) <= 3
+            and not is_type_comment(comment)
+        ):
+            # Comments on optional parens wrapping a single leaf should belong to
+            # the wrapped node except if it's a type comment. Pinning the comment like
+            # this avoids unstable formatting caused by comment migration.
+            if len(self.leaves) < 2:
+                comment.type = STANDALONE_COMMENT
+                comment.prefix = ""
+                return False
+
+            last_leaf = self.leaves[-2]
+        self.comments.setdefault(id(last_leaf), []).append(comment)
+        return True
+
+    def comments_after(self, leaf: Leaf) -> List[Leaf]:
+        """Generate comments that should appear directly after `leaf`."""
+        return self.comments.get(id(leaf), [])
+
+    def remove_trailing_comma(self) -> None:
+        """Remove the trailing comma and move the comments attached to it."""
+        trailing_comma = self.leaves.pop()
+        trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
+        self.comments.setdefault(id(self.leaves[-1]), []).extend(
+            trailing_comma_comments
+        )
+
+    def is_complex_subscript(self, leaf: Leaf) -> bool:
+        """Return True iff `leaf` is part of a slice with non-trivial exprs."""
+        open_lsqb = self.bracket_tracker.get_open_lsqb()
+        if open_lsqb is None:
+            return False
+
+        subscript_start = open_lsqb.next_sibling
+
+        if isinstance(subscript_start, Node):
+            if subscript_start.type == syms.listmaker:
+                return False
+
+            if subscript_start.type == syms.subscriptlist:
+                subscript_start = child_towards(subscript_start, leaf)
+        return subscript_start is not None and any(
+            n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
+        )
+
+    def enumerate_with_length(
+        self, reversed: bool = False
+    ) -> Iterator[Tuple[Index, Leaf, int]]:
+        """Return an enumeration of leaves with their length.
+
+        Stops prematurely on multiline strings and standalone comments.
+        """
+        op = cast(
+            Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
+            enumerate_reversed if reversed else enumerate,
+        )
+        for index, leaf in op(self.leaves):
+            length = len(leaf.prefix) + len(leaf.value)
+            if "\n" in leaf.value:
+                return  # Multiline strings, we can't continue.
+ + for comment in self.comments_after(leaf): + length += len(comment.value) + + yield index, leaf, length + + def clone(self) -> "Line": + return Line( + mode=self.mode, + depth=self.depth, + inside_brackets=self.inside_brackets, + should_split_rhs=self.should_split_rhs, + magic_trailing_comma=self.magic_trailing_comma, + ) + + def __str__(self) -> str: + """Render the line.""" + if not self: + return "\n" + + indent = " " * self.depth + leaves = iter(self.leaves) + first = next(leaves) + res = f"{first.prefix}{indent}{first.value}" + for leaf in leaves: + res += str(leaf) + for comment in itertools.chain.from_iterable(self.comments.values()): + res += str(comment) + + return res + "\n" + + def __bool__(self) -> bool: + """Return True if the line has leaves or comments.""" + return bool(self.leaves or self.comments) + + +@dataclass +class EmptyLineTracker: + """Provides a stateful method that returns the number of potential extra + empty lines needed before and after the currently processed line. + + Note: this tracker works on lines that haven't been split yet. It assumes + the prefix of the first leaf consists of optional newlines. Those newlines + are consumed by `maybe_empty_lines()` and included in the computation. + """ + + is_pyi: bool = False + previous_line: Optional[Line] = None + previous_after: int = 0 + previous_defs: List[int] = field(default_factory=list) + + def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: + """Return the number of extra empty lines before and after the `current_line`. + + This is for separating `def`, `async def` and `class` with extra empty + lines (two on module-level). + """ + before, after = self._maybe_empty_lines(current_line) + before = ( + # Black should not insert empty lines at the beginning + # of the file + 0 + if self.previous_line is None + else before - self.previous_after + ) + self.previous_after = after + self.previous_line = current_line + return before, after + + def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: + max_allowed = 1 + if current_line.depth == 0: + max_allowed = 1 if self.is_pyi else 2 + if current_line.leaves: + # Consume the first leaf's extra newlines. + first_leaf = current_line.leaves[0] + before = first_leaf.prefix.count("\n") + before = min(before, max_allowed) + first_leaf.prefix = "" + else: + before = 0 + depth = current_line.depth + while self.previous_defs and self.previous_defs[-1] >= depth: + if self.is_pyi: + assert self.previous_line is not None + if depth and not current_line.is_def and self.previous_line.is_def: + # Empty lines between attributes and methods should be preserved. + before = min(1, before) + elif depth: + before = 0 + else: + before = 1 + else: + if depth: + before = 1 + elif ( + not depth + and self.previous_defs[-1] + and current_line.leaves[-1].type == token.COLON + and ( + current_line.leaves[0].value + not in ("with", "try", "for", "while", "if", "match") + ) + ): + # We shouldn't add two newlines between an indented function and + # a dependent non-indented clause. This is to avoid issues with + # conditional function definitions that are technically top-level + # and therefore get two trailing newlines, but look weird and + # inconsistent when they're followed by elif, else, etc. This is + # worse because these functions only get *one* preceding newline + # already. 
+ before = 1 + else: + before = 2 + self.previous_defs.pop() + if current_line.is_decorator or current_line.is_def or current_line.is_class: + return self._maybe_empty_lines_for_class_or_def(current_line, before) + + if ( + self.previous_line + and self.previous_line.is_import + and not current_line.is_import + and depth == self.previous_line.depth + ): + return (before or 1), 0 + + if ( + self.previous_line + and self.previous_line.is_class + and current_line.is_triple_quoted_string + ): + return before, 1 + + if ( + Preview.remove_block_trailing_newline in current_line.mode + and self.previous_line + and self.previous_line.opens_block + ): + return 0, 0 + return before, 0 + + def _maybe_empty_lines_for_class_or_def( + self, current_line: Line, before: int + ) -> Tuple[int, int]: + if not current_line.is_decorator: + self.previous_defs.append(current_line.depth) + if self.previous_line is None: + # Don't insert empty lines before the first line in the file. + return 0, 0 + + if self.previous_line.is_decorator: + if self.is_pyi and current_line.is_stub_class: + # Insert an empty line after a decorated stub class + return 0, 1 + + return 0, 0 + + if self.previous_line.depth < current_line.depth and ( + self.previous_line.is_class or self.previous_line.is_def + ): + return 0, 0 + + if ( + self.previous_line.is_comment + and self.previous_line.depth == current_line.depth + and before == 0 + ): + return 0, 0 + + if self.is_pyi: + if current_line.is_class or self.previous_line.is_class: + if self.previous_line.depth < current_line.depth: + newlines = 0 + elif self.previous_line.depth > current_line.depth: + newlines = 1 + elif current_line.is_stub_class and self.previous_line.is_stub_class: + # No blank line between classes with an empty body + newlines = 0 + else: + newlines = 1 + elif ( + current_line.is_def or current_line.is_decorator + ) and not self.previous_line.is_def: + if current_line.depth: + # In classes empty lines between attributes and methods should + # be preserved. + newlines = min(1, before) + else: + # Blank line between a block of functions (maybe with preceding + # decorators) and a block of non-functions + newlines = 1 + elif self.previous_line.depth > current_line.depth: + newlines = 1 + else: + newlines = 0 + else: + newlines = 1 if current_line.depth else 2 + return newlines, 0 + + +def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: + """Like `reversed(enumerate(sequence))` if that were possible.""" + index = len(sequence) - 1 + for element in reversed(sequence): + yield (index, element) + index -= 1 + + +def append_leaves( + new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False +) -> None: + """ + Append leaves (taken from @old_line) to @new_line, making sure to fix the + underlying Node structure where appropriate. + + All of the leaves in @leaves are duplicated. The duplicates are then + appended to @new_line and used to replace their originals in the underlying + Node structure. Any comments attached to the old leaves are reattached to + the new leaves. + + Pre-conditions: + set(@leaves) is a subset of set(@old_line.leaves). 
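+
+    For example, `run_transformer` uses this to rebuild a clone of a line
+    before retrying a transform with a different feature set (an illustrative
+    caller from the line-splitting code).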
+ """ + for old_leaf in leaves: + new_leaf = Leaf(old_leaf.type, old_leaf.value) + replace_child(old_leaf, new_leaf) + new_line.append(new_leaf, preformatted=preformatted) + + for comment_leaf in old_line.comments_after(old_leaf): + new_line.append(comment_leaf, preformatted=True) + + +def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: + """Return True if `line` is no longer than `line_length`. + + Uses the provided `line_str` rendering, if any, otherwise computes a new one. + """ + if not line_str: + line_str = line_to_string(line) + return ( + len(line_str) <= line_length + and "\n" not in line_str # multiline strings + and not line.contains_standalone_comments() + ) + + +def can_be_split(line: Line) -> bool: + """Return False if the line cannot be split *for sure*. + + This is not an exhaustive search but a cheap heuristic that we can use to + avoid some unfortunate formattings (mostly around wrapping unsplittable code + in unnecessary parentheses). + """ + leaves = line.leaves + if len(leaves) < 2: + return False + + if leaves[0].type == token.STRING and leaves[1].type == token.DOT: + call_count = 0 + dot_count = 0 + next = leaves[-1] + for leaf in leaves[-2::-1]: + if leaf.type in OPENING_BRACKETS: + if next.type not in CLOSING_BRACKETS: + return False + + call_count += 1 + elif leaf.type == token.DOT: + dot_count += 1 + elif leaf.type == token.NAME: + if not (next.type == token.DOT or next.type in OPENING_BRACKETS): + return False + + elif leaf.type not in CLOSING_BRACKETS: + return False + + if dot_count > 1 and call_count > 1: + return False + + return True + + +def can_omit_invisible_parens( + line: Line, + line_length: int, +) -> bool: + """Does `line` have a shape safe to reformat without optional parens around it? + + Returns True for only a subset of potentially nice looking formattings but + the point is to not return false positives that end up producing lines that + are too long. + """ + bt = line.bracket_tracker + if not bt.delimiters: + # Without delimiters the optional parentheses are useless. + return True + + max_priority = bt.max_delimiter_priority() + if bt.delimiter_count_with_priority(max_priority) > 1: + # With more than one delimiter of a kind the optional parentheses read better. + return False + + if max_priority == DOT_PRIORITY: + # A single stranded method call doesn't require optional parentheses. + return True + + assert len(line.leaves) >= 2, "Stranded delimiter" + + # With a single delimiter, omit if the expression starts or ends with + # a bracket. + first = line.leaves[0] + second = line.leaves[1] + if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: + if _can_omit_opening_paren(line, first=first, line_length=line_length): + return True + + # Note: we are not returning False here because a line might have *both* + # a leading opening bracket and a trailing closing bracket. If the + # opening bracket doesn't match our rule, maybe the closing will. + + penultimate = line.leaves[-2] + last = line.leaves[-1] + + if ( + last.type == token.RPAR + or last.type == token.RBRACE + or ( + # don't use indexing for omitting optional parentheses; + # it looks weird + last.type == token.RSQB + and last.parent + and last.parent.type != syms.trailer + ) + ): + if penultimate.type in OPENING_BRACKETS: + # Empty brackets don't help. + return False + + if is_multiline_string(first): + # Additional wrapping of a multiline string in this situation is + # unnecessary. 
+ return True + + if _can_omit_closing_paren(line, last=last, line_length=line_length): + return True + + return False + + +def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + remainder = False + length = 4 * line.depth + _index = -1 + for _index, leaf, leaf_length in line.enumerate_with_length(): + if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: + remainder = True + if remainder: + length += leaf_length + if length > line_length: + break + + if leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + remainder = False + + else: + # checked the entire string and line length wasn't exceeded + if len(line.leaves) == _index + 1: + return True + + return False + + +def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + length = 4 * line.depth + seen_other_brackets = False + for _index, leaf, leaf_length in line.enumerate_with_length(): + length += leaf_length + if leaf is last.opening_bracket: + if seen_other_brackets or length <= line_length: + return True + + elif leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + seen_other_brackets = True + + return False + + +def line_to_string(line: Line) -> str: + """Returns the string representation of @line. + + WARNING: This is known to be computationally expensive. + """ + return str(line).strip("\n") diff --git a/src/black/mode.py b/src/black/mode.py new file mode 100644 index 00000000000..6c0847e8bcc --- /dev/null +++ b/src/black/mode.py @@ -0,0 +1,216 @@ +"""Data structures configuring Black behavior. + +Mostly around Python language feature support per version and Black configuration +chosen by the user. +""" + +import sys +from dataclasses import dataclass, field +from enum import Enum, auto +from hashlib import sha256 +from operator import attrgetter +from typing import Dict, Set +from warnings import warn + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.const import DEFAULT_LINE_LENGTH + + +class TargetVersion(Enum): + PY33 = 3 + PY34 = 4 + PY35 = 5 + PY36 = 6 + PY37 = 7 + PY38 = 8 + PY39 = 9 + PY310 = 10 + PY311 = 11 + + +class Feature(Enum): + F_STRINGS = 2 + NUMERIC_UNDERSCORES = 3 + TRAILING_COMMA_IN_CALL = 4 + TRAILING_COMMA_IN_DEF = 5 + # The following two feature-flags are mutually exclusive, and exactly one should be + # set for every version of python. 
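+    # (For example, Python 3.5 and 3.6 still parse "async"/"await" as plain
+    # identifiers, while 3.7+ treats them as proper keywords.)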
+ ASYNC_IDENTIFIERS = 6 + ASYNC_KEYWORDS = 7 + ASSIGNMENT_EXPRESSIONS = 8 + POS_ONLY_ARGUMENTS = 9 + RELAXED_DECORATORS = 10 + PATTERN_MATCHING = 11 + UNPACKING_ON_FLOW = 12 + ANN_ASSIGN_EXTENDED_RHS = 13 + EXCEPT_STAR = 14 + VARIADIC_GENERICS = 15 + DEBUG_F_STRINGS = 16 + FORCE_OPTIONAL_PARENTHESES = 50 + + # __future__ flags + FUTURE_ANNOTATIONS = 51 + + +FUTURE_FLAG_TO_FEATURE: Final = { + "annotations": Feature.FUTURE_ANNOTATIONS, +} + + +VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { + TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY36: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_IDENTIFIERS, + }, + TargetVersion.PY37: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + }, + TargetVersion.PY38: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + }, + TargetVersion.PY39: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + }, + TargetVersion.PY310: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PATTERN_MATCHING, + }, + TargetVersion.PY311: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + }, +} + + +def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: + return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) + + +class Preview(Enum): + """Individual preview style features.""" + + annotation_parens = auto() + long_docstring_quotes_on_newline = auto() + normalize_docstring_quotes_and_prefixes_properly = auto() + one_element_subscript = auto() + remove_block_trailing_newline = auto() + remove_redundant_parens = auto() + string_processing = auto() + skip_magic_trailing_comma_in_subscript = auto() + + +class Deprecated(UserWarning): + """Visible deprecation warning.""" + + +@dataclass +class Mode: + target_versions: Set[TargetVersion] = field(default_factory=set) + line_length: int = DEFAULT_LINE_LENGTH + string_normalization: bool = True + is_pyi: bool = False + 
is_ipynb: bool = False + magic_trailing_comma: bool = True + experimental_string_processing: bool = False + python_cell_magics: Set[str] = field(default_factory=set) + preview: bool = False + + def __post_init__(self) -> None: + if self.experimental_string_processing: + warn( + "`experimental string processing` has been included in `preview`" + " and deprecated. Use `preview` instead.", + Deprecated, + ) + + def __contains__(self, feature: Preview) -> bool: + """ + Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag. + + The argument is not checked and features are not differentiated. + They only exist to make development easier by clarifying intent. + """ + if feature is Preview.string_processing: + return self.preview or self.experimental_string_processing + return self.preview + + def get_cache_key(self) -> str: + if self.target_versions: + version_str = ",".join( + str(version.value) + for version in sorted(self.target_versions, key=attrgetter("value")) + ) + else: + version_str = "-" + parts = [ + version_str, + str(self.line_length), + str(int(self.string_normalization)), + str(int(self.is_pyi)), + str(int(self.is_ipynb)), + str(int(self.magic_trailing_comma)), + str(int(self.experimental_string_processing)), + str(int(self.preview)), + sha256((",".join(sorted(self.python_cell_magics))).encode()).hexdigest(), + ] + return ".".join(parts) diff --git a/src/black/nodes.py b/src/black/nodes.py new file mode 100644 index 00000000000..aeb2be389c8 --- /dev/null +++ b/src/black/nodes.py @@ -0,0 +1,850 @@ +""" +blib2to3 Node/Leaf transformation-related utility functions. +""" + +import sys +from typing import Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from mypy_extensions import mypyc_attr + +from black.cache import CACHE_DIR +from black.strings import has_triple_quotes +from blib2to3 import pygram +from blib2to3.pgen2 import token +from blib2to3.pytree import NL, Leaf, Node, type_repr + +pygram.initialize(CACHE_DIR) +syms: Final = pygram.python_symbols + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +LeafID = int +NodeType = int + + +WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} +STATEMENT: Final = { + syms.if_stmt, + syms.while_stmt, + syms.for_stmt, + syms.try_stmt, + syms.except_clause, + syms.with_stmt, + syms.funcdef, + syms.classdef, + syms.match_stmt, + syms.case_block, +} +STANDALONE_COMMENT: Final = 153 +token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" +LOGIC_OPERATORS: Final = {"and", "or"} +COMPARATORS: Final = { + token.LESS, + token.GREATER, + token.EQEQUAL, + token.NOTEQUAL, + token.LESSEQUAL, + token.GREATEREQUAL, +} +MATH_OPERATORS: Final = { + token.VBAR, + token.CIRCUMFLEX, + token.AMPER, + token.LEFTSHIFT, + token.RIGHTSHIFT, + token.PLUS, + token.MINUS, + token.STAR, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.AT, + token.TILDE, + token.DOUBLESTAR, +} +STARS: Final = {token.STAR, token.DOUBLESTAR} +VARARGS_SPECIALS: Final = STARS | {token.SLASH} +VARARGS_PARENTS: Final = { + syms.arglist, + syms.argument, # double star in arglist + syms.trailer, # single argument to call + syms.typedargslist, + syms.varargslist, # lambdas +} +UNPACKING_PARENTS: Final = { + syms.atom, # single element of a list or set literal + syms.dictsetmaker, + syms.listmaker, + 
syms.testlist_gexp, + syms.testlist_star_expr, + syms.subject_expr, + syms.pattern, +} +TEST_DESCENDANTS: Final = { + syms.test, + syms.lambdef, + syms.or_test, + syms.and_test, + syms.not_test, + syms.comparison, + syms.star_expr, + syms.expr, + syms.xor_expr, + syms.and_expr, + syms.shift_expr, + syms.arith_expr, + syms.trailer, + syms.term, + syms.power, +} +TYPED_NAMES: Final = {syms.tname, syms.tname_star} +ASSIGNMENTS: Final = { + "=", + "+=", + "-=", + "*=", + "@=", + "/=", + "%=", + "&=", + "|=", + "^=", + "<<=", + ">>=", + "**=", + "//=", +} + +IMPLICIT_TUPLE: Final = {syms.testlist, syms.testlist_star_expr, syms.exprlist} +BRACKET: Final = { + token.LPAR: token.RPAR, + token.LSQB: token.RSQB, + token.LBRACE: token.RBRACE, +} +OPENING_BRACKETS: Final = set(BRACKET.keys()) +CLOSING_BRACKETS: Final = set(BRACKET.values()) +BRACKETS: Final = OPENING_BRACKETS | CLOSING_BRACKETS +ALWAYS_NO_SPACE: Final = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} + +RARROW = 55 + + +@mypyc_attr(allow_interpreted_subclasses=True) +class Visitor(Generic[T]): + """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" + + def visit(self, node: LN) -> Iterator[T]: + """Main method to visit `node` and its children. + + It tries to find a `visit_*()` method for the given `node.type`, like + `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. + If no dedicated `visit_*()` method is found, chooses `visit_default()` + instead. + + Then yields objects of type `T` from the selected visitor. + """ + if node.type < 256: + name = token.tok_name[node.type] + else: + name = str(type_repr(node.type)) + # We explicitly branch on whether a visitor exists (instead of + # using self.visit_default as the default arg to getattr) in order + # to save needing to create a bound method object and so mypyc can + # generate a native call to visit_default. + visitf = getattr(self, f"visit_{name}", None) + if visitf: + yield from visitf(node) + else: + yield from self.visit_default(node) + + def visit_default(self, node: LN) -> Iterator[T]: + """Default `visit_*()` implementation. Recurses to children of `node`.""" + if isinstance(node, Node): + for child in node.children: + yield from self.visit(child) + + +def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 + """Return whitespace prefix if needed for the given `leaf`. + + `complex_subscript` signals whether the given leaf is part of a subscription + which has non-trivial arguments, like arithmetic expressions or function calls. 
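+
+    For example, in `ham[lower+offset : upper+offset]` the slice colons get
+    surrounding spaces because the subscript is complex, while in `ham[1:9]`
+    they stay bare.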
+ """ + NO: Final = "" + SPACE: Final = " " + DOUBLESPACE: Final = " " + t = leaf.type + p = leaf.parent + v = leaf.value + if t in ALWAYS_NO_SPACE: + return NO + + if t == token.COMMENT: + return DOUBLESPACE + + assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" + if t == token.COLON and p.type not in { + syms.subscript, + syms.subscriptlist, + syms.sliceop, + }: + return NO + + prev = leaf.prev_sibling + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type in OPENING_BRACKETS: + return NO + + if t == token.COLON: + if prevp.type == token.COLON: + return NO + + elif prevp.type != token.COMMA and not complex_subscript: + return NO + + return SPACE + + if prevp.type == token.EQUAL: + if prevp.parent: + if prevp.parent.type in { + syms.arglist, + syms.argument, + syms.parameters, + syms.varargslist, + }: + return NO + + elif prevp.parent.type == syms.typedargslist: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using + # that, too. + return prevp.prefix + + elif ( + prevp.type == token.STAR + and parent_type(prevp) == syms.star_expr + and parent_type(prevp.parent) == syms.subscriptlist + ): + # No space between typevar tuples. + return NO + + elif prevp.type in VARARGS_SPECIALS: + if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): + return NO + + elif prevp.type == token.COLON: + if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: + return SPACE if complex_subscript else NO + + elif ( + prevp.parent + and prevp.parent.type == syms.factor + and prevp.type in MATH_OPERATORS + ): + return NO + + elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator: + # no space in decorators + return NO + + elif prev.type in OPENING_BRACKETS: + return NO + + if p.type in {syms.parameters, syms.arglist}: + # untyped function signatures or calls + if not prev or prev.type != token.COMMA: + return NO + + elif p.type == syms.varargslist: + # lambdas + if prev and prev.type != token.COMMA: + return NO + + elif p.type == syms.typedargslist: + # typed function signatures + if not prev: + return NO + + if t == token.EQUAL: + if prev.type not in TYPED_NAMES: + return NO + + elif prev.type == token.EQUAL: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using that, too. 
+ return prev.prefix + + elif prev.type != token.COMMA: + return NO + + elif p.type in TYPED_NAMES: + # type names + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type != token.COMMA: + return NO + + elif p.type == syms.trailer: + # attributes and calls + if t == token.LPAR or t == token.RPAR: + return NO + + if not prev: + if t == token.DOT or t == token.LSQB: + return NO + + elif prev.type != token.COMMA: + return NO + + elif p.type == syms.argument: + # single argument + if t == token.EQUAL: + return NO + + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type == token.LPAR: + return NO + + elif prev.type in {token.EQUAL} | VARARGS_SPECIALS: + return NO + + elif p.type == syms.decorator: + # decorators + return NO + + elif p.type == syms.dotted_name: + if prev: + return NO + + prevp = preceding_leaf(p) + if not prevp or prevp.type == token.AT or prevp.type == token.DOT: + return NO + + elif p.type == syms.classdef: + if t == token.LPAR: + return NO + + if prev and prev.type == token.LPAR: + return NO + + elif p.type in {syms.subscript, syms.sliceop}: + # indexing + if not prev: + assert p.parent is not None, "subscripts are always parented" + if p.parent.type == syms.subscriptlist: + return SPACE + + return NO + + elif not complex_subscript: + return NO + + elif p.type == syms.atom: + if prev and t == token.DOT: + # dots, but not the first one. + return NO + + elif p.type == syms.dictsetmaker: + # dict unpacking + if prev and prev.type == token.DOUBLESTAR: + return NO + + elif p.type in {syms.factor, syms.star_expr}: + # unary ops + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type in OPENING_BRACKETS: + return NO + + prevp_parent = prevp.parent + assert prevp_parent is not None + if prevp.type == token.COLON and prevp_parent.type in { + syms.subscript, + syms.sliceop, + }: + return NO + + elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument: + return NO + + elif t in {token.NAME, token.NUMBER, token.STRING}: + return NO + + elif p.type == syms.import_from: + if t == token.DOT: + if prev and prev.type == token.DOT: + return NO + + elif t == token.NAME: + if v == "import": + return SPACE + + if prev and prev.type == token.DOT: + return NO + + elif p.type == syms.sliceop: + return NO + + elif p.type == syms.except_clause: + if t == token.STAR: + return NO + + return SPACE + + +def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]: + """Return the first leaf that precedes `node`, if any.""" + while node: + res = node.prev_sibling + if res: + if isinstance(res, Leaf): + return res + + try: + return list(res.leaves())[-1] + + except IndexError: + return None + + node = node.parent + return None + + +def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool: + """Return whether `node` and its previous siblings match types against the provided + list of tokens; the provided `node` has its type matched against the last element in + the list. `None` can be used as the first element to declare that the start of the + list is anchored at the start of its parent's children.""" + if not tokens: + return True + if tokens[-1] is None: + return node is None + if not node: + return False + if node.type != tokens[-1]: + return False + return prev_siblings_are(node.prev_sibling, tokens[:-1]) + + +def parent_type(node: Optional[LN]) -> Optional[NodeType]: + """ + Returns: + @node.parent.type, if @node is not None and has a parent. + OR + None, otherwise.
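+
+    Example (illustrative): for the leaf `b` in `a + b`, this returns
+    syms.arith_expr; for the root file_input node it returns None.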
+ """ + if node is None or node.parent is None: + return None + + return node.parent.type + + +def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]: + """Return the child of `ancestor` that contains `descendant`.""" + node: Optional[LN] = descendant + while node and node.parent != ancestor: + node = node.parent + return node + + +def replace_child(old_child: LN, new_child: LN) -> None: + """ + Side Effects: + * If @old_child.parent is set, replace @old_child with @new_child in + @old_child's underlying Node structure. + OR + * Otherwise, this function does nothing. + """ + parent = old_child.parent + if not parent: + return + + child_idx = old_child.remove() + if child_idx is not None: + parent.insert_child(child_idx, new_child) + + +def container_of(leaf: Leaf) -> LN: + """Return `leaf` or one of its ancestors that is the topmost container of it. + + By "container" we mean a node where `leaf` is the very first child. + """ + same_prefix = leaf.prefix + container: LN = leaf + while container: + parent = container.parent + if parent is None: + break + + if parent.children[0].prefix != same_prefix: + break + + if parent.type == syms.file_input: + break + + if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: + break + + container = parent + return container + + +def first_leaf_of(node: LN) -> Optional[Leaf]: + """Returns the first leaf of the node tree.""" + if isinstance(node, Leaf): + return node + if node.children: + return first_leaf_of(node.children[0]) + else: + return None + + +def is_arith_like(node: LN) -> bool: + """Whether node is an arithmetic or a binary arithmetic expression""" + return node.type in { + syms.arith_expr, + syms.shift_expr, + syms.xor_expr, + syms.and_expr, + } + + +def is_docstring(leaf: Leaf) -> bool: + if prev_siblings_are( + leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] + ): + return True + + # Multiline docstring on the same line as the `def`. + if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]): + # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python + # grammar. We're safe to return True without further checks. 
+ return True + + return False + + +def is_empty_tuple(node: LN) -> bool: + """Return True if `node` holds an empty tuple.""" + return ( + node.type == syms.atom + and len(node.children) == 2 + and node.children[0].type == token.LPAR + and node.children[1].type == token.RPAR + ) + + +def is_one_tuple(node: LN) -> bool: + """Return True if `node` holds a tuple with one element, with or without parens.""" + if node.type == syms.atom: + gexp = unwrap_singleton_parenthesis(node) + if gexp is None or gexp.type != syms.testlist_gexp: + return False + + return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA + + return ( + node.type in IMPLICIT_TUPLE + and len(node.children) == 2 + and node.children[1].type == token.COMMA + ) + + +def is_one_sequence_between( + opening: Leaf, + closing: Leaf, + leaves: List[Leaf], + brackets: Tuple[int, int] = (token.LPAR, token.RPAR), +) -> bool: + """Return True if content between `opening` and `closing` is a one-sequence.""" + if (opening.type, closing.type) != brackets: + return False + + depth = closing.bracket_depth + 1 + for _opening_index, leaf in enumerate(leaves): + if leaf is opening: + break + + else: + raise LookupError("Opening paren not found in `leaves`") + + commas = 0 + _opening_index += 1 + for leaf in leaves[_opening_index:]: + if leaf is closing: + break + + bracket_depth = leaf.bracket_depth + if bracket_depth == depth and leaf.type == token.COMMA: + commas += 1 + if leaf.parent and leaf.parent.type in { + syms.arglist, + syms.typedargslist, + }: + commas += 1 + break + + return commas < 2 + + +def is_walrus_assignment(node: LN) -> bool: + """Return True iff `node` is of the shape ( test := test )""" + inner = unwrap_singleton_parenthesis(node) + return inner is not None and inner.type == syms.namedexpr_test + + +def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool: + """Return True iff `node` is a trailer valid in a simple decorator""" + return node.type == syms.trailer and ( + ( + len(node.children) == 2 + and node.children[0].type == token.DOT + and node.children[1].type == token.NAME + ) + # last trailer can be an argument-less parentheses pair + or ( + last + and len(node.children) == 2 + and node.children[0].type == token.LPAR + and node.children[1].type == token.RPAR + ) + # last trailer can be arguments + or ( + last + and len(node.children) == 3 + and node.children[0].type == token.LPAR + # and node.children[1].type == syms.argument + and node.children[2].type == token.RPAR + ) + ) + + +def is_simple_decorator_expression(node: LN) -> bool: + """Return True iff `node` could be a 'dotted name' decorator + + This function takes the node of the 'namedexpr_test' of the new decorator + grammar and tests whether it would be valid under the old decorator grammar.
+ + The old grammar was: decorator: @ dotted_name [arguments] NEWLINE + The new grammar is : decorator: @ namedexpr_test NEWLINE + """ + if node.type == token.NAME: + return True + if node.type == syms.power: + if node.children: + return ( + node.children[0].type == token.NAME + and all(map(is_simple_decorator_trailer, node.children[1:-1])) + and ( + len(node.children) < 2 + or is_simple_decorator_trailer(node.children[-1], last=True) + ) + ) + return False + + +def is_yield(node: LN) -> bool: + """Return True if `node` holds a `yield` or `yield from` expression.""" + if node.type == syms.yield_expr: + return True + + if is_name_token(node) and node.value == "yield": + return True + + if node.type != syms.atom: + return False + + if len(node.children) != 3: + return False + + lpar, expr, rpar = node.children + if lpar.type == token.LPAR and rpar.type == token.RPAR: + return is_yield(expr) + + return False + + +def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: + """Return True if `leaf` is a star or double star in a vararg or kwarg. + + If `within` includes VARARGS_PARENTS, this applies to function signatures. + If `within` includes UNPACKING_PARENTS, it applies to right hand-side + extended iterable unpacking (PEP 3132) and additional unpacking + generalizations (PEP 448). + """ + if leaf.type not in VARARGS_SPECIALS or not leaf.parent: + return False + + p = leaf.parent + if p.type == syms.star_expr: + # Star expressions are also used as assignment targets in extended + # iterable unpacking (PEP 3132). See what its parent is instead. + if not p.parent: + return False + + p = p.parent + + return p.type in within + + +def is_multiline_string(leaf: Leaf) -> bool: + """Return True if `leaf` is a multiline string that actually spans many lines.""" + return has_triple_quotes(leaf.value) and "\n" in leaf.value + + +def is_stub_suite(node: Node) -> bool: + """Return True if `node` is a suite with a stub body.""" + if ( + len(node.children) != 4 + or node.children[0].type != token.NEWLINE + or node.children[1].type != token.INDENT + or node.children[3].type != token.DEDENT + ): + return False + + return is_stub_body(node.children[2]) + + +def is_stub_body(node: LN) -> bool: + """Return True if `node` is a simple statement containing an ellipsis.""" + if not isinstance(node, Node) or node.type != syms.simple_stmt: + return False + + if len(node.children) != 2: + return False + + child = node.children[0] + return ( + child.type == syms.atom + and len(child.children) == 3 + and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) + ) + + +def is_atom_with_invisible_parens(node: LN) -> bool: + """Given a `LN`, determines whether it's an atom `node` with invisible + parens. Useful in dedupe-ing and normalizing parens. 
+ """ + if isinstance(node, Leaf) or node.type != syms.atom: + return False + + first, last = node.children[0], node.children[-1] + return ( + isinstance(first, Leaf) + and first.type == token.LPAR + and first.value == "" + and isinstance(last, Leaf) + and last.type == token.RPAR + and last.value == "" + ) + + +def is_empty_par(leaf: Leaf) -> bool: + return is_empty_lpar(leaf) or is_empty_rpar(leaf) + + +def is_empty_lpar(leaf: Leaf) -> bool: + return leaf.type == token.LPAR and leaf.value == "" + + +def is_empty_rpar(leaf: Leaf) -> bool: + return leaf.type == token.RPAR and leaf.value == "" + + +def is_import(leaf: Leaf) -> bool: + """Return True if the given leaf starts an import statement.""" + p = leaf.parent + t = leaf.type + v = leaf.value + return bool( + t == token.NAME + and ( + (v == "import" and p and p.type == syms.import_name) + or (v == "from" and p and p.type == syms.import_from) + ) + ) + + +def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: + """Return True if the given leaf is a special comment. + Only returns true for type comments for now.""" + t = leaf.type + v = leaf.value + return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) + + +def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: + """Wrap `child` in parentheses. + + This replaces `child` with an atom holding the parentheses and the old + child. That requires moving the prefix. + + If `visible` is False, the leaves will be valueless (and thus invisible). + """ + lpar = Leaf(token.LPAR, "(" if visible else "") + rpar = Leaf(token.RPAR, ")" if visible else "") + prefix = child.prefix + child.prefix = "" + index = child.remove() or 0 + new_child = Node(syms.atom, [lpar, child, rpar]) + new_child.prefix = prefix + parent.insert_child(index, new_child) + + +def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: + """Returns `wrapped` if `node` is of the shape ( wrapped ). + + Parenthesis can be optional. Returns None otherwise""" + if len(node.children) != 3: + return None + + lpar, wrapped, rpar = node.children + if not (lpar.type == token.LPAR and rpar.type == token.RPAR): + return None + + return wrapped + + +def ensure_visible(leaf: Leaf) -> None: + """Make sure parentheses are visible. + + They could be invisible as part of some statements (see + :func:`normalize_invisible_parens` and :func:`visit_import_from`). + """ + if leaf.type == token.LPAR: + leaf.value = "(" + elif leaf.type == token.RPAR: + leaf.value = ")" + + +def is_name_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.NAME + + +def is_lpar_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.LPAR + + +def is_rpar_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.RPAR + + +def is_string_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.STRING + + +def is_number_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.NUMBER diff --git a/src/black/numerics.py b/src/black/numerics.py new file mode 100644 index 00000000000..879e5b2cf36 --- /dev/null +++ b/src/black/numerics.py @@ -0,0 +1,60 @@ +""" +Formatting numeric literals. 
+""" +from blib2to3.pytree import Leaf + + +def format_hex(text: str) -> str: + """ + Formats a hexadecimal string like "0x12B3" + """ + before, after = text[:2], text[2:] + return f"{before}{after.upper()}" + + +def format_scientific_notation(text: str) -> str: + """Formats a numeric string utilizing scentific notation""" + before, after = text.split("e") + sign = "" + if after.startswith("-"): + after = after[1:] + sign = "-" + elif after.startswith("+"): + after = after[1:] + before = format_float_or_int_string(before) + return f"{before}e{sign}{after}" + + +def format_complex_number(text: str) -> str: + """Formats a complex string like `10j`""" + number = text[:-1] + suffix = text[-1] + return f"{format_float_or_int_string(number)}{suffix}" + + +def format_float_or_int_string(text: str) -> str: + """Formats a float string like "1.0".""" + if "." not in text: + return text + + before, after = text.split(".") + return f"{before or 0}.{after or 0}" + + +def normalize_numeric_literal(leaf: Leaf) -> None: + """Normalizes numeric (float, int, and complex) literals. + + All letters used in the representation are normalized to lowercase.""" + text = leaf.value.lower() + if text.startswith(("0o", "0b")): + # Leave octal and binary literals alone. + pass + elif text.startswith("0x"): + text = format_hex(text) + elif "e" in text: + text = format_scientific_notation(text) + elif text.endswith("j"): + text = format_complex_number(text) + else: + text = format_float_or_int_string(text) + leaf.value = text diff --git a/src/black/output.py b/src/black/output.py new file mode 100644 index 00000000000..f4c17f28ea4 --- /dev/null +++ b/src/black/output.py @@ -0,0 +1,105 @@ +"""Nice output for Black. + +The double calls are for patching purposes in tests. +""" + +import json +import tempfile +from typing import Any, Optional + +from click import echo, style +from mypy_extensions import mypyc_attr + + +@mypyc_attr(patchable=True) +def _out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "bold" not in styles: + styles["bold"] = True + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def _err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "fg" not in styles: + styles["fg"] = "red" + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _out(message, nl=nl, **styles) + + +def err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _err(message, nl=nl, **styles) + + +def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between each cell in notebooks `a` and `b`.""" + a_nb = json.loads(a) + b_nb = json.loads(b) + diff_lines = [ + diff( + "".join(a_nb["cells"][cell_number]["source"]) + "\n", + "".join(b_nb["cells"][cell_number]["source"]) + "\n", + f"{a_name}:cell_{cell_number}", + f"{b_name}:cell_{cell_number}", + ) + for cell_number, cell in enumerate(a_nb["cells"]) + if cell["cell_type"] == "code" + ] + return "".join(diff_lines) + + +def diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between strings `a` and `b`.""" + import difflib + + a_lines = a.splitlines(keepends=True) + b_lines = b.splitlines(keepends=True) + diff_lines = [] + for line in difflib.unified_diff( + a_lines, b_lines, fromfile=a_name, 
tofile=b_name, n=5 + ): + # Work around https://bugs.python.org/issue2142 + # See: + # https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html + if line[-1] == "\n": + diff_lines.append(line) + else: + diff_lines.append(line + "\n") + diff_lines.append("\\ No newline at end of file\n") + return "".join(diff_lines) + + +def color_diff(contents: str) -> str: + """Inject the ANSI color codes to the diff.""" + lines = contents.split("\n") + for i, line in enumerate(lines): + if line.startswith("+++") or line.startswith("---"): + line = "\033[1m" + line + "\033[0m" # bold, reset + elif line.startswith("@@"): + line = "\033[36m" + line + "\033[0m" # cyan, reset + elif line.startswith("+"): + line = "\033[32m" + line + "\033[0m" # green, reset + elif line.startswith("-"): + line = "\033[31m" + line + "\033[0m" # red, reset + lines[i] = line + return "\n".join(lines) + + +@mypyc_attr(patchable=True) +def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str: + """Dump `output` to a temporary file. Return path to the file.""" + with tempfile.NamedTemporaryFile( + mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" + ) as f: + for lines in output: + f.write(lines) + if ensure_final_newline and lines and lines[-1] != "\n": + f.write("\n") + return f.name diff --git a/src/black/parsing.py b/src/black/parsing.py new file mode 100644 index 00000000000..64c0b1e3018 --- /dev/null +++ b/src/black/parsing.py @@ -0,0 +1,278 @@ +""" +Parse Python code and perform AST validation. +""" +import ast +import platform +import sys +from typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.mode import Feature, TargetVersion, supports_feature +from black.nodes import syms +from blib2to3 import pygram +from blib2to3.pgen2 import driver +from blib2to3.pgen2.grammar import Grammar +from blib2to3.pgen2.parse import ParseError +from blib2to3.pgen2.tokenize import TokenError +from blib2to3.pytree import Leaf, Node + +ast3: Any + +_IS_PYPY = platform.python_implementation() == "PyPy" + +try: + from typed_ast import ast3 +except ImportError: + # Either our python version is too low, or we're on pypy + if sys.version_info < (3, 7) or (sys.version_info < (3, 8) and not _IS_PYPY): + print( + "The typed_ast package is required but not installed.\n" + "You can upgrade to Python 3.8+ or install typed_ast with\n" + "`python3 -m pip install typed-ast`.", + file=sys.stderr, + ) + sys.exit(1) + else: + ast3 = ast + + +PY2_HINT: Final = "Python 2 support was removed in version 22.0." + + +class InvalidInput(ValueError): + """Raised when input source code fails all parse attempts.""" + + +def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: + if not target_versions: + # No target_version specified, so try all grammars. 
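+        # (illustrative: with `--target-version py310` only the soft-keyword
+        # grammar below would be selected; with no flag we have to try all)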
+ return [ + # Python 3.7+ + pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords, + # Python 3.0-3.6 + pygram.python_grammar_no_print_statement_no_exec_statement, + # Python 3.10+ + pygram.python_grammar_soft_keywords, + ] + + grammars = [] + # If we have to parse both, try to parse async as a keyword first + if not supports_feature( + target_versions, Feature.ASYNC_IDENTIFIERS + ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING): + # Python 3.7-3.9 + grammars.append( + pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords + ) + if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): + # Python 3.0-3.6 + grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement) + if supports_feature(target_versions, Feature.PATTERN_MATCHING): + # Python 3.10+ + grammars.append(pygram.python_grammar_soft_keywords) + + # At least one of the above branches must have been taken, because every Python + # version has exactly one of the two 'ASYNC_*' flags + return grammars + + +def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: + """Given a string with source, return the lib2to3 Node.""" + if not src_txt.endswith("\n"): + src_txt += "\n" + + grammars = get_grammars(set(target_versions)) + errors = {} + for grammar in grammars: + drv = driver.Driver(grammar) + try: + result = drv.parse_string(src_txt, True) + break + + except ParseError as pe: + lineno, column = pe.context[1] + lines = src_txt.splitlines() + try: + faulty_line = lines[lineno - 1] + except IndexError: + faulty_line = "" + errors[grammar.version] = InvalidInput( + f"Cannot parse: {lineno}:{column}: {faulty_line}" + ) + + except TokenError as te: + # In edge cases these are raised and typically don't have a "faulty_line". + lineno, column = te.args[1] + errors[grammar.version] = InvalidInput( + f"Cannot parse: {lineno}:{column}: {te.args[0]}" + ) + + else: + # Choose the latest version when raising the actual parsing error. + assert len(errors) >= 1 + exc = errors[max(errors)] + + if matches_grammar(src_txt, pygram.python_grammar) or matches_grammar( + src_txt, pygram.python_grammar_no_print_statement + ): + original_msg = exc.args[0] + msg = f"{original_msg}\n{PY2_HINT}" + raise InvalidInput(msg) from None + + raise exc from None + + if isinstance(result, Leaf): + result = Node(syms.file_input, [result]) + return result + + +def matches_grammar(src_txt: str, grammar: Grammar) -> bool: + drv = driver.Driver(grammar) + try: + drv.parse_string(src_txt, True) + except (ParseError, TokenError, IndentationError): + return False + else: + return True + + +def lib2to3_unparse(node: Node) -> str: + """Given a lib2to3 node, return its string representation.""" + code = str(node) + return code + + +def parse_single_version( + src: str, version: Tuple[int, int] +) -> Union[ast.AST, ast3.AST]: + filename = "<unknown>" + # typed-ast is needed because of feature version limitations in the builtin ast before 3.8 + if sys.version_info >= (3, 8) and version >= (3,): + return ast.parse(src, filename, feature_version=version, type_comments=True) + + if _IS_PYPY: + # PyPy 3.7 doesn't support type comment tracking which is not ideal, but there's + # not much we can do as typed-ast won't work either. + if sys.version_info >= (3, 8): + return ast3.parse(src, filename, type_comments=True) + else: + return ast3.parse(src, filename) + else: + # Typed-ast is guaranteed to be used here and automatically tracks type + # comments separately.
+ return ast3.parse(src, filename, feature_version=version[1]) + + raise AssertionError("INTERNAL ERROR: Tried parsing unsupported Python version!") + + +def parse_ast(src: str) -> Union[ast.AST, ast3.AST]: + # TODO: support Python 4+ ;) + versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)] + + first_error = "" + for version in sorted(versions, reverse=True): + try: + return parse_single_version(src, version) + except SyntaxError as e: + if not first_error: + first_error = str(e) + + raise SyntaxError(first_error) + + +ast3_AST: Final[Type[ast3.AST]] = ast3.AST + + +def _normalize(lineend: str, value: str) -> str: + # To normalize, we strip any leading and trailing space from + # each line... + stripped: List[str] = [i.strip() for i in value.splitlines()] + normalized = lineend.join(stripped) + # ...and remove any blank lines at the beginning and end of + # the whole string + return normalized.strip() + + +def stringify_ast(node: Union[ast.AST, ast3.AST], depth: int = 0) -> Iterator[str]: + """Simple visitor generating strings to compare ASTs by content.""" + + node = fixup_ast_constants(node) + + yield f"{' ' * depth}{node.__class__.__name__}(" + + type_ignore_classes: Tuple[Type[Any], ...] + for field in sorted(node._fields): # noqa: F402 + # TypeIgnore will not be present using pypy < 3.8, so no need for this + if not (_IS_PYPY and sys.version_info < (3, 8)): + # TypeIgnore has only one field 'lineno' which breaks this comparison + type_ignore_classes = (ast3.TypeIgnore,) + if sys.version_info >= (3, 8): + type_ignore_classes += (ast.TypeIgnore,) + if isinstance(node, type_ignore_classes): + break + + try: + value: object = getattr(node, field) + except AttributeError: + continue + + yield f"{' ' * (depth+1)}{field}=" + + if isinstance(value, list): + for item in value: + # Ignore nested tuples within del statements, because we may insert + # parentheses and they change the AST. + if ( + field == "targets" + and isinstance(node, (ast.Delete, ast3.Delete)) + and isinstance(item, (ast.Tuple, ast3.Tuple)) + ): + for elt in item.elts: + yield from stringify_ast(elt, depth + 2) + + elif isinstance(item, (ast.AST, ast3.AST)): + yield from stringify_ast(item, depth + 2) + + # Note that we are referencing the typed-ast ASTs via global variables and not + # direct module attribute accesses because that breaks mypyc. It's probably + # something to do with the ast3 variables being marked as Any leading + # mypy to think this branch is always taken, leaving the rest of the code + # unanalyzed. Tightening up the types for the typed-ast AST types avoids the + # mypyc crash. + elif isinstance(value, (ast.AST, ast3_AST)): + yield from stringify_ast(value, depth + 2) + + else: + normalized: object + # Constant strings may be indented across newlines, if they are + # docstrings; fold spaces after newlines when comparing. Similarly, + # trailing and leading space may be removed.
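+            # (illustrative: "line1\n    line2" and "line1\nline2" compare
+            # equal here, because _normalize() strips each line)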
+ if ( + isinstance(node, ast.Constant) + and field == "value" + and isinstance(value, str) + ): + normalized = _normalize("\n", value) + else: + normalized = value + yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" + + yield f"{' ' * depth}) # /{node.__class__.__name__}" + + +def fixup_ast_constants(node: Union[ast.AST, ast3.AST]) -> Union[ast.AST, ast3.AST]: + """Map ast nodes deprecated in 3.8 to Constant.""" + if isinstance(node, (ast.Str, ast3.Str, ast.Bytes, ast3.Bytes)): + return ast.Constant(value=node.s) + + if isinstance(node, (ast.Num, ast3.Num)): + return ast.Constant(value=node.n) + + if isinstance(node, (ast.NameConstant, ast3.NameConstant)): + return ast.Constant(value=node.value) + + return node diff --git a/src/black/py.typed b/src/black/py.typed index 8b137891791..e69de29bb2d 100644 --- a/src/black/py.typed +++ b/src/black/py.typed @@ -1 +0,0 @@ - diff --git a/src/black/report.py b/src/black/report.py new file mode 100644 index 00000000000..a507671e4c0 --- /dev/null +++ b/src/black/report.py @@ -0,0 +1,106 @@ +""" +Summarize Black runs to users. +""" +from dataclasses import dataclass +from enum import Enum +from pathlib import Path + +from click import style + +from black.output import err, out + + +class Changed(Enum): + NO = 0 + CACHED = 1 + YES = 2 + + +class NothingChanged(UserWarning): + """Raised when reformatted code is the same as source.""" + + +@dataclass +class Report: + """Provides a reformatting counter. Can be rendered with `str(report)`.""" + + check: bool = False + diff: bool = False + quiet: bool = False + verbose: bool = False + change_count: int = 0 + same_count: int = 0 + failure_count: int = 0 + + def done(self, src: Path, changed: Changed) -> None: + """Increment the counter for successful reformatting. Write out a message.""" + if changed is Changed.YES: + reformatted = "would reformat" if self.check or self.diff else "reformatted" + if self.verbose or not self.quiet: + out(f"{reformatted} {src}") + self.change_count += 1 + else: + if self.verbose: + if changed is Changed.NO: + msg = f"{src} already well formatted, good job." + else: + msg = f"{src} wasn't modified on disk since last run." + out(msg, bold=False) + self.same_count += 1 + + def failed(self, src: Path, message: str) -> None: + """Increment the counter for failed reformatting. Write out a message.""" + err(f"error: cannot format {src}: {message}") + self.failure_count += 1 + + def path_ignored(self, path: Path, message: str) -> None: + if self.verbose: + out(f"{path} ignored: {message}", bold=False) + + @property + def return_code(self) -> int: + """Return the exit code that the app should use. + + This considers the current state of changed files and failures: + - if there were any failures, return 123; + - if any files were changed and --check is being used, return 1; + - otherwise return 0. + """ + # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with + # 126 we have special return codes reserved by the shell. + if self.failure_count: + return 123 + + elif self.change_count and self.check: + return 1 + + return 0 + + def __str__(self) -> str: + """Render a color report of the current state. + + Use `click.unstyle` to remove colors. 
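+
+        Example rendering (illustrative):
+
+            "2 files reformatted, 1 file left unchanged, 1 file failed to reformat."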
+ """ + if self.check or self.diff: + reformatted = "would be reformatted" + unchanged = "would be left unchanged" + failed = "would fail to reformat" + else: + reformatted = "reformatted" + unchanged = "left unchanged" + failed = "failed to reformat" + report = [] + if self.change_count: + s = "s" if self.change_count > 1 else "" + report.append( + style(f"{self.change_count} file{s} ", bold=True, fg="blue") + + style(f"{reformatted}", bold=True) + ) + + if self.same_count: + s = "s" if self.same_count > 1 else "" + report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged) + if self.failure_count: + s = "s" if self.failure_count > 1 else "" + report.append(style(f"{self.failure_count} file{s} {failed}", fg="red")) + return ", ".join(report) + "." diff --git a/src/black/rusty.py b/src/black/rusty.py new file mode 100644 index 00000000000..84a80b5a2c2 --- /dev/null +++ b/src/black/rusty.py @@ -0,0 +1,27 @@ +"""An error-handling model influenced by that used by the Rust programming language + +See https://doc.rust-lang.org/book/ch09-00-error-handling.html. +""" +from typing import Generic, TypeVar, Union + +T = TypeVar("T") +E = TypeVar("E", bound=Exception) + + +class Ok(Generic[T]): + def __init__(self, value: T) -> None: + self._value = value + + def ok(self) -> T: + return self._value + + +class Err(Generic[E]): + def __init__(self, e: E) -> None: + self._e = e + + def err(self) -> E: + return self._e + + +Result = Union[Ok[T], Err[E]] diff --git a/src/black/strings.py b/src/black/strings.py new file mode 100644 index 00000000000..9d0e2eb8430 --- /dev/null +++ b/src/black/strings.py @@ -0,0 +1,238 @@ +""" +Simple formatting on strings. Further string formatting code is in trans.py. +""" + +import re +import sys +from functools import lru_cache +from typing import List, Pattern + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + + +STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. +STRING_PREFIX_RE: Final = re.compile( + r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL +) +FIRST_NON_WHITESPACE_RE: Final = re.compile(r"\s*\t+\s*(\S)") + + +def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: + """Replace `regex` with `replacement` twice on `original`. + + This is used by string normalization to perform replaces on + overlapping matches. + """ + return regex.sub(replacement, regex.sub(replacement, original)) + + +def has_triple_quotes(string: str) -> bool: + """ + Returns: + True iff @string starts with three quotation characters. 
+ """ + raw_string = string.lstrip(STRING_PREFIX_CHARS) + return raw_string[:3] in {'"""', "'''"} + + +def lines_with_leading_tabs_expanded(s: str) -> List[str]: + """ + Splits string into lines and expands only leading tabs (following the normal + Python rules) + """ + lines = [] + for line in s.splitlines(): + # Find the index of the first non-whitespace character after a string of + # whitespace that includes at least one tab + match = FIRST_NON_WHITESPACE_RE.match(line) + if match: + first_non_whitespace_idx = match.start(1) + + lines.append( + line[:first_non_whitespace_idx].expandtabs() + + line[first_non_whitespace_idx:] + ) + else: + lines.append(line) + return lines + + +def fix_docstring(docstring: str, prefix: str) -> str: + # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation + if not docstring: + return "" + lines = lines_with_leading_tabs_expanded(docstring) + # Determine minimum indentation (first line doesn't count): + indent = sys.maxsize + for line in lines[1:]: + stripped = line.lstrip() + if stripped: + indent = min(indent, len(line) - len(stripped)) + # Remove indentation (first line is special): + trimmed = [lines[0].strip()] + if indent < sys.maxsize: + last_line_idx = len(lines) - 2 + for i, line in enumerate(lines[1:]): + stripped_line = line[indent:].rstrip() + if stripped_line or i == last_line_idx: + trimmed.append(prefix + stripped_line) + else: + trimmed.append("") + return "\n".join(trimmed) + + +def get_string_prefix(string: str) -> str: + """ + Pre-conditions: + * assert_is_leaf_string(@string) + + Returns: + @string's prefix (e.g. '', 'r', 'f', or 'rf'). + """ + assert_is_leaf_string(string) + + prefix = "" + prefix_idx = 0 + while string[prefix_idx] in STRING_PREFIX_CHARS: + prefix += string[prefix_idx] + prefix_idx += 1 + + return prefix + + +def assert_is_leaf_string(string: str) -> None: + """ + Checks the pre-condition that @string has the format that you would expect + of `leaf.value` where `leaf` is some Leaf such that `leaf.type == + token.STRING`. A more precise description of the pre-conditions that are + checked are listed below. + + Pre-conditions: + * @string starts with either ', ", ', or " where + `set()` is some subset of `set(STRING_PREFIX_CHARS)`. + * @string ends with a quote character (' or "). + + Raises: + AssertionError(...) if the pre-conditions listed above are not + satisfied. + """ + dquote_idx = string.find('"') + squote_idx = string.find("'") + if -1 in [dquote_idx, squote_idx]: + quote_idx = max(dquote_idx, squote_idx) + else: + quote_idx = min(squote_idx, dquote_idx) + + assert ( + 0 <= quote_idx < len(string) - 1 + ), f"{string!r} is missing a starting quote character (' or \")." + assert string[-1] in ( + "'", + '"', + ), f"{string!r} is missing an ending quote character (' or \")." + assert set(string[:quote_idx]).issubset( + set(STRING_PREFIX_CHARS) + ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}." 
+ + +def normalize_string_prefix(s: str) -> str: + """Make all string prefixes lowercase.""" + match = STRING_PREFIX_RE.match(s) + assert match is not None, f"failed to match string {s!r}" + orig_prefix = match.group(1) + new_prefix = ( + orig_prefix.replace("F", "f") + .replace("B", "b") + .replace("U", "") + .replace("u", "") + ) + + # Python syntax guarantees max 2 prefixes and that one of them is "r" + if len(new_prefix) == 2 and "r" != new_prefix[0].lower(): + new_prefix = new_prefix[::-1] + return f"{new_prefix}{match.group(2)}" + + +# Re(gex) does actually cache patterns internally but this still improves +# performance on a long list literal of strings by 5-9% since lru_cache's +# caching overhead is much lower. +@lru_cache(maxsize=64) +def _cached_compile(pattern: str) -> Pattern[str]: + return re.compile(pattern) + + +def normalize_string_quotes(s: str) -> str: + """Prefer double quotes but only if it doesn't cause more escaping. + + Adds or removes backslashes as appropriate. Doesn't parse and fix + strings nested in f-strings. + """ + value = s.lstrip(STRING_PREFIX_CHARS) + if value[:3] == '"""': + return s + + elif value[:3] == "'''": + orig_quote = "'''" + new_quote = '"""' + elif value[0] == '"': + orig_quote = '"' + new_quote = "'" + else: + orig_quote = "'" + new_quote = '"' + first_quote_pos = s.find(orig_quote) + if first_quote_pos == -1: + return s # There's an internal error + + prefix = s[:first_quote_pos] + unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}") + escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") + escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") + body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)] + if "r" in prefix.casefold(): + if unescaped_new_quote.search(body): + # There's at least one unescaped new_quote in this raw string + # so converting is impossible + return s + + # Do not introduce or remove backslashes in raw strings + new_body = body + else: + # remove unnecessary escapes + new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) + if body != new_body: + # Consider the string without unnecessary escapes as the original + body = new_body + s = f"{prefix}{orig_quote}{body}{orig_quote}" + new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) + new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) + if "f" in prefix.casefold(): + matches = re.findall( + r""" + (?:(?<!\{)|^)\{ # start of the string or a non-{ followed by a single { + ([^{].*?) # contents of the brackets except if begins with {{ + \}(?!\}) # A } followed by end of the string or a non-} + """, + new_body, + re.VERBOSE, + ) + for m in matches: + if "\\" in str(m): + # Do not introduce backslashes in interpolated expressions. That will + # change the perceived behavior of the string value returned by the + # underlying function. + return s + + if new_quote == '"""' and new_body[-1:] == '"': + # edge case: + new_body = new_body[:-1] + '\\"' + orig_escape_count = body.count("\\") + new_escape_count = new_body.count("\\") + if new_escape_count > orig_escape_count: + return s # Do not introduce more escaping + + if new_escape_count == orig_escape_count and orig_quote == '"': + return s # Prefer double quotes + + return f"{prefix}{new_quote}{new_body}{new_quote}" diff --git a/src/black/trans.py b/src/black/trans.py new file mode 100644 index 00000000000..7e2d8e67c1a --- /dev/null +++ b/src/black/trans.py @@ -0,0 +1,2222 @@ +""" +String transformers that can split and merge strings.
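+
+For example (illustrative): StringMerger can turn the adjacent literals
+`"a" "b"` into `"ab"`, while StringSplitter can break one overlong string
+into several implicitly concatenated pieces that fit the line length.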
+""" +import re +import sys +from abc import ABC, abstractmethod +from collections import defaultdict +from dataclasses import dataclass +from typing import ( + Any, + Callable, + ClassVar, + Collection, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + Set, + Tuple, + TypeVar, + Union, +) + +if sys.version_info < (3, 8): + from typing_extensions import Final, Literal +else: + from typing import Literal, Final + +from mypy_extensions import trait + +from black.brackets import BracketMatchError +from black.comments import contains_pragma_comment +from black.lines import Line, append_leaves +from black.mode import Feature +from black.nodes import ( + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + is_empty_lpar, + is_empty_par, + is_empty_rpar, + parent_type, + replace_child, + syms, +) +from black.rusty import Err, Ok, Result +from black.strings import ( + assert_is_leaf_string, + get_string_prefix, + has_triple_quotes, + normalize_string_quotes, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + + +class CannotTransform(Exception): + """Base class for errors raised by Transformers.""" + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +Transformer = Callable[[Line, Collection[Feature]], Iterator[Line]] +Index = int +NodeType = int +ParserState = int +StringID = int +TResult = Result[T, CannotTransform] # (T)ransform Result +TMatchResult = TResult[Index] + + +def TErr(err_msg: str) -> Err[CannotTransform]: + """(T)ransform Err + + Convenience function used when working with the TResult type. + """ + cant_transform = CannotTransform(err_msg) + return Err(cant_transform) + + +def hug_power_op(line: Line, features: Collection[Feature]) -> Iterator[Line]: + """A transformer which normalizes spacing around power operators.""" + + # Performance optimization to avoid unnecessary Leaf clones and other ops. + for leaf in line.leaves: + if leaf.type == token.DOUBLESTAR: + break + else: + raise CannotTransform("No doublestar token was found in the line.") + + def is_simple_lookup(index: int, step: Literal[1, -1]) -> bool: + # Brackets and parentheses indicate calls, subscripts, etc. ... + # basically stuff that doesn't count as "simple". Only a NAME lookup + # or dotted lookup (eg. NAME.NAME) is OK. + if step == -1: + disallowed = {token.RPAR, token.RSQB} + else: + disallowed = {token.LPAR, token.LSQB} + + while 0 <= index < len(line.leaves): + current = line.leaves[index] + if current.type in disallowed: + return False + if current.type not in {token.NAME, token.DOT} or current.value == "for": + # If the current token isn't disallowed, we'll assume this is simple as + # only the disallowed tokens are semantically attached to this lookup + # expression we're checking. Also, stop early if we hit the 'for' bit + # of a comprehension. + return True + + index += step + + return True + + def is_simple_operand(index: int, kind: Literal["base", "exponent"]) -> bool: + # An operand is considered "simple" if's a NAME, a numeric CONSTANT, a simple + # lookup (see above), with or without a preceding unary operator. + start = line.leaves[index] + if start.type in {token.NAME, token.NUMBER}: + return is_simple_lookup(index, step=(1 if kind == "exponent" else -1)) + + if start.type in {token.PLUS, token.MINUS, token.TILDE}: + if line.leaves[index + 1].type in {token.NAME, token.NUMBER}: + # step is always one as bases with a preceding unary op will be checked + # for simplicity starting from the next token (so it'll hit the check + # above). 
+ return is_simple_lookup(index + 1, step=1) + + return False + + new_line = line.clone() + should_hug = False + for idx, leaf in enumerate(line.leaves): + new_leaf = leaf.clone() + if should_hug: + new_leaf.prefix = "" + should_hug = False + + should_hug = ( + (0 < idx < len(line.leaves) - 1) + and leaf.type == token.DOUBLESTAR + and is_simple_operand(idx - 1, kind="base") + and line.leaves[idx - 1].value != "lambda" + and is_simple_operand(idx + 1, kind="exponent") + ) + if should_hug: + new_leaf.prefix = "" + + # We have to be careful to make a new line properly: + # - bracket related metadata must be maintained (handled by Line.append) + # - comments need to be copied over, updating the leaf IDs they're attached to + new_line.append(new_leaf, preformatted=True) + for comment_leaf in line.comments_after(leaf): + new_line.append(comment_leaf, preformatted=True) + + yield new_line + + +class StringTransformer(ABC): + """ + An implementation of the Transformer protocol that relies on its + subclasses overriding the template methods `do_match(...)` and + `do_transform(...)`. + + This Transformer works exclusively on strings (for example, by merging + or splitting them). + + The following sections can be found among the docstrings of each concrete + StringTransformer subclass. + + Requirements: + Which requirements must be met by the given Line for this + StringTransformer to be applied? + + Transformations: + If the given Line meets all of the above requirements, which string + transformations can you expect to be applied to it by this + StringTransformer? + + Collaborations: + What contractual agreements does this StringTransformer have with other + StringTransformers? Such collaborations should be eliminated/minimized + as much as possible. + """ + + __name__: Final = "StringTransformer" + + # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with + # `abc.ABC`. + def __init__(self, line_length: int, normalize_strings: bool) -> None: + self.line_length = line_length + self.normalize_strings = normalize_strings + + @abstractmethod + def do_match(self, line: Line) -> TMatchResult: + """ + Returns: + * Ok(string_idx) such that `line.leaves[string_idx]` is our target + string, if a match was able to be made. + OR + * Err(CannotTransform), if a match was not able to be made. + """ + + @abstractmethod + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + """ + Yields: + * Ok(new_line) where new_line is the new transformed line. + OR + * Err(CannotTransform) if the transformation failed for some reason. The + `do_match(...)` template method should usually be used to reject + the form of the given Line, but in some cases it is difficult to + know whether or not a Line meets the StringTransformer's + requirements until the transformation is already midway. + + Side Effects: + This method should NOT mutate @line directly, but it MAY mutate the + Line's underlying Node structure. (WARNING: If the underlying Node + structure IS altered, then this method should NOT be allowed to + yield a CannotTransform after that point.) + """ + + def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]: + """ + StringTransformer instances have a call signature that mirrors that of + the Transformer type. + + Raises: + CannotTransform(...) if the concrete StringTransformer class is unable + to transform @line. + """ + # Optimization to avoid calling `self.do_match(...)` when the line does + # not contain any string.
+ if not any(leaf.type == token.STRING for leaf in line.leaves): + raise CannotTransform("There are no strings in this line.") + + match_result = self.do_match(line) + + if isinstance(match_result, Err): + cant_transform = match_result.err() + raise CannotTransform( + f"The string transformer {self.__class__.__name__} does not recognize" + " this line as one that it can transform." + ) from cant_transform + + string_idx = match_result.ok() + + for line_result in self.do_transform(line, string_idx): + if isinstance(line_result, Err): + cant_transform = line_result.err() + raise CannotTransform( + "StringTransformer failed while attempting to transform string." + ) from cant_transform + line = line_result.ok() + yield line + + +@dataclass +class CustomSplit: + """A custom (i.e. manual) string split. + + A single CustomSplit instance represents a single substring. + + Examples: + Consider the following string: + ``` + "Hi there friend." + " This is a custom" + f" string {split}." + ``` + + This string will correspond to the following three CustomSplit instances: + ``` + CustomSplit(False, 16) + CustomSplit(False, 17) + CustomSplit(True, 16) + ``` + """ + + has_prefix: bool + break_idx: int + + +@trait +class CustomSplitMapMixin: + """ + This mixin class is used to map merged strings to a sequence of + CustomSplits, which will then be used to re-split the strings iff none of + the resultant substrings go over the configured max line length. + """ + + _Key: ClassVar = Tuple[StringID, str] + _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict( + tuple + ) + + @staticmethod + def _get_key(string: str) -> "CustomSplitMapMixin._Key": + """ + Returns: + A unique identifier that is used internally to map @string to a + group of custom splits. + """ + return (id(string), string) + + def add_custom_splits( + self, string: str, custom_splits: Iterable[CustomSplit] + ) -> None: + """Custom Split Map Setter Method + + Side Effects: + Adds a mapping from @string to the custom splits @custom_splits. + """ + key = self._get_key(string) + self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits) + + def pop_custom_splits(self, string: str) -> List[CustomSplit]: + """Custom Split Map Getter Method + + Returns: + * A list of the custom splits that are mapped to @string, if any + exist. + OR + * [], otherwise. + + Side Effects: + Deletes the mapping between @string and its associated custom + splits (which are returned to the caller). + """ + key = self._get_key(string) + + custom_splits = self._CUSTOM_SPLIT_MAP[key] + del self._CUSTOM_SPLIT_MAP[key] + + return list(custom_splits) + + def has_custom_splits(self, string: str) -> bool: + """ + Returns: + True iff @string is associated with a set of custom splits. + """ + key = self._get_key(string) + return key in self._CUSTOM_SPLIT_MAP + + +class StringMerger(StringTransformer, CustomSplitMapMixin): + """StringTransformer that merges strings together. + + Requirements: + (A) The line contains adjacent strings such that ALL of the validation checks + listed in StringMerger._validate_msg(...)'s docstring pass. + OR + (B) The line contains a string which uses line continuation backslashes. + + Transformations: + Depending on which of the two requirements above were met, either: + + (A) The string group associated with the target string is merged. + OR + (B) All line-continuation backslashes are removed from the target string. + + Collaborations: + StringMerger provides custom split information to StringSplitter.
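+
+    Example (illustrative): a line containing the adjacent literals
+    `"Hello " "World"` may be merged into the single literal `"Hello World"`.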
+ """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + for i, leaf in enumerate(LL): + if ( + leaf.type == token.STRING + and is_valid_index(i + 1) + and LL[i + 1].type == token.STRING + ): + return Ok(i) + + if leaf.type == token.STRING and "\\\n" in leaf.value: + return Ok(i) + + return TErr("This line has no strings that need merging.") + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + new_line = line + rblc_result = self._remove_backslash_line_continuation_chars( + new_line, string_idx + ) + if isinstance(rblc_result, Ok): + new_line = rblc_result.ok() + + msg_result = self._merge_string_group(new_line, string_idx) + if isinstance(msg_result, Ok): + new_line = msg_result.ok() + + if isinstance(rblc_result, Err) and isinstance(msg_result, Err): + msg_cant_transform = msg_result.err() + rblc_cant_transform = rblc_result.err() + cant_transform = CannotTransform( + "StringMerger failed to merge any strings in this line." + ) + + # Chain the errors together using `__cause__`. + msg_cant_transform.__cause__ = rblc_cant_transform + cant_transform.__cause__ = msg_cant_transform + + yield Err(cant_transform) + else: + yield Ok(new_line) + + @staticmethod + def _remove_backslash_line_continuation_chars( + line: Line, string_idx: int + ) -> TResult[Line]: + """ + Merge strings that were split across multiple lines using + line-continuation backslashes. + + Returns: + Ok(new_line), if @line contains backslash line-continuation + characters. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + string_leaf = LL[string_idx] + if not ( + string_leaf.type == token.STRING + and "\\\n" in string_leaf.value + and not has_triple_quotes(string_leaf.value) + ): + return TErr( + f"String leaf {string_leaf} does not contain any backslash line" + " continuation characters." + ) + + new_line = line.clone() + new_line.comments = line.comments.copy() + append_leaves(new_line, line, LL) + + new_string_leaf = new_line.leaves[string_idx] + new_string_leaf.value = new_string_leaf.value.replace("\\\n", "") + + return Ok(new_line) + + def _merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]: + """ + Merges string group (i.e. set of adjacent strings) where the first + string in the group is `line.leaves[string_idx]`. + + Returns: + Ok(new_line), if ALL of the validation checks found in + __validate_msg(...) pass. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + vresult = self._validate_msg(line, string_idx) + if isinstance(vresult, Err): + return vresult + + # If the string group is wrapped inside an Atom node, we must make sure + # to later replace that Atom with our new (merged) string leaf. + atom_node = LL[string_idx].parent + + # We will place BREAK_MARK in between every two substrings that we + # merge. We will then later go through our final result and use the + # various instances of BREAK_MARK we find to add the right values to + # the custom split map. + BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@" + + QUOTE = LL[string_idx].value[-1] + + def make_naked(string: str, string_prefix: str) -> str: + """Strip @string (i.e. 
make it a "naked" string) + + Pre-conditions: + * assert_is_leaf_string(@string) + + Returns: + A string that is identical to @string except that + @string_prefix has been stripped, the surrounding QUOTE + characters have been removed, and any remaining QUOTE + characters have been escaped. + """ + assert_is_leaf_string(string) + + RE_EVEN_BACKSLASHES = r"(?:(?= 0 + ), "Logic error while filling the custom string breakpoint cache." + + temp_string = temp_string[mark_idx + len(BREAK_MARK) :] + breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1 + custom_splits.append(CustomSplit(has_prefix, breakpoint_idx)) + + string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, "")) + + if atom_node is not None: + # If not all children of the atom node are merged (this can happen + # when there is a standalone comment in the middle) ... + if non_string_idx - string_idx < len(atom_node.children): + # We need to replace the old STRING leaves with the new string leaf. + first_child_idx = LL[string_idx].remove() + for idx in range(string_idx + 1, non_string_idx): + LL[idx].remove() + if first_child_idx is not None: + atom_node.insert_child(first_child_idx, string_leaf) + else: + # Else replace the atom node with the new string leaf. + replace_child(atom_node, string_leaf) + + # Build the final line ('new_line') that this method will later return. + new_line = line.clone() + for i, leaf in enumerate(LL): + if i == string_idx: + new_line.append(string_leaf) + + if string_idx <= i < string_idx + num_of_strings: + for comment_leaf in line.comments_after(LL[i]): + new_line.append(comment_leaf, preformatted=True) + continue + + append_leaves(new_line, line, [leaf]) + + self.add_custom_splits(string_leaf.value, custom_splits) + return Ok(new_line) + + @staticmethod + def _validate_msg(line: Line, string_idx: int) -> TResult[None]: + """Validate (M)erge (S)tring (G)roup + + Transform-time string validation logic for __merge_string_group(...). + + Returns: + * Ok(None), if ALL validation checks (listed below) pass. + OR + * Err(CannotTransform), if any of the following are true: + - The target string group does not contain ANY stand-alone comments. + - The target string is not in a string group (i.e. it has no + adjacent strings). + - The string group has more than one inline comment. + - The string group has an inline comment that appears to be a pragma. + - The set of all string prefixes in the string group is of + length greater than one and is not equal to {"", "f"}. + - The string group consists of raw strings. + """ + # We first check for "inner" stand-alone comments (i.e. stand-alone + # comments that have a string leaf before them AND after them). + for inc in [1, -1]: + i = string_idx + found_sa_comment = False + is_valid_index = is_valid_index_factory(line.leaves) + while is_valid_index(i) and line.leaves[i].type in [ + token.STRING, + STANDALONE_COMMENT, + ]: + if line.leaves[i].type == STANDALONE_COMMENT: + found_sa_comment = True + elif found_sa_comment: + return TErr( + "StringMerger does NOT merge string groups which contain " + "stand-alone comments." + ) + + i += inc + + num_of_inline_string_comments = 0 + set_of_prefixes = set() + num_of_strings = 0 + for leaf in line.leaves[string_idx:]: + if leaf.type != token.STRING: + # If the string group is trailed by a comma, we count the + # comments trailing the comma to be one of the string group's + # comments. 
+ if leaf.type == token.COMMA and id(leaf) in line.comments: + num_of_inline_string_comments += 1 + break + + if has_triple_quotes(leaf.value): + return TErr("StringMerger does NOT merge multiline strings.") + + num_of_strings += 1 + prefix = get_string_prefix(leaf.value).lower() + if "r" in prefix: + return TErr("StringMerger does NOT merge raw strings.") + + set_of_prefixes.add(prefix) + + if id(leaf) in line.comments: + num_of_inline_string_comments += 1 + if contains_pragma_comment(line.comments[id(leaf)]): + return TErr("Cannot merge strings which have pragma comments.") + + if num_of_strings < 2: + return TErr( + f"Not enough strings to merge (num_of_strings={num_of_strings})." + ) + + if num_of_inline_string_comments > 1: + return TErr( + f"Too many inline string comments ({num_of_inline_string_comments})." + ) + + if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}: + return TErr(f"Too many different prefixes ({set_of_prefixes}).") + + return Ok(None) + + +class StringParenStripper(StringTransformer): + """StringTransformer that strips surrounding parentheses from strings. + + Requirements: + The line contains a string which is surrounded by parentheses and: + - The target string is NOT the only argument to a function call. + - The target string is NOT a "pointless" string. + - If the target string contains a PERCENT, the brackets are not + preceded or followed by an operator with higher precedence than + PERCENT. + + Transformations: + The parentheses mentioned in the 'Requirements' section are stripped. + + Collaborations: + StringParenStripper has its own inherent usefulness, but it is also + relied on to clean up the parentheses created by StringParenWrapper (in + the event that they are no longer needed). + """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + for idx, leaf in enumerate(LL): + # Should be a string... + if leaf.type != token.STRING: + continue + + # If this is a "pointless" string... + if ( + leaf.parent + and leaf.parent.parent + and leaf.parent.parent.type == syms.simple_stmt + ): + continue + + # Should be preceded by a non-empty LPAR... + if ( + not is_valid_index(idx - 1) + or LL[idx - 1].type != token.LPAR + or is_empty_lpar(LL[idx - 1]) + ): + continue + + # That LPAR should NOT be preceded by a function name or a closing + # bracket (which could be a function which returns a function or a + # list/dictionary that contains a function)... + if is_valid_index(idx - 2) and ( + LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS + ): + continue + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + next_idx = string_parser.parse(LL, string_idx) + + # if the leaves in the parsed string include a PERCENT, we need to + # make sure the initial LPAR is NOT preceded by an operator with + # higher or equal precedence to PERCENT + if is_valid_index(idx - 2): + # mypy can't quite follow unless we name this + before_lpar = LL[idx - 2] + if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and ( + ( + before_lpar.type + in { + token.STAR, + token.AT, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.TILDE, + token.DOUBLESTAR, + token.AWAIT, + token.LSQB, + token.LPAR, + } + ) + or ( + # only unary PLUS/MINUS + before_lpar.parent + and before_lpar.parent.type == syms.factor + and (before_lpar.type in {token.PLUS, token.MINUS}) + ) + ): + continue + + # Should be followed by a non-empty RPAR... 
+ if ( + is_valid_index(next_idx) + and LL[next_idx].type == token.RPAR + and not is_empty_rpar(LL[next_idx]) + ): + # That RPAR should NOT be followed by anything with higher + # precedence than PERCENT + if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in { + token.DOUBLESTAR, + token.LSQB, + token.LPAR, + token.DOT, + }: + continue + + return Ok(string_idx) + + return TErr("This line has no strings wrapped in parens.") + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + LL = line.leaves + + string_parser = StringParser() + rpar_idx = string_parser.parse(LL, string_idx) + + for leaf in (LL[string_idx - 1], LL[rpar_idx]): + if line.comments_after(leaf): + yield TErr( + "Will not strip parentheses which have comments attached to them." + ) + return + + new_line = line.clone() + new_line.comments = line.comments.copy() + try: + append_leaves(new_line, line, LL[: string_idx - 1]) + except BracketMatchError: + # HACK: I believe there is currently a bug somewhere in + # right_hand_split() that is causing brackets to not be tracked + # properly by a shared BracketTracker. + append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True) + + string_leaf = Leaf(token.STRING, LL[string_idx].value) + LL[string_idx - 1].remove() + replace_child(LL[string_idx], string_leaf) + new_line.append(string_leaf) + + append_leaves( + new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :] + ) + + LL[rpar_idx].remove() + + yield Ok(new_line) + + +class BaseStringSplitter(StringTransformer): + """ + Abstract class for StringTransformers which transform a Line's strings by splitting + them or placing them on their own lines where necessary to avoid going over + the configured line length. + + Requirements: + * The target string value is responsible for the line going over the + line length limit. It follows that after all of black's other line + split methods have been exhausted, this line (or one of the resulting + lines after all line splits are performed) would still be over the + line_length limit unless we split this string. + AND + * The target string is NOT a "pointless" string (i.e. a string that has + no parent or siblings). + AND + * The target string is not followed by an inline comment that appears + to be a pragma. + AND + * The target string is not a multiline (i.e. triple-quote) string. + """ + + STRING_OPERATORS: Final = [ + token.EQEQUAL, + token.GREATER, + token.GREATEREQUAL, + token.LESS, + token.LESSEQUAL, + token.NOTEQUAL, + token.PERCENT, + token.PLUS, + token.STAR, + ] + + @abstractmethod + def do_splitter_match(self, line: Line) -> TMatchResult: + """ + BaseStringSplitter asks its clients to override this method instead of + `StringTransformer.do_match(...)`. + + Follows the same protocol as `StringTransformer.do_match(...)`. + + Refer to `help(StringTransformer.do_match)` for more information. + """ + + def do_match(self, line: Line) -> TMatchResult: + match_result = self.do_splitter_match(line) + if isinstance(match_result, Err): + return match_result + + string_idx = match_result.ok() + vresult = self._validate(line, string_idx) + if isinstance(vresult, Err): + return vresult + + return match_result + + def _validate(self, line: Line, string_idx: int) -> TResult[None]: + """ + Checks that @line meets all of the requirements listed in this classes' + docstring. Refer to `help(BaseStringSplitter)` for a detailed + description of those requirements. + + Returns: + * Ok(None), if ALL of the requirements are met. 
+                OR
+            * Err(CannotTransform), if ANY of the requirements are NOT met.
+        """
+        LL = line.leaves
+
+        string_leaf = LL[string_idx]
+
+        max_string_length = self._get_max_string_length(line, string_idx)
+        if len(string_leaf.value) <= max_string_length:
+            return TErr(
+                "The string itself is not what is causing this line to be too long."
+            )
+
+        if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
+            token.STRING,
+            token.NEWLINE,
+        ]:
+            return TErr(
+                f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
+                " no parent)."
+            )
+
+        if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
+            line.comments[id(line.leaves[string_idx])]
+        ):
+            return TErr(
+                "Line appears to end with an inline pragma comment. Splitting the line"
+                " could modify the pragma's behavior."
+            )
+
+        if has_triple_quotes(string_leaf.value):
+            return TErr("We cannot split multiline strings.")
+
+        return Ok(None)
+
+    def _get_max_string_length(self, line: Line, string_idx: int) -> int:
+        """
+        Calculates the max string length used when attempting to determine
+        whether or not the target string is responsible for causing the line to
+        go over the line length limit.
+
+        WARNING: This method is tightly coupled to both StringSplitter and
+        (especially) StringParenWrapper. There is probably a better way to
+        accomplish what is being done here.
+
+        Returns:
+            max_string_length: such that `len(line.leaves[string_idx].value) >
+            max_string_length` implies that the target string IS responsible
+            for causing this line to exceed the line length limit.
+        """
+        LL = line.leaves
+
+        is_valid_index = is_valid_index_factory(LL)
+
+        # We use the shorthand "WMA4" in comments to abbreviate "We must
+        # account for". When giving examples, we use STRING to mean some/any
+        # valid string.
+        #
+        # Finally, we use the following convenience variables:
+        #
+        #   P: The leaf that is before the target string leaf.
+        #   N: The leaf that is after the target string leaf.
+        #   NN: The leaf that is after N.
+
+        # WMA4 the whitespace at the beginning of the line.
+        offset = line.depth * 4
+
+        if is_valid_index(string_idx - 1):
+            p_idx = string_idx - 1
+            if (
+                LL[string_idx - 1].type == token.LPAR
+                and LL[string_idx - 1].value == ""
+                and string_idx >= 2
+            ):
+                # If the previous leaf is an empty LPAR placeholder, we should skip it.
+                p_idx -= 1
+
+            P = LL[p_idx]
+            if P.type in self.STRING_OPERATORS:
+                # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
+                offset += len(str(P)) + 1
+
+            if P.type == token.COMMA:
+                # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
+                offset += 3
+
+            if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
+                # This conditional branch is meant to handle dictionary keys,
+                # variable assignments, 'return STRING' statement lines, and
+                # 'else STRING' ternary expression lines.
+
+                # WMA4 a single space.
+                offset += 1
+
+                # WMA4 the lengths of any leaves that came before that space,
+                # but after any closing bracket before that space.
+                for leaf in reversed(LL[: p_idx + 1]):
+                    offset += len(str(leaf))
+                    if leaf.type in CLOSING_BRACKETS:
+                        break
+
+        if is_valid_index(string_idx + 1):
+            N = LL[string_idx + 1]
+            if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
+                # If the next leaf is an empty RPAR placeholder, we should skip it.
+                N = LL[string_idx + 2]
+
+            if N.type == token.COMMA:
+                # WMA4 a single comma at the end of the string (e.g. `STRING,`).
+ offset += 1 + + if is_valid_index(string_idx + 2): + NN = LL[string_idx + 2] + + if N.type == token.DOT and NN.type == token.NAME: + # This conditional branch is meant to handle method calls invoked + # off of a string literal up to and including the LPAR character. + + # WMA4 the '.' character. + offset += 1 + + if ( + is_valid_index(string_idx + 3) + and LL[string_idx + 3].type == token.LPAR + ): + # WMA4 the left parenthesis character. + offset += 1 + + # WMA4 the length of the method's name. + offset += len(NN.value) + + has_comments = False + for comment_leaf in line.comments_after(LL[string_idx]): + if not has_comments: + has_comments = True + # WMA4 two spaces before the '#' character. + offset += 2 + + # WMA4 the length of the inline comment. + offset += len(comment_leaf.value) + + max_string_length = self.line_length - offset + return max_string_length + + @staticmethod + def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the "prefer paren wrap" statement + requirements listed in the 'Requirements' section of the StringParenWrapper + class's docstring. + OR + None, otherwise. + """ + # The line must start with a string. + if LL[0].type != token.STRING: + return None + + matching_nodes = [ + syms.listmaker, + syms.dictsetmaker, + syms.testlist_gexp, + ] + # If the string is an immediate child of a list/set/tuple literal... + if ( + parent_type(LL[0]) in matching_nodes + or parent_type(LL[0].parent) in matching_nodes + ): + # And the string is surrounded by commas (or is the first/last child)... + prev_sibling = LL[0].prev_sibling + next_sibling = LL[0].next_sibling + if ( + not prev_sibling + and not next_sibling + and parent_type(LL[0]) == syms.atom + ): + # If it's an atom string, we need to check the parent atom's siblings. + parent = LL[0].parent + assert parent is not None # For type checkers. + prev_sibling = parent.prev_sibling + next_sibling = parent.next_sibling + if (not prev_sibling or prev_sibling.type == token.COMMA) and ( + not next_sibling or next_sibling.type == token.COMMA + ): + return 0 + + return None + + +def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]: + """ + Yields spans corresponding to expressions in a given f-string. + Spans are half-open ranges (left inclusive, right exclusive). + Assumes the input string is a valid f-string, but will not crash if the input + string is invalid. + """ + stack: List[int] = [] # our curly paren stack + i = 0 + while i < len(s): + if s[i] == "{": + # if we're in a string part of the f-string, ignore escaped curly braces + if not stack and i + 1 < len(s) and s[i + 1] == "{": + i += 2 + continue + stack.append(i) + i += 1 + continue + + if s[i] == "}": + if not stack: + i += 1 + continue + j = stack.pop() + # we've made it back out of the expression! 
yield the span + if not stack: + yield (j, i + 1) + i += 1 + continue + + # if we're in an expression part of the f-string, fast forward through strings + # note that backslashes are not legal in the expression portion of f-strings + if stack: + delim = None + if s[i : i + 3] in ("'''", '"""'): + delim = s[i : i + 3] + elif s[i] in ("'", '"'): + delim = s[i] + if delim: + i += len(delim) + while i < len(s) and s[i : i + len(delim)] != delim: + i += 1 + i += len(delim) + continue + i += 1 + + +def fstring_contains_expr(s: str) -> bool: + return any(iter_fexpr_spans(s)) + + +class StringSplitter(BaseStringSplitter, CustomSplitMapMixin): + """ + StringTransformer that splits "atom" strings (i.e. strings which exist on + lines by themselves). + + Requirements: + * The line consists ONLY of a single string (possibly prefixed by a + string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE + a trailing comma. + AND + * All of the requirements listed in BaseStringSplitter's docstring. + + Transformations: + The string mentioned in the 'Requirements' section is split into as + many substrings as necessary to adhere to the configured line length. + + In the final set of substrings, no substring should be smaller than + MIN_SUBSTR_SIZE characters. + + The string will ONLY be split on spaces (i.e. each new substring should + start with a space). Note that the string will NOT be split on a space + which is escaped with a backslash. + + If the string is an f-string, it will NOT be split in the middle of an + f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x + else bar()} is an f-expression). + + If the string that is being split has an associated set of custom split + records and those custom splits will NOT result in any line going over + the configured line length, those custom splits are used. Otherwise the + string is split as late as possible (from left-to-right) while still + adhering to the transformation rules listed above. + + Collaborations: + StringSplitter relies on StringMerger to construct the appropriate + CustomSplit objects and add them to the custom split map. + """ + + MIN_SUBSTR_SIZE: Final = 6 + + def do_splitter_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + if self._prefer_paren_wrap_match(LL) is not None: + return TErr("Line needs to be wrapped in parens first.") + + is_valid_index = is_valid_index_factory(LL) + + idx = 0 + + # The first two leaves MAY be the 'not in' keywords... + if ( + is_valid_index(idx) + and is_valid_index(idx + 1) + and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME] + and str(LL[idx]) + str(LL[idx + 1]) == "not in" + ): + idx += 2 + # Else the first leaf MAY be a string operator symbol or the 'in' keyword... + elif is_valid_index(idx) and ( + LL[idx].type in self.STRING_OPERATORS + or LL[idx].type == token.NAME + and str(LL[idx]) == "in" + ): + idx += 1 + + # The next/first leaf MAY be an empty LPAR... + if is_valid_index(idx) and is_empty_lpar(LL[idx]): + idx += 1 + + # The next/first leaf MUST be a string... + if not is_valid_index(idx) or LL[idx].type != token.STRING: + return TErr("Line does not start with a string.") + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # That string MAY be followed by an empty RPAR... + if is_valid_index(idx) and is_empty_rpar(LL[idx]): + idx += 1 + + # That string / empty RPAR leaf MAY be followed by a comma... 
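+        # (e.g. the trailing comma left after a long function argument that
+        # already sits on its own line).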
+ if is_valid_index(idx) and LL[idx].type == token.COMMA: + idx += 1 + + # But no more leaves are allowed... + if is_valid_index(idx): + return TErr("This line does not end with a string.") + + return Ok(string_idx) + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + LL = line.leaves + + QUOTE = LL[string_idx].value[-1] + + is_valid_index = is_valid_index_factory(LL) + insert_str_child = insert_str_child_factory(LL[string_idx]) + + prefix = get_string_prefix(LL[string_idx].value).lower() + + # We MAY choose to drop the 'f' prefix from substrings that don't + # contain any f-expressions, but ONLY if the original f-string + # contains at least one f-expression. Otherwise, we will alter the AST + # of the program. + drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr( + LL[string_idx].value + ) + + first_string_line = True + + string_op_leaves = self._get_string_operator_leaves(LL) + string_op_leaves_length = ( + sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1 + if string_op_leaves + else 0 + ) + + def maybe_append_string_operators(new_line: Line) -> None: + """ + Side Effects: + If @line starts with a string operator and this is the first + line we are constructing, this function appends the string + operator to @new_line and replaces the old string operator leaf + in the node structure. Otherwise this function does nothing. + """ + maybe_prefix_leaves = string_op_leaves if first_string_line else [] + for i, prefix_leaf in enumerate(maybe_prefix_leaves): + replace_child(LL[i], prefix_leaf) + new_line.append(prefix_leaf) + + ends_with_comma = ( + is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA + ) + + def max_last_string() -> int: + """ + Returns: + The max allowed length of the string value used for the last + line we will construct. + """ + result = self.line_length + result -= line.depth * 4 + result -= 1 if ends_with_comma else 0 + result -= string_op_leaves_length + return result + + # --- Calculate Max Break Index (for string value) + # We start with the line length limit + max_break_idx = self.line_length + # The last index of a string of length N is N-1. + max_break_idx -= 1 + # Leading whitespace is not present in the string value (e.g. Leaf.value). + max_break_idx -= line.depth * 4 + if max_break_idx < 0: + yield TErr( + f"Unable to split {LL[string_idx].value} at such high of a line depth:" + f" {line.depth}" + ) + return + + # Check if StringMerger registered any custom splits. + custom_splits = self.pop_custom_splits(LL[string_idx].value) + # We use them ONLY if none of them would produce lines that exceed the + # line limit. + use_custom_breakpoints = bool( + custom_splits + and all(csplit.break_idx <= max_break_idx for csplit in custom_splits) + ) + + # Temporary storage for the remaining chunk of the string line that + # can't fit onto the line currently being constructed. + rest_value = LL[string_idx].value + + def more_splits_should_be_made() -> bool: + """ + Returns: + True iff `rest_value` (the remaining string value from the last + split), should be split again. 
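+
+            Note:
+                With custom split records in use, we keep splitting until only
+                the final custom chunk remains; otherwise, we split for as long
+                as the remainder is too long to fit on the last line.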
+ """ + if use_custom_breakpoints: + return len(custom_splits) > 1 + else: + return len(rest_value) > max_last_string() + + string_line_results: List[Ok[Line]] = [] + while more_splits_should_be_made(): + if use_custom_breakpoints: + # Custom User Split (manual) + csplit = custom_splits.pop(0) + break_idx = csplit.break_idx + else: + # Algorithmic Split (automatic) + max_bidx = max_break_idx - string_op_leaves_length + maybe_break_idx = self._get_break_idx(rest_value, max_bidx) + if maybe_break_idx is None: + # If we are unable to algorithmically determine a good split + # and this string has custom splits registered to it, we + # fall back to using them--which means we have to start + # over from the beginning. + if custom_splits: + rest_value = LL[string_idx].value + string_line_results = [] + first_string_line = True + use_custom_breakpoints = True + continue + + # Otherwise, we stop splitting here. + break + + break_idx = maybe_break_idx + + # --- Construct `next_value` + next_value = rest_value[:break_idx] + QUOTE + + # HACK: The following 'if' statement is a hack to fix the custom + # breakpoint index in the case of either: (a) substrings that were + # f-strings but will have the 'f' prefix removed OR (b) substrings + # that were not f-strings but will now become f-strings because of + # redundant use of the 'f' prefix (i.e. none of the substrings + # contain f-expressions but one or more of them had the 'f' prefix + # anyway; in which case, we will prepend 'f' to _all_ substrings). + # + # There is probably a better way to accomplish what is being done + # here... + # + # If this substring is an f-string, we _could_ remove the 'f' + # prefix, and the current custom split did NOT originally use a + # prefix... + if ( + next_value != self._normalize_f_string(next_value, prefix) + and use_custom_breakpoints + and not csplit.has_prefix + ): + # Then `csplit.break_idx` will be off by one after removing + # the 'f' prefix. + break_idx += 1 + next_value = rest_value[:break_idx] + QUOTE + + if drop_pointless_f_prefix: + next_value = self._normalize_f_string(next_value, prefix) + + # --- Construct `next_leaf` + next_leaf = Leaf(token.STRING, next_value) + insert_str_child(next_leaf) + self._maybe_normalize_string_quotes(next_leaf) + + # --- Construct `next_line` + next_line = line.clone() + maybe_append_string_operators(next_line) + next_line.append(next_leaf) + string_line_results.append(Ok(next_line)) + + rest_value = prefix + QUOTE + rest_value[break_idx:] + first_string_line = False + + yield from string_line_results + + if drop_pointless_f_prefix: + rest_value = self._normalize_f_string(rest_value, prefix) + + rest_leaf = Leaf(token.STRING, rest_value) + insert_str_child(rest_leaf) + + # NOTE: I could not find a test case that verifies that the following + # line is actually necessary, but it seems to be. Otherwise we risk + # not normalizing the last substring, right? + self._maybe_normalize_string_quotes(rest_leaf) + + last_line = line.clone() + maybe_append_string_operators(last_line) + + # If there are any leaves to the right of the target string... + if is_valid_index(string_idx + 1): + # We use `temp_value` here to determine how long the last line + # would be if we were to append all the leaves to the right of the + # target string to the last string line. + temp_value = rest_value + for leaf in LL[string_idx + 1 :]: + temp_value += str(leaf) + if leaf.type == token.LPAR: + break + + # Try to fit them all on the same line with the last substring... 
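+            # (e.g. a short string trailer or a lone trailing comma can often
+            # share the final line with the last substring).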
+ if ( + len(temp_value) <= max_last_string() + or LL[string_idx + 1].type == token.COMMA + ): + last_line.append(rest_leaf) + append_leaves(last_line, line, LL[string_idx + 1 :]) + yield Ok(last_line) + # Otherwise, place the last substring on one line and everything + # else on a line below that... + else: + last_line.append(rest_leaf) + yield Ok(last_line) + + non_string_line = line.clone() + append_leaves(non_string_line, line, LL[string_idx + 1 :]) + yield Ok(non_string_line) + # Else the target string was the last leaf... + else: + last_line.append(rest_leaf) + last_line.comments = line.comments.copy() + yield Ok(last_line) + + def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + """ + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an \\N{...} expression (which is NOT + allowed). + """ + # True - the previous backslash was unescaped + # False - the previous backslash was escaped *or* there was no backslash + previous_was_unescaped_backslash = False + it = iter(enumerate(string)) + for idx, c in it: + if c == "\\": + previous_was_unescaped_backslash = not previous_was_unescaped_backslash + continue + if not previous_was_unescaped_backslash or c != "N": + previous_was_unescaped_backslash = False + continue + previous_was_unescaped_backslash = False + + begin = idx - 1 # the position of backslash before \N{...} + for idx, c in it: + if c == "}": + end = idx + break + else: + # malformed nameescape expression? + # should have been detected by AST parsing earlier... + raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") + yield begin, end + + def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + """ + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an f-expression (which is NOT + allowed). + """ + if "f" not in get_string_prefix(string).lower(): + return + yield from iter_fexpr_spans(string) + + def _get_illegal_split_indices(self, string: str) -> Set[Index]: + illegal_indices: Set[Index] = set() + iterators = [ + self._iter_fexpr_slices(string), + self._iter_nameescape_slices(string), + ] + for it in iterators: + for begin, end in it: + illegal_indices.update(range(begin, end + 1)) + return illegal_indices + + def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]: + """ + This method contains the algorithm that StringSplitter uses to + determine which character to split each string at. + + Args: + @string: The substring that we are attempting to split. + @max_break_idx: The ideal break index. We will return this value if it + meets all the necessary conditions. In the likely event that it + doesn't we will try to find the closest index BELOW @max_break_idx + that does. If that fails, we will expand our search by also + considering all valid indices ABOVE @max_break_idx. + + Pre-Conditions: + * assert_is_leaf_string(@string) + * 0 <= @max_break_idx < len(@string) + + Returns: + break_idx, if an index is able to be found that meets all of the + conditions listed in the 'Transformations' section of this classes' + docstring. + OR + None, otherwise. 
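+
+        Example (illustrative):
+            Given `string = '"The quick brown fox jumps"'` and
+            `max_break_idx = 16`, we scan leftward from index 16 looking for
+            an unescaped space that leaves at least MIN_SUBSTR_SIZE
+            characters on both sides, then rightward if no such index exists.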
+        """
+        is_valid_index = is_valid_index_factory(string)
+
+        assert is_valid_index(max_break_idx)
+        assert_is_leaf_string(string)
+
+        _illegal_split_indices = self._get_illegal_split_indices(string)
+
+        def breaks_unsplittable_expression(i: Index) -> bool:
+            """
+            Returns:
+                True iff returning @i would result in the splitting of an
+                unsplittable expression (which is NOT allowed).
+            """
+            return i in _illegal_split_indices
+
+        def passes_all_checks(i: Index) -> bool:
+            """
+            Returns:
+                True iff ALL of the conditions listed in the 'Transformations'
+                section of this class's docstring would be met by returning @i.
+            """
+            is_space = string[i] == " "
+
+            is_not_escaped = True
+            j = i - 1
+            while is_valid_index(j) and string[j] == "\\":
+                is_not_escaped = not is_not_escaped
+                j -= 1
+
+            is_big_enough = (
+                len(string[i:]) >= self.MIN_SUBSTR_SIZE
+                and len(string[:i]) >= self.MIN_SUBSTR_SIZE
+            )
+            return (
+                is_space
+                and is_not_escaped
+                and is_big_enough
+                and not breaks_unsplittable_expression(i)
+            )
+
+        # First, we check all indices BELOW @max_break_idx.
+        break_idx = max_break_idx
+        while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
+            break_idx -= 1
+
+        if not passes_all_checks(break_idx):
+            # If that fails, we check all indices ABOVE @max_break_idx.
+            #
+            # If we are able to find a valid index here, the next line is going
+            # to be longer than the specified line length, but it's probably
+            # better than doing nothing at all.
+            break_idx = max_break_idx + 1
+            while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
+                break_idx += 1
+
+            if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
+                return None
+
+        return break_idx
+
+    def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
+        if self.normalize_strings:
+            leaf.value = normalize_string_quotes(leaf.value)
+
+    def _normalize_f_string(self, string: str, prefix: str) -> str:
+        """
+        Pre-Conditions:
+            * assert_is_leaf_string(@string)
+
+        Returns:
+            * If @string is an f-string that contains no f-expressions, we
+              return a string identical to @string except that the 'f' prefix
+              has been stripped and all double braces (i.e. '{{' or '}}') have
+              been normalized (i.e. turned into '{' or '}').
+                OR
+            * Otherwise, we return @string.
+        """
+        assert_is_leaf_string(string)
+
+        if "f" in prefix and not fstring_contains_expr(string):
+            new_prefix = prefix.replace("f", "")
+
+            temp = string[len(prefix) :]
+            temp = re.sub(r"\{\{", "{", temp)
+            temp = re.sub(r"\}\}", "}", temp)
+            new_string = temp
+
+            return f"{new_prefix}{new_string}"
+        else:
+            return string
+
+    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
+        LL = list(leaves)
+
+        string_op_leaves = []
+        i = 0
+        while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
+            prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
+            string_op_leaves.append(prefix_leaf)
+            i += 1
+        return string_op_leaves
+
+
+class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
+    """
+    StringTransformer that wraps strings in parens and then splits at the LPAR.
+
+    Requirements:
+        All of the requirements listed in BaseStringSplitter's docstring in
+        addition to the requirements listed below:
+
+        * The line is a return/yield statement, which returns/yields a string.
+          OR
+        * The line is part of a ternary expression (e.g. `x = y if cond else
+          z`) such that the line starts with `else <string>`, where <string>
+          is some string.
+          OR
+        * The line is an assert statement, which ends with a string.
+          OR
+        * The line is an assignment statement (e.g. `x = <string>` or `x +=
+          <string>`) such that the variable is being assigned the value of
+          some string.
+          OR
+        * The line is a dictionary key assignment where some valid key is
+          being assigned the value of some string.
+          OR
+        * The line starts with an "atom" string that prefers to be wrapped in
+          parens. Wrapping is preferred when the string is an immediate child
+          of a list/set/tuple literal, AND the string is surrounded by commas
+          (or is the first/last child).
+
+    Transformations:
+        The chosen string is wrapped in parentheses and then split at the LPAR.
+
+        We then have one line which ends with an LPAR and another line that
+        starts with the chosen string. The latter line is then split again at
+        the RPAR. This results in the RPAR (and possibly a trailing comma)
+        being placed on its own line.
+
+        NOTE: If any leaves exist to the right of the chosen string (except
+        for a trailing comma, which would be placed after the RPAR), those
+        leaves are placed inside the parentheses. In effect, the chosen
+        string is not necessarily being "wrapped" by parentheses. We can,
+        however, count on the LPAR being placed directly before the chosen
+        string.
+
+        In other words, StringParenWrapper creates "atom" strings. These
+        can then be split again by StringSplitter, if necessary.
+
+    Collaborations:
+        In the event that a string line split by StringParenWrapper is
+        changed such that it no longer needs to be given its own line,
+        StringParenWrapper relies on StringParenStripper to clean up the
+        parentheses it created.
+
+        For "atom" strings that prefer to be wrapped in parens, it requires
+        StringSplitter to hold the split until the string is wrapped in
+        parens.
+    """
+
+    def do_splitter_match(self, line: Line) -> TMatchResult:
+        LL = line.leaves
+
+        if line.leaves[-1].type in OPENING_BRACKETS:
+            return TErr(
+                "Cannot wrap parens around a line that ends in an opening bracket."
+            )
+
+        string_idx = (
+            self._return_match(LL)
+            or self._else_match(LL)
+            or self._assert_match(LL)
+            or self._assign_match(LL)
+            or self._dict_match(LL)
+            or self._prefer_paren_wrap_match(LL)
+        )
+
+        if string_idx is not None:
+            string_value = line.leaves[string_idx].value
+            # If the string has no spaces...
+            if " " not in string_value:
+                # And will still violate the line length limit when split...
+                max_string_length = self.line_length - ((line.depth + 1) * 4)
+                if len(string_value) > max_string_length:
+                    # And has no associated custom splits...
+                    if not self.has_custom_splits(string_value):
+                        # Then we should NOT put this string on its own line.
+                        return TErr(
+                            "We do not wrap long strings in parentheses when the"
+                            " resultant line would still be over the specified line"
+                            " length and can't be split further by StringSplitter."
+                        )
+            return Ok(string_idx)
+
+        return TErr("This line does not contain any non-atomic strings.")
+
+    @staticmethod
+    def _return_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the return/yield statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is part of a return/yield statement and the first leaf
+        # contains either the "return" or "yield" keywords...
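+        # (e.g. `return "some long string"` or `yield "some long string"`).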
+        if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
+            0
+        ].value in ["return", "yield"]:
+            is_valid_index = is_valid_index_factory(LL)
+
+            idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+            # The next visible leaf MUST contain a string...
+            if is_valid_index(idx) and LL[idx].type == token.STRING:
+                return idx
+
+        return None
+
+    @staticmethod
+    def _else_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the ternary expression
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is part of a ternary expression and the first leaf
+        # contains the "else" keyword...
+        if (
+            parent_type(LL[0]) == syms.test
+            and LL[0].type == token.NAME
+            and LL[0].value == "else"
+        ):
+            is_valid_index = is_valid_index_factory(LL)
+
+            idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+            # The next visible leaf MUST contain a string...
+            if is_valid_index(idx) and LL[idx].type == token.STRING:
+                return idx
+
+        return None
+
+    @staticmethod
+    def _assert_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the assert statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is part of an assert statement and the first leaf
+        # contains the "assert" keyword...
+        if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find a comma...
+                if leaf.type == token.COMMA:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That comma MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # But no more leaves are allowed...
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    @staticmethod
+    def _assign_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the assignment statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is part of an expression statement or is a function
+        # argument AND the first leaf contains a variable name...
+        if (
+            parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
+            and LL[0].type == token.NAME
+        ):
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find either an '=' or '+=' symbol...
+                if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That symbol MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # The next leaf MAY be a comma iff this line is part
+                        # of a function argument...
+                        if (
+                            parent_type(LL[0]) == syms.argument
+                            and is_valid_index(idx)
+                            and LL[idx].type == token.COMMA
+                        ):
+                            idx += 1
+
+                        # But no more leaves are allowed...
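+                        # (otherwise the string would make up only part of the
+                        # assigned value rather than the whole of it).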
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    @staticmethod
+    def _dict_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the dictionary key assignment
+            statement requirements listed in the 'Requirements' section of this
+            class's docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is part of a dictionary key assignment...
+        if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find a colon...
+                if leaf.type == token.COLON:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That colon MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # That string MAY be followed by a comma...
+                        if is_valid_index(idx) and LL[idx].type == token.COMMA:
+                            idx += 1
+
+                        # But no more leaves are allowed...
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
+        LL = line.leaves
+
+        is_valid_index = is_valid_index_factory(LL)
+        insert_str_child = insert_str_child_factory(LL[string_idx])
+
+        comma_idx = -1
+        ends_with_comma = False
+        if LL[comma_idx].type == token.COMMA:
+            ends_with_comma = True
+
+        leaves_to_steal_comments_from = [LL[string_idx]]
+        if ends_with_comma:
+            leaves_to_steal_comments_from.append(LL[comma_idx])
+
+        # --- First Line
+        first_line = line.clone()
+        left_leaves = LL[:string_idx]
+
+        # We have to remember to account for (possibly invisible) LPAR and RPAR
+        # leaves that already wrapped the target string. If these leaves do
+        # exist, we will replace them with our own LPAR and RPAR leaves.
+        old_parens_exist = False
+        if left_leaves and left_leaves[-1].type == token.LPAR:
+            old_parens_exist = True
+            leaves_to_steal_comments_from.append(left_leaves[-1])
+            left_leaves.pop()
+
+        append_leaves(first_line, line, left_leaves)
+
+        lpar_leaf = Leaf(token.LPAR, "(")
+        if old_parens_exist:
+            replace_child(LL[string_idx - 1], lpar_leaf)
+        else:
+            insert_str_child(lpar_leaf)
+        first_line.append(lpar_leaf)
+
+        # We throw inline comments that were originally to the right of the
+        # target string to the top line. They will now be shown to the right of
+        # the LPAR.
+        for leaf in leaves_to_steal_comments_from:
+            for comment_leaf in line.comments_after(leaf):
+                first_line.append(comment_leaf, preformatted=True)
+
+        yield Ok(first_line)
+
+        # --- Middle (String) Line
+        # We only need to yield one (possibly too long) string line, since the
+        # `StringSplitter` will break it down further if necessary.
+        string_value = LL[string_idx].value
+        string_line = Line(
+            mode=line.mode,
+            depth=line.depth + 1,
+            inside_brackets=True,
+            should_split_rhs=line.should_split_rhs,
+            magic_trailing_comma=line.magic_trailing_comma,
+        )
+        string_leaf = Leaf(token.STRING, string_value)
+        insert_str_child(string_leaf)
+        string_line.append(string_leaf)
+
+        old_rpar_leaf = None
+        if is_valid_index(string_idx + 1):
+            right_leaves = LL[string_idx + 1 :]
+            if ends_with_comma:
+                right_leaves.pop()
+
+            if old_parens_exist:
+                assert right_leaves and right_leaves[-1].type == token.RPAR, (
+                    "Apparently, old parentheses do NOT exist?!"
+                    f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
+                )
+                old_rpar_leaf = right_leaves.pop()
+
+            append_leaves(string_line, line, right_leaves)
+
+        yield Ok(string_line)
+
+        # --- Last Line
+        last_line = line.clone()
+        last_line.bracket_tracker = first_line.bracket_tracker
+
+        new_rpar_leaf = Leaf(token.RPAR, ")")
+        if old_rpar_leaf is not None:
+            replace_child(old_rpar_leaf, new_rpar_leaf)
+        else:
+            insert_str_child(new_rpar_leaf)
+        last_line.append(new_rpar_leaf)
+
+        # If the target string ended with a comma, we place this comma to the
+        # right of the RPAR on the last line.
+        if ends_with_comma:
+            comma_leaf = Leaf(token.COMMA, ",")
+            replace_child(LL[comma_idx], comma_leaf)
+            last_line.append(comma_leaf)
+
+        yield Ok(last_line)
+
+
+class StringParser:
+    """
+    A state machine that aids in parsing a string's "trailer", which can be
+    either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
+    (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
+    varY)`).
+
+    NOTE: A new StringParser object MUST be instantiated for each string
+    trailer we need to parse.
+
+    Examples:
+        We shall assume that `line` equals the `Line` object that corresponds
+        to the following line of Python code:
+        ```
+        x = "Some {}.".format("String") + some_other_string
+        ```
+
+        Furthermore, we will assume that `string_idx` is some index such that:
+        ```
+        assert line.leaves[string_idx].value == "Some {}."
+        ```
+
+        The following code snippet then holds:
+        ```
+        string_parser = StringParser()
+        idx = string_parser.parse(line.leaves, string_idx)
+        assert line.leaves[idx].type == token.PLUS
+        ```
+    """
+
+    DEFAULT_TOKEN: Final = 20210605
+
+    # String Parser States
+    START: Final = 1
+    DOT: Final = 2
+    NAME: Final = 3
+    PERCENT: Final = 4
+    SINGLE_FMT_ARG: Final = 5
+    LPAR: Final = 6
+    RPAR: Final = 7
+    DONE: Final = 8
+
+    # Lookup Table for Next State
+    _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
+        # A string trailer may start with '.' OR '%'.
+        (START, token.DOT): DOT,
+        (START, token.PERCENT): PERCENT,
+        (START, DEFAULT_TOKEN): DONE,
+        # A '.' MUST be followed by an attribute or method name.
+        (DOT, token.NAME): NAME,
+        # A method name MUST be followed by an '(', whereas an attribute name
+        # is the last symbol in the string trailer.
+        (NAME, token.LPAR): LPAR,
+        (NAME, DEFAULT_TOKEN): DONE,
+        # A '%' symbol can be followed by an '(' or a single argument (e.g. a
+        # string or variable name).
+        (PERCENT, token.LPAR): LPAR,
+        (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
+        # If a '%' symbol is followed by a single argument, that argument is
+        # the last leaf in the string trailer.
+        (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
+        # If present, a ')' symbol is the last symbol in a string trailer.
+        # (NOTE: LPARS and nested RPARS are not included in this lookup table,
+        # since they are treated as a special case by the parsing logic in this
+        # class's implementation.)
+        (RPAR, DEFAULT_TOKEN): DONE,
+    }
+
+    def __init__(self) -> None:
+        self._state = self.START
+        self._unmatched_lpars = 0
+
+    def parse(self, leaves: List[Leaf], string_idx: int) -> int:
+        """
+        Pre-conditions:
+            * @leaves[@string_idx].type == token.STRING
+
+        Returns:
+            The index directly after the last leaf which is part of the string
+            trailer, if a "trailer" exists.
+            OR
+            @string_idx + 1, if no string "trailer" exists.
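+
+        Example (illustrative):
+            For the leaves of `x = "{}".format(y) + z`, parse() consumes the
+            `.format(y)` trailer and returns the index of the `+` leaf.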
+        """
+        assert leaves[string_idx].type == token.STRING
+
+        idx = string_idx + 1
+        while idx < len(leaves) and self._next_state(leaves[idx]):
+            idx += 1
+        return idx
+
+    def _next_state(self, leaf: Leaf) -> bool:
+        """
+        Pre-conditions:
+            * On the first call to this function, @leaf MUST be the leaf that
+              was directly after the string leaf in question (e.g. if our target
+              string is `line.leaves[i]` then the first call to this method must
+              be `line.leaves[i + 1]`).
+            * On each subsequent call to this function, the leaf parameter
+              passed in MUST be the leaf directly following @leaf.
+
+        Returns:
+            True iff @leaf is part of the string's trailer.
+        """
+        # We ignore empty LPAR or RPAR leaves.
+        if is_empty_par(leaf):
+            return True
+
+        next_token = leaf.type
+        if next_token == token.LPAR:
+            self._unmatched_lpars += 1
+
+        current_state = self._state
+
+        # The LPAR parser state is a special case. We will return True until we
+        # find the matching RPAR token.
+        if current_state == self.LPAR:
+            if next_token == token.RPAR:
+                self._unmatched_lpars -= 1
+                if self._unmatched_lpars == 0:
+                    self._state = self.RPAR
+        # Otherwise, we use a lookup table to determine the next state.
+        else:
+            # If the lookup table matches the current state to the next
+            # token, we use the lookup table.
+            if (current_state, next_token) in self._goto:
+                self._state = self._goto[current_state, next_token]
+            else:
+                # Otherwise, we check whether the current state was assigned a
+                # default.
+                if (current_state, self.DEFAULT_TOKEN) in self._goto:
+                    self._state = self._goto[current_state, self.DEFAULT_TOKEN]
+                # If no default has been assigned, then this parser has a logic
+                # error.
+                else:
+                    raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
+
+        if self._state == self.DONE:
+            return False
+
+        return True
+
+
+def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
+    """
+    Factory for a convenience function that is used to orphan @string_leaf
+    and then insert multiple new leaves into the same part of the node
+    structure that @string_leaf had originally occupied.
+
+    Examples:
+        Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
+        string_leaf.parent`. Assume the node `N` has the following
+        original structure:
+
+        Node(
+            expr_stmt, [
+                Leaf(NAME, 'x'),
+                Leaf(EQUAL, '='),
+                Leaf(STRING, '"foo"'),
+            ]
+        )
+
+        We then run the code snippet shown below.
+ ``` + insert_str_child = insert_str_child_factory(string_leaf) + + lpar = Leaf(token.LPAR, '(') + insert_str_child(lpar) + + bar = Leaf(token.STRING, '"bar"') + insert_str_child(bar) + + rpar = Leaf(token.RPAR, ')') + insert_str_child(rpar) + ``` + + After which point, it follows that `string_leaf.parent is None` and + the node `N` now has the following structure: + + Node( + expr_stmt, [ + Leaf(NAME, 'x'), + Leaf(EQUAL, '='), + Leaf(LPAR, '('), + Leaf(STRING, '"bar"'), + Leaf(RPAR, ')'), + ] + ) + """ + string_parent = string_leaf.parent + string_child_idx = string_leaf.remove() + + def insert_str_child(child: LN) -> None: + nonlocal string_child_idx + + assert string_parent is not None + assert string_child_idx is not None + + string_parent.insert_child(string_child_idx, child) + string_child_idx += 1 + + return insert_str_child + + +def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]: + """ + Examples: + ``` + my_list = [1, 2, 3] + + is_valid_index = is_valid_index_factory(my_list) + + assert is_valid_index(0) + assert is_valid_index(2) + + assert not is_valid_index(3) + assert not is_valid_index(-1) + ``` + """ + + def is_valid_index(idx: int) -> bool: + """ + Returns: + True iff @idx is positive AND seq[@idx] does NOT raise an + IndexError. + """ + return 0 <= idx < len(seq) + + return is_valid_index diff --git a/src/black_primer/cli.py b/src/black_primer/cli.py deleted file mode 100644 index 5903adc72d4..00000000000 --- a/src/black_primer/cli.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python3 - -# coding=utf8 - -import asyncio -import logging -import sys -from datetime import datetime -from pathlib import Path -from shutil import rmtree, which -from tempfile import gettempdir -from typing import Any, Union - -import click - -from black_primer import lib - - -DEFAULT_CONFIG = Path(__file__).parent / "primer.json" -_timestamp = datetime.now().strftime("%Y%m%d%H%M%S") -DEFAULT_WORKDIR = Path(gettempdir()) / f"primer.{_timestamp}" -LOG = logging.getLogger(__name__) - - -def _handle_debug( - ctx: click.core.Context, - param: Union[click.core.Option, click.core.Parameter], - debug: Union[bool, int, str], -) -> Union[bool, int, str]: - """Turn on debugging if asked otherwise INFO default""" - log_level = logging.DEBUG if debug else logging.INFO - logging.basicConfig( - format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)", - level=log_level, - ) - return debug - - -async def async_main( - config: str, - debug: bool, - keep: bool, - long_checkouts: bool, - rebase: bool, - workdir: str, - workers: int, -) -> int: - work_path = Path(workdir) - if not work_path.exists(): - LOG.debug(f"Creating {work_path}") - work_path.mkdir() - - if not which("black"): - LOG.error("Can not find 'black' executable in PATH. 
No point in running") - return -1 - - try: - ret_val = await lib.process_queue( - config, work_path, workers, keep, long_checkouts, rebase - ) - return int(ret_val) - finally: - if not keep and work_path.exists(): - LOG.debug(f"Removing {work_path}") - rmtree(work_path, onerror=lib.handle_PermissionError) - - return -2 - - -@click.command(context_settings={"help_option_names": ["-h", "--help"]}) -@click.option( - "-c", - "--config", - default=str(DEFAULT_CONFIG), - type=click.Path(exists=True), - show_default=True, - help="JSON config file path", -) -@click.option( - "--debug", - is_flag=True, - callback=_handle_debug, - show_default=True, - help="Turn on debug logging", -) -@click.option( - "-k", - "--keep", - is_flag=True, - show_default=True, - help="Keep workdir + repos post run", -) -@click.option( - "-L", - "--long-checkouts", - is_flag=True, - show_default=True, - help="Pull big projects to test", -) -@click.option( - "-R", - "--rebase", - is_flag=True, - show_default=True, - help="Rebase project if already checked out", -) -@click.option( - "-w", - "--workdir", - default=str(DEFAULT_WORKDIR), - type=click.Path(exists=False), - show_default=True, - help="Directory path for repo checkouts", -) -@click.option( - "-W", - "--workers", - default=2, - type=int, - show_default=True, - help="Number of parallel worker coroutines", -) -@click.pass_context -def main(ctx: click.core.Context, **kwargs: Any) -> None: - """primer - prime projects for blackening... 🏴""" - LOG.debug(f"Starting {sys.argv[0]}") - # TODO: Change to asyncio.run when Black >= 3.7 only - loop = asyncio.get_event_loop() - try: - ctx.exit(loop.run_until_complete(async_main(**kwargs))) - finally: - loop.close() - - -if __name__ == "__main__": # pragma: nocover - main() diff --git a/src/black_primer/lib.py b/src/black_primer/lib.py deleted file mode 100644 index afeb0721cc4..00000000000 --- a/src/black_primer/lib.py +++ /dev/null @@ -1,332 +0,0 @@ -#!/usr/bin/env python3 - -import asyncio -import errno -import json -import logging -import os -import stat -import sys -from functools import partial -from pathlib import Path -from platform import system -from shutil import rmtree, which -from subprocess import CalledProcessError -from sys import version_info -from typing import Any, Callable, Dict, NamedTuple, Optional, Sequence, Tuple -from urllib.parse import urlparse - -import click - - -WINDOWS = system() == "Windows" -BLACK_BINARY = "black.exe" if WINDOWS else "black" -GIT_BIANRY = "git.exe" if WINDOWS else "git" -LOG = logging.getLogger(__name__) - - -# Windows needs a ProactorEventLoop if you want to exec subprocesses -# Starting with 3.8 this is the default - can remove when Black >= 3.8 -# mypy only respects sys.platform if directly in the evaluation -# https://mypy.readthedocs.io/en/latest/common_issues.html#python-version-and-system-platform-checks # noqa: B950 -if sys.platform == "win32": - asyncio.set_event_loop(asyncio.ProactorEventLoop()) - - -class Results(NamedTuple): - stats: Dict[str, int] = {} - failed_projects: Dict[str, CalledProcessError] = {} - - -async def _gen_check_output( - cmd: Sequence[str], - timeout: float = 300, - env: Optional[Dict[str, str]] = None, - cwd: Optional[Path] = None, -) -> Tuple[bytes, bytes]: - process = await asyncio.create_subprocess_exec( - *cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - env=env, - cwd=cwd, - ) - try: - (stdout, stderr) = await asyncio.wait_for(process.communicate(), timeout) - except asyncio.TimeoutError: - process.kill() - await 
process.wait() - raise - - if process.returncode != 0: - cmd_str = " ".join(cmd) - raise CalledProcessError( - process.returncode, cmd_str, output=stdout, stderr=stderr - ) - - return (stdout, stderr) - - -def analyze_results(project_count: int, results: Results) -> int: - failed_pct = round(((results.stats["failed"] / project_count) * 100), 2) - success_pct = round(((results.stats["success"] / project_count) * 100), 2) - - click.secho("-- primer results 📊 --\n", bold=True) - click.secho( - f"{results.stats['success']} / {project_count} succeeded ({success_pct}%) ✅", - bold=True, - fg="green", - ) - click.secho( - f"{results.stats['failed']} / {project_count} FAILED ({failed_pct}%) 💩", - bold=bool(results.stats["failed"]), - fg="red", - ) - s = "" if results.stats["disabled"] == 1 else "s" - click.echo(f" - {results.stats['disabled']} project{s} disabled by config") - s = "" if results.stats["wrong_py_ver"] == 1 else "s" - click.echo( - f" - {results.stats['wrong_py_ver']} project{s} skipped due to Python version" - ) - click.echo( - f" - {results.stats['skipped_long_checkout']} skipped due to long checkout" - ) - - if results.failed_projects: - click.secho("\nFailed projects:\n", bold=True) - - for project_name, project_cpe in results.failed_projects.items(): - print(f"## {project_name}:") - print(f" - Returned {project_cpe.returncode}") - if project_cpe.stderr: - print(f" - stderr:\n{project_cpe.stderr.decode('utf8')}") - if project_cpe.stdout: - print(f" - stdout:\n{project_cpe.stdout.decode('utf8')}") - print("") - - return results.stats["failed"] - - -async def black_run( - repo_path: Path, project_config: Dict[str, Any], results: Results -) -> None: - """Run Black and record failures""" - cmd = [str(which(BLACK_BINARY))] - if "cli_arguments" in project_config and project_config["cli_arguments"]: - cmd.extend(*project_config["cli_arguments"]) - cmd.extend(["--check", "--diff", "."]) - - try: - _stdout, _stderr = await _gen_check_output(cmd, cwd=repo_path) - except asyncio.TimeoutError: - results.stats["failed"] += 1 - LOG.error(f"Running black for {repo_path} timed out ({cmd})") - except CalledProcessError as cpe: - # TODO: Tune for smarter for higher signal - # If any other return value than 1 we raise - can disable project in config - if cpe.returncode == 1: - if not project_config["expect_formatting_changes"]: - results.stats["failed"] += 1 - results.failed_projects[repo_path.name] = cpe - else: - results.stats["success"] += 1 - return - elif cpe.returncode > 1: - results.stats["failed"] += 1 - results.failed_projects[repo_path.name] = cpe - return - - LOG.error(f"Unknown error with {repo_path}") - raise - - # If we get here and expect formatting changes something is up - if project_config["expect_formatting_changes"]: - results.stats["failed"] += 1 - results.failed_projects[repo_path.name] = CalledProcessError( - 0, cmd, b"Expected formatting changes but didn't get any!", b"" - ) - return - - results.stats["success"] += 1 - - -async def git_checkout_or_rebase( - work_path: Path, - project_config: Dict[str, Any], - rebase: bool = False, - *, - depth: int = 1, -) -> Optional[Path]: - """git Clone project or rebase""" - git_bin = str(which(GIT_BIANRY)) - if not git_bin: - LOG.error("No git binary found") - return None - - repo_url_parts = urlparse(project_config["git_clone_url"]) - path_parts = repo_url_parts.path[1:].split("/", maxsplit=1) - - repo_path: Path = work_path / path_parts[1].replace(".git", "") - cmd = [git_bin, "clone", "--depth", str(depth), 
project_config["git_clone_url"]] - cwd = work_path - if repo_path.exists() and rebase: - cmd = [git_bin, "pull", "--rebase"] - cwd = repo_path - elif repo_path.exists(): - return repo_path - - try: - _stdout, _stderr = await _gen_check_output(cmd, cwd=cwd) - except (asyncio.TimeoutError, CalledProcessError) as e: - LOG.error(f"Unable to git clone / pull {project_config['git_clone_url']}: {e}") - return None - - return repo_path - - -def handle_PermissionError( - func: Callable, path: Path, exc: Tuple[Any, Any, Any] -) -> None: - """ - Handle PermissionError during shutil.rmtree. - - This checks if the erroring function is either 'os.rmdir' or 'os.unlink', and that - the error was EACCES (i.e. Permission denied). If true, the path is set writable, - readable, and executable by everyone. Finally, it tries the error causing delete - operation again. - - If the check is false, then the original error will be reraised as this function - can't handle it. - """ - excvalue = exc[1] - LOG.debug(f"Handling {excvalue} from {func.__name__}... ") - if func in (os.rmdir, os.unlink) and excvalue.errno == errno.EACCES: - LOG.debug(f"Setting {path} writable, readable, and executable by everyone... ") - os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # chmod 0777 - func(path) # Try the error causing delete operation again - else: - raise - - -async def load_projects_queue( - config_path: Path, -) -> Tuple[Dict[str, Any], asyncio.Queue]: - """Load project config and fill queue with all the project names""" - with config_path.open("r") as cfp: - config = json.load(cfp) - - # TODO: Offer more options here - # e.g. Run on X random packages or specific sub list etc. - project_names = sorted(config["projects"].keys()) - queue: asyncio.Queue = asyncio.Queue(maxsize=len(project_names)) - for project in project_names: - await queue.put(project) - - return config, queue - - -async def project_runner( - idx: int, - config: Dict[str, Any], - queue: asyncio.Queue, - work_path: Path, - results: Results, - long_checkouts: bool = False, - rebase: bool = False, - keep: bool = False, -) -> None: - """Check out project and run Black on it + record result""" - loop = asyncio.get_event_loop() - py_version = f"{version_info[0]}.{version_info[1]}" - while True: - try: - project_name = queue.get_nowait() - except asyncio.QueueEmpty: - LOG.debug(f"project_runner {idx} exiting") - return - LOG.debug(f"worker {idx} working on {project_name}") - - project_config = config["projects"][project_name] - - # Check if disabled by config - if "disabled" in project_config and project_config["disabled"]: - results.stats["disabled"] += 1 - LOG.info(f"Skipping {project_name} as it's disabled via config") - continue - - # Check if we should run on this version of Python - if ( - "all" not in project_config["py_versions"] - and py_version not in project_config["py_versions"] - ): - results.stats["wrong_py_ver"] += 1 - LOG.debug(f"Skipping {project_name} as it's not enabled for {py_version}") - continue - - # Check if we're doing big projects / long checkouts - if not long_checkouts and project_config["long_checkout"]: - results.stats["skipped_long_checkout"] += 1 - LOG.debug(f"Skipping {project_name} as it's configured as a long checkout") - continue - - repo_path = await git_checkout_or_rebase(work_path, project_config, rebase) - if not repo_path: - continue - await black_run(repo_path, project_config, results) - - if not keep: - LOG.debug(f"Removing {repo_path}") - rmtree_partial = partial( - rmtree, path=repo_path, 
onerror=handle_PermissionError - ) - await loop.run_in_executor(None, rmtree_partial) - - LOG.info(f"Finished {project_name}") - - -async def process_queue( - config_file: str, - work_path: Path, - workers: int, - keep: bool = False, - long_checkouts: bool = False, - rebase: bool = False, -) -> int: - """ - Process the queue with X workers and evaluate results - - Success is guaged via the config "expect_formatting_changes" - - Integer return equals the number of failed projects - """ - results = Results() - results.stats["disabled"] = 0 - results.stats["failed"] = 0 - results.stats["skipped_long_checkout"] = 0 - results.stats["success"] = 0 - results.stats["wrong_py_ver"] = 0 - - config, queue = await load_projects_queue(Path(config_file)) - project_count = queue.qsize() - s = "" if project_count == 1 else "s" - LOG.info(f"{project_count} project{s} to run Black over") - if project_count < 1: - return -1 - - s = "" if workers == 1 else "s" - LOG.debug(f"Using {workers} parallel worker{s} to run Black") - # Wait until we finish running all the projects before analyzing - await asyncio.gather( - *[ - project_runner( - i, config, queue, work_path, results, long_checkouts, rebase, keep - ) - for i in range(workers) - ] - ) - - LOG.info("Analyzing results") - return analyze_results(project_count, results) - - -if __name__ == "__main__": # pragma: nocover - raise NotImplementedError("lib is a library, funnily enough.") diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py index d79bfe75bc2..6bbc7c52086 100644 --- a/src/blackd/__init__.py +++ b/src/blackd/__init__.py @@ -1,17 +1,27 @@ import asyncio +import logging from concurrent.futures import Executor, ProcessPoolExecutor from datetime import datetime from functools import partial -import logging from multiprocessing import freeze_support from typing import Set, Tuple -from aiohttp import web -import aiohttp_cors -import black +try: + from aiohttp import web + + from .middlewares import cors +except ImportError as ie: + raise ImportError( + f"aiohttp dependency is not installed: {ie}. 
" + + "Please re-install black with the '[d]' extra install " + + "to obtain aiohttp_cors: `pip install black[d]`" + ) from None + import click +import black from _black_version import version as __version__ +from black.concurrency import maybe_install_uvloop # This is used internally by tests to shut down the server prematurely _stop_signal = asyncio.Event() @@ -21,6 +31,8 @@ LINE_LENGTH_HEADER = "X-Line-Length" PYTHON_VARIANT_HEADER = "X-Python-Variant" SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization" +SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma" +PREVIEW = "X-Preview" FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe" DIFF_HEADER = "X-Diff" @@ -29,6 +41,8 @@ LINE_LENGTH_HEADER, PYTHON_VARIANT_HEADER, SKIP_STRING_NORMALIZATION_HEADER, + SKIP_MAGIC_TRAILING_COMMA, + PREVIEW, FAST_OR_SAFE_HEADER, DIFF_HEADER, ] @@ -56,20 +70,11 @@ def main(bind_host: str, bind_port: int) -> None: def make_app() -> web.Application: - app = web.Application() - executor = ProcessPoolExecutor() - - cors = aiohttp_cors.setup(app) - resource = cors.add(app.router.add_resource("/")) - cors.add( - resource.add_route("POST", partial(handle, executor=executor)), - { - "*": aiohttp_cors.ResourceOptions( - allow_headers=(*BLACK_HEADERS, "Content-Type"), expose_headers="*" - ) - }, + app = web.Application( + middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))] ) - + executor = ProcessPoolExecutor() + app.add_routes([web.post("/", partial(handle, executor=executor))]) return app @@ -103,6 +108,10 @@ async def handle(request: web.Request, executor: Executor) -> web.Response: skip_string_normalization = bool( request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False) ) + skip_magic_trailing_comma = bool( + request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False) + ) + preview = bool(request.headers.get(PREVIEW, False)) fast = False if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast": fast = True @@ -111,6 +120,8 @@ async def handle(request: web.Request, executor: Executor) -> web.Response: is_pyi=pyi, line_length=line_length, string_normalization=not skip_string_normalization, + magic_trailing_comma=not skip_magic_trailing_comma, + preview=preview, ) req_bytes = await request.content.read() charset = request.charset if request.charset is not None else "utf8" @@ -122,6 +133,13 @@ async def handle(request: web.Request, executor: Executor) -> web.Response: executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode) ) + # Preserve CRLF line endings + if req_str[req_str.find("\n") - 1] == "\r": + formatted_str = formatted_str.replace("\n", "\r\n") + # If, after swapping line endings, nothing changed, then say so + if formatted_str == req_str: + raise black.NothingChanged + # Only output the diff in the HTTP response only_diff = bool(request.headers.get(DIFF_HEADER, False)) if only_diff: @@ -168,10 +186,8 @@ def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersi raise InvalidVariantHeader("major version must be 2 or 3") if len(rest) > 0: minor = int(rest[0]) - if major == 2 and minor != 7: - raise InvalidVariantHeader( - "minor version must be 7 for Python 2" - ) + if major == 2: + raise InvalidVariantHeader("Python 2 is not supported") else: # Default to lowest supported minor version. 
minor = 7 if major == 2 else 3 @@ -180,11 +196,12 @@ def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersi raise InvalidVariantHeader(f"3.{minor} is not supported") versions.add(black.TargetVersion[version_str]) except (KeyError, ValueError): - raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'") + raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'") from None return False, versions def patched_main() -> None: + maybe_install_uvloop() freeze_support() black.patch_click() main() diff --git a/src/blackd/__main__.py b/src/blackd/__main__.py new file mode 100644 index 00000000000..b5a4b137446 --- /dev/null +++ b/src/blackd/__main__.py @@ -0,0 +1,3 @@ +import blackd + +blackd.patched_main() diff --git a/src/blackd/middlewares.py b/src/blackd/middlewares.py new file mode 100644 index 00000000000..370e0ae222e --- /dev/null +++ b/src/blackd/middlewares.py @@ -0,0 +1,45 @@ +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, TypeVar + +from aiohttp.web_request import Request +from aiohttp.web_response import StreamResponse + +if TYPE_CHECKING: + F = TypeVar("F", bound=Callable[..., Any]) + middleware: Callable[[F], F] +else: + try: + from aiohttp.web_middlewares import middleware + except ImportError: + # @middleware is deprecated and its behaviour is the default since aiohttp 4.0 + # so if it doesn't exist anymore, define a no-op for forward compatibility. + middleware = lambda x: x # noqa: E731 + +Handler = Callable[[Request], Awaitable[StreamResponse]] +Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] + + +def cors(allow_headers: Iterable[str]) -> Middleware: + @middleware + async def impl(request: Request, handler: Handler) -> StreamResponse: + is_options = request.method == "OPTIONS" + is_preflight = is_options and "Access-Control-Request-Method" in request.headers + if is_preflight: + resp = StreamResponse() + else: + resp = await handler(request) + + origin = request.headers.get("Origin") + if not origin: + return resp + + resp.headers["Access-Control-Allow-Origin"] = "*" + resp.headers["Access-Control-Expose-Headers"] = "*" + if is_options: + resp.headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers) + resp.headers["Access-Control-Allow-Methods"] = ", ".join( + ("OPTIONS", "POST") + ) + + return resp + + return impl diff --git a/src/blib2to3/Grammar.txt b/src/blib2to3/Grammar.txt index f14e2b516bd..ac7ad7643ff 100644 --- a/src/blib2to3/Grammar.txt +++ b/src/blib2to3/Grammar.txt @@ -12,7 +12,7 @@ file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE eval_input: testlist NEWLINE* ENDMARKER -decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorator: '@' namedexpr_test NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef | async_funcdef) async_funcdef: ASYNC funcdef @@ -24,7 +24,7 @@ parameters: '(' [typedargslist] ')' # arguments = argument (',' argument)* # argument = tfpdef ['=' test] # kwargs = '**' tname [','] -# args = '*' [tname] +# args = '*' [tname_star] # kwonly_kwargs = (',' argument)* [',' [kwargs]] # args_kwonly_kwargs = args kwonly_kwargs | kwargs # poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] @@ -34,14 +34,15 @@ parameters: '(' [typedargslist] ')' # It needs to be fully expanded to allow our LL(1) parser to work on it. 
typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [
-    ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+    ',' [((tfpdef ['=' test] ',')* ('*' [tname_star] (',' tname ['=' test])*
     [',' ['**' tname [',']]] | '**' tname [','])
     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])]
-    ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+    ] | ((tfpdef ['=' test] ',')* ('*' [tname_star] (',' tname ['=' test])*
     [',' ['**' tname [',']]] | '**' tname [','])
     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
 
 tname: NAME [':' test]
+tname_star: NAME [':' (test|star_expr)]
 tfpdef: tname | '(' tfplist ')'
 tfplist: tfpdef (',' tfpdef)* [',']
 
@@ -77,7 +78,7 @@ small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
 import_stmt | global_stmt | exec_stmt | assert_stmt)
 expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
 ('=' (yield_expr|testlist_star_expr))*)
-annassign: ':' test ['=' test]
+annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
 testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
 augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
 '<<=' | '>>=' | '**=' | '//=')
@@ -105,21 +106,20 @@ global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
 exec_stmt: 'exec' expr ['in' test [',' test]]
 assert_stmt: 'assert' test [',' test]
 
-compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt | match_stmt
 async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
 if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
 while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
-for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist_star_expr ':' suite ['else' ':' suite]
 try_stmt: ('try' ':' suite
 ((except_clause ':' suite)+ ['else' ':' suite]
 ['finally' ':' suite] | 'finally' ':' suite))
-with_stmt: 'with' with_item (',' with_item)* ':' suite
-with_item: test ['as' expr]
-with_var: 'as' expr
+with_stmt: 'with' asexpr_test (',' asexpr_test)* ':' suite
+
 # NB compile.c makes sure that the default except clause is last
-except_clause: 'except' [test [(',' | 'as') test]]
+except_clause: 'except' ['*'] [test [(',' | 'as') test]]
 suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
 
 # Backward compatibility cruft to support:
@@ -131,7 +131,15 @@ testlist_safe: old_test [(',' old_test)+ [',']]
 old_test: or_test | old_lambdef
 old_lambdef: 'lambda' [varargslist] ':' old_test
 
-namedexpr_test: test [':=' test]
+namedexpr_test: asexpr_test [':=' asexpr_test]
+
+# This is actually not a real rule, though since the parser is very
+# limited in terms of its strategy for match/case rules, we are inserting
+# a virtual case (<expr> as <expr>) as a valid expression. Unless a better
+# approach is found, the only side effect of this seems to be just allowing
+# more stuff to be parsed (which would then fail on the ast).
+asexpr_test: test ['as' test]
+
 test: or_test ['if' or_test 'else' test] | lambdef
 or_test: and_test ('or' and_test)*
 and_test: not_test ('and' not_test)*
@@ -156,15 +164,15 @@ listmaker: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star
 testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
 lambdef: 'lambda' [varargslist] ':' test
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
-subscriptlist: subscript (',' subscript)* [',']
-subscript: test | [test] ':' [test] [sliceop]
+subscriptlist: (subscript|star_expr) (',' (subscript|star_expr))* [',']
+subscript: test [':=' test] | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
-dictsetmaker: ( ((test ':' test | '**' expr)
-                (comp_for | (',' (test ':' test | '**' expr))* [','])) |
-               ((test | star_expr)
-                (comp_for | (',' (test | star_expr))* [','])) )
+dictsetmaker: ( ((test ':' asexpr_test | '**' expr)
+                (comp_for | (',' (test ':' asexpr_test | '**' expr))* [','])) |
+               ((test [':=' test] | star_expr)
+                (comp_for | (',' (test [':=' test] | star_expr))* [','])) )
 
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
 
@@ -179,7 +187,8 @@ arglist: argument (',' argument)* [',']
 # that precede iterable unpackings are blocked; etc.
 argument: ( test [comp_for] |
             test ':=' test |
-            test '=' test |
+            test 'as' test |
+            test '=' asexpr_test |
             '**' test |
             '*' test )
 
@@ -213,3 +222,31 @@ encoding_decl: NAME
 
 yield_expr: 'yield' [yield_arg]
 yield_arg: 'from' test | testlist_star_expr
+
+
+# 3.10 match statement definition
+
+# PS: normally the grammar is much, much more restricted, but
+# for now, rather than bothering to encode the exact same DSL
+# in an LL(1) parser, we will just accept an expression and let
+# the ast.parse() step of the safe mode reject invalid
+# grammar.
+
+# The reason why it is more restricted is that patterns are some
+# sort of a DSL (more advanced than our LHS on assignments, but
+# still in a very limited python subset). They are not really
+# expressions, but who cares. If we can parse them, that is enough
+# to reformat them.
+
+match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
+
+# This is more permissive than the actual version. For example it
+# accepts `match *something:`, even though single-item starred expressions
+# are forbidden.
+subject_expr: (namedexpr_test|star_expr) (',' (namedexpr_test|star_expr))* [',']
+
+# cases
+case_block: "case" patterns [guard] ':' suite
+guard: 'if' namedexpr_test
+patterns: pattern (',' pattern)* [',']
+pattern: (expr|star_expr) ['as' expr]
diff --git a/src/blib2to3/README b/src/blib2to3/README
index a43f15cb37d..0d3c607c9c7 100644
--- a/src/blib2to3/README
+++ b/src/blib2to3/README
@@ -13,4 +13,11 @@ Reasons for forking:
 - ability to Cythonize
 
 Change Log:
-- Changes default logger used by Driver
\ No newline at end of file
+- Changes default logger used by Driver
+- Backported the following upstream parser changes:
+  - "bpo-42381: Allow walrus in set literals and set comprehensions (GH-23332)"
+    https://github.com/python/cpython/commit/cae60187cf7a7b26281d012e1952fafe4e2e97e9
+  - "bpo-42316: Allow unparenthesized walrus operator in indexes (GH-23317)"
+    https://github.com/python/cpython/commit/b0aba1fcdc3da952698d99aec2334faa79a8b68c
+- Tweaks to help mypyc compile faster code (including inlining type information,
+  "Final-ing", etc.)
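Taken together, the grammar changes above (PEP 614 decorators, walrus operators in subscripts and set displays, and the deliberately permissive match_stmt rule) can be seen in one short snippet. The following is a hypothetical demo, not part of the patch; every identifier in it is invented, and it requires Python 3.10 to actually run:

@registry["handler"]                   # PEP 614: any namedexpr_test may follow '@'
def handle():
    ...

first = handlers[i := 0]               # bpo-42316: unparenthesized walrus in an index
seen = {token := next_token(), token}  # bpo-42381: walrus in a set literal

match command:                         # parsed through the new match_stmt rule
    case [action, obj]:
        dispatch(action, obj)
    case _:
        pass

The old grammar rejected every one of these constructs at the parse step, before Black could even consider formatting them.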
diff --git a/src/blib2to3/pgen2/conv.py b/src/blib2to3/pgen2/conv.py
index 78165217a1b..fa9825e54d6 100644
--- a/src/blib2to3/pgen2/conv.py
+++ b/src/blib2to3/pgen2/conv.py
@@ -29,7 +29,7 @@
 """
 
 # Python imports
-import regex as re
+import re
 
 # Local imports
 from pgen2 import grammar, token
diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py
index 81940f78f0f..daf271dfa9a 100644
--- a/src/blib2to3/pgen2/driver.py
+++ b/src/blib2to3/pgen2/driver.py
@@ -16,7 +16,6 @@
 __all__ = ["Driver", "load_grammar"]
 
 # Python imports
-import codecs
 import io
 import os
 import logging
@@ -24,50 +23,121 @@
 import sys
 from typing import (
     Any,
-    Callable,
+    cast,
     IO,
     Iterable,
     List,
     Optional,
     Text,
+    Iterator,
     Tuple,
+    TypeVar,
+    Generic,
     Union,
-    Sequence,
 )
+from contextlib import contextmanager
+from dataclasses import dataclass, field
 
 # Pgen imports
 from . import grammar, parse, token, tokenize, pgen
 from logging import Logger
-from blib2to3.pytree import _Convert, NL
+from blib2to3.pytree import NL
 from blib2to3.pgen2.grammar import Grammar
+from blib2to3.pgen2.tokenize import GoodTokenInfo
 
 Path = Union[str, "os.PathLike[str]"]
 
 
+@dataclass
+class ReleaseRange:
+    start: int
+    end: Optional[int] = None
+    tokens: List[Any] = field(default_factory=list)
+
+    def lock(self) -> None:
+        total_eaten = len(self.tokens)
+        self.end = self.start + total_eaten
+
+
+class TokenProxy:
+    def __init__(self, generator: Any) -> None:
+        self._tokens = generator
+        self._counter = 0
+        self._release_ranges: List[ReleaseRange] = []
+
+    @contextmanager
+    def release(self) -> Iterator["TokenProxy"]:
+        release_range = ReleaseRange(self._counter)
+        self._release_ranges.append(release_range)
+        try:
+            yield self
+        finally:
+            # Lock the last release range to the final position that
+            # has been eaten.
+            release_range.lock()
+
+    def eat(self, point: int) -> Any:
+        eaten_tokens = self._release_ranges[-1].tokens
+        if point < len(eaten_tokens):
+            return eaten_tokens[point]
+        else:
+            while point >= len(eaten_tokens):
+                token = next(self._tokens)
+                eaten_tokens.append(token)
+            return token
+
+    def __iter__(self) -> "TokenProxy":
+        return self
+
+    def __next__(self) -> Any:
+        # If the current position has already been looked up, return
+        # the cached (eaten) token; if not, advance the underlying
+        # token producer.
+        for release_range in self._release_ranges:
+            assert release_range.end is not None
+
+            start, end = release_range.start, release_range.end
+            if start <= self._counter < end:
+                token = release_range.tokens[self._counter - start]
+                break
+        else:
+            token = next(self._tokens)
+        self._counter += 1
+        return token
+
+    def can_advance(self, to: int) -> bool:
+        # Try to eat; fail if we can't. The eat operation is cached,
+        # so there won't be any additional cost of eating here.
+        try:
+            self.eat(to)
+        except StopIteration:
+            return False
+        else:
+            return True
+
+
 class Driver(object):
-    def __init__(
-        self,
-        grammar: Grammar,
-        convert: Optional[_Convert] = None,
-        logger: Optional[Logger] = None,
-    ) -> None:
+    def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
         self.grammar = grammar
         if logger is None:
             logger = logging.getLogger(__name__)
         self.logger = logger
-        self.convert = convert
 
-    def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
+    def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL:
         """Parse a series of tokens and return the syntax tree."""
         # XXX Move the prefix computation into a wrapper around tokenize.
-        p = parse.Parser(self.grammar, self.convert)
-        p.setup()
+        proxy = TokenProxy(tokens)
+
+        p = parse.Parser(self.grammar)
+        p.setup(proxy=proxy)
+
         lineno = 1
         column = 0
-        indent_columns = []
+        indent_columns: List[int] = []
         type = value = start = end = line_text = None
         prefix = ""
-        for quintuple in tokens:
+
+        for quintuple in proxy:
             type, value, start, end, line_text = quintuple
             if start != (lineno, column):
                 assert (lineno, column) <= start, ((lineno, column), start)
@@ -89,6 +159,7 @@ def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
             if type == token.OP:
                 type = grammar.opmap[value]
             if debug:
+                assert type is not None
                 self.logger.debug(
                     "%s %r (prefix=%r)", token.tok_name[type], value, prefix
                 )
@@ -100,7 +171,7 @@ def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
             elif type == token.DEDENT:
                 _indent_col = indent_columns.pop()
                 prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col)
-            if p.addtoken(type, value, (prefix, start)):
+            if p.addtoken(cast(int, type), value, (prefix, start)):
                 if debug:
                     self.logger.debug("Stop.")
                 break
@@ -192,14 +263,13 @@ def load_grammar(
         logger = logging.getLogger(__name__)
     gp = _generate_pickle_name(gt) if gp is None else gp
     if force or not _newer(gp, gt):
-        logger.info("Generating grammar tables from %s", gt)
         g: grammar.Grammar = pgen.generate_grammar(gt)
         if save:
-            logger.info("Writing grammar tables to %s", gp)
             try:
                 g.dump(gp)
-            except OSError as e:
-                logger.info("Writing failed: %s", e)
+            except OSError:
+                # Ignore error, caching is not vital.
+                pass
     else:
         g = grammar.Grammar()
         g.load(gp)
diff --git a/src/blib2to3/pgen2/grammar.py b/src/blib2to3/pgen2/grammar.py
index 2882cdac89b..337a64f1726 100644
--- a/src/blib2to3/pgen2/grammar.py
+++ b/src/blib2to3/pgen2/grammar.py
@@ -89,8 +89,10 @@ def __init__(self) -> None:
         self.dfas: Dict[int, DFAS] = {}
         self.labels: List[Label] = [(0, "EMPTY")]
         self.keywords: Dict[str, int] = {}
+        self.soft_keywords: Dict[str, int] = {}
         self.tokens: Dict[int, int] = {}
         self.symbol2label: Dict[str, int] = {}
+        self.version: Tuple[int, int] = (0, 0)
         self.start = 256
         # Python 3.7+ parses async as a keyword, not an identifier
         self.async_keywords = False
@@ -136,6 +138,7 @@ def copy(self: _P) -> _P:
             "number2symbol",
             "dfas",
             "keywords",
+            "soft_keywords",
             "tokens",
             "symbol2label",
         ):
@@ -143,6 +146,7 @@ def copy(self: _P) -> _P:
         new.labels = self.labels[:]
         new.states = self.states[:]
         new.start = self.start
+        new.version = self.version
         new.async_keywords = self.async_keywords
         return new
diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py
index 8c374d35b42..d6deaac6964 100644
--- a/src/blib2to3/pgen2/parse.py
+++ b/src/blib2to3/pgen2/parse.py
@@ -9,23 +9,30 @@
 how this parsing engine works.
 
 """
+import copy
+from contextlib import contextmanager
 
 # Local imports
-from . import token
+from . import grammar, token, tokenize
 from typing import (
+    cast,
+    Any,
     Optional,
     Text,
-    Sequence,
-    Any,
     Union,
     Tuple,
     Dict,
     List,
+    Iterator,
     Callable,
     Set,
+    TYPE_CHECKING,
 )
 from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pytree import NL, Context, RawNode, Leaf, Node
+from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
+
+if TYPE_CHECKING:
+    from blib2to3.pgen2.driver import TokenProxy
 
 Results = Dict[Text, NL]
@@ -39,6 +46,87 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL:
     return Node(type=node[0], children=node[3], context=node[2])
 
 
+# A placeholder node, used when parser is backtracking.
+DUMMY_NODE = (-1, None, None, None)
+
+
+def stack_copy(
+    stack: List[Tuple[DFAS, int, RawNode]]
+) -> List[Tuple[DFAS, int, RawNode]]:
+    """Nodeless stack copy."""
+    return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
+
+
+class Recorder:
+    def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
+        self.parser = parser
+        self._ilabels = ilabels
+        self.context = context  # does not really matter
+
+        self._dead_ilabels: Set[int] = set()
+        self._start_point = self.parser.stack
+        self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
+
+    @property
+    def ilabels(self) -> Set[int]:
+        return self._dead_ilabels.symmetric_difference(self._ilabels)
+
+    @contextmanager
+    def switch_to(self, ilabel: int) -> Iterator[None]:
+        with self.backtrack():
+            self.parser.stack = self._points[ilabel]
+            try:
+                yield
+            except ParseError:
+                self._dead_ilabels.add(ilabel)
+            finally:
+                self.parser.stack = self._start_point
+
+    @contextmanager
+    def backtrack(self) -> Iterator[None]:
+        """
+        Use the node-level invariant ones for basic parsing operations (push/pop/shift).
+        These will still operate on the stack, but they won't create any new nodes or
+        modify the contents of any other existing nodes.
+
+        This saves us a ton of time when we are backtracking, since we
+        want to restore to the initial state as quickly as possible, which
+        can only be done by having as few mutations as possible.
+        """
+        is_backtracking = self.parser.is_backtracking
+        try:
+            self.parser.is_backtracking = True
+            yield
+        finally:
+            self.parser.is_backtracking = is_backtracking
+
+    def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+        func: Callable[..., Any]
+        if raw:
+            func = self.parser._addtoken
+        else:
+            func = self.parser.addtoken
+
+        for ilabel in self.ilabels:
+            with self.switch_to(ilabel):
+                args = [tok_type, tok_val, self.context]
+                if raw:
+                    args.insert(0, ilabel)
+                func(*args)
+
+    def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]:
+        alive_ilabels = self.ilabels
+        if len(alive_ilabels) == 0:
+            *_, most_successful_ilabel = self._dead_ilabels
+            raise ParseError("bad input", most_successful_ilabel, value, self.context)
+
+        ilabel, *rest = alive_ilabels
+        if force or not rest:
+            return ilabel
+        else:
+            return None
+
+
 class ParseError(Exception):
     """Exception to signal the parser is stuck."""
 
@@ -102,6 +190,11 @@ def __init__(self, grammar: Grammar, convert: Optional[Convert] = None) -> None:
         to be converted.  The syntax tree is converted from the bottom
         up.
 
+        **post-note: the convert argument is ignored since for Black's
+        usage, convert will always be blib2to3.pytree.convert. Allowing
+        this to be dynamic hurts mypyc's ability to use early binding.
+        These docs are left for historical and informational value.
+
         A concrete syntax tree node is a (type, value, context, nodes)
         tuple, where type is the node type (a token or symbol number),
         value is None for symbols and a string for tokens, context is
@@ -114,9 +207,11 @@ def __init__(self, grammar: Grammar, convert: Optional[Convert] = None) -> None:
 
         """
         self.grammar = grammar
+        # See note in docstring above. TL;DR: this is ignored.
         self.convert = convert or lam_sub
+        self.is_backtracking = False
 
-    def setup(self, start: Optional[int] = None) -> None:
+    def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
         """Prepare for parsing.
 
         This *must* be called before starting to parse.
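Before the next hunk, it is worth pausing on what Recorder and TokenProxy accomplish together. The following is a minimal, self-contained sketch of the same buffered-lookahead idea, not blib2to3's actual classes: tokens are cached as they are read so that a soft keyword such as `match` can be tried both as a keyword and as an ordinary name, committing only once lookahead disambiguates. The classification heuristic below is deliberately naive; the real parser instead advances every surviving candidate label in lockstep (Recorder.add_token) and picks whichever parse survives the furthest.

from typing import Iterator, List, Tuple

Token = Tuple[int, str]  # (type, value), heavily simplified


class Lookahead:
    """Cache tokens from an iterator so they can be inspected repeatedly."""

    def __init__(self, tokens: Iterator[Token]) -> None:
        self._tokens = tokens
        self._buffer: List[Token] = []

    def peek(self, i: int) -> Token:
        # Pull tokens into the buffer on demand; re-peeking is free,
        # mirroring how TokenProxy.eat() caches what it consumes.
        while len(self._buffer) <= i:
            self._buffer.append(next(self._tokens))
        return self._buffer[i]


def classify_soft_keyword(la: Lookahead) -> str:
    # Assuming token 0 is the word `match`: it starts a statement in
    # `match x:` but is a plain name in `match = 3` or `match.group()`;
    # one token of lookahead often decides. Real disambiguation is harder.
    try:
        following = la.peek(1)[1]
    except StopIteration:
        return "name"
    return "name" if following in {"=", ".", ","} else "keyword"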
@@ -139,11 +234,57 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
         self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry]
         self.rootnode: Optional[NL] = None
         self.used_names: Set[str] = set()
+        self.proxy = proxy
 
-    def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool:
+    def addtoken(self, type: int, value: Text, context: Context) -> bool:
         """Add a token; return True iff this is the end of the program."""
         # Map from token to label
-        ilabel = self.classify(type, value, context)
+        ilabels = self.classify(type, value, context)
+        assert len(ilabels) >= 1
+
+        # If we have only one state to advance, we'll directly
+        # take it as is.
+        if len(ilabels) == 1:
+            [ilabel] = ilabels
+            return self._addtoken(ilabel, type, value, context)
+
+        # If there are multiple states which we can advance (this only
+        # happens with soft keywords), then we will try all of them
+        # in parallel and as soon as one state can reach further than
+        # the rest, we'll choose that one. This is a pretty hacky
+        # and hopefully temporary algorithm.
+        #
+        # For a more detailed explanation, check out this post:
+        # https://tree.science/what-the-backtracking.html
+
+        with self.proxy.release() as proxy:
+            counter, force = 0, False
+            recorder = Recorder(self, ilabels, context)
+            recorder.add_token(type, value, raw=True)
+
+            next_token_value = value
+            while recorder.determine_route(next_token_value) is None:
+                if not proxy.can_advance(counter):
+                    force = True
+                    break
+
+                next_token_type, next_token_value, *_ = proxy.eat(counter)
+                if next_token_type in (tokenize.COMMENT, tokenize.NL):
+                    counter += 1
+                    continue
+
+                if next_token_type == tokenize.OP:
+                    next_token_type = grammar.opmap[next_token_value]
+
+                recorder.add_token(next_token_type, next_token_value)
+                counter += 1
+
+            ilabel = cast(int, recorder.determine_route(next_token_value, force=force))
+            assert ilabel is not None
+
+        return self._addtoken(ilabel, type, value, context)
+
+    def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
         # Loop until the token is shifted; may raise exceptions
         while True:
             dfa, state, node = self.stack[-1]
@@ -151,10 +292,18 @@ def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool:
             arcs = states[state]
             # Look for a state with this label
             for i, newstate in arcs:
-                t, v = self.grammar.labels[i]
-                if ilabel == i:
+                t = self.grammar.labels[i][0]
+                if t >= 256:
+                    # See if it's a symbol and if we're in its first set
+                    itsdfa = self.grammar.dfas[t]
+                    itsstates, itsfirst = itsdfa
+                    if ilabel in itsfirst:
+                        # Push a symbol
+                        self.push(t, itsdfa, newstate, context)
+                        break  # To continue the outer while loop
+
+                elif ilabel == i:
                     # Look it up in the list of labels
-                    assert t < 256
                     # Shift a token; we're done with it
                     self.shift(type, value, newstate, context)
                     # Pop while we are in an accept-only state
@@ -168,14 +317,7 @@ def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool:
                         states, first = dfa
                     # Done with this token
                     return False
-                elif t >= 256:
-                    # See if it's a symbol and if we're in its first set
-                    itsdfa = self.grammar.dfas[t]
-                    itsstates, itsfirst = itsdfa
-                    if ilabel in itsfirst:
-                        # Push a symbol
-                        self.push(t, self.grammar.dfas[t], newstate, context)
-                        break  # To continue the outer while loop
+
             else:
                 if (0, state) in arcs:
                     # An accepting state, pop it and try something else
@@ -187,47 +329,61 @@ def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool:
                 # No success finding a transition
                 raise ParseError("bad input", type,
value, context) - def classify(self, type: int, value: Optional[Text], context: Context) -> int: - """Turn a token into a label. (Internal)""" + def classify(self, type: int, value: Text, context: Context) -> List[int]: + """Turn a token into a label. (Internal) + + Depending on whether the value is a soft-keyword or not, + this function may return multiple labels to choose from.""" if type == token.NAME: # Keep a listing of all used names - assert value is not None self.used_names.add(value) # Check for reserved words - ilabel = self.grammar.keywords.get(value) - if ilabel is not None: - return ilabel + if value in self.grammar.keywords: + return [self.grammar.keywords[value]] + elif value in self.grammar.soft_keywords: + assert type in self.grammar.tokens + return [ + self.grammar.soft_keywords[value], + self.grammar.tokens[type], + ] + ilabel = self.grammar.tokens.get(type) if ilabel is None: raise ParseError("bad token", type, value, context) - return ilabel + return [ilabel] - def shift( - self, type: int, value: Optional[Text], newstate: int, context: Context - ) -> None: + def shift(self, type: int, value: Text, newstate: int, context: Context) -> None: """Shift a token. (Internal)""" - dfa, state, node = self.stack[-1] - assert value is not None - assert context is not None - rawnode: RawNode = (type, value, context, None) - newnode = self.convert(self.grammar, rawnode) - if newnode is not None: + if self.is_backtracking: + dfa, state, _ = self.stack[-1] + self.stack[-1] = (dfa, newstate, DUMMY_NODE) + else: + dfa, state, node = self.stack[-1] + rawnode: RawNode = (type, value, context, None) + newnode = convert(self.grammar, rawnode) assert node[-1] is not None node[-1].append(newnode) - self.stack[-1] = (dfa, newstate, node) + self.stack[-1] = (dfa, newstate, node) def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None: """Push a nonterminal. (Internal)""" - dfa, state, node = self.stack[-1] - newnode: RawNode = (type, None, context, []) - self.stack[-1] = (dfa, newstate, node) - self.stack.append((newdfa, 0, newnode)) + if self.is_backtracking: + dfa, state, _ = self.stack[-1] + self.stack[-1] = (dfa, newstate, DUMMY_NODE) + self.stack.append((newdfa, 0, DUMMY_NODE)) + else: + dfa, state, node = self.stack[-1] + newnode: RawNode = (type, None, context, []) + self.stack[-1] = (dfa, newstate, node) + self.stack.append((newdfa, 0, newnode)) def pop(self) -> None: """Pop a nonterminal. 
(Internal)""" - popdfa, popstate, popnode = self.stack.pop() - newnode = self.convert(self.grammar, popnode) - if newnode is not None: + if self.is_backtracking: + self.stack.pop() + else: + popdfa, popstate, popnode = self.stack.pop() + newnode = convert(self.grammar, popnode) if self.stack: dfa, state, node = self.stack[-1] assert node[-1] is not None diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py index 13ec51d1878..631682a77c9 100644 --- a/src/blib2to3/pgen2/pgen.py +++ b/src/blib2to3/pgen2/pgen.py @@ -8,7 +8,6 @@ Any, Dict, IO, - Iterable, Iterator, List, Optional, @@ -116,12 +115,17 @@ def make_label(self, c: PgenGrammar, label: Text) -> int: assert label[0] in ('"', "'"), label value = eval(label) if value[0].isalpha(): + if label[0] == '"': + keywords = c.soft_keywords + else: + keywords = c.keywords + # A keyword - if value in c.keywords: - return c.keywords[value] + if value in keywords: + return keywords[value] else: c.labels.append((token.NAME, value)) - c.keywords[value] = ilabel + keywords[value] = ilabel return ilabel else: # An operator (any non-numeric token) @@ -168,8 +172,7 @@ def calcfirst(self, name: Text) -> None: if symbol in inverse: raise ValueError( "rule %s is ambiguous; %s is in the first sets of %s as well" - " as %s" - % (name, symbol, label, inverse[symbol]) + " as %s" % (name, symbol, label, inverse[symbol]) ) inverse[symbol] = label self.first[name] = totalset diff --git a/src/blib2to3/pgen2/token.py b/src/blib2to3/pgen2/token.py index 5870d47a61a..1e0dec9c714 100644 --- a/src/blib2to3/pgen2/token.py +++ b/src/blib2to3/pgen2/token.py @@ -1,7 +1,12 @@ """Token constants (from "token.h").""" +import sys from typing import Dict -from typing_extensions import Final + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # Taken from Python (r53757) and modified to include some tokens # originally monkeypatched in by pgen2.tokenize diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py index bad79b2dc2c..257dbef4a19 100644 --- a/src/blib2to3/pgen2/tokenize.py +++ b/src/blib2to3/pgen2/tokenize.py @@ -27,6 +27,7 @@ function to which the 5 fields described above are passed as 5 arguments, each time a new token is found.""" +import sys from typing import ( Callable, Iterable, @@ -39,13 +40,19 @@ Union, cast, ) + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + from blib2to3.pgen2.token import * from blib2to3.pgen2.grammar import Grammar __author__ = "Ka-Ping Yee " __credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro" -import regex as re +import re from codecs import BOM_UTF8, lookup from blib2to3.pgen2.token import * @@ -79,7 +86,7 @@ def _combinations(*l): Comment = r"#[^\r\n]*" Ignore = Whitespace + any(r"\\\r?\n" + Whitespace) + maybe(Comment) Name = ( # this is invalid but it's fine because Name comes after Number in all groups - r"\w+" + r"[^\s#\(\)\[\]\{\}+\-*/!@$%^&=|;:'\",\.<>/?`~\\]+" ) Binnumber = r"0[bB]_?[01]+(?:_[01]+)*" @@ -139,7 +146,7 @@ def _combinations(*l): PseudoExtras = group(r"\\\r?\n", Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) -pseudoprog = re.compile(PseudoToken, re.UNICODE) +pseudoprog: Final = re.compile(PseudoToken, re.UNICODE) single3prog = re.compile(Single3) double3prog = re.compile(Double3) @@ -149,7 +156,7 @@ def _combinations(*l): | {"u", "U", "ur", "uR", "Ur", "UR"} ) -endprogs = { +endprogs: Final = { "'": 
re.compile(Single), '"': re.compile(Double), "'''": single3prog, @@ -159,12 +166,12 @@ def _combinations(*l): **{prefix: None for prefix in _strprefixes}, } -triple_quoted = ( +triple_quoted: Final = ( {"'''", '"""'} | {f"{prefix}'''" for prefix in _strprefixes} | {f'{prefix}"""' for prefix in _strprefixes} ) -single_quoted = ( +single_quoted: Final = ( {"'", '"'} | {f"{prefix}'" for prefix in _strprefixes} | {f'{prefix}"' for prefix in _strprefixes} @@ -286,7 +293,7 @@ def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)", re.ASCII) -blank_re = re.compile(br"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII) +blank_re = re.compile(rb"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII) def _get_normal_name(orig_enc: str) -> str: @@ -418,7 +425,7 @@ def generate_tokens( logical line; continuation lines are included. """ lnum = parenlev = continued = 0 - numchars = "0123456789" + numchars: Final = "0123456789" contstr, needcont = "", 0 contline: Optional[str] = None indents = [0] @@ -427,7 +434,7 @@ def generate_tokens( # `await` as keywords. async_keywords = False if grammar is None else grammar.async_keywords # 'stashed' and 'async_*' are used for async/await parsing - stashed = None + stashed: Optional[GoodTokenInfo] = None async_def = False async_def_indent = 0 async_def_nl = False @@ -440,7 +447,7 @@ def generate_tokens( line = readline() except StopIteration: line = "" - lnum = lnum + 1 + lnum += 1 pos, max = 0, len(line) if contstr: # continued string @@ -481,14 +488,14 @@ def generate_tokens( column = 0 while pos < max: # measure leading whitespace if line[pos] == " ": - column = column + 1 + column += 1 elif line[pos] == "\t": column = (column // tabsize + 1) * tabsize elif line[pos] == "\f": column = 0 else: break - pos = pos + 1 + pos += 1 if pos == max: break @@ -507,7 +514,7 @@ def generate_tokens( COMMENT, comment_token, (lnum, pos), - (lnum, pos + len(comment_token)), + (lnum, nl_pos), line, ) yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) @@ -652,16 +659,16 @@ def generate_tokens( continued = 1 else: if initial in "([{": - parenlev = parenlev + 1 + parenlev += 1 elif initial in ")]}": - parenlev = parenlev - 1 + parenlev -= 1 if stashed: yield stashed stashed = None yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line) - pos = pos + 1 + pos += 1 if stashed: yield stashed diff --git a/src/blib2to3/pygram.py b/src/blib2to3/pygram.py index b8362b81473..99012cdd9cb 100644 --- a/src/blib2to3/pygram.py +++ b/src/blib2to3/pygram.py @@ -39,12 +39,14 @@ class _python_symbols(Symbols): arglist: int argument: int arith_expr: int + asexpr_test: int assert_stmt: int async_funcdef: int async_stmt: int atom: int augassign: int break_stmt: int + case_block: int classdef: int comp_for: int comp_if: int @@ -74,6 +76,7 @@ class _python_symbols(Symbols): for_stmt: int funcdef: int global_stmt: int + guard: int if_stmt: int import_as_name: int import_as_names: int @@ -82,6 +85,7 @@ class _python_symbols(Symbols): import_stmt: int lambdef: int listmaker: int + match_stmt: int namedexpr_test: int not_test: int old_comp_for: int @@ -92,6 +96,8 @@ class _python_symbols(Symbols): or_test: int parameters: int pass_stmt: int + pattern: int + patterns: int power: int print_stmt: int raise_stmt: int @@ -101,6 +107,7 @@ class _python_symbols(Symbols): single_input: int sliceop: int small_stmt: int + subject_expr: int star_expr: int stmt: int subscript: int @@ -116,6 +123,7 @@ 
class _python_symbols(Symbols): tfpdef: int tfplist: int tname: int + tname_star: int trailer: int try_stmt: int typedargslist: int @@ -124,9 +132,7 @@ class _python_symbols(Symbols): vfplist: int vname: int while_stmt: int - with_item: int with_stmt: int - with_var: int xor_expr: int yield_arg: int yield_expr: int @@ -149,6 +155,7 @@ class _pattern_symbols(Symbols): python_grammar_no_print_statement_no_exec_statement_async_keywords: Grammar python_grammar_no_exec_statement: Grammar pattern_grammar: Grammar +python_grammar_soft_keywords: Grammar python_symbols: _python_symbols pattern_symbols: _pattern_symbols @@ -159,6 +166,7 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: global python_grammar_no_print_statement global python_grammar_no_print_statement_no_exec_statement global python_grammar_no_print_statement_no_exec_statement_async_keywords + global python_grammar_soft_keywords global python_symbols global pattern_grammar global pattern_symbols @@ -171,6 +179,10 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: # Python 2 python_grammar = driver.load_packaged_grammar("blib2to3", _GRAMMAR_FILE, cache_dir) + python_grammar.version = (2, 0) + + soft_keywords = python_grammar.soft_keywords.copy() + python_grammar.soft_keywords.clear() python_symbols = _python_symbols(python_grammar) @@ -182,6 +194,7 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: python_grammar_no_print_statement_no_exec_statement = python_grammar.copy() del python_grammar_no_print_statement_no_exec_statement.keywords["print"] del python_grammar_no_print_statement_no_exec_statement.keywords["exec"] + python_grammar_no_print_statement_no_exec_statement.version = (3, 0) # Python 3.7+ python_grammar_no_print_statement_no_exec_statement_async_keywords = ( @@ -190,6 +203,14 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: python_grammar_no_print_statement_no_exec_statement_async_keywords.async_keywords = ( True ) + python_grammar_no_print_statement_no_exec_statement_async_keywords.version = (3, 7) + + # Python 3.10+ + python_grammar_soft_keywords = ( + python_grammar_no_print_statement_no_exec_statement_async_keywords.copy() + ) + python_grammar_soft_keywords.soft_keywords = soft_keywords + python_grammar_soft_keywords.version = (3, 10) pattern_grammar = driver.load_packaged_grammar( "blib2to3", _PATTERN_GRAMMAR_FILE, cache_dir diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py index 4b841b768e7..15a1420ef7d 100644 --- a/src/blib2to3/pytree.py +++ b/src/blib2to3/pytree.py @@ -10,11 +10,10 @@ There's also a pattern matching implementation here. """ -# mypy: allow-untyped-defs +# mypy: allow-untyped-defs, allow-incomplete-defs from typing import ( Any, - Callable, Dict, Iterator, List, @@ -25,7 +24,6 @@ Union, Set, Iterable, - Sequence, ) from blib2to3.pgen2.grammar import Grammar @@ -53,7 +51,7 @@ def type_repr(type_num: int) -> Union[Text, int]: return _type_reprs.setdefault(type_num, type_num) -_P = TypeVar("_P") +_P = TypeVar("_P", bound="Base") NL = Union["Node", "Leaf"] Context = Tuple[Text, Tuple[int, int]] @@ -93,8 +91,6 @@ def __eq__(self, other: Any) -> bool: return NotImplemented return self._eq(other) - __hash__ = None # type: Any # For Py3 compatibility. 
- @property def prefix(self) -> Text: raise NotImplementedError @@ -110,6 +106,9 @@ def _eq(self: _P, other: _P) -> bool: """ raise NotImplementedError + def __deepcopy__(self: _P, memo: Any) -> _P: + return self.clone() + def clone(self: _P) -> _P: """ Return a cloned (deep) copy of self. @@ -292,7 +291,7 @@ def __str__(self) -> Text: """ return "".join(map(str, self.children)) - def _eq(self, other) -> bool: + def _eq(self, other: Base) -> bool: """Compare two nodes for equality.""" return (self.type, self.children) == (other.type, other.children) @@ -327,7 +326,7 @@ def prefix(self) -> Text: return self.children[0].prefix @prefix.setter - def prefix(self, prefix) -> None: + def prefix(self, prefix: Text) -> None: if self.children: self.children[0].prefix = prefix @@ -387,7 +386,8 @@ class Leaf(Base): value: Text fixers_applied: List[Any] bracket_depth: int - opening_bracket: "Leaf" + # Changed later in brackets.py + opening_bracket: Optional["Leaf"] = None used_names: Optional[Set[Text]] _prefix = "" # Whitespace and comments preceding this token in the input lineno: int = 0 # Line where this token starts in the input @@ -400,6 +400,7 @@ def __init__( context: Optional[Context] = None, prefix: Optional[Text] = None, fixers_applied: List[Any] = [], + opening_bracket: Optional["Leaf"] = None, ) -> None: """ Initializer. @@ -417,6 +418,7 @@ def __init__( self._prefix = prefix self.fixers_applied: Optional[List[Any]] = fixers_applied[:] self.children = [] + self.opening_bracket = opening_bracket def __repr__(self) -> str: """Return a canonical string representation.""" @@ -435,9 +437,9 @@ def __str__(self) -> Text: This reproduces the input source exactly. """ - return self.prefix + str(self.value) + return self._prefix + str(self.value) - def _eq(self, other) -> bool: + def _eq(self, other: "Leaf") -> bool: """Compare two nodes for equality.""" return (self.type, self.value) == (other.type, other.value) @@ -470,7 +472,7 @@ def prefix(self) -> Text: return self._prefix @prefix.setter - def prefix(self, prefix) -> None: + def prefix(self, prefix: Text) -> None: self.changed() self._prefix = prefix @@ -616,7 +618,7 @@ def __init__( self.content = content self.name = name - def match(self, node: NL, results=None): + def match(self, node: NL, results=None) -> bool: """Override match() to insist on a leaf node.""" if not isinstance(node, Leaf): return False @@ -670,10 +672,13 @@ def __init__( newcontent = list(content) for i, item in enumerate(newcontent): assert isinstance(item, BasePattern), (i, item) - if isinstance(item, WildcardPattern): - self.wildcards = True + # I don't even think this code is used anywhere, but it does cause + # unreachable errors from mypy. This function's signature does look + # odd though *shrug*. + if isinstance(item, WildcardPattern): # type: ignore[unreachable] + self.wildcards = True # type: ignore[unreachable] self.type = type - self.content = newcontent + self.content = newcontent # TODO: this is unbound when content is None self.name = name def _submatch(self, node, results=None) -> bool: @@ -915,7 +920,7 @@ def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]: class NegatedPattern(BasePattern): - def __init__(self, content: Optional[Any] = None) -> None: + def __init__(self, content: Optional[BasePattern] = None) -> None: """ Initializer. 
@@ -936,7 +941,7 @@ def match_seq(self, nodes, results=None) -> bool:
         # We only match an empty sequence of nodes in its entirety
         return len(nodes) == 0
 
-    def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
         if self.content is None:
             # Return a match if there is an empty sequence
             if len(nodes) == 0:
@@ -976,6 +981,3 @@ def generate_matches(
             r.update(r0)
             r.update(r1)
             yield c0 + c1, r
-
-
-_Convert = Callable[[Grammar, RawNode], Any]
diff --git a/test_requirements.txt b/test_requirements.txt
new file mode 100644
index 00000000000..5bc494d5999
--- /dev/null
+++ b/test_requirements.txt
@@ -0,0 +1,6 @@
+coverage >= 5.3
+pre-commit
+pytest >= 6.1.1
+pytest-xdist >= 2.2.1
+pytest-cov >= 2.11.1
+tox
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000000..67517268d1b
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1 @@
+pytest_plugins = ["tests.optional"]
diff --git a/tests/data/fast/pep_572_do_not_remove_parens.py b/tests/data/fast/pep_572_do_not_remove_parens.py
new file mode 100644
index 00000000000..20e80a69377
--- /dev/null
+++ b/tests/data/fast/pep_572_do_not_remove_parens.py
@@ -0,0 +1,21 @@
+# Most of the following examples are really dumb; some of them aren't even accepted by Python.
+# We're fixing them only so fuzzers (which follow the grammar, which as a matter of
+# fact does allow these examples!) don't yell at us :p
+
+del (a := [1])
+
+try:
+    pass
+except (a := 1) as (b := why_does_this_exist):
+    pass
+
+for (z := 124) in (x := -124):
+    pass
+
+with (y := [3, 2, 1]) as (funfunfun := indeed):
+    pass
+
+
+@(please := stop)
+def sigh():
+    pass
diff --git a/tests/data/force_pyi.py b/tests/data/force_pyi.py
deleted file mode 100644
index 25246c22ca7..00000000000
--- a/tests/data/force_pyi.py
+++ /dev/null
@@ -1,6 +0,0 @@
-def f(): ...
-
-def g(): ...
-# output
-def f(): ...
-def g(): ...
diff --git a/tests/data/include_exclude_tests/.gitignore b/tests/data/include_exclude_tests/.gitignore
new file mode 100644
index 00000000000..91f34560522
--- /dev/null
+++ b/tests/data/include_exclude_tests/.gitignore
@@ -0,0 +1 @@
+dont_exclude/
diff --git a/tests/data/include_exclude_tests/pyproject.toml b/tests/data/include_exclude_tests/pyproject.toml
new file mode 100644
index 00000000000..9ba7ec26980
--- /dev/null
+++ b/tests/data/include_exclude_tests/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools>=41.0", "setuptools-scm", "wheel"]
+build-backend = "setuptools.build_meta"
diff --git a/tests/data/invalid_gitignore_tests/.gitignore b/tests/data/invalid_gitignore_tests/.gitignore
new file mode 100644
index 00000000000..cdf4cb4feba
--- /dev/null
+++ b/tests/data/invalid_gitignore_tests/.gitignore
@@ -0,0 +1 @@
+!
diff --git a/tests/data/invalid_gitignore_tests/a.py b/tests/data/invalid_gitignore_tests/a.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/data/invalid_gitignore_tests/pyproject.toml b/tests/data/invalid_gitignore_tests/pyproject.toml
new file mode 100644
index 00000000000..3908e457a9e
--- /dev/null
+++ b/tests/data/invalid_gitignore_tests/pyproject.toml
@@ -0,0 +1 @@
+# Empty configuration file; used in tests to avoid interference from Black's own config.
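The invalid_gitignore_tests fixtures added above boil down to a .gitignore containing only `!`, which git's wildmatch syntax rejects. To see the error Black has to surface gracefully, one can poke the pathspec library Black relies on for .gitignore handling; this is a sketch, with the exception's import path taken from recent pathspec releases:

import pathspec
from pathspec.patterns.gitwildmatch import GitWildMatchPatternError

try:
    # A lone "!" negates nothing, so gitwildmatch refuses to compile it.
    pathspec.PathSpec.from_lines("gitwildmatch", ["!"])
except GitWildMatchPatternError as exc:
    print(f"invalid .gitignore pattern: {exc}")

Rather than crashing on such a file, Black is expected to report the offending path and continue, which is exactly what these fixtures exercise.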
diff --git a/tests/data/invalid_nested_gitignore_tests/a.py b/tests/data/invalid_nested_gitignore_tests/a.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/data/invalid_nested_gitignore_tests/a/.gitignore b/tests/data/invalid_nested_gitignore_tests/a/.gitignore new file mode 100644 index 00000000000..cdf4cb4feba --- /dev/null +++ b/tests/data/invalid_nested_gitignore_tests/a/.gitignore @@ -0,0 +1 @@ +! diff --git a/tests/data/invalid_nested_gitignore_tests/a/a.py b/tests/data/invalid_nested_gitignore_tests/a/a.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/data/invalid_nested_gitignore_tests/pyproject.toml b/tests/data/invalid_nested_gitignore_tests/pyproject.toml new file mode 100644 index 00000000000..3908e457a9e --- /dev/null +++ b/tests/data/invalid_nested_gitignore_tests/pyproject.toml @@ -0,0 +1 @@ +# Empty configuration file; used in tests to avoid interference from Black's own config. diff --git a/tests/data/jupyter/non_python_notebook.ipynb b/tests/data/jupyter/non_python_notebook.ipynb new file mode 100644 index 00000000000..da5cdd8e185 --- /dev/null +++ b/tests/data/jupyter/non_python_notebook.ipynb @@ -0,0 +1 @@ +{"metadata":{"kernelspec":{"name":"ir","display_name":"R","language":"R"},"language_info":{"name":"R","codemirror_mode":"r","pygments_lexer":"r","mimetype":"text/x-r-source","file_extension":".r","version":"4.0.5"}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"library(tidyverse) ","metadata":{"_uuid":"051d70d956493feee0c6d64651c6a088724dca2a","_execution_state":"idle"},"execution_count":null,"outputs":[]}]} \ No newline at end of file diff --git a/tests/data/jupyter/notebook_empty_metadata.ipynb b/tests/data/jupyter/notebook_empty_metadata.ipynb new file mode 100644 index 00000000000..7dc1f805cd6 --- /dev/null +++ b/tests/data/jupyter/notebook_empty_metadata.ipynb @@ -0,0 +1,27 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "print('foo')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/tests/data/jupyter/notebook_no_trailing_newline.ipynb b/tests/data/jupyter/notebook_no_trailing_newline.ipynb new file mode 100644 index 00000000000..79f95bea2f6 --- /dev/null +++ b/tests/data/jupyter/notebook_no_trailing_newline.ipynb @@ -0,0 +1,39 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "print('foo')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit ('black': venv)", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/tests/data/jupyter/notebook_trailing_newline.ipynb b/tests/data/jupyter/notebook_trailing_newline.ipynb new file mode 100644 index 00000000000..4f82869312d --- /dev/null +++ b/tests/data/jupyter/notebook_trailing_newline.ipynb @@ -0,0 +1,39 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + 
"outputs": [], + "source": [ + "%%time\n", + "\n", + "print('foo')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit ('black': venv)", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/tests/data/jupyter/notebook_which_cant_be_parsed.ipynb b/tests/data/jupyter/notebook_which_cant_be_parsed.ipynb new file mode 100644 index 00000000000..257cc5642cb --- /dev/null +++ b/tests/data/jupyter/notebook_which_cant_be_parsed.ipynb @@ -0,0 +1 @@ +foo diff --git a/tests/data/jupyter/notebook_without_changes.ipynb b/tests/data/jupyter/notebook_without_changes.ipynb new file mode 100644 index 00000000000..ac6c7e63efa --- /dev/null +++ b/tests/data/jupyter/notebook_without_changes.ipynb @@ -0,0 +1,46 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "print(\"foo\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook should not be reformatted" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit ('black': venv)", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/tests/data/async_as_identifier.py b/tests/data/miscellaneous/async_as_identifier.py similarity index 100% rename from tests/data/async_as_identifier.py rename to tests/data/miscellaneous/async_as_identifier.py diff --git a/tests/data/blackd_diff.diff b/tests/data/miscellaneous/blackd_diff.diff similarity index 100% rename from tests/data/blackd_diff.diff rename to tests/data/miscellaneous/blackd_diff.diff diff --git a/tests/data/blackd_diff.py b/tests/data/miscellaneous/blackd_diff.py similarity index 100% rename from tests/data/blackd_diff.py rename to tests/data/miscellaneous/blackd_diff.py diff --git a/tests/data/debug_visitor.out b/tests/data/miscellaneous/debug_visitor.out similarity index 100% rename from tests/data/debug_visitor.out rename to tests/data/miscellaneous/debug_visitor.out diff --git a/tests/data/debug_visitor.py b/tests/data/miscellaneous/debug_visitor.py similarity index 100% rename from tests/data/debug_visitor.py rename to tests/data/miscellaneous/debug_visitor.py diff --git a/tests/data/miscellaneous/decorators.py b/tests/data/miscellaneous/decorators.py new file mode 100644 index 00000000000..a0f38ca7b9d --- /dev/null +++ b/tests/data/miscellaneous/decorators.py @@ -0,0 +1,182 @@ +# This file doesn't use the standard decomposition. +# Decorator syntax test cases are separated by double # comments. +# Those before the 'output' comment are valid under the old syntax. +# Those after the 'ouput' comment require PEP614 relaxed syntax. +# Do not remove the double # separator before the first test case, it allows +# the comment before the test case to be ignored. + +## + +@decorator +def f(): + ... + +## + +@decorator() +def f(): + ... + +## + +@decorator(arg) +def f(): + ... 
+ +## + +@decorator(kwarg=0) +def f(): + ... + +## + +@decorator(*args) +def f(): + ... + +## + +@decorator(**kwargs) +def f(): + ... + +## + +@decorator(*args, **kwargs) +def f(): + ... + +## + +@decorator(*args, **kwargs,) +def f(): + ... + +## + +@dotted.decorator +def f(): + ... + +## + +@dotted.decorator(arg) +def f(): + ... + +## + +@dotted.decorator(kwarg=0) +def f(): + ... + +## + +@dotted.decorator(*args) +def f(): + ... + +## + +@dotted.decorator(**kwargs) +def f(): + ... + +## + +@dotted.decorator(*args, **kwargs) +def f(): + ... + +## + +@dotted.decorator(*args, **kwargs,) +def f(): + ... + +## + +@double.dotted.decorator +def f(): + ... + +## + +@double.dotted.decorator(arg) +def f(): + ... + +## + +@double.dotted.decorator(kwarg=0) +def f(): + ... + +## + +@double.dotted.decorator(*args) +def f(): + ... + +## + +@double.dotted.decorator(**kwargs) +def f(): + ... + +## + +@double.dotted.decorator(*args, **kwargs) +def f(): + ... + +## + +@double.dotted.decorator(*args, **kwargs,) +def f(): + ... + +## + +@_(sequence["decorator"]) +def f(): + ... + +## + +@eval("sequence['decorator']") +def f(): + ... + +# output + +## + +@decorator()() +def f(): + ... + +## + +@(decorator) +def f(): + ... + +## + +@sequence["decorator"] +def f(): + ... + +## + +@decorator[List[str]] +def f(): + ... + +## + +@var := decorator +def f(): + ... \ No newline at end of file diff --git a/tests/data/miscellaneous/docstring_no_string_normalization.py b/tests/data/miscellaneous/docstring_no_string_normalization.py new file mode 100644 index 00000000000..a90b578f09a --- /dev/null +++ b/tests/data/miscellaneous/docstring_no_string_normalization.py @@ -0,0 +1,249 @@ +class ALonelyClass: + ''' + A multiline class docstring. + ''' + def AnEquallyLonelyMethod(self): + ''' + A multiline method docstring''' + pass + + +def one_function(): + '''This is a docstring with a single line of text.''' + pass + + +def shockingly_the_quotes_are_normalized(): + '''This is a multiline docstring. + This is a multiline docstring. + This is a multiline docstring. + ''' + pass + + +def foo(): + """This is a docstring with + some lines of text here + """ + return + + +def baz(): + '''"This" is a string with some + embedded "quotes"''' + return + + +def poit(): + """ + Lorem ipsum dolor sit amet. + + Consectetur adipiscing elit: + - sed do eiusmod tempor incididunt ut labore + - dolore magna aliqua + - enim ad minim veniam + - quis nostrud exercitation ullamco laboris nisi + - aliquip ex ea commodo consequat + """ + pass + + +def under_indent(): + """ + These lines are indented in a way that does not +make sense. + """ + pass + + +def over_indent(): + """ + This has a shallow indent + - But some lines are deeper + - And the closing quote is too deep + """ + pass + + +def single_line(): + """But with a newline after it! + + """ + pass + + +def this(): + r""" + 'hey ho' + """ + + +def that(): + """ "hey yah" """ + + +def and_that(): + """ + "hey yah" """ + + +def and_this(): + ''' + "hey yah"''' + + +def believe_it_or_not_this_is_in_the_py_stdlib(): ''' +"hey yah"''' + + +def shockingly_the_quotes_are_normalized_v2(): + ''' + Docstring Docstring Docstring + ''' + pass + + +def backslash_space(): + '\ ' + + +def multiline_backslash_1(): + ''' + hey\there\ + \ ''' + + +def multiline_backslash_2(): + ''' + hey there \ ''' + + +def multiline_backslash_3(): + ''' + already escaped \\ ''' + +# output + +class ALonelyClass: + ''' + A multiline class docstring. 
+ ''' + + def AnEquallyLonelyMethod(self): + ''' + A multiline method docstring''' + pass + + +def one_function(): + '''This is a docstring with a single line of text.''' + pass + + +def shockingly_the_quotes_are_normalized(): + '''This is a multiline docstring. + This is a multiline docstring. + This is a multiline docstring. + ''' + pass + + +def foo(): + """This is a docstring with + some lines of text here + """ + return + + +def baz(): + '''"This" is a string with some + embedded "quotes"''' + return + + +def poit(): + """ + Lorem ipsum dolor sit amet. + + Consectetur adipiscing elit: + - sed do eiusmod tempor incididunt ut labore + - dolore magna aliqua + - enim ad minim veniam + - quis nostrud exercitation ullamco laboris nisi + - aliquip ex ea commodo consequat + """ + pass + + +def under_indent(): + """ + These lines are indented in a way that does not + make sense. + """ + pass + + +def over_indent(): + """ + This has a shallow indent + - But some lines are deeper + - And the closing quote is too deep + """ + pass + + +def single_line(): + """But with a newline after it!""" + pass + + +def this(): + r""" + 'hey ho' + """ + + +def that(): + """ "hey yah" """ + + +def and_that(): + """ + "hey yah" """ + + +def and_this(): + ''' + "hey yah"''' + + +def believe_it_or_not_this_is_in_the_py_stdlib(): + ''' + "hey yah"''' + + +def shockingly_the_quotes_are_normalized_v2(): + ''' + Docstring Docstring Docstring + ''' + pass + + +def backslash_space(): + '\ ' + + +def multiline_backslash_1(): + ''' + hey\there\ + \ ''' + + +def multiline_backslash_2(): + ''' + hey there \ ''' + + +def multiline_backslash_3(): + ''' + already escaped \\''' diff --git a/tests/data/miscellaneous/docstring_preview_no_string_normalization.py b/tests/data/miscellaneous/docstring_preview_no_string_normalization.py new file mode 100644 index 00000000000..338cc01d33e --- /dev/null +++ b/tests/data/miscellaneous/docstring_preview_no_string_normalization.py @@ -0,0 +1,10 @@ +def do_not_touch_this_prefix(): + R"""There was a bug where docstring prefixes would be normalized even with -S.""" + + +def do_not_touch_this_prefix2(): + FR'There was a bug where docstring prefixes would be normalized even with -S.' + + +def do_not_touch_this_prefix3(): + u'''There was a bug where docstring prefixes would be normalized even with -S.''' diff --git a/tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff b/tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff new file mode 100644 index 00000000000..eba3fd2da7d --- /dev/null +++ b/tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff @@ -0,0 +1,447 @@ +--- [Deterministic header] ++++ [Deterministic header] +@@ -1,8 +1,8 @@ + ... 
+-'some_string' +-b'\\xa3' ++"some_string" ++b"\\xa3" + Name + None + True + False + 1 +@@ -21,99 +21,118 @@ + Name1 or (Name2 and Name3) or Name4 + Name1 or Name2 and Name3 or Name4 + v1 << 2 + 1 >> v2 + 1 % finished +-1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 +-((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) ++1 + v2 - v3 * 4 ^ 5**v6 / 7 // 8 ++((1 + v2) - (v3 * 4)) ^ (((5**v6) / 7) // 8) + not great + ~great + +value + -1 + ~int and not v1 ^ 123 + v2 | True + (~int) and (not ((v1 ^ (123 + v2)) | True)) +-+really ** -confusing ** ~operator ** -precedence +-flags & ~ select.EPOLLIN and waiters.write_task is not None +++(really ** -(confusing ** ~(operator**-precedence))) ++flags & ~select.EPOLLIN and waiters.write_task is not None + lambda arg: None + lambda a=True: a + lambda a, b, c=True: a +-lambda a, b, c=True, *, d=(1 << v2), e='str': a +-lambda a, b, c=True, *vararg, d=(v1 << 2), e='str', **kwargs: a + b ++lambda a, b, c=True, *, d=(1 << v2), e="str": a ++lambda a, b, c=True, *vararg, d=(v1 << 2), e="str", **kwargs: a + b + manylambdas = lambda x=lambda y=lambda z=1: z: y(): x() +-foo = (lambda port_id, ignore_missing: {"port1": port1_resource, "port2": port2_resource}[port_id]) ++foo = lambda port_id, ignore_missing: { ++ "port1": port1_resource, ++ "port2": port2_resource, ++}[port_id] + 1 if True else 2 + str or None if True else str or bytes or None + (str or None) if True else (str or bytes or None) + str or None if (1 if True else 2) else str or bytes or None + (str or None) if (1 if True else 2) else (str or bytes or None) +-((super_long_variable_name or None) if (1 if super_long_test_name else 2) else (str or bytes or None)) +-{'2.7': dead, '3.7': (long_live or die_hard)} +-{'2.7': dead, '3.7': (long_live or die_hard), **{'3.6': verygood}} ++( ++ (super_long_variable_name or None) ++ if (1 if super_long_test_name else 2) ++ else (str or bytes or None) ++) ++{"2.7": dead, "3.7": (long_live or die_hard)} ++{"2.7": dead, "3.7": (long_live or die_hard), **{"3.6": verygood}} + {**a, **b, **c} +-{'2.7', '3.6', '3.7', '3.8', '3.9', ('4.0' if gilectomy else '3.10')} +-({'a': 'b'}, (True or False), (+value), 'string', b'bytes') or None ++{"2.7", "3.6", "3.7", "3.8", "3.9", ("4.0" if gilectomy else "3.10")} ++({"a": "b"}, (True or False), (+value), "string", b"bytes") or None + () + (1,) + (1, 2) + (1, 2, 3) + [] + [1, 2, 3, 4, 5, 6, 7, 8, 9, (10 or A), (11 or B), (12 or C)] +-[1, 2, 3,] ++[1, 2, 3] + [*a] + [*range(10)] +-[*a, 4, 5,] +-[4, *a, 5,] +-[this_is_a_very_long_variable_which_will_force_a_delimiter_split, element, another, *more] ++[*a, 4, 5] ++[4, *a, 5] ++[ ++ this_is_a_very_long_variable_which_will_force_a_delimiter_split, ++ element, ++ another, ++ *more, ++] + {i for i in (1, 2, 3)} +-{(i ** 2) for i in (1, 2, 3)} +-{(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))} +-{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} ++{(i**2) for i in (1, 2, 3)} ++{(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} ++{((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} + [i for i in (1, 2, 3)] +-[(i ** 2) for i in (1, 2, 3)] +-[(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))] +-[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] ++[(i**2) for i in (1, 2, 3)] ++[(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] ++[((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] + {i: 0 for i in (1, 2, 3)} +-{i: j for i, j in ((1, 'a'), (2, 'b'), (3, 'c'))} ++{i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} + {a: b * 2 for a, b in dictionary.items()} + {a: b * -2 for a, b in 
dictionary.items()} +-{k: v for k, v in this_is_a_very_long_variable_which_will_cause_a_trailing_comma_which_breaks_the_comprehension} ++{ ++ k: v ++ for k, v in this_is_a_very_long_variable_which_will_cause_a_trailing_comma_which_breaks_the_comprehension ++} + Python3 > Python2 > COBOL + Life is Life + call() + call(arg) +-call(kwarg='hey') +-call(arg, kwarg='hey') +-call(arg, another, kwarg='hey', **kwargs) +-call(this_is_a_very_long_variable_which_will_force_a_delimiter_split, arg, another, kwarg='hey', **kwargs) # note: no trailing comma pre-3.6 ++call(kwarg="hey") ++call(arg, kwarg="hey") ++call(arg, another, kwarg="hey", **kwargs) ++call( ++ this_is_a_very_long_variable_which_will_force_a_delimiter_split, ++ arg, ++ another, ++ kwarg="hey", ++ **kwargs ++) # note: no trailing comma pre-3.6 + call(*gidgets[:2]) + call(a, *gidgets[:2]) + call(**self.screen_kwargs) + call(b, **self.screen_kwargs) + lukasz.langa.pl + call.me(maybe) +-1 .real +-1.0 .real ++(1).real ++(1.0).real + ....__class__ + list[str] + dict[str, int] + tuple[str, ...] +-tuple[ +- str, int, float, dict[str, int] +-] ++tuple[str, int, float, dict[str, int]] + tuple[str, int, float, dict[str, int],] + very_long_variable_name_filters: t.List[ + t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]], + ] + xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore + sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) + ) + xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore + sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) + ) +-xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[ +- ..., List[SomeClass] +-] = classmethod(sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)) # type: ignore ++xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( ++ sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) ++) # type: ignore + slice[0] + slice[0:1] + slice[0:1:2] + slice[:] + slice[:-1] +@@ -137,118 +156,197 @@ + numpy[-(c + 1) :, d] + numpy[:, l[-2]] + numpy[:, ::-1] + numpy[np.newaxis, :] + (str or None) if (sys.version_info[0] > (3,)) else (str or bytes or None) +-{'2.7': dead, '3.7': long_live or die_hard} +-{'2.7', '3.6', '3.7', '3.8', '3.9', '4.0' if gilectomy else '3.10'} ++{"2.7": dead, "3.7": long_live or die_hard} ++{"2.7", "3.6", "3.7", "3.8", "3.9", "4.0" if gilectomy else "3.10"} + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10 or A, 11 or B, 12 or C] + (SomeName) + SomeName + (Good, Bad, Ugly) + (i for i in (1, 2, 3)) +-((i ** 2) for i in (1, 2, 3)) +-((i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))) +-(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) ++((i**2) for i in (1, 2, 3)) ++((i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) ++(((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) + (*starred,) +-{"id": "1","type": "type","started_at": now(),"ended_at": now() + timedelta(days=10),"priority": 1,"import_session_id": 1,**kwargs} ++{ ++ "id": "1", ++ "type": "type", ++ "started_at": now(), ++ "ended_at": now() + timedelta(days=10), ++ "priority": 1, ++ "import_session_id": 1, ++ **kwargs, ++} + a = (1,) +-b = 1, ++b = (1,) + c = 1 + d = (1,) + a + (2,) + e = (1,).count(1) + f = 1, *range(10) + g = 1, *"ten" +-what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set(vars_to_remove) +-what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove) +-result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == 
email_address).order_by(models.Customer.id.asc()).all() +-result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc(),).all() ++what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set( ++ vars_to_remove ++) ++what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set( ++ vars_to_remove ++) ++result = ( ++ session.query(models.Customer.id) ++ .filter( ++ models.Customer.account_id == account_id, models.Customer.email == email_address ++ ) ++ .order_by(models.Customer.id.asc()) ++ .all() ++) ++result = ( ++ session.query(models.Customer.id) ++ .filter( ++ models.Customer.account_id == account_id, models.Customer.email == email_address ++ ) ++ .order_by(models.Customer.id.asc()) ++ .all() ++) + Ø = set() + authors.łukasz.say_thanks() + mapping = { + A: 0.25 * (10.0 / 12), + B: 0.1 * (10.0 / 12), + C: 0.1 * (10.0 / 12), + D: 0.1 * (10.0 / 12), + } + ++ + def gen(): + yield from outside_of_generator +- a = (yield) +- b = ((yield)) +- c = (((yield))) ++ a = yield ++ b = yield ++ c = yield ++ + + async def f(): + await some.complicated[0].call(with_args=(True or (1 is not 1))) +-print(* [] or [1]) ++ ++ ++print(*[] or [1]) + print(**{1: 3} if False else {x: x for x in range(3)}) +-print(* lambda x: x) +-assert(not Test),("Short message") +-assert this is ComplexTest and not requirements.fit_in_a_single_line(force=False), "Short message" +-assert(((parens is TooMany))) +-for x, in (1,), (2,), (3,): ... +-for y in (): ... +-for z in (i for i in (1, 2, 3)): ... +-for i in (call()): ... +-for j in (1 + (2 + 3)): ... +-while(this and that): ... +-for addr_family, addr_type, addr_proto, addr_canonname, addr_sockaddr in socket.getaddrinfo('google.com', 'http'): ++print(*lambda x: x) ++assert not Test, "Short message" ++assert this is ComplexTest and not requirements.fit_in_a_single_line( ++ force=False ++), "Short message" ++assert parens is TooMany ++for (x,) in (1,), (2,), (3,): ++ ... ++for y in (): ++ ... ++for z in (i for i in (1, 2, 3)): ++ ... ++for i in call(): ++ ... ++for j in 1 + (2 + 3): ++ ... ++while this and that: ++ ... 
++for ( ++ addr_family, ++ addr_type, ++ addr_proto, ++ addr_canonname, ++ addr_sockaddr, ++) in socket.getaddrinfo("google.com", "http"): + pass +-a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +-a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +-a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +-a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +-if ( +- threading.current_thread() != threading.main_thread() and +- threading.current_thread() != threading.main_thread() or +- signal.getsignal(signal.SIGINT) != signal.default_int_handler +-): +- return True +-if ( +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +-): +- return True +-if ( +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa & +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +-): +- return True +-if ( +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +-): +- return True +-if ( +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +-): +- return True +-if ( +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa * +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +-): +- return True +-if ( +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa / +- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +-): +- return True +-if ( +- ~ aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n +-): +- return True +-if ( +- ~ aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n +-): +- return True +-if ( +- ~ aaaaaaaaaaaaaaaa.a + aaaaaaaaaaaaaaaa.b - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n +-): +- return True +-aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaa * (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) ++a = ( ++ aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp ++ in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ++) ++a = ( ++ aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp ++ not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ++) ++a = ( ++ aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp ++ is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ++) ++a = ( ++ aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp ++ is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ++) ++if ( ++ threading.current_thread() != threading.main_thread() ++ and threading.current_thread() != threading.main_thread() ++ or signal.getsignal(signal.SIGINT) != signal.default_int_handler ++): ++ return True ++if ( ++ 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++): ++ return True ++if ( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++): ++ return True ++if ( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++): ++ return True ++if ( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++): ++ return True ++if ( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++): ++ return True ++if ( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ / aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++): ++ return True ++if ( ++ ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e ++ | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l**aaaa.m // aaaa.n ++): ++ return True ++if ( ++ ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e ++ | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h ++ ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l**aaaaaaaa.m // aaaaaaaa.n ++): ++ return True ++if ( ++ ~aaaaaaaaaaaaaaaa.a ++ + aaaaaaaaaaaaaaaa.b ++ - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e ++ | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ++ ^ aaaaaaaaaaaaaaaa.i ++ << aaaaaaaaaaaaaaaa.k ++ >> aaaaaaaaaaaaaaaa.l**aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ++): ++ return True ++( ++ aaaaaaaaaaaaaaaa ++ + aaaaaaaaaaaaaaaa ++ - aaaaaaaaaaaaaaaa ++ * (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) ++ / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) ++) + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa +-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++) + bbbb >> bbbb * bbbb +-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ ^ bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ ^ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++) + last_call() + # standalone comment at ENDMARKER diff --git a/tests/data/force_py36.py b/tests/data/miscellaneous/force_py36.py similarity index 100% rename from tests/data/force_py36.py rename to tests/data/miscellaneous/force_py36.py diff --git a/tests/data/miscellaneous/force_pyi.py b/tests/data/miscellaneous/force_pyi.py new file mode 100644 index 00000000000..07ed93c6879 --- /dev/null +++ b/tests/data/miscellaneous/force_pyi.py @@ -0,0 +1,65 @@ +from typing import Union + +@bird +def zoo(): ... + +class A: ... +@bar +class B: + def BMethod(self) -> None: ... + @overload + def BMethod(self, arg : List[str]) -> None: ... + +class C: ... +@hmm +class D: ... +class E: ... + +@baz +def foo() -> None: + ... + +class F (A , C): ... +def spam() -> None: ... + +@overload +def spam(arg: str) -> str: ... 
+ +var : int = 1 + +def eggs() -> Union[str, int]: ... + +# output + +from typing import Union + +@bird +def zoo(): ... + +class A: ... + +@bar +class B: + def BMethod(self) -> None: ... + @overload + def BMethod(self, arg: List[str]) -> None: ... + +class C: ... + +@hmm +class D: ... + +class E: ... + +@baz +def foo() -> None: ... + +class F(A, C): ... + +def spam() -> None: ... +@overload +def spam(arg: str) -> str: ... + +var: int = 1 + +def eggs() -> Union[str, int]: ... diff --git a/tests/data/miscellaneous/long_strings_flag_disabled.py b/tests/data/miscellaneous/long_strings_flag_disabled.py new file mode 100644 index 00000000000..db3954e3abd --- /dev/null +++ b/tests/data/miscellaneous/long_strings_flag_disabled.py @@ -0,0 +1,292 @@ +x = "This is a really long string that can't possibly be expected to fit all together on one line. In fact it may even take up three or more lines... like four or five... but probably just three." + +x += "This is a really long string that can't possibly be expected to fit all together on one line. In fact it may even take up three or more lines... like four or five... but probably just three." + +y = "Short string" + +print( + "This is a really long string inside of a print statement with extra arguments attached at the end of it.", + x, + y, + z, +) + +print( + "This is a really long string inside of a print statement with no extra arguments attached at the end of it." +) + +D1 = { + "The First": "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a dictionary, so formatting is more difficult.", + "The Second": "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a dictionary.", +} + +D2 = { + 1.0: "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a dictionary, so formatting is more difficult.", + 2.0: "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a dictionary.", +} + +D3 = { + x: "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a dictionary, so formatting is more difficult.", + y: "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a dictionary.", +} + +D4 = { + "A long and ridiculous {}".format( + string_key + ): "This is a really really really long string that has to go i,side of a dictionary. It is soooo bad.", + some_func( + "calling", "some", "stuff" + ): "This is a really really really long string that has to go inside of a dictionary. It is {soooo} bad (#{x}).".format( + sooo="soooo", x=2 + ), + "A %s %s" + % ( + "formatted", + "string", + ): "This is a really really really long string that has to go inside of a dictionary. It is %s bad (#%d)." + % ("soooo", 2), +} + +func_with_keywords( + my_arg, + my_kwarg="Long keyword strings also need to be wrapped, but they will probably need to be handled a little bit differently.", +) + +bad_split1 = ( + "But what should happen when code has already been formatted but in the wrong way? Like" + " with a space at the end instead of the beginning. Or what about when it is split too soon?" +) + +bad_split2 = ( + "But what should happen when code has already " + "been formatted but in the wrong way? 
Like " + "with a space at the end instead of the " + "beginning. Or what about when it is split too " + "soon? In the case of a split that is too " + "short, black will try to honer the custom " + "split." +) + +bad_split3 = ( + "What if we have inline comments on " # First Comment + "each line of a bad split? In that " # Second Comment + "case, we should just leave it alone." # Third Comment +) + +bad_split_func1( + "But what should happen when code has already " + "been formatted but in the wrong way? Like " + "with a space at the end instead of the " + "beginning. Or what about when it is split too " + "soon? In the case of a split that is too " + "short, black will try to honer the custom " + "split.", + xxx, + yyy, + zzz, +) + +bad_split_func2( + xxx, + yyy, + zzz, + long_string_kwarg="But what should happen when code has already been formatted but in the wrong way? Like " + "with a space at the end instead of the beginning. Or what about when it is split too " + "soon?", +) + +bad_split_func3( + ( + "But what should happen when code has already " + r"been formatted but in the wrong way? Like " + "with a space at the end instead of the " + r"beginning. Or what about when it is split too " + r"soon? In the case of a split that is too " + "short, black will try to honer the custom " + "split." + ), + xxx, + yyy, + zzz, +) + +raw_string = r"This is a long raw string. When re-formatting this string, black needs to make sure it prepends the 'r' onto the new string." + +fmt_string1 = "We also need to be sure to preserve any and all {} which may or may not be attached to the string in question.".format( + "method calls" +) + +fmt_string2 = "But what about when the string is {} but {}".format( + "short", + "the method call is really really really really really really really really long?", +) + +old_fmt_string1 = ( + "While we are on the topic of %s, we should also note that old-style formatting must also be preserved, since some %s still uses it." + % ("formatting", "code") +) + +old_fmt_string2 = "This is a %s %s %s %s" % ( + "really really really really really", + "old", + "way to format strings!", + "Use f-strings instead!", +) + +old_fmt_string3 = ( + "Whereas only the strings after the percent sign were long in the last example, this example uses a long initial string as well. This is another %s %s %s %s" + % ( + "really really really really really", + "old", + "way to format strings!", + "Use f-strings instead!", + ) +) + +fstring = f"f-strings definitely make things more {difficult} than they need to be for {{black}}. But boy they sure are handy. The problem is that some lines will need to have the 'f' whereas others do not. This {line}, for example, needs one." + +fstring_with_no_fexprs = f"Some regular string that needs to get split certainly but is NOT an fstring by any means whatsoever." + +comment_string = "Long lines with inline comments should have their comments appended to the reformatted string's enclosing right parentheses." # This comment gets thrown to the top. + +arg_comment_string = print( + "Long lines with inline comments which are apart of (and not the only member of) an argument list should have their comments appended to the reformatted string's enclosing left parentheses.", # This comment stays on the bottom. + "Arg #2", + "Arg #3", + "Arg #4", + "Arg #5", +) + +pragma_comment_string1 = "Lines which end with an inline pragma comment of the form `# : <...>` should be left alone." 
# noqa: E501 + +pragma_comment_string2 = "Lines which end with an inline pragma comment of the form `# : <...>` should be left alone." # noqa + +"""This is a really really really long triple quote string and it should not be touched.""" + +triple_quote_string = """This is a really really really long triple quote string assignment and it should not be touched.""" + +assert ( + some_type_of_boolean_expression +), "Followed by a really really really long string that is used to provide context to the AssertionError exception." + +assert ( + some_type_of_boolean_expression +), "Followed by a really really really long string that is used to provide context to the AssertionError exception, which uses dynamic string {}.".format( + "formatting" +) + +assert some_type_of_boolean_expression, ( + "Followed by a really really really long string that is used to provide context to the AssertionError exception, which uses dynamic string %s." + % "formatting" +) + +assert some_type_of_boolean_expression, ( + "Followed by a really really really long string that is used to provide context to the AssertionError exception, which uses dynamic %s %s." + % ("string", "formatting") +) + +some_function_call( + "With a reallly generic name and with a really really long string that is, at some point down the line, " + + added + + " to a variable and then added to another string." +) + +some_function_call( + "With a reallly generic name and with a really really long string that is, at some point down the line, " + + added + + " to a variable and then added to another string. But then what happens when the final string is also supppppperrrrr long?! Well then that second (realllllllly long) string should be split too.", + "and a second argument", + and_a_third, +) + +return "A really really really really really really really really really really really really really long {} {}".format( + "return", "value" +) + +func_with_bad_comma( + "This is a really long string argument to a function that has a trailing comma which should NOT be there.", +) + +func_with_bad_comma( + "This is a really long string argument to a function that has a trailing comma which should NOT be there.", # comment after comma +) + +func_with_bad_comma( + ( + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there." + ), +) + +func_with_bad_comma( + ( + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there." + ), # comment after comma +) + +func_with_bad_parens_that_wont_fit_in_one_line( + ("short string that should have parens stripped"), x, y, z +) + +func_with_bad_parens_that_wont_fit_in_one_line( + x, y, ("short string that should have parens stripped"), z +) + +func_with_bad_parens( + ("short string that should have parens stripped"), + x, + y, + z, +) + +func_with_bad_parens( + x, + y, + ("short string that should have parens stripped"), + z, +) + +annotated_variable: Final = ( + "This is a large " + + STRING + + " that has been " + + CONCATENATED + + "using the '+' operator." +) +annotated_variable: Final = "This is a large string that has a type annotation attached to it. A type annotation should NOT stop a long string from being wrapped." +annotated_variable: Literal[ + "fakse_literal" +] = "This is a large string that has a type annotation attached to it. A type annotation should NOT stop a long string from being wrapped." 
+ +backslashes = "This is a really long string with \"embedded\" double quotes and 'single' quotes that also handles checking for an even number of backslashes \\" +backslashes = "This is a really long string with \"embedded\" double quotes and 'single' quotes that also handles checking for an even number of backslashes \\\\" +backslashes = "This is a really 'long' string with \"embedded double quotes\" and 'single' quotes that also handles checking for an odd number of backslashes \\\", like this...\\\\\\" + +short_string = "Hi" " there." + +func_call(short_string=("Hi" " there.")) + +raw_strings = r"Don't" " get" r" merged" " unless they are all raw." + + +def foo(): + yield "This is a really long string that can't possibly be expected to fit all together on one line. In fact it may even take up three or more lines... like four or five... but probably just three." + + +x = f"This is a {{really}} long string that needs to be split without a doubt (i.e. most definitely). In short, this {string} that can't possibly be {{expected}} to fit all together on one line. In {fact} it may even take up three or more lines... like four or five... but probably just four." + +long_unmergable_string_with_pragma = ( + "This is a really long string that can't be merged because it has a likely pragma at the end" # type: ignore + " of it." +) + +long_unmergable_string_with_pragma = ( + "This is a really long string that can't be merged because it has a likely pragma at the end" # noqa + " of it." +) + +long_unmergable_string_with_pragma = ( + "This is a really long string that can't be merged because it has a likely pragma at the end" # pylint: disable=some-pylint-check + " of it." +) diff --git a/tests/data/miscellaneous/missing_final_newline.diff b/tests/data/miscellaneous/missing_final_newline.diff new file mode 100644 index 00000000000..6d991c74f8f --- /dev/null +++ b/tests/data/miscellaneous/missing_final_newline.diff @@ -0,0 +1,8 @@ +--- [Deterministic header] ++++ [Deterministic header] +@@ -1,3 +1,3 @@ + # A comment-only file, with no final EOL character + # This triggers https://bugs.python.org/issue2142 +-# This is the line without the EOL character +\ No newline at end of file ++# This is the line without the EOL character diff --git a/tests/data/miscellaneous/missing_final_newline.py b/tests/data/miscellaneous/missing_final_newline.py new file mode 100644 index 00000000000..687e1367552 --- /dev/null +++ b/tests/data/miscellaneous/missing_final_newline.py @@ -0,0 +1,3 @@ +# A comment-only file, with no final EOL character +# This triggers https://bugs.python.org/issue2142 +# This is the line without the EOL character \ No newline at end of file diff --git a/tests/data/miscellaneous/pattern_matching_invalid.py b/tests/data/miscellaneous/pattern_matching_invalid.py new file mode 100644 index 00000000000..22b5b94c0a4 --- /dev/null +++ b/tests/data/miscellaneous/pattern_matching_invalid.py @@ -0,0 +1,18 @@ +# First match, no errors +match something: + case bla(): + pass + +# Problem on line 10 +match invalid_case: + case valid_case: + pass + case a := b: + pass + case valid_case: + pass + +# No problems either +match something: + case bla(): + pass diff --git a/tests/data/miscellaneous/power_op_newline.py b/tests/data/miscellaneous/power_op_newline.py new file mode 100644 index 00000000000..85d434d63f6 --- /dev/null +++ b/tests/data/miscellaneous/power_op_newline.py @@ -0,0 +1,10 @@ +importA;()<<0**0# + +# output + +importA +( + () + << 0 + ** 0 +) # diff --git 
a/tests/data/miscellaneous/python2_detection.py b/tests/data/miscellaneous/python2_detection.py new file mode 100644 index 00000000000..8de2bb58adc --- /dev/null +++ b/tests/data/miscellaneous/python2_detection.py @@ -0,0 +1,90 @@ +# This uses a similar construction to the decorators.py test data file FYI. + +print "hello, world!" + +### + +exec "print('hello, world!')" + +### + +def set_position((x, y), value): + pass + +### + +try: + pass +except Exception, err: + pass + +### + +raise RuntimeError, "I feel like crashing today :p" + +### + +`wow_these_really_did_exist` + +### + +10L + +### + +10l + +### + +0123 + +# output + +print("hello python three!") + +### + +exec("I'm not sure if you can use exec like this but that's not important here!") + +### + +try: + pass +except make_exception(1, 2): + pass + +### + +try: + pass +except Exception as err: + pass + +### + +raise RuntimeError(make_msg(1, 2)) + +### + +raise RuntimeError("boom!",) + +### + +def set_position(x, y, value): + pass + +### + +10 + +### + +0 + +### + +000 + +### + +0o12 \ No newline at end of file diff --git a/tests/data/string_quotes.py b/tests/data/miscellaneous/string_quotes.py similarity index 81% rename from tests/data/string_quotes.py rename to tests/data/miscellaneous/string_quotes.py index 5a4bc5d0b11..3384241f4ad 100644 --- a/tests/data/string_quotes.py +++ b/tests/data/miscellaneous/string_quotes.py @@ -51,6 +51,11 @@ '\'{z}\' {y * " "}' '{y * x} \'{z}\'' +# We must bail out if changing the quotes would introduce backslashes in f-string +# expressions. xref: https://github.com/psf/black/issues/2348 +f"\"{b}\"{' ' * (long-len(b)+1)}: \"{sts}\",\n" +f"\"{a}\"{'hello' * b}\"{c}\"" + # output """""" @@ -100,3 +105,8 @@ f"{y * x} '{z}'" "'{z}' {y * \" \"}" "{y * x} '{z}'" + +# We must bail out if changing the quotes would introduce backslashes in f-string +# expressions. xref: https://github.com/psf/black/issues/2348 +f"\"{b}\"{' ' * (long-len(b)+1)}: \"{sts}\",\n" +f"\"{a}\"{'hello' * b}\"{c}\"" diff --git a/tests/data/miscellaneous/stub.pyi b/tests/data/miscellaneous/stub.pyi new file mode 100644 index 00000000000..af2cd2c2c02 --- /dev/null +++ b/tests/data/miscellaneous/stub.pyi @@ -0,0 +1,151 @@ +X: int + +def f(): ... + + +class D: + ... + + +class C: + ... + +class B: + this_lack_of_newline_should_be_kept: int + def b(self) -> None: ... + + but_this_newline_should_also_be_kept: int + +class A: + attr: int + attr2: str + + def f(self) -> int: + ... + + def g(self) -> str: ... + + + +def g(): + ... + +def h(): ... + +if sys.version_info >= (3, 8): + class E: + def f(self): ... + class F: + + def f(self): ... + class G: ... + class H: ... +else: + class I: ... + class J: ... + def f(): ... + + class K: + def f(self): ... + def f(): ... + +class Nested: + class dirty: ... + class little: ... + class secret: + def who_has_to_know(self): ... + def verse(self): ... + +class Conditional: + def f(self): ... + if sys.version_info >= (3, 8): + def g(self): ... + else: + def g(self): ... + def h(self): ... + def i(self): ... + if sys.version_info >= (3, 8): + def j(self): ... + def k(self): ... + if sys.version_info >= (3, 8): + class A: ... + class B: ... + class C: + def l(self): ... + def m(self): ... + + +# output +X: int + +def f(): ... + +class D: ... +class C: ... + +class B: + this_lack_of_newline_should_be_kept: int + def b(self) -> None: ... + + but_this_newline_should_also_be_kept: int + +class A: + attr: int + attr2: str + + def f(self) -> int: ... + def g(self) -> str: ... + +def g(): ... +def h(): ... 
+ +if sys.version_info >= (3, 8): + class E: + def f(self): ... + + class F: + def f(self): ... + + class G: ... + class H: ... + +else: + class I: ... + class J: ... + + def f(): ... + + class K: + def f(self): ... + + def f(): ... + +class Nested: + class dirty: ... + class little: ... + + class secret: + def who_has_to_know(self): ... + + def verse(self): ... + +class Conditional: + def f(self): ... + if sys.version_info >= (3, 8): + def g(self): ... + else: + def g(self): ... + + def h(self): ... + def i(self): ... + if sys.version_info >= (3, 8): + def j(self): ... + + def k(self): ... + if sys.version_info >= (3, 8): + class A: ... + class B: ... + + class C: + def l(self): ... + def m(self): ... diff --git a/tests/data/nested_gitignore_tests/pyproject.toml b/tests/data/nested_gitignore_tests/pyproject.toml new file mode 100644 index 00000000000..9ba7ec26980 --- /dev/null +++ b/tests/data/nested_gitignore_tests/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools>=41.0", "setuptools-scm", "wheel"] +build-backend = "setuptools.build_meta" diff --git a/tests/data/nested_gitignore_tests/root/.gitignore b/tests/data/nested_gitignore_tests/root/.gitignore new file mode 100644 index 00000000000..2987e7bb646 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/.gitignore @@ -0,0 +1 @@ +a.py diff --git a/tests/data/nested_gitignore_tests/root/a.py b/tests/data/nested_gitignore_tests/root/a.py new file mode 100644 index 00000000000..7135cfd187c --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/a.py @@ -0,0 +1 @@ +# should be excluded (root/.gitignore) diff --git a/tests/data/nested_gitignore_tests/root/b.py b/tests/data/nested_gitignore_tests/root/b.py new file mode 100644 index 00000000000..bdeeca3c602 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/b.py @@ -0,0 +1 @@ +# should be included diff --git a/tests/data/nested_gitignore_tests/root/c.py b/tests/data/nested_gitignore_tests/root/c.py new file mode 100644 index 00000000000..bdeeca3c602 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/c.py @@ -0,0 +1 @@ +# should be included diff --git a/tests/data/nested_gitignore_tests/root/child/.gitignore b/tests/data/nested_gitignore_tests/root/child/.gitignore new file mode 100644 index 00000000000..6df81dd798e --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/child/.gitignore @@ -0,0 +1 @@ +b.py diff --git a/tests/data/nested_gitignore_tests/root/child/a.py b/tests/data/nested_gitignore_tests/root/child/a.py new file mode 100644 index 00000000000..7135cfd187c --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/child/a.py @@ -0,0 +1 @@ +# should be excluded (root/.gitignore) diff --git a/tests/data/nested_gitignore_tests/root/child/b.py b/tests/data/nested_gitignore_tests/root/child/b.py new file mode 100644 index 00000000000..c91d47946e6 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/child/b.py @@ -0,0 +1 @@ +# should be excluded (child/.gitignore) diff --git a/tests/data/nested_gitignore_tests/root/child/c.py b/tests/data/nested_gitignore_tests/root/child/c.py new file mode 100644 index 00000000000..bdeeca3c602 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/child/c.py @@ -0,0 +1 @@ +# should be included diff --git a/tests/data/nested_gitignore_tests/x.py b/tests/data/nested_gitignore_tests/x.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/data/numeric_literals_py2.py b/tests/data/numeric_literals_py2.py deleted file mode 100644 index 8f85c43f265..00000000000 --- 
a/tests/data/numeric_literals_py2.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python2.7 - -x = 123456789L -x = 123456789l -x = 123456789 -x = 0xb1acc - -# output - - -#!/usr/bin/env python2.7 - -x = 123456789L -x = 123456789L -x = 123456789 -x = 0xB1ACC diff --git a/tests/data/cantfit.py b/tests/data/preview/cantfit.py similarity index 100% rename from tests/data/cantfit.py rename to tests/data/preview/cantfit.py diff --git a/tests/data/preview/comments7.py b/tests/data/preview/comments7.py new file mode 100644 index 00000000000..ec2dc501d8e --- /dev/null +++ b/tests/data/preview/comments7.py @@ -0,0 +1,285 @@ +from .config import ( + Any, + Bool, + ConfigType, + ConfigTypeAttributes, + Int, + Path, + # String, + # resolve_to_config_type, + # DEFAULT_TYPE_ATTRIBUTES, +) + + +from .config import ( + Any, + Bool, + ConfigType, + ConfigTypeAttributes, + Int, + no_comma_here_yet + # and some comments, + # resolve_to_config_type, + # DEFAULT_TYPE_ATTRIBUTES, +) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent # NOT DRY +) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent as component # DRY +) + + +result = 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +result = ( + 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +) + +result = ( + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # aaa +) + +result = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # aaa + + +def func(): + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0789, + a[-1], # type: ignore + ) + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0789, + a[-1] # type: ignore + ) + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + a[-1] # type: ignore + ) + + # The type: ignore exception only applies to line length, not + # other types of formatting. + c = call( + "aaaaaaaa", "aaaaaaaa", "aaaaaaaa", "aaaaaaaa", "aaaaaaaa", "aaaaaaaa", # type: ignore + "aaaaaaaa", "aaaaaaaa", "aaaaaaaa", "aaaaaaaa", "aaaaaaaa", "aaaaaaaa" + ) + + +class C: + @pytest.mark.parametrize( + ("post_data", "message"), + [ + # metadata_version errors. + ( + {}, + "None is an invalid value for Metadata-Version. Error: This field is" + " required. see" + " https://packaging.python.org/specifications/core-metadata" + ), + ( + {"metadata_version": "-1"}, + "'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata" + " Version see" + " https://packaging.python.org/specifications/core-metadata" + ), + # name errors. + ( + {"metadata_version": "1.2"}, + "'' is an invalid value for Name. Error: This field is required. see" + " https://packaging.python.org/specifications/core-metadata" + ), + ( + {"metadata_version": "1.2", "name": "foo-"}, + "'foo-' is an invalid value for Name. Error: Must start and end with a" + " letter or numeral and contain only ascii numeric and '.', '_' and" + " '-'. see https://packaging.python.org/specifications/core-metadata" + ), + # version errors. + ( + {"metadata_version": "1.2", "name": "example"}, + "'' is an invalid value for Version. Error: This field is required. see" + " https://packaging.python.org/specifications/core-metadata" + ), + ( + {"metadata_version": "1.2", "name": "example", "version": "dog"}, + "'dog' is an invalid value for Version. 
Error: Must start and end with" + " a letter or numeral and contain only ascii numeric and '.', '_' and" + " '-'. see https://packaging.python.org/specifications/core-metadata" + ) + ] + ) + def test_fails_invalid_post_data( + self, pyramid_config, db_request, post_data, message + ): + ... + +square = Square(4) # type: Optional[Square] + +# output + +from .config import ( + Any, + Bool, + ConfigType, + ConfigTypeAttributes, + Int, + Path, + # String, + # resolve_to_config_type, + # DEFAULT_TYPE_ATTRIBUTES, +) + + +from .config import ( + Any, + Bool, + ConfigType, + ConfigTypeAttributes, + Int, + no_comma_here_yet, + # and some comments, + # resolve_to_config_type, + # DEFAULT_TYPE_ATTRIBUTES, +) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent, # NOT DRY +) +from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import ( + MyLovelyCompanyTeamProjectComponent as component, # DRY +) + + +result = 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +result = 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +result = ( # aaa + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +) + +result = ( # aaa + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +) + + +def func(): + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0789, + a[-1], # type: ignore + ) + c = call(0.0123, 0.0456, 0.0789, 0.0123, 0.0789, a[-1]) # type: ignore + c = call( + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + 0.0123, + 0.0456, + 0.0789, + a[-1], # type: ignore + ) + + # The type: ignore exception only applies to line length, not + # other types of formatting. + c = call( + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", # type: ignore + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", + "aaaaaaaa", + ) + + +class C: + @pytest.mark.parametrize( + ("post_data", "message"), + [ + # metadata_version errors. + ( + {}, + ( + "None is an invalid value for Metadata-Version. Error: This field" + " is required. see" + " https://packaging.python.org/specifications/core-metadata" + ), + ), + ( + {"metadata_version": "-1"}, + ( + "'-1' is an invalid value for Metadata-Version. Error: Unknown" + " Metadata Version see" + " https://packaging.python.org/specifications/core-metadata" + ), + ), + # name errors. + ( + {"metadata_version": "1.2"}, + ( + "'' is an invalid value for Name. Error: This field is required." + " see https://packaging.python.org/specifications/core-metadata" + ), + ), + ( + {"metadata_version": "1.2", "name": "foo-"}, + ( + "'foo-' is an invalid value for Name. Error: Must start and end" + " with a letter or numeral and contain only ascii numeric and '.'," + " '_' and '-'. see" + " https://packaging.python.org/specifications/core-metadata" + ), + ), + # version errors. + ( + {"metadata_version": "1.2", "name": "example"}, + ( + "'' is an invalid value for Version. Error: This field is required." + " see https://packaging.python.org/specifications/core-metadata" + ), + ), + ( + {"metadata_version": "1.2", "name": "example", "version": "dog"}, + ( + "'dog' is an invalid value for Version. Error: Must start and end" + " with a letter or numeral and contain only ascii numeric and '.'," + " '_' and '-'. 
see" + " https://packaging.python.org/specifications/core-metadata" + ), + ), + ], + ) + def test_fails_invalid_post_data( + self, pyramid_config, db_request, post_data, message + ): + ... + + +square = Square(4) # type: Optional[Square] diff --git a/tests/data/preview/comments8.py b/tests/data/preview/comments8.py new file mode 100644 index 00000000000..a2030c2a092 --- /dev/null +++ b/tests/data/preview/comments8.py @@ -0,0 +1,15 @@ +# The percent-percent comments are Spyder IDE cells. +# Both `#%%`` and `# %%` are accepted, so `black` standardises +# to the latter. + +#%% +# %% + +# output + +# The percent-percent comments are Spyder IDE cells. +# Both `#%%`` and `# %%` are accepted, so `black` standardises +# to the latter. + +# %% +# %% diff --git a/tests/data/preview/docstring_preview.py b/tests/data/preview/docstring_preview.py new file mode 100644 index 00000000000..292352c82f3 --- /dev/null +++ b/tests/data/preview/docstring_preview.py @@ -0,0 +1,105 @@ +def docstring_almost_at_line_limit(): + """long docstring................................................................. + """ + + +def docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + """ + + +def mulitline_docstring_almost_at_line_limit(): + """long docstring................................................................. + + .................................................................................. + """ + + +def mulitline_docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + + .................................................................................. + """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def docstring_at_line_limit_with_prefix(): + f"""long docstring...............................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def multiline_docstring_at_line_limit_with_prefix(): + f"""first line---------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def single_quote_docstring_over_line_limit(): + "We do not want to put the closing quote on a new line as that is invalid (see GH-3141)." + + +def single_quote_docstring_over_line_limit2(): + 'We do not want to put the closing quote on a new line as that is invalid (see GH-3141).' + + +# output + + +def docstring_almost_at_line_limit(): + """long docstring................................................................. + """ + + +def docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + """ + + +def mulitline_docstring_almost_at_line_limit(): + """long docstring................................................................. + + .................................................................................. + """ + + +def mulitline_docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + + .................................................................................. 
+ """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def docstring_at_line_limit_with_prefix(): + f"""long docstring...............................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def multiline_docstring_at_line_limit_with_prefix(): + f"""first line---------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def single_quote_docstring_over_line_limit(): + "We do not want to put the closing quote on a new line as that is invalid (see GH-3141)." + + +def single_quote_docstring_over_line_limit2(): + "We do not want to put the closing quote on a new line as that is invalid (see GH-3141)." diff --git a/tests/data/long_strings.py b/tests/data/preview/long_strings.py similarity index 73% rename from tests/data/long_strings.py rename to tests/data/preview/long_strings.py index e1ed90f22de..6db3cfed9a9 100644 --- a/tests/data/long_strings.py +++ b/tests/data/preview/long_strings.py @@ -18,6 +18,26 @@ D4 = {"A long and ridiculous {}".format(string_key): "This is a really really really long string that has to go i,side of a dictionary. It is soooo bad.", some_func("calling", "some", "stuff"): "This is a really really really long string that has to go inside of a dictionary. It is {soooo} bad (#{x}).".format(sooo="soooo", x=2), "A %s %s" % ("formatted", "string"): "This is a really really really long string that has to go inside of a dictionary. It is %s bad (#%d)." % ("soooo", 2)} +D5 = { # Test for https://github.com/psf/black/issues/3261 + ("This is a really long string that can't be expected to fit in one line and is used as a nested dict's key"): {"inner": "value"}, +} + +D6 = { # Test for https://github.com/psf/black/issues/3261 + ("This is a really long string that can't be expected to fit in one line and is used as a dict's key"): ["value1", "value2"], +} + +L1 = ["The is a short string", "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a list literal, so it's expected to be wrapped in parens when spliting to avoid implicit str concatenation.", short_call("arg", {"key": "value"}), "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a list literal.", ("parens should be stripped for short string in list")] + +L2 = ["This is a really long string that can't be expected to fit in one line and is the only child of a list literal."] + +S1 = {"The is a short string", "This is a really long string that can't possibly be expected to fit all together on one line. 
Also it is inside a set literal, so it's expected to be wrapped in parens when spliting to avoid implicit str concatenation.", short_call("arg", {"key": "value"}), "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a set literal.", ("parens should be stripped for short string in set")} + +S2 = {"This is a really long string that can't be expected to fit in one line and is the only child of a set literal."} + +T1 = ("The is a short string", "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a tuple literal, so it's expected to be wrapped in parens when spliting to avoid implicit str concatenation.", short_call("arg", {"key": "value"}), "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a tuple literal.", ("parens should be stripped for short string in list")) + +T2 = ("This is a really long string that can't be expected to fit in one line and is the only child of a tuple literal.",) + func_with_keywords(my_arg, my_kwarg="Long keyword strings also need to be wrapped, but they will probably need to be handled a little bit differently.") bad_split1 = ( @@ -72,6 +92,25 @@ zzz, ) +inline_comments_func1( + "if there are inline " + "comments in the middle " + # Here is the standard alone comment. + "of the implicitly concatenated " + "string, we should handle " + "them correctly", + xxx, +) + +inline_comments_func2( + "what if the string is very very very very very very very very very very long and this part does " + "not fit into a single line? " + # Here is the standard alone comment. + "then the string should still be properly handled by merging and splitting " + "it into parts that fit in line length.", + xxx, +) + raw_string = r"This is a long raw string. When re-formatting this string, black needs to make sure it prepends the 'r' onto the new string." fmt_string1 = "We also need to be sure to preserve any and all {} which may or may not be attached to the string in question.".format("method calls") @@ -90,7 +129,7 @@ comment_string = "Long lines with inline comments should have their comments appended to the reformatted string's enclosing right parentheses." # This comment gets thrown to the top. -arg_comment_string = print("Long lines with inline comments which are apart of (and not the only member of) an argument list should have their comments appended to the reformatted string's enclosing left parentheses.", # This comment stays on the bottom. +arg_comment_string = print("Long lines with inline comments which are apart of (and not the only member of) an argument list should have their comments appended to the reformatted string's enclosing left parentheses.", # This comment gets thrown to the top. "Arg #2", "Arg #3", "Arg #4", "Arg #5") pragma_comment_string1 = "Lines which end with an inline pragma comment of the form `# : <...>` should be left alone." # noqa: E501 @@ -207,6 +246,38 @@ def foo(): " of it." ) +string_with_nameescape = ( + "........................................................................ \N{LAO KO LA}" +) + +string_with_nameescape = ( + "........................................................................... \N{LAO KO LA}" +) + +string_with_nameescape = ( + "............................................................................ 
\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "...................................................................... \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "......................................................................... \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + ".......................................................................... \\\N{LAO KO LA}" +) + +string_with_escaped_nameescape = ( + "........................................................................ \\N{LAO KO LA}" +) + +string_with_escaped_nameescape = ( + "........................................................................... \\N{LAO KO LA}" +) + # output @@ -294,6 +365,84 @@ def foo(): % ("soooo", 2), } +D5 = { # Test for https://github.com/psf/black/issues/3261 + "This is a really long string that can't be expected to fit in one line and is used as a nested dict's key": { + "inner": "value" + }, +} + +D6 = { # Test for https://github.com/psf/black/issues/3261 + "This is a really long string that can't be expected to fit in one line and is used as a dict's key": [ + "value1", + "value2", + ], +} + +L1 = [ + "The is a short string", + ( + "This is a really long string that can't possibly be expected to fit all" + " together on one line. Also it is inside a list literal, so it's expected to" + " be wrapped in parens when spliting to avoid implicit str concatenation." + ), + short_call("arg", {"key": "value"}), + ( + "This is another really really (not really) long string that also can't be" + " expected to fit on one line and is, like the other string, inside a list" + " literal." + ), + "parens should be stripped for short string in list", +] + +L2 = [ + "This is a really long string that can't be expected to fit in one line and is the" + " only child of a list literal." +] + +S1 = { + "The is a short string", + ( + "This is a really long string that can't possibly be expected to fit all" + " together on one line. Also it is inside a set literal, so it's expected to be" + " wrapped in parens when spliting to avoid implicit str concatenation." + ), + short_call("arg", {"key": "value"}), + ( + "This is another really really (not really) long string that also can't be" + " expected to fit on one line and is, like the other string, inside a set" + " literal." + ), + "parens should be stripped for short string in set", +} + +S2 = { + "This is a really long string that can't be expected to fit in one line and is the" + " only child of a set literal." +} + +T1 = ( + "The is a short string", + ( + "This is a really long string that can't possibly be expected to fit all" + " together on one line. Also it is inside a tuple literal, so it's expected to" + " be wrapped in parens when spliting to avoid implicit str concatenation." + ), + short_call("arg", {"key": "value"}), + ( + "This is another really really (not really) long string that also can't be" + " expected to fit on one line and is, like the other string, inside a tuple" + " literal." + ), + "parens should be stripped for short string in list", +) + +T2 = ( + ( + "This is a really long string that can't be expected to fit in one line and is" + " the only child of a tuple literal." + ), +) + func_with_keywords( my_arg, my_kwarg=( @@ -363,6 +512,22 @@ def foo(): zzz, ) +inline_comments_func1( + "if there are inline comments in the middle " + # Here is the standard alone comment. 
+ "of the implicitly concatenated string, we should handle them correctly", + xxx, +) + +inline_comments_func2( + "what if the string is very very very very very very very very very very long and" + " this part does not fit into a single line? " + # Here is the standard alone comment. + "then the string should still be properly handled by merging and splitting " + "it into parts that fit in line length.", + xxx, +) + raw_string = ( r"This is a long raw string. When re-formatting this string, black needs to make" r" sure it prepends the 'r' onto the new string." @@ -380,8 +545,7 @@ def foo(): old_fmt_string1 = ( "While we are on the topic of %s, we should also note that old-style formatting" - " must also be preserved, since some %s still uses it." - % ("formatting", "code") + " must also be preserved, since some %s still uses it." % ("formatting", "code") ) old_fmt_string2 = "This is a %s %s %s %s" % ( @@ -421,7 +585,7 @@ def foo(): arg_comment_string = print( "Long lines with inline comments which are apart of (and not the only member of) an" " argument list should have their comments appended to the reformatted string's" - " enclosing left parentheses.", # This comment stays on the bottom. + " enclosing left parentheses.", # This comment gets thrown to the top. "Arg #2", "Arg #3", "Arg #4", @@ -448,8 +612,7 @@ def foo(): assert some_type_of_boolean_expression, ( "Followed by a really really really long string that is used to provide context to" - " the AssertionError exception, which uses dynamic string %s." - % "formatting" + " the AssertionError exception, which uses dynamic string %s." % "formatting" ) assert some_type_of_boolean_expression, ( @@ -589,3 +752,43 @@ def foo(): "This is a really long string that can't be merged because it has a likely pragma at the end" # pylint: disable=some-pylint-check " of it." ) + +string_with_nameescape = ( + "........................................................................" + " \N{LAO KO LA}" +) + +string_with_nameescape = ( + "..........................................................................." + " \N{LAO KO LA}" +) + +string_with_nameescape = ( + "............................................................................" + " \N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "......................................................................" + " \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "........................................................................." + " \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + ".........................................................................." + " \\\N{LAO KO LA}" +) + +string_with_escaped_nameescape = ( + "........................................................................ \\N{LAO" + " KO LA}" +) + +string_with_escaped_nameescape = ( + "..........................................................................." 
+ " \\N{LAO KO LA}" +) diff --git a/tests/data/long_strings__edge_case.py b/tests/data/preview/long_strings__edge_case.py similarity index 80% rename from tests/data/long_strings__edge_case.py rename to tests/data/preview/long_strings__edge_case.py index 6919db5a80b..2bc0b6ed328 100644 --- a/tests/data/long_strings__edge_case.py +++ b/tests/data/preview/long_strings__edge_case.py @@ -29,6 +29,12 @@ ) return f'{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaa' return f'{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaaa' +assert str(result) == "This long string should be split at some point right close to or around hereeeeeee" +assert str(result) < "This long string should be split at some point right close to or around hereeeeee" +assert "A format string: %s" % "This long string should be split at some point right close to or around hereeeeeee" != result +msg += "This long string should be wrapped in parens at some point right around hereeeee" +msg += "This long string should be split at some point right close to or around hereeeeeeee" +msg += "This long string should not be split at any point ever since it is just righttt" # output @@ -108,3 +114,27 @@ f"{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaa" ) return f"{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaaa" +assert ( + str(result) + == "This long string should be split at some point right close to or around" + " hereeeeeee" +) +assert ( + str(result) + < "This long string should be split at some point right close to or around" + " hereeeeee" +) +assert ( + "A format string: %s" + % "This long string should be split at some point right close to or around" + " hereeeeeee" + != result +) +msg += ( + "This long string should be wrapped in parens at some point right around hereeeee" +) +msg += ( + "This long string should be split at some point right close to or around" + " hereeeeeeee" +) +msg += "This long string should not be split at any point ever since it is just righttt" diff --git a/tests/data/long_strings__regression.py b/tests/data/preview/long_strings__regression.py similarity index 56% rename from tests/data/long_strings__regression.py rename to tests/data/preview/long_strings__regression.py index 044bb4a5deb..634db46a5e0 100644 --- a/tests/data/long_strings__regression.py +++ b/tests/data/preview/long_strings__regression.py @@ -310,6 +310,221 @@ def who(self): passenger_association=passenger_association, ) +xxxxxxx_xxxxxx_xxxxxxx = xxx( + [ + xxxxxxxxxxxx( + xxxxxx_xxxxxxx=( + '((x.aaaaaaaaa = "xxxxxx.xxxxxxxxxxxxxxxxxxxxx") || (x.xxxxxxxxx = "xxxxxxxxxxxx")) && ' + # xxxxx xxxxxxxxxxxx xxxx xxx (xxxxxxxxxxxxxxxx) xx x xxxxxxxxx xx xxxxxx. 
+ "(x.bbbbbbbbbbbb.xxx != " + '"xxx:xxx:xxx::cccccccccccc:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx") && ' + ) + ) + ] +) + +if __name__ == "__main__": + for i in range(4, 8): + cmd = ( + r"for pid in $(ps aux | grep paster | grep -v grep | grep '\-%d' | awk '{print $2}'); do kill $pid; done" + % (i) + ) + +def A(): + def B(): + def C(): + def D(): + def E(): + def F(): + def G(): + assert ( + c_float(val[0][0] / val[0][1]).value + == c_float(value[0][0] / value[0][1]).value + ), "%s didn't roundtrip" % tag + +class xxxxxxxxxxxxxxxxxxxxx(xxxx.xxxxxxxxxxxxx): + def xxxxxxx_xxxxxx(xxxx): + assert xxxxxxx_xxxx in [ + x.xxxxx.xxxxxx.xxxxx.xxxxxx, + x.xxxxx.xxxxxx.xxxxx.xxxx, + ], ("xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx" % xxxxxxx_xxxx) + +value.__dict__[ + key +] = "test" # set some Thrift field to non-None in the struct aa bb cc dd ee + +RE_ONE_BACKSLASH = { + "asdf_hjkl_jkl": re.compile( + r"(?>\n" +) + +assert str(suffix_arr) == ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) != ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) <= ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) >= ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) < ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) > ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +assert str(suffix_arr) not in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + "(https://console.developers.google.com/)" + "2. You should be prompted to create a new project (name does not matter)." + "3. Click on Enable APIs and Services at the top." + "4. In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + "5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + "9. You now have a key to add to `{prefix}set api youtube api_key`" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + "(https://console.developers.google.com/)" + "2. You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + "4. 
In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + "9. You now have a key to add to `{prefix}set api youtube api_key`" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + "(https://console.developers.google.com/)" + "2. You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + "4. In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + f"9. You now have a key to add to `{prefix}set api youtube api_key`" +) + +# It shouldn't matter if the string prefixes are capitalized. +temp_msg = ( + F"{F'{humanize_number(pos)}.': <{pound_len+2}} " + F"{balance: <{bal_len + 5}} " + F"<<{author.display_name}>>\n" +) + +fstring = ( + F"We have to remember to escape {braces}." + " Like {these}." + F" But not {this}." +) + +welcome_to_programming = R"hello," R" world!" + +fstring = F"f-strings definitely make things more {difficult} than they need to be for {{black}}. But boy they sure are handy. The problem is that some lines will need to have the 'f' whereas others do not. This {line}, for example, needs one." + +x = F"This is a long string which contains an f-expr that should not split {{{[i for i in range(5)]}}}." + +x = ( + "\N{BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}" +) + +xxxxxx_xxx_xxxx_xx_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxx_xxxx_xxxxx = xxxx.xxxxxx.xxxxxxxxx.xxxxxxxxxxxxxxxxxxxx( + xx_xxxxxx={ + "x3_xxxxxxxx": "xxx3_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxxxxxx_xxxxxx_xxxxxxx", + }, +) + + # output @@ -384,7 +599,7 @@ def foo(): def foo(xxxx): - for (xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx) in xxxx: + for xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx in xxxx: for xxx in xxx_xxxx: assert ("x" in xxx) or (xxx in xxx_xxx_xxxxx), ( "{0} xxxxxxx xx {1}, xxx {1} xx xxx xx xxxx xx xxx xxxx: xxx xxxx {2}" @@ -435,14 +650,12 @@ def foo(): func_call_where_string_arg_has_old_fmt_and_bad_parens( "A long string with {}. This string is so long that it is ridiculous. It can't fit" - " on one line at alllll." - % "formatting", + " on one line at alllll." % "formatting", ) func_call_where_string_arg_has_old_fmt_and_bad_parens( "A long string with {}. This {} is so long that it is ridiculous. It can't fit on" - " one line at alllll." - % ("formatting", "string"), + " one line at alllll." 
% ("formatting", "string"), ) @@ -550,20 +763,28 @@ def xxxx_xxx_xx_xxxxxxxxxx_xxxx_xxxxxxxxx(xxxx): some_dictionary = { "xxxxx006": [ - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" - " xxxxx000 xxxxxxxxxx\n", - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" - " xxxxx010 xxxxxxxxxx\n", + ( + "xxx-xxx" + " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" + " xxxxx000 xxxxxxxxxx\n" + ), + ( + "xxx-xxx" + " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" + " xxxxx010 xxxxxxxxxx\n" + ), ], "xxxxx016": [ - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" - " xxxxx000 xxxxxxxxxx\n", - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" - " xxxxx010 xxxxxxxxxx\n", + ( + "xxx-xxx" + " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" + " xxxxx000 xxxxxxxxxx\n" + ), + ( + "xxx-xxx" + " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" + " xxxxx010 xxxxxxxxxx\n" + ), ], } @@ -702,3 +923,252 @@ 
def who(self): passenger_association=passenger_association, ) ) + + +xxxxxxx_xxxxxx_xxxxxxx = xxx( + [ + xxxxxxxxxxxx( + xxxxxx_xxxxxxx=( + '((x.aaaaaaaaa = "xxxxxx.xxxxxxxxxxxxxxxxxxxxx") || (x.xxxxxxxxx =' + ' "xxxxxxxxxxxx")) && ' + # xxxxx xxxxxxxxxxxx xxxx xxx (xxxxxxxxxxxxxxxx) xx x xxxxxxxxx xx xxxxxx. + "(x.bbbbbbbbbbbb.xxx != " + '"xxx:xxx:xxx::cccccccccccc:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx") && ' + ) + ) + ] +) + +if __name__ == "__main__": + for i in range(4, 8): + cmd = ( + r"for pid in $(ps aux | grep paster | grep -v grep | grep '\-%d' | awk" + r" '{print $2}'); do kill $pid; done" % (i) + ) + + +def A(): + def B(): + def C(): + def D(): + def E(): + def F(): + def G(): + assert ( + c_float(val[0][0] / val[0][1]).value + == c_float(value[0][0] / value[0][1]).value + ), "%s didn't roundtrip" % tag + + +class xxxxxxxxxxxxxxxxxxxxx(xxxx.xxxxxxxxxxxxx): + def xxxxxxx_xxxxxx(xxxx): + assert xxxxxxx_xxxx in [ + x.xxxxx.xxxxxx.xxxxx.xxxxxx, + x.xxxxx.xxxxxx.xxxxx.xxxx, + ], ( + "xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx" % xxxxxxx_xxxx + ) + + +value.__dict__[ + key +] = "test" # set some Thrift field to non-None in the struct aa bb cc dd ee + +RE_ONE_BACKSLASH = { + "asdf_hjkl_jkl": re.compile( + r"(?>\n" +) + +assert ( + str(suffix_arr) + == "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + != "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + <= "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + >= "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + < "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + > "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$'," + " 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$'," + " 'ykangaroo$']" +) +assert ( + str(suffix_arr) + not in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$'," + " 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$'," + " 'rykangaroo$', 'ykangaroo$']" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + f"(https://console.developers.google.com/)" + f"2. You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + f"4. In the list of APIs choose or search for YouTube Data API v3 and " + f"click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + f"6. Click on Create Credential at the top." + f'7. At the top click the link for "API key".' + f"8. No application restrictions are needed. Click Create at the bottom." + f"9. 
You now have a key to add to `{{prefix}}set api youtube api_key`" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + f"(https://console.developers.google.com/)" + f"2. You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + f"4. In the list of APIs choose or search for YouTube Data API v3 and " + f"click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + f"6. Click on Create Credential at the top." + f'7. At the top click the link for "API key".' + f"8. No application restrictions are needed. Click Create at the bottom." + f"9. You now have a key to add to `{{prefix}}set api youtube api_key`" +) +message = ( + "1. Go to Google Developers Console and log in with your Google account." + "(https://console.developers.google.com/)" + "2. You should be prompted to create a new project (name does not matter)." + "3. Click on Enable APIs and Services at the top." + "4. In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + "5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + f"9. You now have a key to add to `{prefix}set api youtube api_key`" +) + +# It shouldn't matter if the string prefixes are capitalized. +temp_msg = ( + f"{F'{humanize_number(pos)}.': <{pound_len+2}} " + f"{balance: <{bal_len + 5}} " + f"<<{author.display_name}>>\n" +) + +fstring = f"We have to remember to escape {braces}. Like {{these}}. But not {this}." + +welcome_to_programming = R"hello," R" world!" + +fstring = ( + f"f-strings definitely make things more {difficult} than they need to be for" + " {black}. But boy they sure are handy. The problem is that some lines will need" + f" to have the 'f' whereas others do not. This {line}, for example, needs one." +) + +x = ( + "This is a long string which contains an f-expr that should not split" + f" {{{[i for i in range(5)]}}}." +) + +x = ( + "\N{BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}" +) + +xxxxxx_xxx_xxxx_xx_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxx_xxxx_xxxxx = xxxx.xxxxxx.xxxxxxxxx.xxxxxxxxxxxxxxxxxxxx( + xx_xxxxxx={ + "x3_xxxxxxxx": ( + "xxx3_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxxxxxx_xxxxxx_xxxxxxx" + ), + }, +) diff --git a/tests/data/preview/one_element_subscript.py b/tests/data/preview/one_element_subscript.py new file mode 100644 index 00000000000..39205ba9f7a --- /dev/null +++ b/tests/data/preview/one_element_subscript.py @@ -0,0 +1,36 @@ +# We should not treat the trailing comma +# in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# The magic comma still applies to multi-element subscripts. +c: tuple[int, int,] +d = tuple[int, int,] + +# Magic commas still work as expected for non-subscripts. +small_list = [1,] +list_of_types = [tuple[int,],] + +# output +# We should not treat the trailing comma +# in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# The magic comma still applies to multi-element subscripts. +c: tuple[ + int, + int, +] +d = tuple[ + int, + int, +] + +# Magic commas still work as expected for non-subscripts. 
+small_list = [ + 1, +] +list_of_types = [ + tuple[int,], +] diff --git a/tests/data/preview/percent_precedence.py b/tests/data/preview/percent_precedence.py new file mode 100644 index 00000000000..b895443fb46 --- /dev/null +++ b/tests/data/preview/percent_precedence.py @@ -0,0 +1,41 @@ +("" % a) ** 2 +("" % a)[0] +("" % a)() +("" % a).b + +2 * ("" % a) +2 @ ("" % a) +2 / ("" % a) +2 // ("" % a) +2 % ("" % a) ++("" % a) +b + ("" % a) +-("" % a) +b - ("" % a) +b + -("" % a) +~("" % a) +2 ** ("" % a) +await ("" % a) +b[("" % a)] +b(("" % a)) +# output +("" % a) ** 2 +("" % a)[0] +("" % a)() +("" % a).b + +2 * ("" % a) +2 @ ("" % a) +2 / ("" % a) +2 // ("" % a) +2 % ("" % a) ++("" % a) +b + "" % a +-("" % a) +b - "" % a +b + -("" % a) +~("" % a) +2 ** ("" % a) +await ("" % a) +b[("" % a)] +b(("" % a)) diff --git a/tests/data/preview/remove_await_parens.py b/tests/data/preview/remove_await_parens.py new file mode 100644 index 00000000000..eb7dad340c3 --- /dev/null +++ b/tests/data/preview/remove_await_parens.py @@ -0,0 +1,168 @@ +import asyncio + +# Control example +async def main(): + await asyncio.sleep(1) + +# Remove brackets for short coroutine/task +async def main(): + await (asyncio.sleep(1)) + +async def main(): + await ( + asyncio.sleep(1) + ) + +async def main(): + await (asyncio.sleep(1) + ) + +# Check comments +async def main(): + await ( # Hello + asyncio.sleep(1) + ) + +async def main(): + await ( + asyncio.sleep(1) # Hello + ) + +async def main(): + await ( + asyncio.sleep(1) + ) # Hello + +# Long lines +async def main(): + await asyncio.gather(asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1)) + +# Same as above but with magic trailing comma in function +async def main(): + await asyncio.gather(asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1),) + +# Cr@zY Br@ck3Tz +async def main(): + await ( + ((((((((((((( + ((( ((( + ((( ((( + ((( ((( + ((( ((( + ((black(1))) + ))) ))) + ))) ))) + ))) ))) + ))) ))) + ))))))))))))) + ) + +# Keep brackets around non power operations and nested awaits +async def main(): + await (set_of_tasks | other_set) + +async def main(): + await (await asyncio.sleep(1)) + +# It's awaits all the way down... 
+async def main():
+    await (await x)
+
+async def main():
+    await (yield x)
+
+async def main():
+    await (await (asyncio.sleep(1)))
+
+async def main():
+    await (await (await (await (await (asyncio.sleep(1))))))
+
+# output
+import asyncio
+
+# Control example
+async def main():
+    await asyncio.sleep(1)
+
+
+# Remove brackets for short coroutine/task
+async def main():
+    await asyncio.sleep(1)
+
+
+async def main():
+    await asyncio.sleep(1)
+
+
+async def main():
+    await asyncio.sleep(1)
+
+
+# Check comments
+async def main():
+    await asyncio.sleep(1)  # Hello
+
+
+async def main():
+    await asyncio.sleep(1)  # Hello
+
+
+async def main():
+    await asyncio.sleep(1)  # Hello
+
+
+# Long lines
+async def main():
+    await asyncio.gather(
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+    )
+
+
+# Same as above but with magic trailing comma in function
+async def main():
+    await asyncio.gather(
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+    )
+
+
+# Cr@zY Br@ck3Tz
+async def main():
+    await black(1)
+
+
+# Keep brackets around non power operations and nested awaits
+async def main():
+    await (set_of_tasks | other_set)
+
+
+async def main():
+    await (await asyncio.sleep(1))
+
+
+# It's awaits all the way down...
+async def main():
+    await (await x)
+
+
+async def main():
+    await (yield x)
+
+
+async def main():
+    await (await asyncio.sleep(1))
+
+
+async def main():
+    await (await (await (await (await asyncio.sleep(1)))))
diff --git a/tests/data/preview/remove_except_parens.py b/tests/data/preview/remove_except_parens.py
new file mode 100644
index 00000000000..322c5b7a51b
--- /dev/null
+++ b/tests/data/preview/remove_except_parens.py
@@ -0,0 +1,79 @@
+# These brackets are redundant, therefore remove.
+try:
+    a.something
+except (AttributeError) as err:
+    raise err
+
+# This is a tuple of exceptions.
+# Although this could be replaced with just the exception,
+# we do not remove brackets to preserve AST.
+try:
+    a.something
+except (AttributeError,) as err:
+    raise err
+
+# This is a tuple of exceptions. Do not remove brackets.
+try:
+    a.something
+except (AttributeError, ValueError) as err:
+    raise err
+
+# Test long variants.
+try:
+    a.something
+except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error) as err:
+    raise err
+
+try:
+    a.something
+except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error,) as err:
+    raise err
+
+try:
+    a.something
+except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error) as err:
+    raise err
+
+# output
+# These brackets are redundant, therefore remove.
+try:
+    a.something
+except AttributeError as err:
+    raise err
+
+# This is a tuple of exceptions.
+# Although this could be replaced with just the exception,
+# we do not remove brackets to preserve AST.
+try:
+    a.something
+except (AttributeError,) as err:
+    raise err
+
+# This is a tuple of exceptions. Do not remove brackets.
+try:
+    a.something
+except (AttributeError, ValueError) as err:
+    raise err
+
+# Test long variants.
+try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error +) as err: + raise err + +try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, +) as err: + raise err + +try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, +) as err: + raise err diff --git a/tests/data/preview/remove_for_brackets.py b/tests/data/preview/remove_for_brackets.py new file mode 100644 index 00000000000..cd5340462da --- /dev/null +++ b/tests/data/preview/remove_for_brackets.py @@ -0,0 +1,48 @@ +# Only remove tuple brackets after `for` +for (k, v) in d.items(): + print(k, v) + +# Don't touch tuple brackets after `in` +for module in (core, _unicodefun): + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None + +# Brackets remain for long for loop lines +for (why_would_anyone_choose_to_name_a_loop_variable_with_a_name_this_long, i_dont_know_but_we_should_still_check_the_behaviour_if_they_do) in d.items(): + print(k, v) + +for (k, v) in dfkasdjfldsjflkdsjflkdsjfdslkfjldsjfgkjdshgkljjdsfldgkhsdofudsfudsofajdslkfjdslkfjldisfjdffjsdlkfjdlkjjkdflskadjldkfjsalkfjdasj.items(): + print(k, v) + +# Test deeply nested brackets +for (((((k, v))))) in d.items(): + print(k, v) + +# output +# Only remove tuple brackets after `for` +for k, v in d.items(): + print(k, v) + +# Don't touch tuple brackets after `in` +for module in (core, _unicodefun): + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None + +# Brackets remain for long for loop lines +for ( + why_would_anyone_choose_to_name_a_loop_variable_with_a_name_this_long, + i_dont_know_but_we_should_still_check_the_behaviour_if_they_do, +) in d.items(): + print(k, v) + +for ( + k, + v, +) in ( + dfkasdjfldsjflkdsjflkdsjfdslkfjldsjfgkjdshgkljjdsfldgkhsdofudsfudsofajdslkfjdslkfjldisfjdffjsdlkfjdlkjjkdflskadjldkfjsalkfjdasj.items() +): + print(k, v) + +# Test deeply nested brackets +for k, v in d.items(): + print(k, v) diff --git a/tests/data/preview/remove_newline_after_code_block_open.py b/tests/data/preview/remove_newline_after_code_block_open.py new file mode 100644 index 00000000000..ef2e5c2f6f5 --- /dev/null +++ b/tests/data/preview/remove_newline_after_code_block_open.py @@ -0,0 +1,189 @@ +import random + + +def foo1(): + + print("The newline above me should be deleted!") + + +def foo2(): + + + + print("All the newlines above me should be deleted!") + + +def foo3(): + + print("No newline above me!") + + print("There is a newline above me, and that's OK!") + + +def foo4(): + + # There is a comment here + + print("The newline above me should not be deleted!") + + +class Foo: + def bar(self): + + print("The newline above me should be deleted!") + + +for i in range(5): + + print(f"{i}) The line above me should be removed!") + + +for i in range(5): + + + + print(f"{i}) The lines above me should be removed!") + + +for i in range(5): + + for j in range(7): + + print(f"{i}) The lines above me should be removed!") + + +if random.randint(0, 3) == 0: + + print("The new line above me is about to be removed!") + + +if random.randint(0, 3) == 0: + + + + + print("The new lines above me is about to be removed!") + + +if random.randint(0, 3) == 0: + if random.uniform(0, 1) > 0.5: + print("Two lines above me are about to be removed!") + + +while 
True: + + print("The newline above me should be deleted!") + + +while True: + + + + print("The newlines above me should be deleted!") + + +while True: + + while False: + + print("The newlines above me should be deleted!") + + +with open("/path/to/file.txt", mode="w") as file: + + file.write("The new line above me is about to be removed!") + + +with open("/path/to/file.txt", mode="w") as file: + + + + file.write("The new lines above me is about to be removed!") + + +with open("/path/to/file.txt", mode="r") as read_file: + + with open("/path/to/output_file.txt", mode="w") as write_file: + + write_file.writelines(read_file.readlines()) + +# output + +import random + + +def foo1(): + print("The newline above me should be deleted!") + + +def foo2(): + print("All the newlines above me should be deleted!") + + +def foo3(): + print("No newline above me!") + + print("There is a newline above me, and that's OK!") + + +def foo4(): + # There is a comment here + + print("The newline above me should not be deleted!") + + +class Foo: + def bar(self): + print("The newline above me should be deleted!") + + +for i in range(5): + print(f"{i}) The line above me should be removed!") + + +for i in range(5): + print(f"{i}) The lines above me should be removed!") + + +for i in range(5): + for j in range(7): + print(f"{i}) The lines above me should be removed!") + + +if random.randint(0, 3) == 0: + print("The new line above me is about to be removed!") + + +if random.randint(0, 3) == 0: + print("The new lines above me is about to be removed!") + + +if random.randint(0, 3) == 0: + if random.uniform(0, 1) > 0.5: + print("Two lines above me are about to be removed!") + + +while True: + print("The newline above me should be deleted!") + + +while True: + print("The newlines above me should be deleted!") + + +while True: + while False: + print("The newlines above me should be deleted!") + + +with open("/path/to/file.txt", mode="w") as file: + file.write("The new line above me is about to be removed!") + + +with open("/path/to/file.txt", mode="w") as file: + file.write("The new lines above me is about to be removed!") + + +with open("/path/to/file.txt", mode="r") as read_file: + with open("/path/to/output_file.txt", mode="w") as write_file: + write_file.writelines(read_file.readlines()) diff --git a/tests/data/preview/return_annotation_brackets.py b/tests/data/preview/return_annotation_brackets.py new file mode 100644 index 00000000000..27760bd51d7 --- /dev/null +++ b/tests/data/preview/return_annotation_brackets.py @@ -0,0 +1,222 @@ +# Control +def double(a: int) -> int: + return 2*a + +# Remove the brackets +def double(a: int) -> (int): + return 2*a + +# Some newline variations +def double(a: int) -> ( + int): + return 2*a + +def double(a: int) -> (int +): + return 2*a + +def double(a: int) -> ( + int +): + return 2*a + +# Don't lose the comments +def double(a: int) -> ( # Hello + int +): + return 2*a + +def double(a: int) -> ( + int # Hello +): + return 2*a + +# Really long annotations +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + +def foo() -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + +def foo() -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds | 
intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + +def foo(a: int, b: int, c: int,) -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + +def foo(a: int, b: int, c: int,) -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds | intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + +# Split args but no need to split return +def foo(a: int, b: int, c: int,) -> int: + return 2 + +# Deeply nested brackets +# with *interesting* spacing +def double(a: int) -> (((((int))))): + return 2*a + +def double(a: int) -> ( + ( ( + ((int) + ) + ) + ) + ): + return 2*a + +def foo() -> ( + ( ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +) +)): + return 2 + +# Return type with commas +def foo() -> ( + tuple[int, int, int] +): + return 2 + +def foo() -> tuple[loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong]: + return 2 + +# Magic trailing comma example +def foo() -> tuple[int, int, int,]: + return 2 + +# Long string example +def frobnicate() -> "ThisIsTrulyUnreasonablyExtremelyLongClassName | list[ThisIsTrulyUnreasonablyExtremelyLongClassName]": + pass + +# output +# Control +def double(a: int) -> int: + return 2 * a + + +# Remove the brackets +def double(a: int) -> int: + return 2 * a + + +# Some newline variations +def double(a: int) -> int: + return 2 * a + + +def double(a: int) -> int: + return 2 * a + + +def double(a: int) -> int: + return 2 * a + + +# Don't lose the comments +def double(a: int) -> int: # Hello + return 2 * a + + +def double(a: int) -> int: # Hello + return 2 * a + + +# Really long annotations +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds + | intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +def foo( + a: int, + b: int, + c: int, +) -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + + +def foo( + a: int, + b: int, + c: int, +) -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds + | intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +# Split args but no need to split return +def foo( + a: int, + b: int, + c: int, +) -> int: + return 2 + + +# Deeply nested brackets +# with *interesting* spacing +def double(a: int) -> int: + return 2 * a + + +def double(a: int) -> int: + return 2 * a + + +def foo() -> ( + 
intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +# Return type with commas +def foo() -> tuple[int, int, int]: + return 2 + + +def foo() -> ( + tuple[ + loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, + loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, + loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, + ] +): + return 2 + + +# Magic trailing comma example +def foo() -> ( + tuple[ + int, + int, + int, + ] +): + return 2 + + +# Long string example +def frobnicate() -> ( + "ThisIsTrulyUnreasonablyExtremelyLongClassName |" + " list[ThisIsTrulyUnreasonablyExtremelyLongClassName]" +): + pass diff --git a/tests/data/preview/skip_magic_trailing_comma.py b/tests/data/preview/skip_magic_trailing_comma.py new file mode 100644 index 00000000000..e98174af427 --- /dev/null +++ b/tests/data/preview/skip_magic_trailing_comma.py @@ -0,0 +1,34 @@ +# We should not remove the trailing comma in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# But commas in multiple element subscripts should be removed. +c: tuple[int, int,] +d = tuple[int, int,] + +# Remove commas for non-subscripts. +small_list = [1,] +list_of_types = [tuple[int,],] +small_set = {1,} +set_of_types = {tuple[int,],} + +# Except single element tuples +small_tuple = (1,) + +# output +# We should not remove the trailing comma in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# But commas in multiple element subscripts should be removed. +c: tuple[int, int] +d = tuple[int, int] + +# Remove commas for non-subscripts. +small_list = [1] +list_of_types = [tuple[int,]] +small_set = {1} +set_of_types = {tuple[int,]} + +# Except single element tuples +small_tuple = (1,) diff --git a/tests/data/preview_310/remove_newline_after_match.py b/tests/data/preview_310/remove_newline_after_match.py new file mode 100644 index 00000000000..f7bcfbf27a2 --- /dev/null +++ b/tests/data/preview_310/remove_newline_after_match.py @@ -0,0 +1,34 @@ +def http_status(status): + + match status: + + case 400: + + return "Bad request" + + case 401: + + return "Unauthorized" + + case 403: + + return "Forbidden" + + case 404: + + return "Not found" + +# output +def http_status(status): + match status: + case 400: + return "Bad request" + + case 401: + return "Unauthorized" + + case 403: + return "Forbidden" + + case 404: + return "Not found" \ No newline at end of file diff --git a/tests/data/preview_39/remove_with_brackets.py b/tests/data/preview_39/remove_with_brackets.py new file mode 100644 index 00000000000..ea58ab93a16 --- /dev/null +++ b/tests/data/preview_39/remove_with_brackets.py @@ -0,0 +1,119 @@ +with (open("bla.txt")): + pass + +with (open("bla.txt")), (open("bla.txt")): + pass + +with (open("bla.txt") as f): + pass + +# Remove brackets within alias expression +with (open("bla.txt")) as f: + pass + +# Remove brackets around one-line context managers +with (open("bla.txt") as f, (open("x"))): + pass + +with ((open("bla.txt")) as f, open("x")): + pass + +with (CtxManager1() as example1, CtxManager2() as example2): + ... + +# Brackets remain when using magic comma +with (CtxManager1() as example1, CtxManager2() as example2,): + ... 
+
+# Brackets remain for multi-line context managers
+with (CtxManager1() as example1, CtxManager2() as example2, CtxManager2() as example2, CtxManager2() as example2, CtxManager2() as example2):
+    ...
+
+# Don't touch assignment expressions
+with (y := open("./test.py")) as f:
+    pass
+
+# Deeply nested examples
+# N.B. Multiple brackets are only possible
+# around the context manager itself.
+# Only one set of brackets is allowed around the
+# alias expression or comma-delimited context managers.
+with (((open("bla.txt")))):
+    pass
+
+with (((open("bla.txt")))), (((open("bla.txt")))):
+    pass
+
+with (((open("bla.txt")))) as f:
+    pass
+
+with ((((open("bla.txt")))) as f):
+    pass
+
+with ((((CtxManager1()))) as example1, (((CtxManager2()))) as example2):
+    ...
+
+# output
+with open("bla.txt"):
+    pass
+
+with open("bla.txt"), open("bla.txt"):
+    pass
+
+with open("bla.txt") as f:
+    pass
+
+# Remove brackets within alias expression
+with open("bla.txt") as f:
+    pass
+
+# Remove brackets around one-line context managers
+with open("bla.txt") as f, open("x"):
+    pass
+
+with open("bla.txt") as f, open("x"):
+    pass
+
+with CtxManager1() as example1, CtxManager2() as example2:
+    ...
+
+# Brackets remain when using magic comma
+with (
+    CtxManager1() as example1,
+    CtxManager2() as example2,
+):
+    ...
+
+# Brackets remain for multi-line context managers
+with (
+    CtxManager1() as example1,
+    CtxManager2() as example2,
+    CtxManager2() as example2,
+    CtxManager2() as example2,
+    CtxManager2() as example2,
+):
+    ...
+
+# Don't touch assignment expressions
+with (y := open("./test.py")) as f:
+    pass
+
+# Deeply nested examples
+# N.B. Multiple brackets are only possible
+# around the context manager itself.
+# Only one set of brackets is allowed around the
+# alias expression or comma-delimited context managers.
+with open("bla.txt"):
+    pass
+
+with open("bla.txt"), open("bla.txt"):
+    pass
+
+with open("bla.txt") as f:
+    pass
+
+with open("bla.txt") as f:
+    pass
+
+with CtxManager1() as example1, CtxManager2() as example2:
+    ...
diff --git a/tests/data/py_310/parenthesized_context_managers.py b/tests/data/py_310/parenthesized_context_managers.py
new file mode 100644
index 00000000000..ccf1f94883e
--- /dev/null
+++ b/tests/data/py_310/parenthesized_context_managers.py
@@ -0,0 +1,21 @@
+with (CtxManager() as example):
+    ...
+
+with (CtxManager1(), CtxManager2()):
+    ...
+
+with (CtxManager1() as example, CtxManager2()):
+    ...
+
+with (CtxManager1(), CtxManager2() as example):
+    ...
+
+with (CtxManager1() as example1, CtxManager2() as example2):
+    ...
+
+with (
+    CtxManager1() as example1,
+    CtxManager2() as example2,
+    CtxManager3() as example3,
+):
+    ...
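
A note on the fixture above: parenthesized_context_managers.py carries no "# output" section, which in this suite's convention means the expected output is the input itself, so Black should leave the file unchanged. A minimal sketch of checking that property through Black's public API; black.Mode, black.TargetVersion, and black.format_str are documented entry points, while the relative path (repository root assumed as working directory) and the no-op assertion are assumptions about how the harness wires fixtures up:

    import black

    # Parenthesized context managers need the Python 3.10 target grammar.
    mode = black.Mode(target_versions={black.TargetVersion.PY310})
    with open("tests/data/py_310/parenthesized_context_managers.py") as fixture:
        src = fixture.read()
    # No "# output" section: formatting is expected to be a no-op.
    assert black.format_str(src, mode=mode) == src
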
diff --git a/tests/data/py_310/pattern_matching_complex.py b/tests/data/py_310/pattern_matching_complex.py new file mode 100644 index 00000000000..97ee194fd39 --- /dev/null +++ b/tests/data/py_310/pattern_matching_complex.py @@ -0,0 +1,144 @@ +# Cases sampled from Lib/test/test_patma.py + +# case black_test_patma_098 +match x: + case -0j: + y = 0 +# case black_test_patma_142 +match x: + case bytes(z): + y = 0 +# case black_test_patma_073 +match x: + case 0 if 0: + y = 0 + case 0 if 1: + y = 1 +# case black_test_patma_006 +match 3: + case 0 | 1 | 2 | 3: + x = True +# case black_test_patma_049 +match x: + case [0, 1] | [1, 0]: + y = 0 +# case black_check_sequence_then_mapping +match x: + case [*_]: + return "seq" + case {}: + return "map" +# case black_test_patma_035 +match x: + case {0: [1, 2, {}]}: + y = 0 + case {0: [1, 2, {}] | True} | {1: [[]]} | {0: [1, 2, {}]} | [] | "X" | {}: + y = 1 + case []: + y = 2 +# case black_test_patma_107 +match x: + case 0.25 + 1.75j: + y = 0 +# case black_test_patma_097 +match x: + case -0j: + y = 0 +# case black_test_patma_007 +match 4: + case 0 | 1 | 2 | 3: + x = True +# case black_test_patma_154 +match x: + case 0 if x: + y = 0 +# case black_test_patma_134 +match x: + case {1: 0}: + y = 0 + case {0: 0}: + y = 1 + case {**z}: + y = 2 +# case black_test_patma_185 +match Seq(): + case [*_]: + y = 0 +# case black_test_patma_063 +match x: + case 1: + y = 0 + case 1: + y = 1 +# case black_test_patma_248 +match x: + case {"foo": bar}: + y = bar +# case black_test_patma_019 +match (0, 1, 2): + case [0, 1, *x, 2]: + y = 0 +# case black_test_patma_052 +match x: + case [0]: + y = 0 + case [1, 0] if (x := x[:0]): + y = 1 + case [1, 0]: + y = 2 +# case black_test_patma_191 +match w: + case [x, y, *_]: + z = 0 +# case black_test_patma_110 +match x: + case -0.25 - 1.75j: + y = 0 +# case black_test_patma_151 +match (x,): + case [y]: + z = 0 +# case black_test_patma_114 +match x: + case A.B.C.D: + y = 0 +# case black_test_patma_232 +match x: + case None: + y = 0 +# case black_test_patma_058 +match x: + case 0: + y = 0 +# case black_test_patma_233 +match x: + case False: + y = 0 +# case black_test_patma_078 +match x: + case []: + y = 0 + case [""]: + y = 1 + case "": + y = 2 +# case black_test_patma_156 +match x: + case z: + y = 0 +# case black_test_patma_189 +match w: + case [x, y, *rest]: + z = 0 +# case black_test_patma_042 +match x: + case (0 as z) | (1 as z) | (2 as z) if z == x % 2: + y = 0 +# case black_test_patma_034 +match x: + case {0: [1, 2, {}]}: + y = 0 + case {0: [1, 2, {}] | False} | {1: [[]]} | {0: [1, 2, {}]} | [] | "X" | {}: + y = 1 + case []: + y = 2 diff --git a/tests/data/py_310/pattern_matching_extras.py b/tests/data/py_310/pattern_matching_extras.py new file mode 100644 index 00000000000..9f6907f7575 --- /dev/null +++ b/tests/data/py_310/pattern_matching_extras.py @@ -0,0 +1,119 @@ +import match + +match something: + case [a as b]: + print(b) + case [a as b, c, d, e as f]: + print(f) + case Point(a as b): + print(b) + case Point(int() as x, int() as y): + print(x, y) + + +match = 1 +case: int = re.match(something) + +match re.match(case): + case type("match", match): + pass + case match: + pass + + +def func(match: case, case: match) -> case: + match Something(): + case func(match, case): + ... + case another: + ... 
+ + +match maybe, multiple: + case perhaps, 5: + pass + case perhaps, 6,: + pass + + +match more := (than, one), indeed,: + case _, (5, 6): + pass + case [[5], (6)], [7],: + pass + case _: + pass + + +match a, *b, c: + case [*_]: + assert "seq" == _ + case {}: + assert "map" == b + + +match match( + case, + match( + match, case, match, looooooooooooooooooooooooooooooooooooong, match, case, match + ), + case, +): + case case( + match=case, + case=re.match( + loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong + ), + ): + pass + + case [a as match]: + pass + + case case: + pass + + +match match: + case case: + pass + + +match a, *b(), c: + case d, *f, g: + pass + + +match something: + case { + "key": key as key_1, + "password": PASS.ONE | PASS.TWO | PASS.THREE as password, + }: + pass + case {"maybe": something(complicated as this) as that}: + pass + + +match something: + case 1 as a: + pass + + case 2 as b, 3 as c: + pass + + case 4 as d, (5 as e), (6 | 7 as g), *h: + pass + + +match bar1: + case Foo(aa=Callable() as aa, bb=int()): + print(bar1.aa, bar1.bb) + case _: + print("no match", "\n") + + +match bar1: + case Foo( + normal=x, perhaps=[list, {an: d, dict: 1.0}] as y, otherwise=something, q=t as u + ): + pass diff --git a/tests/data/py_310/pattern_matching_generic.py b/tests/data/py_310/pattern_matching_generic.py new file mode 100644 index 00000000000..00a0e4a677d --- /dev/null +++ b/tests/data/py_310/pattern_matching_generic.py @@ -0,0 +1,107 @@ +re.match() +match = a +with match() as match: + match = f"{match}" + +re.match() +match = a +with match() as match: + match = f"{match}" + + +def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: + if not target_versions: + # No target_version specified, so try all grammars. + return [ + # Python 3.7+ + pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords, + # Python 3.0-3.6 + pygram.python_grammar_no_print_statement_no_exec_statement, + # Python 2.7 with future print_function import + pygram.python_grammar_no_print_statement, + # Python 2.7 + pygram.python_grammar, + ] + + match match: + case case: + match match: + case case: + pass + + if all(version.is_python2() for version in target_versions): + # Python 2-only code, so try Python 2 grammars. + return [ + # Python 2.7 with future print_function import + pygram.python_grammar_no_print_statement, + # Python 2.7 + pygram.python_grammar, + ] + + re.match() + match = a + with match() as match: + match = f"{match}" + + def test_patma_139(self): + x = False + match x: + case bool(z): + y = 0 + self.assertIs(x, False) + self.assertEqual(y, 0) + self.assertIs(z, x) + + # Python 3-compatible code, so only try Python 3 grammar. 
+ grammars = [] + if supports_feature(target_versions, Feature.PATTERN_MATCHING): + # Python 3.10+ + grammars.append(pygram.python_grammar_soft_keywords) + # If we have to parse both, try to parse async as a keyword first + if not supports_feature( + target_versions, Feature.ASYNC_IDENTIFIERS + ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING): + # Python 3.7-3.9 + grammars.append( + pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords + ) + if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): + # Python 3.0-3.6 + grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement) + + def test_patma_155(self): + x = 0 + y = None + match x: + case 1e1000: + y = 0 + self.assertEqual(x, 0) + self.assertIs(y, None) + + x = range(3) + match x: + case [y, case as x, z]: + w = 0 + + # At least one of the above branches must have been taken, because every Python + # version has exactly one of the two 'ASYNC_*' flags + return grammars + + +def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: + """Given a string with source, return the lib2to3 Node.""" + if not src_txt.endswith("\n"): + src_txt += "\n" + + grammars = get_grammars(set(target_versions)) + + +re.match() +match = a +with match() as match: + match = f"{match}" + +re.match() +match = a +with match() as match: + match = f"{match}" diff --git a/tests/data/py_310/pattern_matching_simple.py b/tests/data/py_310/pattern_matching_simple.py new file mode 100644 index 00000000000..5ed62415a4b --- /dev/null +++ b/tests/data/py_310/pattern_matching_simple.py @@ -0,0 +1,92 @@ +# Cases sampled from PEP 636 examples + +match command.split(): + case [action, obj]: + ... # interpret action, obj + +match command.split(): + case [action]: + ... # interpret single-verb action + case [action, obj]: + ... # interpret action, obj + +match command.split(): + case ["quit"]: + print("Goodbye!") + quit_game() + case ["look"]: + current_room.describe() + case ["get", obj]: + character.get(obj, current_room) + case ["go", direction]: + current_room = current_room.neighbor(direction) + # The rest of your commands go here + +match command.split(): + case ["drop", *objects]: + for obj in objects: + character.drop(obj, current_room) + # The rest of your commands go here + +match command.split(): + case ["quit"]: + pass + case ["go", direction]: + print("Going:", direction) + case ["drop", *objects]: + print("Dropping: ", *objects) + case _: + print(f"Sorry, I couldn't understand {command!r}") + +match command.split(): + case ["north"] | ["go", "north"]: + current_room = current_room.neighbor("north") + case ["get", obj] | ["pick", "up", obj] | ["pick", obj, "up"]: + ... # Code for picking up the given object + +match command.split(): + case ["go", ("north" | "south" | "east" | "west")]: + current_room = current_room.neighbor(...) + # how do I know which direction to go? 
+ +match command.split(): + case ["go", ("north" | "south" | "east" | "west") as direction]: + current_room = current_room.neighbor(direction) + +match command.split(): + case ["go", direction] if direction in current_room.exits: + current_room = current_room.neighbor(direction) + case ["go", _]: + print("Sorry, you can't go that way") + +match event.get(): + case Click(position=(x, y)): + handle_click_at(x, y) + case KeyPress(key_name="Q") | Quit(): + game.quit() + case KeyPress(key_name="up arrow"): + game.go_north() + case KeyPress(): + pass # Ignore other keystrokes + case other_event: + raise ValueError(f"Unrecognized event: {other_event}") + +match event.get(): + case Click((x, y), button=Button.LEFT): # This is a left click + handle_click_at(x, y) + case Click(): + pass # ignore other clicks + + +def where_is(point): + match point: + case Point(x=0, y=0): + print("Origin") + case Point(x=0, y=y): + print(f"Y={y}") + case Point(x=x, y=0): + print(f"X={x}") + case Point(): + print("Somewhere else") + case _: + print("Not a point") diff --git a/tests/data/py_310/pattern_matching_style.py b/tests/data/py_310/pattern_matching_style.py new file mode 100644 index 00000000000..8e18ce2ada6 --- /dev/null +++ b/tests/data/py_310/pattern_matching_style.py @@ -0,0 +1,91 @@ +match something: + case b(): print(1+1) + case c( + very_complex=True, + perhaps_even_loooooooooooooooooooooooooooooooooooooong=- 1 + ): print(1) + case c( + very_complex=True, + perhaps_even_loooooooooooooooooooooooooooooooooooooong=-1, + ): print(2) + case a: pass + +match( + arg # comment +) + +match( +) + +match( + + +) + +case( + arg # comment +) + +case( +) + +case( + + +) + + +re.match( + something # fast +) +re.match( + + + +) +match match( + + +): + case case( + arg, # comment + ): + pass + +# output + +match something: + case b(): + print(1 + 1) + case c( + very_complex=True, perhaps_even_loooooooooooooooooooooooooooooooooooooong=-1 + ): + print(1) + case c( + very_complex=True, + perhaps_even_loooooooooooooooooooooooooooooooooooooong=-1, + ): + print(2) + case a: + pass + +match(arg) # comment + +match() + +match() + +case(arg) # comment + +case() + +case() + + +re.match(something) # fast +re.match() +match match(): + case case( + arg, # comment + ): + pass diff --git a/tests/data/py_310/pep_572_py310.py b/tests/data/py_310/pep_572_py310.py new file mode 100644 index 00000000000..2aef589ce8d --- /dev/null +++ b/tests/data/py_310/pep_572_py310.py @@ -0,0 +1,4 @@ +# Unparenthesized walruses are now allowed in indices since Python 3.10. 
+x[a:=0] +x[a:=0, b:=1] +x[5, b:=0] diff --git a/tests/data/py_310/starred_for_target.py b/tests/data/py_310/starred_for_target.py new file mode 100644 index 00000000000..8fc8e059ed3 --- /dev/null +++ b/tests/data/py_310/starred_for_target.py @@ -0,0 +1,27 @@ +for x in *a, *b: + print(x) + +for x in a, b, *c: + print(x) + +for x in *a, b, c: + print(x) + +for x in *a, b, *c: + print(x) + +async for x in *a, *b: + print(x) + +async for x in *a, b, *c: + print(x) + +async for x in a, b, *c: + print(x) + +async for x in ( + *loooooooooooooooooooooong, + very, + *loooooooooooooooooooooooooooooooooooooooooooooooong, +): + print(x) diff --git a/tests/data/py_311/pep_646.py b/tests/data/py_311/pep_646.py new file mode 100644 index 00000000000..e843ecf39d8 --- /dev/null +++ b/tests/data/py_311/pep_646.py @@ -0,0 +1,194 @@ +A[*b] +A[*b] = 1 +A +del A[*b] +A +A[*b, *b] +A[*b, *b] = 1 +A +del A[*b, *b] +A +A[b, *b] +A[b, *b] = 1 +A +del A[b, *b] +A +A[*b, b] +A[*b, b] = 1 +A +del A[*b, b] +A +A[b, b, *b] +A[b, b, *b] = 1 +A +del A[b, b, *b] +A +A[*b, b, b] +A[*b, b, b] = 1 +A +del A[*b, b, b] +A +A[b, *b, b] +A[b, *b, b] = 1 +A +del A[b, *b, b] +A +A[b, b, *b, b] +A[b, b, *b, b] = 1 +A +del A[b, b, *b, b] +A +A[b, *b, b, b] +A[b, *b, b, b] = 1 +A +del A[b, *b, b, b] +A +A[A[b, *b, b]] +A[A[b, *b, b]] = 1 +A +del A[A[b, *b, b]] +A +A[*A[b, *b, b]] +A[*A[b, *b, b]] = 1 +A +del A[*A[b, *b, b]] +A +A[b, ...] +A[b, ...] = 1 +A +del A[b, ...] +A +A[*A[b, ...]] +A[*A[b, ...]] = 1 +A +del A[*A[b, ...]] +A +l = [1, 2, 3] +A[*l] +A[*l] = 1 +A +del A[*l] +A +A[*l, 4] +A[*l, 4] = 1 +A +del A[*l, 4] +A +A[0, *l] +A[0, *l] = 1 +A +del A[0, *l] +A +A[1:2, *l] +A[1:2, *l] = 1 +A +del A[1:2, *l] +A +repr(A[1:2, *l]) == repr(A[1:2, 1, 2, 3]) +t = (1, 2, 3) +A[*t] +A[*t] = 1 +A +del A[*t] +A +A[*t, 4] +A[*t, 4] = 1 +A +del A[*t, 4] +A +A[0, *t] +A[0, *t] = 1 +A +del A[0, *t] +A +A[1:2, *t] +A[1:2, *t] = 1 +A +del A[1:2, *t] +A +repr(A[1:2, *t]) == repr(A[1:2, 1, 2, 3]) + + +def returns_list(): + return [1, 2, 3] + + +A[returns_list()] +A[returns_list()] = 1 +A +del A[returns_list()] +A +A[returns_list(), 4] +A[returns_list(), 4] = 1 +A +del A[returns_list(), 4] +A +A[*returns_list()] +A[*returns_list()] = 1 +A +del A[*returns_list()] +A +A[*returns_list(), 4] +A[*returns_list(), 4] = 1 +A +del A[*returns_list(), 4] +A +A[0, *returns_list()] +A[0, *returns_list()] = 1 +A +del A[0, *returns_list()] +A +A[*returns_list(), *returns_list()] +A[*returns_list(), *returns_list()] = 1 +A +del A[*returns_list(), *returns_list()] +A +A[1:2, *b] +A[*b, 1:2] +A[1:2, *b, 1:2] +A[*b, 1:2, *b] +A[1:, *b] +A[*b, 1:] +A[1:, *b, 1:] +A[*b, 1:, *b] +A[:1, *b] +A[*b, :1] +A[:1, *b, :1] +A[*b, :1, *b] +A[:, *b] +A[*b, :] +A[:, *b, :] +A[*b, :, *b] +A[a * b()] +A[a * b(), *c, *d(), e * f(g * h)] +A[a * b(), :] +A[a * b(), *c, *d(), e * f(g * h) :] +A[[b] * len(c), :] + + +def f1(*args: *b): + pass + + +f1.__annotations__ + + +def f2(*args: *b, arg1): + pass + + +f2.__annotations__ + + +def f3(*args: *b, arg1: int): + pass + + +f3.__annotations__ + + +def f4(*args: *b, arg1: int = 2): + pass + + +f4.__annotations__ diff --git a/tests/data/py_311/pep_654.py b/tests/data/py_311/pep_654.py new file mode 100644 index 00000000000..387c0816f4b --- /dev/null +++ b/tests/data/py_311/pep_654.py @@ -0,0 +1,53 @@ +try: + raise OSError("blah") +except* ExceptionGroup as e: + pass + + +try: + async with trio.open_nursery() as nursery: + # Make two concurrent calls to child() + nursery.start_soon(child) + nursery.start_soon(child) +except* ValueError: + 
pass + +try: + try: + raise ValueError(42) + except: + try: + raise TypeError(int) + except* Exception: + pass + 1 / 0 +except Exception as e: + exc = e + +try: + try: + raise FalsyEG("eg", [TypeError(1), ValueError(2)]) + except* TypeError as e: + tes = e + raise + except* ValueError as e: + ves = e + pass +except Exception as e: + exc = e + +try: + try: + raise orig + except* (TypeError, ValueError) as e: + raise SyntaxError(3) from e +except BaseException as e: + exc = e + +try: + try: + raise orig + except* OSError as e: + raise TypeError(3) from e +except ExceptionGroup as e: + exc = e diff --git a/tests/data/py_311/pep_654_style.py b/tests/data/py_311/pep_654_style.py new file mode 100644 index 00000000000..568e5e3efa4 --- /dev/null +++ b/tests/data/py_311/pep_654_style.py @@ -0,0 +1,111 @@ +try: + raise OSError("blah") +except * ExceptionGroup as e: + pass + + +try: + async with trio.open_nursery() as nursery: + # Make two concurrent calls to child() + nursery.start_soon(child) + nursery.start_soon(child) +except *ValueError: + pass + +try: + try: + raise ValueError(42) + except: + try: + raise TypeError(int) + except *(Exception): + pass + 1 / 0 +except Exception as e: + exc = e + +try: + try: + raise FalsyEG("eg", [TypeError(1), ValueError(2)]) + except \ + *TypeError as e: + tes = e + raise + except * ValueError as e: + ves = e + pass +except Exception as e: + exc = e + +try: + try: + raise orig + except *(TypeError, ValueError, *OTHER_EXCEPTIONS) as e: + raise SyntaxError(3) from e +except BaseException as e: + exc = e + +try: + try: + raise orig + except\ + * OSError as e: + raise TypeError(3) from e +except ExceptionGroup as e: + exc = e + +# output + +try: + raise OSError("blah") +except* ExceptionGroup as e: + pass + + +try: + async with trio.open_nursery() as nursery: + # Make two concurrent calls to child() + nursery.start_soon(child) + nursery.start_soon(child) +except* ValueError: + pass + +try: + try: + raise ValueError(42) + except: + try: + raise TypeError(int) + except* (Exception): + pass + 1 / 0 +except Exception as e: + exc = e + +try: + try: + raise FalsyEG("eg", [TypeError(1), ValueError(2)]) + except* TypeError as e: + tes = e + raise + except* ValueError as e: + ves = e + pass +except Exception as e: + exc = e + +try: + try: + raise orig + except* (TypeError, ValueError, *OTHER_EXCEPTIONS) as e: + raise SyntaxError(3) from e +except BaseException as e: + exc = e + +try: + try: + raise orig + except* OSError as e: + raise TypeError(3) from e +except ExceptionGroup as e: + exc = e diff --git a/tests/data/numeric_literals.py b/tests/data/py_36/numeric_literals.py similarity index 100% rename from tests/data/numeric_literals.py rename to tests/data/py_36/numeric_literals.py diff --git a/tests/data/numeric_literals_skip_underscores.py b/tests/data/py_36/numeric_literals_skip_underscores.py similarity index 100% rename from tests/data/numeric_literals_skip_underscores.py rename to tests/data/py_36/numeric_literals_skip_underscores.py diff --git a/tests/data/python37.py b/tests/data/py_37/python37.py similarity index 100% rename from tests/data/python37.py rename to tests/data/py_37/python37.py diff --git a/tests/data/pep_570.py b/tests/data/py_38/pep_570.py similarity index 100% rename from tests/data/pep_570.py rename to tests/data/py_38/pep_570.py diff --git a/tests/data/pep_572.py b/tests/data/py_38/pep_572.py similarity index 90% rename from tests/data/pep_572.py rename to tests/data/py_38/pep_572.py index 9e429f913ce..d41805f1cb1 100644 --- 
a/tests/data/pep_572.py +++ b/tests/data/py_38/pep_572.py @@ -2,7 +2,9 @@ (a := a) if (match := pattern.search(data)) is None: pass -[y := f(x), y ** 2, y ** 3] +if match := pattern.search(data): + pass +[y := f(x), y**2, y**3] filtered_data = [y for x in data if (y := f(x)) is None] (y := f(x)) y0 = (y1 := f(x)) @@ -41,3 +43,5 @@ def foo(answer: (p := 42) = 5): while x := f(x): pass +while x := f(x): + pass diff --git a/tests/data/py_38/pep_572_remove_parens.py b/tests/data/py_38/pep_572_remove_parens.py new file mode 100644 index 00000000000..9718d95b499 --- /dev/null +++ b/tests/data/py_38/pep_572_remove_parens.py @@ -0,0 +1,105 @@ +if (foo := 0): + pass + +if (foo := 1): + pass + +if (y := 5 + 5): + pass + +y = (x := 0) + +y += (x := 0) + +(y := 5 + 5) + +test: int = (test2 := 2) + +a, b = (test := (1, 2)) + +# see also https://github.com/psf/black/issues/2139 +assert (foo := 42 - 12) + +foo(x=(y := f(x))) + + +def foo(answer=(p := 42)): + ... + + +def foo2(answer: (p := 42) = 5): + ... + + +lambda: (x := 1) + +a[(x := 12)] +a[:(x := 13)] + +# we don't touch expressions in f-strings but if we do one day, don't break 'em +f'{(x:=10)}' + + +def a(): + return (x := 3) + await (b := 1) + yield (a := 2) + raise (c := 3) + +def this_is_so_dumb() -> (please := no): + pass + + +# output +if foo := 0: + pass + +if foo := 1: + pass + +if y := 5 + 5: + pass + +y = (x := 0) + +y += (x := 0) + +(y := 5 + 5) + +test: int = (test2 := 2) + +a, b = (test := (1, 2)) + +# see also https://github.com/psf/black/issues/2139 +assert (foo := 42 - 12) + +foo(x=(y := f(x))) + + +def foo(answer=(p := 42)): + ... + + +def foo2(answer: (p := 42) = 5): + ... + + +lambda: (x := 1) + +a[(x := 12)] +a[:(x := 13)] + +# we don't touch expressions in f-strings but if we do one day, don't break 'em +f"{(x:=10)}" + + +def a(): + return (x := 3) + await (b := 1) + yield (a := 2) + raise (c := 3) + + +def this_is_so_dumb() -> (please := no): + pass + diff --git a/tests/data/py_38/python38.py b/tests/data/py_38/python38.py new file mode 100644 index 00000000000..63b0588bc27 --- /dev/null +++ b/tests/data/py_38/python38.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3.8 + + +def starred_return(): + my_list = ["value2", "value3"] + return "value1", *my_list + + +def starred_yield(): + my_list = ["value2", "value3"] + yield "value1", *my_list + + +# all right hand side expressions allowed in regular assignments are now also allowed in +# annotated assignments +a : Tuple[ str, int] = "1", 2 +a: Tuple[int , ... ] = b, *c, d +def t(): + a : str = yield "a" + + +# output + + +#!/usr/bin/env python3.8 + + +def starred_return(): + my_list = ["value2", "value3"] + return "value1", *my_list + + +def starred_yield(): + my_list = ["value2", "value3"] + yield "value1", *my_list + + +# all right hand side expressions allowed in regular assignments are now also allowed in +# annotated assignments +a: Tuple[str, int] = "1", 2 +a: Tuple[int, ...] 
= b, *c, d + + +def t(): + a: str = yield "a" diff --git a/tests/data/py_39/pep_572_py39.py b/tests/data/py_39/pep_572_py39.py new file mode 100644 index 00000000000..b8b081b8c45 --- /dev/null +++ b/tests/data/py_39/pep_572_py39.py @@ -0,0 +1,7 @@ +# Unparenthesized walruses are now allowed in set literals & set comprehensions +# since Python 3.9 +{x := 1, 2, 3} +{x4 := x**5 for x in range(7)} +# We better not remove the parentheses here (since it's a 3.10 feature) +x[(a := 1)] +x[(a := 1), (b := 3)] diff --git a/tests/data/py_39/python39.py b/tests/data/py_39/python39.py new file mode 100644 index 00000000000..ae67c2257eb --- /dev/null +++ b/tests/data/py_39/python39.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3.9 + +@relaxed_decorator[0] +def f(): + ... + +@relaxed_decorator[extremely_long_name_that_definitely_will_not_fit_on_one_line_of_standard_length] +def f(): + ... + +@extremely_long_variable_name_that_doesnt_fit := complex.expression(with_long="arguments_value_that_wont_fit_at_the_end_of_the_line") +def f(): + ... + +# output + + +#!/usr/bin/env python3.9 + + +@relaxed_decorator[0] +def f(): + ... + + +@relaxed_decorator[ + extremely_long_name_that_definitely_will_not_fit_on_one_line_of_standard_length +] +def f(): + ... + + +@extremely_long_variable_name_that_doesnt_fit := complex.expression( + with_long="arguments_value_that_wont_fit_at_the_end_of_the_line" +) +def f(): + ... \ No newline at end of file diff --git a/tests/data/python2.py b/tests/data/python2.py deleted file mode 100644 index 4a22f46de42..00000000000 --- a/tests/data/python2.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python2 - -import sys - -print >> sys.stderr , "Warning:" , -print >> sys.stderr , "this is a blast from the past." -print >> sys.stderr , "Look, a repr:", `sys` - - -def function((_globals, _locals)): - exec ur"print 'hi from exec!'" in _globals, _locals - - -function((globals(), locals())) - - -# output - - -#!/usr/bin/env python2 - -import sys - -print >>sys.stderr, "Warning:", -print >>sys.stderr, "this is a blast from the past." 
-print >>sys.stderr, "Look, a repr:", ` sys ` - - -def function((_globals, _locals)): - exec ur"print 'hi from exec!'" in _globals, _locals - - -function((globals(), locals())) diff --git a/tests/data/python2_print_function.py b/tests/data/python2_print_function.py deleted file mode 100755 index 81b8d8a70ce..00000000000 --- a/tests/data/python2_print_function.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python2 -from __future__ import print_function - -print('hello') -print(u'hello') -print(a, file=sys.stderr) - -# output - - -#!/usr/bin/env python2 -from __future__ import print_function - -print("hello") -print(u"hello") -print(a, file=sys.stderr) diff --git a/tests/data/python2_unicode_literals.py b/tests/data/python2_unicode_literals.py deleted file mode 100644 index 2fe70392af6..00000000000 --- a/tests/data/python2_unicode_literals.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python2 -from __future__ import unicode_literals as _unicode_literals -from __future__ import absolute_import -from __future__ import print_function as lol, with_function - -u'hello' -U"hello" -Ur"hello" - -# output - - -#!/usr/bin/env python2 -from __future__ import unicode_literals as _unicode_literals -from __future__ import absolute_import -from __future__ import print_function as lol, with_function - -"hello" -"hello" -r"hello" diff --git a/tests/data/python38.py b/tests/data/python38.py deleted file mode 100644 index 1a7f76167d9..00000000000 --- a/tests/data/python38.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3.8 - - -def starred_return(): - my_list = ["value2", "value3"] - return "value1", *my_list - - -def starred_yield(): - my_list = ["value2", "value3"] - yield "value1", *my_list - - -# output - - -#!/usr/bin/env python3.8 - - -def starred_return(): - my_list = ["value2", "value3"] - return "value1", *my_list - - -def starred_yield(): - my_list = ["value2", "value3"] - yield "value1", *my_list diff --git a/tests/data/simple_cases/attribute_access_on_number_literals.py b/tests/data/simple_cases/attribute_access_on_number_literals.py new file mode 100644 index 00000000000..7c16bdfb3a5 --- /dev/null +++ b/tests/data/simple_cases/attribute_access_on_number_literals.py @@ -0,0 +1,47 @@ +x = 123456789 .bit_count() +x = (123456).__abs__() +x = .1.is_integer() +x = 1. .imag +x = 1E+1.imag +x = 1E-1.real +x = 123456789.123456789.hex() +x = 123456789.123456789E123456789 .real +x = 123456789E123456789 .conjugate() +x = 123456789J.real +x = 123456789.123456789J.__add__(0b1011.bit_length()) +x = 0XB1ACC.conjugate() +x = 0B1011 .conjugate() +x = 0O777 .real +x = 0.000000006 .hex() +x = -100.0000J + +if 10 .real: + ... + +y = 100[no] +y = 100(no) + +# output + +x = (123456789).bit_count() +x = (123456).__abs__() +x = (0.1).is_integer() +x = (1.0).imag +x = (1e1).imag +x = (1e-1).real +x = (123456789.123456789).hex() +x = (123456789.123456789e123456789).real +x = (123456789e123456789).conjugate() +x = 123456789j.real +x = 123456789.123456789j.__add__(0b1011.bit_length()) +x = 0xB1ACC.conjugate() +x = 0b1011.conjugate() +x = 0o777.real +x = (0.000000006).hex() +x = -100.0000j + +if (10).real: + ... 
+ +y = 100[no] +y = 100(no) diff --git a/tests/data/beginning_backslash.py b/tests/data/simple_cases/beginning_backslash.py similarity index 100% rename from tests/data/beginning_backslash.py rename to tests/data/simple_cases/beginning_backslash.py diff --git a/tests/data/bracketmatch.py b/tests/data/simple_cases/bracketmatch.py similarity index 100% rename from tests/data/bracketmatch.py rename to tests/data/simple_cases/bracketmatch.py diff --git a/tests/data/class_blank_parentheses.py b/tests/data/simple_cases/class_blank_parentheses.py similarity index 100% rename from tests/data/class_blank_parentheses.py rename to tests/data/simple_cases/class_blank_parentheses.py diff --git a/tests/data/class_methods_new_line.py b/tests/data/simple_cases/class_methods_new_line.py similarity index 100% rename from tests/data/class_methods_new_line.py rename to tests/data/simple_cases/class_methods_new_line.py diff --git a/tests/data/collections.py b/tests/data/simple_cases/collections.py similarity index 100% rename from tests/data/collections.py rename to tests/data/simple_cases/collections.py diff --git a/tests/data/comment_after_escaped_newline.py b/tests/data/simple_cases/comment_after_escaped_newline.py similarity index 100% rename from tests/data/comment_after_escaped_newline.py rename to tests/data/simple_cases/comment_after_escaped_newline.py diff --git a/tests/data/comments.py b/tests/data/simple_cases/comments.py similarity index 100% rename from tests/data/comments.py rename to tests/data/simple_cases/comments.py diff --git a/tests/data/comments2.py b/tests/data/simple_cases/comments2.py similarity index 98% rename from tests/data/comments2.py rename to tests/data/simple_cases/comments2.py index 221cb3fe143..4eea013151a 100644 --- a/tests/data/comments2.py +++ b/tests/data/simple_cases/comments2.py @@ -159,7 +159,7 @@ def _init_host(self, parsed) -> None: ####################### -instruction() +instruction()#comment with bad spacing # END COMMENTS # MORE END COMMENTS @@ -336,7 +336,7 @@ def _init_host(self, parsed) -> None: ####################### -instruction() +instruction() # comment with bad spacing # END COMMENTS # MORE END COMMENTS diff --git a/tests/data/comments3.py b/tests/data/simple_cases/comments3.py similarity index 100% rename from tests/data/comments3.py rename to tests/data/simple_cases/comments3.py diff --git a/tests/data/comments4.py b/tests/data/simple_cases/comments4.py similarity index 100% rename from tests/data/comments4.py rename to tests/data/simple_cases/comments4.py diff --git a/tests/data/comments5.py b/tests/data/simple_cases/comments5.py similarity index 100% rename from tests/data/comments5.py rename to tests/data/simple_cases/comments5.py diff --git a/tests/data/comments6.py b/tests/data/simple_cases/comments6.py similarity index 100% rename from tests/data/comments6.py rename to tests/data/simple_cases/comments6.py diff --git a/tests/data/simple_cases/comments_non_breaking_space.py b/tests/data/simple_cases/comments_non_breaking_space.py new file mode 100644 index 00000000000..e17c3f4ca39 --- /dev/null +++ b/tests/data/simple_cases/comments_non_breaking_space.py @@ -0,0 +1,44 @@ +from .config import ( ConfigTypeAttributes, Int, Path, # String, + # DEFAULT_TYPE_ATTRIBUTES, +) + +result = 1 # A simple comment +result = ( 1, ) # Another one + +result = 1 # type: ignore +result = 1# This comment is talking about type: ignore +square = Square(4) # type: Optional[Square] + +def function(a:int=42): + """ This docstring is already formatted + a + b + """ + #  
There's a NBSP + 3 spaces before + # And 4 spaces on the next line + pass + +# output +from .config import ( + ConfigTypeAttributes, + Int, + Path, # String, + # DEFAULT_TYPE_ATTRIBUTES, +) + +result = 1 # A simple comment +result = (1,) # Another one + +result = 1 #  type: ignore +result = 1 # This comment is talking about type: ignore +square = Square(4) #  type: Optional[Square] + + +def function(a: int = 42): + """This docstring is already formatted + a + b + """ + # There's a NBSP + 3 spaces before + # And 4 spaces on the next line + pass diff --git a/tests/data/composition.py b/tests/data/simple_cases/composition.py similarity index 100% rename from tests/data/composition.py rename to tests/data/simple_cases/composition.py diff --git a/tests/data/simple_cases/composition_no_trailing_comma.py b/tests/data/simple_cases/composition_no_trailing_comma.py new file mode 100644 index 00000000000..f17b89dea8d --- /dev/null +++ b/tests/data/simple_cases/composition_no_trailing_comma.py @@ -0,0 +1,367 @@ +class C: + def test(self) -> None: + with patch("black.out", print): + self.assertEqual( + unstyle(str(report)), "1 file reformatted, 1 file failed to reformat." + ) + self.assertEqual( + unstyle(str(report)), + "1 file reformatted, 1 file left unchanged, 1 file failed to reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 1 file left unchanged, 1 file failed to" + " reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", + ) + for i in (a,): + if ( + # Rule 1 + i % 2 == 0 + # Rule 2 + and i % 3 == 0 + ): + while ( + # Just a comment + call() + # Another + ): + print(i) + xxxxxxxxxxxxxxxx = Yyyy2YyyyyYyyyyy( + push_manager=context.request.resource_manager, + max_items_to_push=num_items, + batch_size=Yyyy2YyyyYyyyyYyyy.FULL_SIZE + ).push( + # Only send the first n items. 
+ items=items[:num_items] + ) + return ( + 'Utterly failed doctest test for %s\n File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def omitting_trailers(self) -> None: + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex] + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex][TwoLevelIndex][ThreeLevelIndex][FourLevelIndex] + d[0][1][2][3][4][5][6][7][8][9][10][11][12][13][14][15][16][17][18][19][20][21][ + 22 + ] + assignment = ( + some.rather.elaborate.rule() and another.rule.ending_with.index[123] + ) + + def easy_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } == expected, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } + + def tricky_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } == expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ), "Not what we expected" + + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } == expected, ( + "Not what we expected and the message is too long to fit in one line" + ) + + assert expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ) == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + }, ( + "Not what we expected and the message is too long to fit in one line" + " because it's too long" + ) + + dis_c_instance_method = """\ + %3d 0 LOAD_FAST 1 (x) + 2 LOAD_CONST 1 (1) + 4 COMPARE_OP 2 (==) + 6 LOAD_FAST 0 (self) + 8 STORE_ATTR 0 (x) + 10 LOAD_CONST 0 (None) + 12 RETURN_VALUE + """ % ( + _C.__init__.__code__.co_firstlineno + 1, + ) + + assert ( + expectedexpectedexpectedexpectedexpectedexpectedexpectedexpectedexpect + == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9 + } + ) + + + +# output + +class C: + def test(self) -> None: + with patch("black.out", print): + self.assertEqual( + unstyle(str(report)), "1 file reformatted, 1 file failed to reformat." 
+ ) + self.assertEqual( + unstyle(str(report)), + "1 file reformatted, 1 file left unchanged, 1 file failed to reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 1 file left unchanged, 1 file failed to" + " reformat.", + ) + self.assertEqual( + unstyle(str(report)), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", + ) + for i in (a,): + if ( + # Rule 1 + i % 2 == 0 + # Rule 2 + and i % 3 == 0 + ): + while ( + # Just a comment + call() + # Another + ): + print(i) + xxxxxxxxxxxxxxxx = Yyyy2YyyyyYyyyyy( + push_manager=context.request.resource_manager, + max_items_to_push=num_items, + batch_size=Yyyy2YyyyYyyyyYyyy.FULL_SIZE, + ).push( + # Only send the first n items. + items=items[:num_items] + ) + return ( + 'Utterly failed doctest test for %s\n File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def omitting_trailers(self) -> None: + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex] + get_collection( + hey_this_is_a_very_long_call, it_has_funny_attributes, really=True + )[OneLevelIndex][TwoLevelIndex][ThreeLevelIndex][FourLevelIndex] + d[0][1][2][3][4][5][6][7][8][9][10][11][12][13][14][15][16][17][18][19][20][21][ + 22 + ] + assignment = ( + some.rather.elaborate.rule() and another.rule.ending_with.index[123] + ) + + def easy_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } == expected, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } + + def tricky_asserts(self) -> None: + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } == expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ), "Not what we expected" + + assert { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + } == expected, ( + "Not what we expected and the message is too long to fit in one line" + ) + + assert expected( + value, is_going_to_be="too long to fit in a single line", srsly=True + ) == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + }, "Not what we expected" + + assert expected == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: value7, + key8: value8, + key9: value9, + }, ( + "Not what we expected and the message is too long to fit in one line" + " because it's too long" + ) + + dis_c_instance_method = """\ + %3d 0 LOAD_FAST 1 (x) + 2 LOAD_CONST 1 (1) + 4 COMPARE_OP 2 (==) + 6 LOAD_FAST 0 (self) + 8 STORE_ATTR 0 (x) + 10 LOAD_CONST 0 (None) + 12 RETURN_VALUE + """ % ( + _C.__init__.__code__.co_firstlineno + 1, + ) + + assert ( + expectedexpectedexpectedexpectedexpectedexpectedexpectedexpectedexpect + == { + key1: value1, + key2: value2, + key3: value3, + key4: value4, + key5: value5, + key6: value6, + key7: 
value7, + key8: value8, + key9: value9, + } + ) diff --git a/tests/data/simple_cases/docstring.py b/tests/data/simple_cases/docstring.py new file mode 100644 index 00000000000..f08bba575fe --- /dev/null +++ b/tests/data/simple_cases/docstring.py @@ -0,0 +1,433 @@ +class MyClass: + """ Multiline + class docstring + """ + + def method(self): + """Multiline + method docstring + """ + pass + + +def foo(): + """This is a docstring with + some lines of text here + """ + return + + +def bar(): + '''This is another docstring + with more lines of text + ''' + return + + +def baz(): + '''"This" is a string with some + embedded "quotes"''' + return + + +def troz(): + '''Indentation with tabs + is just as OK + ''' + return + + +def zort(): + """Another + multiline + docstring + """ + pass + +def poit(): + """ + Lorem ipsum dolor sit amet. + + Consectetur adipiscing elit: + - sed do eiusmod tempor incididunt ut labore + - dolore magna aliqua + - enim ad minim veniam + - quis nostrud exercitation ullamco laboris nisi + - aliquip ex ea commodo consequat + """ + pass + + +def under_indent(): + """ + These lines are indented in a way that does not +make sense. + """ + pass + + +def over_indent(): + """ + This has a shallow indent + - But some lines are deeper + - And the closing quote is too deep + """ + pass + + +def single_line(): + """But with a newline after it! + + """ + pass + + +def this(): + r""" + 'hey ho' + """ + + +def that(): + """ "hey yah" """ + + +def and_that(): + """ + "hey yah" """ + + +def and_this(): + ''' + "hey yah"''' + + +def multiline_whitespace(): + ''' + + + + + ''' + + +def oneline_whitespace(): + ''' ''' + + +def empty(): + """""" + + +def single_quotes(): + 'testing' + + +def believe_it_or_not_this_is_in_the_py_stdlib(): ''' +"hey yah"''' + + +def ignored_docstring(): + """a => \ +b""" + +def single_line_docstring_with_whitespace(): + """ This should be stripped """ + +def docstring_with_inline_tabs_and_space_indentation(): + """hey + + tab separated value + tab at start of line and then a tab separated value + multiple tabs at the beginning and inline + mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. + + line ends with some tabs + """ + + +def docstring_with_inline_tabs_and_tab_indentation(): + """hey + + tab separated value + tab at start of line and then a tab separated value + multiple tabs at the beginning and inline + mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. + + line ends with some tabs + """ + pass + + +def backslash_space(): + """\ """ + + +def multiline_backslash_1(): + ''' + hey\there\ + \ ''' + + +def multiline_backslash_2(): + ''' + hey there \ ''' + + +def multiline_backslash_3(): + ''' + already escaped \\ ''' + + +def my_god_its_full_of_stars_1(): + "I'm sorry Dave\u2001" + + +# the space below is actually a \u2001, removed in output +def my_god_its_full_of_stars_2(): + "I'm sorry Dave " + + +def docstring_almost_at_line_limit(): + """long docstring.................................................................""" + + +def docstring_almost_at_line_limit2(): + """long docstring................................................................. + + .................................................................................. 
+ """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def stable_quote_normalization_with_immediate_inner_single_quote(self): + '''' + + + ''' + + +# output + +class MyClass: + """Multiline + class docstring + """ + + def method(self): + """Multiline + method docstring + """ + pass + + +def foo(): + """This is a docstring with + some lines of text here + """ + return + + +def bar(): + """This is another docstring + with more lines of text + """ + return + + +def baz(): + '''"This" is a string with some + embedded "quotes"''' + return + + +def troz(): + """Indentation with tabs + is just as OK + """ + return + + +def zort(): + """Another + multiline + docstring + """ + pass + + +def poit(): + """ + Lorem ipsum dolor sit amet. + + Consectetur adipiscing elit: + - sed do eiusmod tempor incididunt ut labore + - dolore magna aliqua + - enim ad minim veniam + - quis nostrud exercitation ullamco laboris nisi + - aliquip ex ea commodo consequat + """ + pass + + +def under_indent(): + """ + These lines are indented in a way that does not + make sense. + """ + pass + + +def over_indent(): + """ + This has a shallow indent + - But some lines are deeper + - And the closing quote is too deep + """ + pass + + +def single_line(): + """But with a newline after it!""" + pass + + +def this(): + r""" + 'hey ho' + """ + + +def that(): + """ "hey yah" """ + + +def and_that(): + """ + "hey yah" """ + + +def and_this(): + ''' + "hey yah"''' + + +def multiline_whitespace(): + """ """ + + +def oneline_whitespace(): + """ """ + + +def empty(): + """""" + + +def single_quotes(): + "testing" + + +def believe_it_or_not_this_is_in_the_py_stdlib(): + ''' + "hey yah"''' + + +def ignored_docstring(): + """a => \ +b""" + + +def single_line_docstring_with_whitespace(): + """This should be stripped""" + + +def docstring_with_inline_tabs_and_space_indentation(): + """hey + + tab separated value + tab at start of line and then a tab separated value + multiple tabs at the beginning and inline + mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. + + line ends with some tabs + """ + + +def docstring_with_inline_tabs_and_tab_indentation(): + """hey + + tab separated value + tab at start of line and then a tab separated value + multiple tabs at the beginning and inline + mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. + + line ends with some tabs + """ + pass + + +def backslash_space(): + """\ """ + + +def multiline_backslash_1(): + """ + hey\there\ + \ """ + + +def multiline_backslash_2(): + """ + hey there \ """ + + +def multiline_backslash_3(): + """ + already escaped \\""" + + +def my_god_its_full_of_stars_1(): + "I'm sorry Dave\u2001" + + +# the space below is actually a \u2001, removed in output +def my_god_its_full_of_stars_2(): + "I'm sorry Dave" + + +def docstring_almost_at_line_limit(): + """long docstring.................................................................""" + + +def docstring_almost_at_line_limit2(): + """long docstring................................................................. + + .................................................................................. 
+ """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def stable_quote_normalization_with_immediate_inner_single_quote(self): + """' + + + """ diff --git a/tests/data/empty_lines.py b/tests/data/simple_cases/empty_lines.py similarity index 100% rename from tests/data/empty_lines.py rename to tests/data/simple_cases/empty_lines.py diff --git a/tests/data/expression.diff b/tests/data/simple_cases/expression.diff similarity index 83% rename from tests/data/expression.diff rename to tests/data/simple_cases/expression.diff index 684f92cd3b7..2eaaeb479f8 100644 --- a/tests/data/expression.diff +++ b/tests/data/simple_cases/expression.diff @@ -11,7 +11,17 @@ True False 1 -@@ -29,63 +29,96 @@ +@@ -21,99 +21,135 @@ + Name1 or (Name2 and Name3) or Name4 + Name1 or Name2 and Name3 or Name4 + v1 << 2 + 1 >> v2 + 1 % finished +-1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 +-((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) ++1 + v2 - v3 * 4 ^ 5**v6 / 7 // 8 ++((1 + v2) - (v3 * 4)) ^ (((5**v6) / 7) // 8) + not great ~great +value -1 @@ -19,7 +29,7 @@ (~int) and (not ((v1 ^ (123 + v2)) | True)) -+really ** -confusing ** ~operator ** -precedence -flags & ~ select.EPOLLIN and waiters.write_task is not None -++(really ** -(confusing ** ~(operator ** -precedence))) +++(really ** -(confusing ** ~(operator**-precedence))) +flags & ~select.EPOLLIN and waiters.write_task is not None lambda arg: None lambda a=True: a @@ -88,15 +98,19 @@ + *more, +] {i for i in (1, 2, 3)} - {(i ** 2) for i in (1, 2, 3)} +-{(i ** 2) for i in (1, 2, 3)} -{(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))} -+{(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} - {((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} +-{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} ++{(i**2) for i in (1, 2, 3)} ++{(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} ++{((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} [i for i in (1, 2, 3)] - [(i ** 2) for i in (1, 2, 3)] +-[(i ** 2) for i in (1, 2, 3)] -[(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))] -+[(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] - [((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] +-[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] ++[(i**2) for i in (1, 2, 3)] ++[(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] ++[((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] {i: 0 for i in (1, 2, 3)} -{i: j for i, j in ((1, 'a'), (2, 'b'), (3, 'c'))} +{i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} @@ -130,8 +144,11 @@ call(**self.screen_kwargs) call(b, **self.screen_kwargs) lukasz.langa.pl -@@ -94,26 +127,29 @@ - 1.0 .real + call.me(maybe) +-1 .real +-1.0 .real ++(1).real ++(1.0).real ....__class__ list[str] dict[str, int] @@ -166,7 +183,7 @@ slice[0:1:2] slice[:] slice[:-1] -@@ -137,113 +173,180 @@ +@@ -137,118 +173,199 @@ numpy[-(c + 1) :, d] numpy[:, l[-2]] numpy[:, ::-1] @@ -181,10 +198,12 @@ SomeName (Good, Bad, Ugly) (i for i in (1, 2, 3)) - ((i ** 2) for i in (1, 2, 3)) +-((i ** 2) for i in (1, 2, 3)) -((i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))) -+((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) - (((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) +-(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) ++((i**2) for i in (1, 2, 3)) ++((i**2) 
for i, _ in ((1, "a"), (2, "b"), (3, "c"))) ++(((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) (*starred,) -{"id": "1","type": "type","started_at": now(),"ended_at": now() + timedelta(days=10),"priority": 1,"import_session_id": 1,**kwargs} +{ @@ -346,6 +365,9 @@ - return True -if ( - ~ aaaaaaaaaaaaaaaa.a + aaaaaaaaaaaaaaaa.b - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n +-): +- return True +-aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaa * (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) +a = ( + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp + in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz @@ -400,13 +422,13 @@ + return True +if ( + ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e -+ | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n ++ | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l**aaaa.m // aaaa.n +): + return True +if ( + ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e + | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h -+ ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n ++ ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l**aaaaaaaa.m // aaaaaaaa.n +): + return True +if ( @@ -416,8 +438,29 @@ + | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h + ^ aaaaaaaaaaaaaaaa.i + << aaaaaaaaaaaaaaaa.k -+ >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n - ): - return True ++ >> aaaaaaaaaaaaaaaa.l**aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ++): ++ return True ++( ++ aaaaaaaaaaaaaaaa ++ + aaaaaaaaaaaaaaaa ++ - aaaaaaaaaaaaaaaa ++ * (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) ++ / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) ++) + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa +-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++) + bbbb >> bbbb * bbbb +-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++( ++ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ ^ bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++ ^ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ++) last_call() # standalone comment at ENDMARKER diff --git a/tests/data/expression.py b/tests/data/simple_cases/expression.py similarity index 89% rename from tests/data/expression.py rename to tests/data/simple_cases/expression.py index 8e63bdcdf9b..06096c589f1 100644 --- a/tests/data/expression.py +++ b/tests/data/simple_cases/expression.py @@ -245,6 +245,11 @@ async def f(): ~ aaaaaaaaaaaaaaaa.a + aaaaaaaaaaaaaaaa.b - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ): return True +aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaa * (aaaaaaaaaaaaaaaa 
+ aaaaaaaaaaaaaaaa) / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) +aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +bbbb >> bbbb * bbbb +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa last_call() # standalone comment at ENDMARKER @@ -277,15 +282,15 @@ async def f(): v1 << 2 1 >> v2 1 % finished -1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 -((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) +1 + v2 - v3 * 4 ^ 5**v6 / 7 // 8 +((1 + v2) - (v3 * 4)) ^ (((5**v6) / 7) // 8) not great ~great +value -1 ~int and not v1 ^ 123 + v2 | True (~int) and (not ((v1 ^ (123 + v2)) | True)) -+(really ** -(confusing ** ~(operator ** -precedence))) ++(really ** -(confusing ** ~(operator**-precedence))) flags & ~select.EPOLLIN and waiters.write_task is not None lambda arg: None lambda a=True: a @@ -342,13 +347,13 @@ async def f(): *more, ] {i for i in (1, 2, 3)} -{(i ** 2) for i in (1, 2, 3)} -{(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} -{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} +{(i**2) for i in (1, 2, 3)} +{(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} +{((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} [i for i in (1, 2, 3)] -[(i ** 2) for i in (1, 2, 3)] -[(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] -[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] +[(i**2) for i in (1, 2, 3)] +[(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] +[((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] {i: 0 for i in (1, 2, 3)} {i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} {a: b * 2 for a, b in dictionary.items()} @@ -377,8 +382,8 @@ async def f(): call(b, **self.screen_kwargs) lukasz.langa.pl call.me(maybe) -1 .real -1.0 .real +(1).real +(1.0).real ....__class__ list[str] dict[str, int] @@ -436,9 +441,9 @@ async def f(): SomeName (Good, Bad, Ugly) (i for i in (1, 2, 3)) -((i ** 2) for i in (1, 2, 3)) -((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) -(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) +((i**2) for i in (1, 2, 3)) +((i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) +(((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) (*starred,) { "id": "1", @@ -583,13 +588,13 @@ async def f(): return True if ( ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e - | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n + | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l**aaaa.m // aaaa.n ): return True if ( ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h - ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n + ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l**aaaaaaaa.m // aaaaaaaa.n ): return True if ( @@ -599,8 +604,27 @@ async def f(): | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k - >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n + >> aaaaaaaaaaaaaaaa.l**aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ): return True +( + aaaaaaaaaaaaaaaa + + aaaaaaaaaaaaaaaa + - aaaaaaaaaaaaaaaa + * (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) + / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) +) +aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa +( + 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +) +bbbb >> bbbb * bbbb +( + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + ^ bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + ^ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +) last_call() # standalone comment at ENDMARKER diff --git a/tests/data/fmtonoff.py b/tests/data/simple_cases/fmtonoff.py similarity index 100% rename from tests/data/fmtonoff.py rename to tests/data/simple_cases/fmtonoff.py diff --git a/tests/data/fmtonoff2.py b/tests/data/simple_cases/fmtonoff2.py similarity index 100% rename from tests/data/fmtonoff2.py rename to tests/data/simple_cases/fmtonoff2.py diff --git a/tests/data/fmtonoff3.py b/tests/data/simple_cases/fmtonoff3.py similarity index 100% rename from tests/data/fmtonoff3.py rename to tests/data/simple_cases/fmtonoff3.py diff --git a/tests/data/fmtonoff4.py b/tests/data/simple_cases/fmtonoff4.py similarity index 100% rename from tests/data/fmtonoff4.py rename to tests/data/simple_cases/fmtonoff4.py diff --git a/tests/data/simple_cases/fmtonoff5.py b/tests/data/simple_cases/fmtonoff5.py new file mode 100644 index 00000000000..71b1381ed0d --- /dev/null +++ b/tests/data/simple_cases/fmtonoff5.py @@ -0,0 +1,158 @@ +# Regression test for https://github.com/psf/black/issues/3129. +setup( + entry_points={ + # fmt: off + "console_scripts": [ + "foo-bar" + "=foo.bar.:main", + # fmt: on + ] # Includes an formatted indentation. + }, +) + + +# Regression test for https://github.com/psf/black/issues/2015. +run( + # fmt: off + [ + "ls", + "-la", + ] + # fmt: on + + path, + check=True, +) + + +# Regression test for https://github.com/psf/black/issues/3026. +def test_func(): + # yapf: disable + if unformatted( args ): + return True + # yapf: enable + elif b: + return True + + return False + + +# Regression test for https://github.com/psf/black/issues/2567. +if True: + # fmt: off + for _ in range( 1 ): + # fmt: on + print ( "This won't be formatted" ) + print ( "This won't be formatted either" ) +else: + print ( "This will be formatted" ) + + +# Regression test for https://github.com/psf/black/issues/3184. +class A: + async def call(param): + if param: + # fmt: off + if param[0:4] in ( + "ABCD", "EFGH" + ) : + # fmt: on + print ( "This won't be formatted" ) + + elif param[0:4] in ("ZZZZ",): + print ( "This won't be formatted either" ) + + print ( "This will be formatted" ) + + +# Regression test for https://github.com/psf/black/issues/2985 +class Named(t.Protocol): + # fmt: off + @property + def this_wont_be_formatted ( self ) -> str: ... + +class Factory(t.Protocol): + def this_will_be_formatted ( self, **kwargs ) -> Named: ... + # fmt: on + + +# output + + +# Regression test for https://github.com/psf/black/issues/3129. +setup( + entry_points={ + # fmt: off + "console_scripts": [ + "foo-bar" + "=foo.bar.:main", + # fmt: on + ] # Includes an formatted indentation. + }, +) + + +# Regression test for https://github.com/psf/black/issues/2015. +run( + # fmt: off + [ + "ls", + "-la", + ] + # fmt: on + + path, + check=True, +) + + +# Regression test for https://github.com/psf/black/issues/3026. +def test_func(): + # yapf: disable + if unformatted( args ): + return True + # yapf: enable + elif b: + return True + + return False + + +# Regression test for https://github.com/psf/black/issues/2567. 
+if True: + # fmt: off + for _ in range( 1 ): + # fmt: on + print ( "This won't be formatted" ) + print ( "This won't be formatted either" ) +else: + print("This will be formatted") + + +# Regression test for https://github.com/psf/black/issues/3184. +class A: + async def call(param): + if param: + # fmt: off + if param[0:4] in ( + "ABCD", "EFGH" + ) : + # fmt: on + print ( "This won't be formatted" ) + + elif param[0:4] in ("ZZZZ",): + print ( "This won't be formatted either" ) + + print("This will be formatted") + + +# Regression test for https://github.com/psf/black/issues/2985 +class Named(t.Protocol): + # fmt: off + @property + def this_wont_be_formatted ( self ) -> str: ... + + +class Factory(t.Protocol): + def this_will_be_formatted(self, **kwargs) -> Named: + ... + + # fmt: on diff --git a/tests/data/simple_cases/fmtskip.py b/tests/data/simple_cases/fmtskip.py new file mode 100644 index 00000000000..1d5836fc031 --- /dev/null +++ b/tests/data/simple_cases/fmtskip.py @@ -0,0 +1,3 @@ +a, b = 1, 2 +c = 6 # fmt: skip +d = 5 diff --git a/tests/data/simple_cases/fmtskip2.py b/tests/data/simple_cases/fmtskip2.py new file mode 100644 index 00000000000..e6248117aa9 --- /dev/null +++ b/tests/data/simple_cases/fmtskip2.py @@ -0,0 +1,17 @@ +l1 = ["This list should be broken up", "into multiple lines", "because it is way too long"] +l2 = ["But this list shouldn't", "even though it also has", "way too many characters in it"] # fmt: skip +l3 = ["I have", "trailing comma", "so I should be braked",] + +# output + +l1 = [ + "This list should be broken up", + "into multiple lines", + "because it is way too long", +] +l2 = ["But this list shouldn't", "even though it also has", "way too many characters in it"] # fmt: skip +l3 = [ + "I have", + "trailing comma", + "so I should be braked", +] \ No newline at end of file diff --git a/tests/data/simple_cases/fmtskip3.py b/tests/data/simple_cases/fmtskip3.py new file mode 100644 index 00000000000..6e166888e21 --- /dev/null +++ b/tests/data/simple_cases/fmtskip3.py @@ -0,0 +1,20 @@ +a = 3 +# fmt: off +b, c = 1, 2 +d = 6 # fmt: skip +e = 5 +# fmt: on +f = ["This is a very long line that should be formatted into a clearer line ", "by rearranging."] + +# output + +a = 3 +# fmt: off +b, c = 1, 2 +d = 6 # fmt: skip +e = 5 +# fmt: on +f = [ + "This is a very long line that should be formatted into a clearer line ", + "by rearranging.", +] diff --git a/tests/data/simple_cases/fmtskip4.py b/tests/data/simple_cases/fmtskip4.py new file mode 100644 index 00000000000..aadd77d0e53 --- /dev/null +++ b/tests/data/simple_cases/fmtskip4.py @@ -0,0 +1,13 @@ +a = 2 +# fmt: skip +l = [1, 2, 3,] + +# output + +a = 2 +# fmt: skip +l = [ + 1, + 2, + 3, +] \ No newline at end of file diff --git a/tests/data/simple_cases/fmtskip5.py b/tests/data/simple_cases/fmtskip5.py new file mode 100644 index 00000000000..d7b15e0ff41 --- /dev/null +++ b/tests/data/simple_cases/fmtskip5.py @@ -0,0 +1,22 @@ +a, b, c = 3, 4, 5 +if ( + a == 3 + and b != 9 # fmt: skip + and c is not None +): + print("I'm good!") +else: + print("I'm bad") + + +# output + +a, b, c = 3, 4, 5 +if ( + a == 3 + and b != 9 # fmt: skip + and c is not None +): + print("I'm good!") +else: + print("I'm bad") diff --git a/tests/data/simple_cases/fmtskip6.py b/tests/data/simple_cases/fmtskip6.py new file mode 100644 index 00000000000..0a779fcee00 --- /dev/null +++ b/tests/data/simple_cases/fmtskip6.py @@ -0,0 +1,13 @@ +class A: + def f(self): + for line in range(10): + if True: + pass # fmt: skip + +# output + +class A: + def 
f(self): + for line in range(10): + if True: + pass # fmt: skip diff --git a/tests/data/simple_cases/fmtskip7.py b/tests/data/simple_cases/fmtskip7.py new file mode 100644 index 00000000000..15ac0ad7080 --- /dev/null +++ b/tests/data/simple_cases/fmtskip7.py @@ -0,0 +1,11 @@ +a = "this is some code" +b = 5 #fmt:skip +c = 9 #fmt: skip +d = "thisisasuperlongstringthisisasuperlongstringthisisasuperlongstringthisisasuperlongstring" #fmt:skip + +# output + +a = "this is some code" +b = 5 # fmt:skip +c = 9 # fmt: skip +d = "thisisasuperlongstringthisisasuperlongstringthisisasuperlongstringthisisasuperlongstring" # fmt:skip diff --git a/tests/data/simple_cases/fmtskip8.py b/tests/data/simple_cases/fmtskip8.py new file mode 100644 index 00000000000..38e9c2a9f47 --- /dev/null +++ b/tests/data/simple_cases/fmtskip8.py @@ -0,0 +1,62 @@ +# Make sure a leading comment is not removed. +def some_func( unformatted, args ): # fmt: skip + print("I am some_func") + return 0 + # Make sure this comment is not removed. + + +# Make sure a leading comment is not removed. +async def some_async_func( unformatted, args): # fmt: skip + print("I am some_async_func") + await asyncio.sleep(1) + + +# Make sure a leading comment is not removed. +class SomeClass( Unformatted, SuperClasses ): # fmt: skip + def some_method( self, unformatted, args ): # fmt: skip + print("I am some_method") + return 0 + + async def some_async_method( self, unformatted, args ): # fmt: skip + print("I am some_async_method") + await asyncio.sleep(1) + + +# Make sure a leading comment is not removed. +if unformatted_call( args ): # fmt: skip + print("First branch") + # Make sure this is not removed. +elif another_unformatted_call( args ): # fmt: skip + print("Second branch") +else : # fmt: skip + print("Last branch") + + +while some_condition( unformatted, args ): # fmt: skip + print("Do something") + + +for i in some_iter( unformatted, args ): # fmt: skip + print("Do something") + + +async def test_async_for(): + async for i in some_async_iter( unformatted, args ): # fmt: skip + print("Do something") + + +try : # fmt: skip + some_call() +except UnformattedError as ex: # fmt: skip + handle_exception() +finally : # fmt: skip + finally_call() + + +with give_me_context( unformatted, args ): # fmt: skip + print("Do something") + + +async def test_async_with(): + async with give_me_async_context( unformatted, args ): # fmt: skip + print("Do something") diff --git a/tests/data/fstring.py b/tests/data/simple_cases/fstring.py similarity index 100% rename from tests/data/fstring.py rename to tests/data/simple_cases/fstring.py diff --git a/tests/data/function.py b/tests/data/simple_cases/function.py similarity index 100% rename from tests/data/function.py rename to tests/data/simple_cases/function.py diff --git a/tests/data/function2.py b/tests/data/simple_cases/function2.py similarity index 52% rename from tests/data/function2.py rename to tests/data/simple_cases/function2.py index cfc259ea7bd..5bb36c26318 100644 --- a/tests/data/function2.py +++ b/tests/data/simple_cases/function2.py @@ -23,6 +23,35 @@ def inner(): pass print("Inner defs should breathe a little.") + +if os.name == "posix": + import termios + def i_should_be_followed_by_only_one_newline(): + pass +elif os.name == "nt": + try: + import msvcrt + def i_should_be_followed_by_only_one_newline(): + pass + + except ImportError: + + def i_should_be_followed_by_only_one_newline(): + pass + +elif False: + + class IHopeYouAreHavingALovelyDay: + def __call__(self): + 
print("i_should_be_followed_by_only_one_newline") +else: + + def foo(): + pass + +with hmm_but_this_should_get_two_preceding_newlines(): + pass + # output def f( @@ -56,3 +85,37 @@ def inner(): pass print("Inner defs should breathe a little.") + + +if os.name == "posix": + import termios + + def i_should_be_followed_by_only_one_newline(): + pass + +elif os.name == "nt": + try: + import msvcrt + + def i_should_be_followed_by_only_one_newline(): + pass + + except ImportError: + + def i_should_be_followed_by_only_one_newline(): + pass + +elif False: + + class IHopeYouAreHavingALovelyDay: + def __call__(self): + print("i_should_be_followed_by_only_one_newline") + +else: + + def foo(): + pass + + +with hmm_but_this_should_get_two_preceding_newlines(): + pass diff --git a/tests/data/simple_cases/function_trailing_comma.py b/tests/data/simple_cases/function_trailing_comma.py new file mode 100644 index 00000000000..429eb0e330f --- /dev/null +++ b/tests/data/simple_cases/function_trailing_comma.py @@ -0,0 +1,153 @@ +def f(a,): + d = {'key': 'value',} + tup = (1,) + +def f2(a,b,): + d = {'key': 'value', 'key2': 'value2',} + tup = (1,2,) + +def f(a:int=1,): + call(arg={'explode': 'this',}) + call2(arg=[1,2,3],) + x = { + "a": 1, + "b": 2, + }["a"] + if a == {"a": 1,"b": 2,"c": 3,"d": 4,"e": 5,"f": 6,"g": 7,"h": 8,}["a"]: + pass + +def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +]: + json = {"k": {"k2": {"k3": [1,]}}} + + + +# The type annotation shouldn't get a trailing comma since that would change its type. +# Relevant bug report: https://github.com/psf/black/issues/2381. +def some_function_with_a_really_long_name() -> ( + returning_a_deeply_nested_import_of_a_type_i_suppose +): + pass + + +def some_method_with_a_really_long_name(very_long_parameter_so_yeah: str, another_long_parameter: int) -> ( + another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not +): + pass + + +def func() -> ( + also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black(this_shouldn_t_get_a_trailing_comma_too) +): + pass + + +def func() -> ((also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black( + this_shouldn_t_get_a_trailing_comma_too + )) +): + pass + +# output + +def f( + a, +): + d = { + "key": "value", + } + tup = (1,) + + +def f2( + a, + b, +): + d = { + "key": "value", + "key2": "value2", + } + tup = ( + 1, + 2, + ) + + +def f( + a: int = 1, +): + call( + arg={ + "explode": "this", + } + ) + call2( + arg=[1, 2, 3], + ) + x = { + "a": 1, + "b": 2, + }["a"] + if ( + a + == { + "a": 1, + "b": 2, + "c": 3, + "d": 4, + "e": 5, + "f": 6, + "g": 7, + "h": 8, + }["a"] + ): + pass + + +def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +]: + json = { + "k": { + "k2": { + "k3": [ + 1, + ] + } + } + } + + +# The type annotation shouldn't get a trailing comma since that would change its type. +# Relevant bug report: https://github.com/psf/black/issues/2381. 
+def some_function_with_a_really_long_name() -> ( + returning_a_deeply_nested_import_of_a_type_i_suppose +): + pass + + +def some_method_with_a_really_long_name( + very_long_parameter_so_yeah: str, another_long_parameter: int +) -> ( + another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not +): + pass + + +def func() -> ( + also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black( + this_shouldn_t_get_a_trailing_comma_too + ) +): + pass + + +def func() -> ( + ( + also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black( + this_shouldn_t_get_a_trailing_comma_too + ) + ) +): + pass diff --git a/tests/data/import_spacing.py b/tests/data/simple_cases/import_spacing.py similarity index 100% rename from tests/data/import_spacing.py rename to tests/data/simple_cases/import_spacing.py diff --git a/tests/data/simple_cases/power_op_spacing.py b/tests/data/simple_cases/power_op_spacing.py new file mode 100644 index 00000000000..c95fa788fc3 --- /dev/null +++ b/tests/data/simple_cases/power_op_spacing.py @@ -0,0 +1,131 @@ +def function(**kwargs): + t = a**2 + b**3 + return t ** 2 + + +def function_replace_spaces(**kwargs): + t = a **2 + b** 3 + c ** 4 + + +def function_dont_replace_spaces(): + {**a, **b, **c} + + +a = 5**~4 +b = 5 ** f() +c = -(5**2) +d = 5 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5 +g = a.b**c.d +h = 5 ** funcs.f() +i = funcs.f() ** 5 +j = super().name ** 5 +k = [(2**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2**63], [1, 2**63])] +n = count <= 10**5 +o = settings(max_examples=10**6) +p = {(k, k**2): v**2 for k, v in pairs} +q = [10**i for i in range(6)] +r = x**y + +a = 5.0**~4.0 +b = 5.0 ** f() +c = -(5.0**2.0) +d = 5.0 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5.0 +g = a.b**c.d +h = 5.0 ** funcs.f() +i = funcs.f() ** 5.0 +j = super().name ** 5.0 +k = [(2.0**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2.0**63.0], [1.0, 2**63.0])] +n = count <= 10**5.0 +o = settings(max_examples=10**6.0) +p = {(k, k**2): v**2.0 for k, v in pairs} +q = [10.5**i for i in range(6)] + + +# WE SHOULD DEFINITELY NOT EAT THESE COMMENTS (https://github.com/psf/black/issues/2873) +if hasattr(view, "sum_of_weights"): + return np.divide( # type: ignore[no-any-return] + view.variance, # type: ignore[union-attr] + view.sum_of_weights, # type: ignore[union-attr] + out=np.full(view.sum_of_weights.shape, np.nan), # type: ignore[union-attr] + where=view.sum_of_weights**2 > view.sum_of_weights_squared, # type: ignore[union-attr] + ) + +return np.divide( + where=view.sum_of_weights_of_weight_long**2 > view.sum_of_weights_squared, # type: ignore +) + + +# output + + +def function(**kwargs): + t = a**2 + b**3 + return t**2 + + +def function_replace_spaces(**kwargs): + t = a**2 + b**3 + c**4 + + +def function_dont_replace_spaces(): + {**a, **b, **c} + + +a = 5**~4 +b = 5 ** f() +c = -(5**2) +d = 5 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5 +g = a.b**c.d +h = 5 ** funcs.f() +i = funcs.f() ** 5 +j = super().name ** 5 +k = [(2**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2**63], [1, 2**63])] +n = count <= 10**5 +o = settings(max_examples=10**6) +p = {(k, k**2): v**2 for k, v in pairs} +q = [10**i for i in range(6)] +r = x**y + +a = 5.0**~4.0 +b = 5.0 ** f() +c = -(5.0**2.0) +d = 5.0 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5.0 +g = 
a.b**c.d +h = 5.0 ** funcs.f() +i = funcs.f() ** 5.0 +j = super().name ** 5.0 +k = [(2.0**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2.0**63.0], [1.0, 2**63.0])] +n = count <= 10**5.0 +o = settings(max_examples=10**6.0) +p = {(k, k**2): v**2.0 for k, v in pairs} +q = [10.5**i for i in range(6)] + + +# WE SHOULD DEFINITELY NOT EAT THESE COMMENTS (https://github.com/psf/black/issues/2873) +if hasattr(view, "sum_of_weights"): + return np.divide( # type: ignore[no-any-return] + view.variance, # type: ignore[union-attr] + view.sum_of_weights, # type: ignore[union-attr] + out=np.full(view.sum_of_weights.shape, np.nan), # type: ignore[union-attr] + where=view.sum_of_weights**2 > view.sum_of_weights_squared, # type: ignore[union-attr] + ) + +return np.divide( + where=view.sum_of_weights_of_weight_long**2 > view.sum_of_weights_squared, # type: ignore +) diff --git a/tests/data/remove_parens.py b/tests/data/simple_cases/remove_parens.py similarity index 99% rename from tests/data/remove_parens.py rename to tests/data/simple_cases/remove_parens.py index afc34010c30..abd5f71fcd0 100644 --- a/tests/data/remove_parens.py +++ b/tests/data/simple_cases/remove_parens.py @@ -54,7 +54,6 @@ def example7(): def example8(): return (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((None))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) - # output x = 1 x = 1.2 @@ -141,4 +140,3 @@ def example7(): def example8(): return None - diff --git a/tests/data/slices.py b/tests/data/simple_cases/slices.py similarity index 94% rename from tests/data/slices.py rename to tests/data/simple_cases/slices.py index 7a42678f646..165117cdcb4 100644 --- a/tests/data/slices.py +++ b/tests/data/simple_cases/slices.py @@ -9,7 +9,7 @@ slice[:c, c - 1] slice[c, c + 1, d::] slice[ham[c::d] :: 1] -slice[ham[cheese ** 2 : -1] : 1 : 1, ham[1:2]] +slice[ham[cheese**2 : -1] : 1 : 1, ham[1:2]] slice[:-1:] slice[lambda: None : lambda: None] slice[lambda x, y, *args, really=2, **kwargs: None :, None::] diff --git a/tests/data/simple_cases/string_prefixes.py b/tests/data/simple_cases/string_prefixes.py new file mode 100644 index 00000000000..f86da696e15 --- /dev/null +++ b/tests/data/simple_cases/string_prefixes.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +name = "Łukasz" +(f"hello {name}", F"hello {name}") +(b"", B"") +(u"", U"") +(r"", R"") + +(rf"", fr"", Rf"", fR"", rF"", Fr"", RF"", FR"") +(rb"", br"", Rb"", bR"", rB"", Br"", RB"", BR"") + + +def docstring_singleline(): + R"""2020 was one hell of a year. The good news is that we were able to""" + + +def docstring_multiline(): + R""" + clear out all of the issues opened in that time :p + """ + + +# output + + +#!/usr/bin/env python3 + +name = "Łukasz" +(f"hello {name}", f"hello {name}") +(b"", b"") +("", "") +(r"", R"") + +(rf"", rf"", Rf"", Rf"", rf"", rf"", Rf"", Rf"") +(rb"", rb"", Rb"", Rb"", rb"", rb"", Rb"", Rb"") + + +def docstring_singleline(): + R"""2020 was one hell of a year. 
The good news is that we were able to""" + + +def docstring_multiline(): + R""" + clear out all of the issues opened in that time :p + """ diff --git a/tests/data/simple_cases/torture.py b/tests/data/simple_cases/torture.py new file mode 100644 index 00000000000..2a194759a82 --- /dev/null +++ b/tests/data/simple_cases/torture.py @@ -0,0 +1,91 @@ +importA;() << 0 ** 101234234242352525425252352352525234890264906820496920680926538059059209922523523525 # + +assert sort_by_dependency( + { + "1": {"2", "3"}, "2": {"2a", "2b"}, "3": {"3a", "3b"}, + "2a": set(), "2b": set(), "3a": set(), "3b": set() + } +) == ["2a", "2b", "2", "3a", "3b", "3", "1"] + +importA +0;0^0# + +class A: + def foo(self): + for _ in range(10): + aaaaaaaaaaaaaaaaaaa = bbbbbbbbbbbbbbb.cccccccccc( # pylint: disable=no-member + xxxxxxxxxxxx + ) + +def test(self, othr): + return (1 == 2 and + (name, description, self.default, self.selected, self.auto_generated, self.parameters, self.meta_data, self.schedule) == + (name, description, othr.default, othr.selected, othr.auto_generated, othr.parameters, othr.meta_data, othr.schedule)) + + +assert ( + a_function(very_long_arguments_that_surpass_the_limit, which_is_eighty_eight_in_this_case_plus_a_bit_more) + == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"} +) + +# output + +importA +( + () + << 0 + ** 101234234242352525425252352352525234890264906820496920680926538059059209922523523525 +) # + +assert sort_by_dependency( + { + "1": {"2", "3"}, + "2": {"2a", "2b"}, + "3": {"3a", "3b"}, + "2a": set(), + "2b": set(), + "3a": set(), + "3b": set(), + } +) == ["2a", "2b", "2", "3a", "3b", "3", "1"] + +importA +0 +0 ^ 0 # + + +class A: + def foo(self): + for _ in range(10): + aaaaaaaaaaaaaaaaaaa = bbbbbbbbbbbbbbb.cccccccccc( + xxxxxxxxxxxx + ) # pylint: disable=no-member + + +def test(self, othr): + return 1 == 2 and ( + name, + description, + self.default, + self.selected, + self.auto_generated, + self.parameters, + self.meta_data, + self.schedule, + ) == ( + name, + description, + othr.default, + othr.selected, + othr.auto_generated, + othr.parameters, + othr.meta_data, + othr.schedule, + ) + + +assert a_function( + very_long_arguments_that_surpass_the_limit, + which_is_eighty_eight_in_this_case_plus_a_bit_more, +) == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"} + diff --git a/tests/data/simple_cases/trailing_comma_optional_parens1.py b/tests/data/simple_cases/trailing_comma_optional_parens1.py new file mode 100644 index 00000000000..85aa8badb26 --- /dev/null +++ b/tests/data/simple_cases/trailing_comma_optional_parens1.py @@ -0,0 +1,63 @@ +if e1234123412341234.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + pass + +if x: + if y: + new_id = max(Vegetable.objects.order_by('-id')[0].id, + Mineral.objects.order_by('-id')[0].id) + 1 + +class X: + def get_help_text(self): + return ngettext( + "Your password must contain at least %(min_length)d character.", + "Your password must contain at least %(min_length)d characters.", + self.min_length, + ) % {'min_length': self.min_length} + +class A: + def b(self): + if self.connection.mysql_is_mariadb and ( + 10, + 4, + 3, + ) < self.connection.mysql_version < (10, 5, 2): + pass + + +# output + +if e1234123412341234.winerror not in ( + _winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY, +) or _check_timeout(t): + pass + +if x: + if y: + new_id = ( + max( + Vegetable.objects.order_by("-id")[0].id, + 
Mineral.objects.order_by("-id")[0].id, + ) + + 1 + ) + + +class X: + def get_help_text(self): + return ngettext( + "Your password must contain at least %(min_length)d character.", + "Your password must contain at least %(min_length)d characters.", + self.min_length, + ) % {"min_length": self.min_length} + + +class A: + def b(self): + if self.connection.mysql_is_mariadb and ( + 10, + 4, + 3, + ) < self.connection.mysql_version < (10, 5, 2): + pass diff --git a/tests/data/simple_cases/trailing_comma_optional_parens2.py b/tests/data/simple_cases/trailing_comma_optional_parens2.py new file mode 100644 index 00000000000..9541670e394 --- /dev/null +++ b/tests/data/simple_cases/trailing_comma_optional_parens2.py @@ -0,0 +1,12 @@ +if (e123456.get_tk_patchlevel() >= (8, 6, 0, 'final') or + (8, 5, 8) <= get_tk_patchlevel() < (8, 6)): + pass + +# output + +if e123456.get_tk_patchlevel() >= (8, 6, 0, "final") or ( + 8, + 5, + 8, +) <= get_tk_patchlevel() < (8, 6): + pass diff --git a/tests/data/simple_cases/trailing_comma_optional_parens3.py b/tests/data/simple_cases/trailing_comma_optional_parens3.py new file mode 100644 index 00000000000..c0ed699e6a6 --- /dev/null +++ b/tests/data/simple_cases/trailing_comma_optional_parens3.py @@ -0,0 +1,21 @@ +if True: + if True: + if True: + return _( + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweas " + + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwegqweasdzxcqweasdzxc.", + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwe", + ) % {"reported_username": reported_username, "report_reason": report_reason} + + +# output + + +if True: + if True: + if True: + return _( + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweas " + + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwegqweasdzxcqweasdzxc.", + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwe", + ) % {"reported_username": reported_username, "report_reason": report_reason} diff --git a/tests/data/tricky_unicode_symbols.py b/tests/data/simple_cases/tricky_unicode_symbols.py similarity index 76% rename from tests/data/tricky_unicode_symbols.py rename to tests/data/simple_cases/tricky_unicode_symbols.py index 366a92fa9d4..ad8b6108590 100644 --- a/tests/data/tricky_unicode_symbols.py +++ b/tests/data/simple_cases/tricky_unicode_symbols.py @@ -4,3 +4,6 @@ x󠄀 = 4 មុ = 1 Q̇_per_meter = 4 + +A᧚ = 3 +A፩ = 8 diff --git a/tests/data/tupleassign.py b/tests/data/simple_cases/tupleassign.py similarity index 100% rename from tests/data/tupleassign.py rename to tests/data/simple_cases/tupleassign.py diff --git a/tests/data/string_prefixes.py b/tests/data/string_prefixes.py deleted file mode 100644 index 0ca3686a2b6..00000000000 --- a/tests/data/string_prefixes.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python3.6 - -name = R"Łukasz" -F"hello {name}" -B"hello" -r"hello" -fR"hello" - -# output - - -#!/usr/bin/env python3.6 - -name = R"Łukasz" -f"hello {name}" -b"hello" -r"hello" -fR"hello" diff --git a/tests/data/stub.pyi b/tests/data/stub.pyi deleted file mode 100644 index 94ba852e018..00000000000 --- a/tests/data/stub.pyi +++ /dev/null @@ -1,35 +0,0 @@ -X: int - -def f(): ... - -class C: - ... - -class B: - ... - -class A: - def f(self) -> int: - ... - - def g(self) -> str: ... - -def g(): - ... - -def h(): ... - -# output -X: int - -def f(): ... - -class C: ... -class B: ... - -class A: - def f(self) -> int: ... 
-    def g(self) -> str: ...
-
-def g(): ...
-def h(): ...
diff --git a/tests/optional.py b/tests/optional.py
new file mode 100644
index 00000000000..8a39cc440a6
--- /dev/null
+++ b/tests/optional.py
@@ -0,0 +1,124 @@
+"""
+Allows configuring optional test markers in config, see pyproject.toml.
+
+Run optional tests with `pytest --run-optional=...`.
+
+Mark tests to run only if an optional test ISN'T selected by prepending the mark with
+"no_".
+
+You can specify a "no_" prefix straight in config, in which case you can mark tests
+to run when this test ISN'T selected by omitting the "no_" prefix.
+
+Specifying the name of the default behavior in `--run-optional=` is harmless.
+
+Adapted from https://pypi.org/project/pytest-optional-tests/, (c) 2019 Reece Hart
+"""
+
+import itertools
+import logging
+import re
+from functools import lru_cache
+from typing import TYPE_CHECKING, FrozenSet, List, Set
+
+import pytest
+
+try:
+    from pytest import StashKey
+except ImportError:
+    # pytest < 7
+    from _pytest.store import StoreKey as StashKey  # type: ignore[no-redef]
+
+log = logging.getLogger(__name__)
+
+
+if TYPE_CHECKING:
+    from _pytest.config import Config
+    from _pytest.config.argparsing import Parser
+    from _pytest.mark.structures import MarkDecorator
+    from _pytest.nodes import Node
+
+
+ALL_POSSIBLE_OPTIONAL_MARKERS = StashKey[FrozenSet[str]]()
+ENABLED_OPTIONAL_MARKERS = StashKey[FrozenSet[str]]()
+
+
+def pytest_addoption(parser: "Parser") -> None:
+    group = parser.getgroup("collect")
+    group.addoption(
+        "--run-optional",
+        action="append",
+        dest="run_optional",
+        default=None,
+        help="Optional test markers to run; comma-separated",
+    )
+    parser.addini("optional-tests", "List of optional test markers", "linelist")
+
+
+def pytest_configure(config: "Config") -> None:
+    """Optional tests are markers.
+
+    Use the syntax in https://docs.pytest.org/en/stable/mark.html#registering-marks.
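+
+    As an illustration (a hypothetical entry, not part of this patch), a
+    pyproject.toml section such as:
+
+        [tool.pytest.ini_options]
+        optional-tests = [
+            "blackd: requires blackd dependencies",
+        ]
+
+    declares an opt-in marker: tests marked `blackd` are collected only when
+    `pytest --run-optional=blackd` is passed, while tests marked `no_blackd`
+    run only when it isn't.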
+ """ + ot_ini = config.inicfg.get("optional-tests") or [] + ot_markers = set() + ot_run: Set[str] = set() + if isinstance(ot_ini, str): + ot_ini = ot_ini.strip().split("\n") + marker_re = re.compile(r"^\s*(?Pno_)?(?P\w+)(:\s*(?P.*))?") + for ot in ot_ini: + m = marker_re.match(ot) + if not m: + raise ValueError(f"{ot!r} doesn't match pytest marker syntax") + + marker = (m.group("no") or "") + m.group("marker") + description = m.group("description") + config.addinivalue_line("markers", f"{marker}: {description}") + config.addinivalue_line( + "markers", f"{no(marker)}: run when `{marker}` not passed" + ) + ot_markers.add(marker) + + # collect requested optional tests + passed_args = config.getoption("run_optional") + if passed_args: + ot_run.update(itertools.chain.from_iterable(a.split(",") for a in passed_args)) + ot_run |= {no(excluded) for excluded in ot_markers - ot_run} + ot_markers |= {no(m) for m in ot_markers} + + log.info("optional tests to run:", ot_run) + unknown_tests = ot_run - ot_markers + if unknown_tests: + raise ValueError(f"Unknown optional tests wanted: {unknown_tests!r}") + + store = config._store + store[ALL_POSSIBLE_OPTIONAL_MARKERS] = frozenset(ot_markers) + store[ENABLED_OPTIONAL_MARKERS] = frozenset(ot_run) + + +def pytest_collection_modifyitems(config: "Config", items: "List[Node]") -> None: + store = config._store + all_possible_optional_markers = store[ALL_POSSIBLE_OPTIONAL_MARKERS] + enabled_optional_markers = store[ENABLED_OPTIONAL_MARKERS] + + for item in items: + all_markers_on_test = {m.name for m in item.iter_markers()} + optional_markers_on_test = all_markers_on_test & all_possible_optional_markers + if not optional_markers_on_test or ( + optional_markers_on_test & enabled_optional_markers + ): + continue + log.info("skipping non-requested optional", item) + item.add_marker(skip_mark(frozenset(optional_markers_on_test))) + + +@lru_cache() +def skip_mark(tests: FrozenSet[str]) -> "MarkDecorator": + names = ", ".join(sorted(tests)) + return pytest.mark.skip(reason=f"Marked with disabled optional tests ({names})") + + +@lru_cache() +def no(name: str) -> str: + if name.startswith("no_"): + return name[len("no_") :] + return "no_" + name diff --git a/tests/test.toml b/tests/test.toml index 405c00ce2c3..e5fb9228f19 100644 --- a/tests/test.toml +++ b/tests/test.toml @@ -7,4 +7,11 @@ line-length = 79 target-version = ["py36", "py37", "py38"] exclude='\.pyi?$' include='\.py?$' +python-cell-magics = ["custom1", "custom2"] +[v1.0.0-syntax] +# This shouldn't break Black. 
+contributors = [
+  "Foo Bar <foo@example.com>",
+  { name = "Baz Qux", email = "bazqux@example.com", url = "https://example.com/bazqux" }
+]
diff --git a/tests/test_black.py b/tests/test_black.py
index f5d4e1115a8..7f85fcdc409 100644
--- a/tests/test_black.py
+++ b/tests/test_black.py
@@ -1,94 +1,79 @@
 #!/usr/bin/env python3
+
 import asyncio
+import inspect
+import io
 import logging
+import multiprocessing
+import os
+import re
+import sys
+import types
+import unittest
 from concurrent.futures import ThreadPoolExecutor
-from contextlib import contextmanager
+from contextlib import contextmanager, redirect_stderr
 from dataclasses import replace
-from functools import partial
-import inspect
-from io import BytesIO, TextIOWrapper
-import os
 from io import BytesIO
 from pathlib import Path
-import regex as re
-import sys
+from platform import system
 from tempfile import TemporaryDirectory
-import types
 from typing import (
     Any,
-    BinaryIO,
     Callable,
     Dict,
-    Generator,
-    List,
-    Tuple,
     Iterator,
+    List,
+    Optional,
+    Sequence,
     TypeVar,
+    Union,
 )
-import unittest
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
 
 import click
+import pytest
 from click import unstyle
 from click.testing import CliRunner
+from pathspec import PathSpec
 
 import black
+import black.files
 from black import Feature, TargetVersion
-
-try:
-    import blackd
-    from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
-    from aiohttp import web
-except ImportError:
-    has_blackd_deps = False
-else:
-    has_blackd_deps = True
-
-from pathspec import PathSpec
+from black import re_compile_maybe_verbose as compile_pattern
+from black.cache import get_cache_dir, get_cache_file
+from black.debug import DebugVisitor
+from black.output import color_diff, diff
+from black.report import Report
 
 # Import other test classes
-from .test_primer import PrimerCLITests  # noqa: F401
-
+from tests.util import (
+    DATA_DIR,
+    DEFAULT_MODE,
+    DETERMINISTIC_HEADER,
+    PROJECT_ROOT,
+    PY36_VERSIONS,
+    THIS_DIR,
+    BlackBaseTestCase,
+    assert_format,
+    change_directory,
+    dump_to_stderr,
+    ff,
+    fs,
+    get_case_path,
+    read_data,
+    read_data_from_file,
+)
 
-DEFAULT_MODE = black.FileMode(experimental_string_processing=True)
-ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True)
-fs = partial(black.format_str, mode=DEFAULT_MODE)
 THIS_FILE = Path(__file__)
-THIS_DIR = THIS_FILE.parent
-PROJECT_ROOT = THIS_DIR.parent
-DETERMINISTIC_HEADER = "[Deterministic header]"
-EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)"
-PY36_ARGS = [
-    f"--target-version={version.name.lower()}" for version in black.PY36_VERSIONS
-]
+EMPTY_CONFIG = THIS_DIR / "data" / "empty_pyproject.toml"
+PY36_ARGS = [f"--target-version={version.name.lower()}" for version in PY36_VERSIONS]
+DEFAULT_EXCLUDE = black.re_compile_maybe_verbose(black.const.DEFAULT_EXCLUDES)
+DEFAULT_INCLUDE = black.re_compile_maybe_verbose(black.const.DEFAULT_INCLUDES)
 T = TypeVar("T")
 R = TypeVar("R")
-
-def dump_to_stderr(*output: str) -> str:
-    return "\n" + "\n".join(output) + "\n"
-
-
-def read_data(name: str, data: bool = True) -> Tuple[str, str]:
-    """read_data('test_name') -> 'input', 'output'"""
-    if not name.endswith((".py", ".pyi", ".out", ".diff")):
-        name += ".py"
-    _input: List[str] = []
-    _output: List[str] = []
-    base_dir = THIS_DIR / "data" if data else PROJECT_ROOT
-    with open(base_dir / name, "r", encoding="utf8") as test:
-        lines = test.readlines()
-    result = _input
-    for line in lines:
-        line = line.replace(EMPTY_LINE, "")
- if line.rstrip() == "# output": - result = _output - continue - - result.append(line) - if _input and not _output: - # If there's no output marker, treat the entire file as already pre-formatted. - _output = _input[:] - return "".join(_input).strip() + "\n", "".join(_output).strip() + "\n" +# Match the time output in a diff, but nothing else +DIFF_TIME = re.compile(r"\t[\d\-:+\. ]+") @contextmanager @@ -97,7 +82,7 @@ def cache_dir(exists: bool = True) -> Iterator[Path]: cache_dir = Path(workspace) if not exists: cache_dir = cache_dir / "new" - with patch("black.CACHE_DIR", cache_dir): + with patch("black.cache.CACHE_DIR", cache_dir): yield cache_dir @@ -113,22 +98,13 @@ def event_loop() -> Iterator[None]: loop.close() -@contextmanager -def skip_if_exception(e: str) -> Iterator[None]: - try: - yield - except Exception as exc: - if exc.__class__.__name__ == e: - unittest.skip(f"Encountered expected exception {exc}, skipping") - else: - raise - - class FakeContext(click.Context): """A fake click Context for when calling functions that need it.""" def __init__(self) -> None: self.default_map: Dict[str, Any] = {} + # Dummy root, since most of the tests don't care about it + self.obj: Dict[str, Any] = {"root": PROJECT_ROOT} class FakeParameter(click.Parameter): @@ -139,88 +115,32 @@ def __init__(self) -> None: class BlackRunner(CliRunner): - """Modify CliRunner so that stderr is not merged with stdout. - - This is a hack that can be removed once we depend on Click 7.x""" + """Make sure STDOUT and STDERR are kept separate when testing Black via its CLI.""" def __init__(self) -> None: - self.stderrbuf = BytesIO() - self.stdoutbuf = BytesIO() - self.stdout_bytes = b"" - self.stderr_bytes = b"" - super().__init__() - - @contextmanager - def isolation(self, *args: Any, **kwargs: Any) -> Generator[BinaryIO, None, None]: - with super().isolation(*args, **kwargs) as output: - try: - hold_stderr = sys.stderr - sys.stderr = TextIOWrapper(self.stderrbuf, encoding=self.charset) - yield output - finally: - self.stdout_bytes = sys.stdout.buffer.getvalue() # type: ignore - self.stderr_bytes = sys.stderr.buffer.getvalue() # type: ignore - sys.stderr = hold_stderr - - -class BlackTestCase(unittest.TestCase): - maxDiff = None - _diffThreshold = 2 ** 20 - - def assertFormatEqual(self, expected: str, actual: str) -> None: - if actual != expected and not os.environ.get("SKIP_AST_PRINT"): - bdv: black.DebugVisitor[Any] - black.out("Expected tree:", fg="green") - try: - exp_node = black.lib2to3_parse(expected) - bdv = black.DebugVisitor() - list(bdv.visit(exp_node)) - except Exception as ve: - black.err(str(ve)) - black.out("Actual tree:", fg="red") - try: - exp_node = black.lib2to3_parse(actual) - bdv = black.DebugVisitor() - list(bdv.visit(exp_node)) - except Exception as ve: - black.err(str(ve)) - self.assertMultiLineEqual(expected, actual) - - def invokeBlack( - self, args: List[str], exit_code: int = 0, ignore_config: bool = True - ) -> None: - runner = BlackRunner() - if ignore_config: - args = ["--verbose", "--config", str(THIS_DIR / "empty.toml"), *args] - result = runner.invoke(black.main, args) - self.assertEqual( - result.exit_code, - exit_code, - msg=( - f"Failed with args: {args}\n" - f"stdout: {runner.stdout_bytes.decode()!r}\n" - f"stderr: {runner.stderr_bytes.decode()!r}\n" - f"exception: {result.exception}" - ), - ) - - @patch("black.dump_to_file", dump_to_stderr) - def checkSourceFile(self, name: str, mode: black.FileMode = DEFAULT_MODE) -> None: - path = THIS_DIR.parent / name - source, expected = 
read_data(str(path), data=False) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) - self.assertFalse(ff(path)) - - @patch("black.dump_to_file", dump_to_stderr) - def test_empty(self) -> None: - source = expected = "" - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + super().__init__(mix_stderr=False) + + +def invokeBlack( + args: List[str], exit_code: int = 0, ignore_config: bool = True +) -> None: + runner = BlackRunner() + if ignore_config: + args = ["--verbose", "--config", str(THIS_DIR / "empty.toml"), *args] + result = runner.invoke(black.main, args, catch_exceptions=False) + assert result.stdout_bytes is not None + assert result.stderr_bytes is not None + msg = ( + f"Failed with args: {args}\n" + f"stdout: {result.stdout_bytes.decode()!r}\n" + f"stderr: {result.stderr_bytes.decode()!r}\n" + f"exception: {result.exception}" + ) + assert result.exit_code == exit_code, msg + + +class BlackTestCase(BlackBaseTestCase): + invokeBlack = staticmethod(invokeBlack) def test_empty_ff(self) -> None: expected = "" @@ -233,71 +153,42 @@ def test_empty_ff(self) -> None: os.unlink(tmp_file) self.assertFormatEqual(expected, actual) - def test_self(self) -> None: - self.checkSourceFile("tests/test_black.py") - - def test_black(self) -> None: - self.checkSourceFile("src/black/__init__.py") - - def test_pygram(self) -> None: - self.checkSourceFile("src/blib2to3/pygram.py") - - def test_pytree(self) -> None: - self.checkSourceFile("src/blib2to3/pytree.py") - - def test_conv(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/conv.py") - - def test_driver(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/driver.py") - - def test_grammar(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/grammar.py") - - def test_literals(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/literals.py") - - def test_parse(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/parse.py") - - def test_pgen(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/pgen.py") - - def test_tokenize(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/tokenize.py") - - def test_token(self) -> None: - self.checkSourceFile("src/blib2to3/pgen2/token.py") - - def test_setup(self) -> None: - self.checkSourceFile("setup.py") + def test_experimental_string_processing_warns(self) -> None: + self.assertWarns( + black.mode.Deprecated, black.Mode, experimental_string_processing=True + ) def test_piping(self) -> None: - source, expected = read_data("src/black/__init__", data=False) + source, expected = read_data_from_file(PROJECT_ROOT / "src/black/__init__.py") result = BlackRunner().invoke( black.main, - ["-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}"], + [ + "-", + "--fast", + f"--line-length={black.DEFAULT_LINE_LENGTH}", + f"--config={EMPTY_CONFIG}", + ], input=BytesIO(source.encode("utf8")), ) self.assertEqual(result.exit_code, 0) self.assertFormatEqual(expected, result.output) - black.assert_equivalent(source, result.output) - black.assert_stable(source, result.output, DEFAULT_MODE) + if source != result.output: + black.assert_equivalent(source, result.output) + black.assert_stable(source, result.output, DEFAULT_MODE) def test_piping_diff(self) -> None: diff_header = re.compile( r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d " r"\+\d\d\d\d" ) - 
source, _ = read_data("expression.py") - expected, _ = read_data("expression.diff") - config = THIS_DIR / "data" / "empty_pyproject.toml" + source, _ = read_data("simple_cases", "expression.py") + expected, _ = read_data("simple_cases", "expression.diff") args = [ "-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}", "--diff", - f"--config={config}", + f"--config={EMPTY_CONFIG}", ] result = BlackRunner().invoke( black.main, args, input=BytesIO(source.encode("utf8")) @@ -308,46 +199,29 @@ def test_piping_diff(self) -> None: self.assertEqual(expected, actual) def test_piping_diff_with_color(self) -> None: - source, _ = read_data("expression.py") - config = THIS_DIR / "data" / "empty_pyproject.toml" + source, _ = read_data("simple_cases", "expression.py") args = [ "-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}", "--diff", "--color", - f"--config={config}", + f"--config={EMPTY_CONFIG}", ] result = BlackRunner().invoke( black.main, args, input=BytesIO(source.encode("utf8")) ) actual = result.output # Again, the contents are checked in a different test, so only look for colors. - self.assertIn("\033[1;37m", actual) + self.assertIn("\033[1m", actual) self.assertIn("\033[36m", actual) self.assertIn("\033[32m", actual) self.assertIn("\033[31m", actual) self.assertIn("\033[0m", actual) - @patch("black.dump_to_file", dump_to_stderr) - def test_function(self) -> None: - source, expected = read_data("function") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_function2(self) -> None: - source, expected = read_data("function2") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - @patch("black.dump_to_file", dump_to_stderr) def _test_wip(self) -> None: - source, expected = read_data("wip") + source, expected = read_data("miscellaneous", "wip") sys.settrace(tracefunc) mode = replace( DEFAULT_MODE, @@ -358,56 +232,10 @@ def _test_wip(self) -> None: sys.settrace(None) self.assertFormatEqual(expected, actual) black.assert_equivalent(source, actual) - black.assert_stable(source, actual, black.FileMode()) - - @patch("black.dump_to_file", dump_to_stderr) - def test_function_trailing_comma(self) -> None: - source, expected = read_data("function_trailing_comma") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @unittest.expectedFailure - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability1(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens1") - actual = fs(source) - black.assert_stable(source, actual, DEFAULT_MODE) - - @unittest.expectedFailure - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability2(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens2") - actual = fs(source) - black.assert_stable(source, actual, DEFAULT_MODE) - - @unittest.expectedFailure - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability3(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens3") - actual = fs(source) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def 
test_expression(self) -> None: - source, expected = read_data("expression") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) black.assert_stable(source, actual, DEFAULT_MODE) - @patch("black.dump_to_file", dump_to_stderr) - def test_pep_572(self) -> None: - source, expected = read_data("pep_572") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - if sys.version_info >= (3, 8): - black.assert_equivalent(source, actual) - def test_pep_572_version_detection(self) -> None: - source, _ = read_data("pep_572") + source, _ = read_data("py_38", "pep_572") root = black.lib2to3_parse(source) features = black.get_features_used(root) self.assertIn(black.Feature.ASSIGNMENT_EXPRESSIONS, features) @@ -415,7 +243,7 @@ def test_pep_572_version_detection(self) -> None: self.assertIn(black.TargetVersion.PY38, versions) def test_expression_ff(self) -> None: - source, expected = read_data("expression") + source, expected = read_data("simple_cases", "expression.py") tmp_file = Path(black.dump_to_file(source)) try: self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES)) @@ -429,21 +257,22 @@ def test_expression_ff(self) -> None: black.assert_stable(source, actual, DEFAULT_MODE) def test_expression_diff(self) -> None: - source, _ = read_data("expression.py") - expected, _ = read_data("expression.diff") + source, _ = read_data("simple_cases", "expression.py") + expected, _ = read_data("simple_cases", "expression.diff") tmp_file = Path(black.dump_to_file(source)) diff_header = re.compile( rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" ) try: - result = BlackRunner().invoke(black.main, ["--diff", str(tmp_file)]) + result = BlackRunner().invoke( + black.main, ["--diff", str(tmp_file), f"--config={EMPTY_CONFIG}"] + ) self.assertEqual(result.exit_code, 0) finally: os.unlink(tmp_file) actual = result.output actual = diff_header.sub(DETERMINISTIC_HEADER, actual) - actual = actual.rstrip() + "\n" # the diff output has a trailing space if expected != actual: dump = black.dump_to_file(actual) msg = ( @@ -454,451 +283,157 @@ def test_expression_diff(self) -> None: self.assertEqual(expected, actual, msg) def test_expression_diff_with_color(self) -> None: - source, _ = read_data("expression.py") - expected, _ = read_data("expression.diff") + source, _ = read_data("simple_cases", "expression.py") + expected, _ = read_data("simple_cases", "expression.diff") tmp_file = Path(black.dump_to_file(source)) try: result = BlackRunner().invoke( - black.main, ["--diff", "--color", str(tmp_file)] + black.main, + ["--diff", "--color", str(tmp_file), f"--config={EMPTY_CONFIG}"], ) finally: os.unlink(tmp_file) actual = result.output # We check the contents of the diff in `test_expression_diff`. All # we need to check here is that color codes exist in the result. 
- self.assertIn("\033[1;37m", actual) + self.assertIn("\033[1m", actual) self.assertIn("\033[36m", actual) self.assertIn("\033[32m", actual) self.assertIn("\033[31m", actual) self.assertIn("\033[0m", actual) - @patch("black.dump_to_file", dump_to_stderr) - def test_fstring(self) -> None: - source, expected = read_data("fstring") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_pep_570(self) -> None: - source, expected = read_data("pep_570") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - if sys.version_info >= (3, 8): - black.assert_equivalent(source, actual) - def test_detect_pos_only_arguments(self) -> None: - source, _ = read_data("pep_570") + source, _ = read_data("py_38", "pep_570") root = black.lib2to3_parse(source) features = black.get_features_used(root) self.assertIn(black.Feature.POS_ONLY_ARGUMENTS, features) versions = black.detect_target_versions(root) self.assertIn(black.TargetVersion.PY38, versions) + def test_detect_debug_f_strings(self) -> None: + root = black.lib2to3_parse("""f"{x=}" """) + features = black.get_features_used(root) + self.assertIn(black.Feature.DEBUG_F_STRINGS, features) + versions = black.detect_target_versions(root) + self.assertIn(black.TargetVersion.PY38, versions) + + root = black.lib2to3_parse( + """f"{x}"\nf'{"="}'\nf'{(x:=5)}'\nf'{f(a="3=")}'\nf'{x:=10}'\n""" + ) + features = black.get_features_used(root) + self.assertNotIn(black.Feature.DEBUG_F_STRINGS, features) + + # We don't yet support feature version detection in nested f-strings + root = black.lib2to3_parse( + """f"heard a rumour that { f'{1+1=}' } ... 
seems like it could be true" """ + ) + features = black.get_features_used(root) + self.assertNotIn(black.Feature.DEBUG_F_STRINGS, features) + @patch("black.dump_to_file", dump_to_stderr) def test_string_quotes(self) -> None: - source, expected = read_data("string_quotes") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - mode = replace(DEFAULT_MODE, string_normalization=False) + source, expected = read_data("miscellaneous", "string_quotes") + mode = black.Mode(preview=True) + assert_format(source, expected, mode) + mode = replace(mode, string_normalization=False) not_normalized = fs(source, mode=mode) self.assertFormatEqual(source.replace("\\\n", ""), not_normalized) black.assert_equivalent(source, not_normalized) black.assert_stable(source, not_normalized, mode=mode) - @patch("black.dump_to_file", dump_to_stderr) - def test_docstring(self) -> None: - source, expected = read_data("docstring") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - mode = replace(DEFAULT_MODE, string_normalization=False) - not_normalized = fs(source, mode=mode) - self.assertFormatEqual(expected, not_normalized) - black.assert_equivalent(source, not_normalized) - black.assert_stable(source, not_normalized, mode=mode) - - def test_long_strings(self) -> None: - """Tests for splitting long strings.""" - source, expected = read_data("long_strings") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - def test_long_strings_flag_disabled(self) -> None: - """Tests for turning off the string processing logic.""" - source, expected = read_data("long_strings_flag_disabled") - mode = replace(DEFAULT_MODE, experimental_string_processing=False) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_stable(expected, actual, mode) - - @patch("black.dump_to_file", dump_to_stderr) - def test_long_strings__edge_case(self) -> None: - """Edge-case tests for splitting long strings.""" - source, expected = read_data("long_strings__edge_case") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + def test_skip_magic_trailing_comma(self) -> None: + source, _ = read_data("simple_cases", "expression") + expected, _ = read_data( + "miscellaneous", "expression_skip_magic_trailing_comma.diff" + ) + tmp_file = Path(black.dump_to_file(source)) + diff_header = re.compile( + rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " + r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" + ) + try: + result = BlackRunner().invoke( + black.main, ["-C", "--diff", str(tmp_file), f"--config={EMPTY_CONFIG}"] + ) + self.assertEqual(result.exit_code, 0) + finally: + os.unlink(tmp_file) + actual = result.output + actual = diff_header.sub(DETERMINISTIC_HEADER, actual) + actual = actual.rstrip() + "\n" # the diff output has a trailing space + if expected != actual: + dump = black.dump_to_file(actual) + msg = ( + "Expected diff isn't equal to the actual. 
If you made changes to" + " expression.py and this is an anticipated difference, overwrite" + f" tests/data/expression_skip_magic_trailing_comma.diff with {dump}" + ) + self.assertEqual(expected, actual, msg) @patch("black.dump_to_file", dump_to_stderr) - def test_long_strings__regression(self) -> None: - """Regression tests for splitting long strings.""" - source, expected = read_data("long_strings__regression") + def test_async_as_identifier(self) -> None: + source_path = get_case_path("miscellaneous", "async_as_identifier") + source, expected = read_data_from_file(source_path) actual = fs(source) self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) + major, minor = sys.version_info[:2] + if major < 3 or (major <= 3 and minor < 7): + black.assert_equivalent(source, actual) black.assert_stable(source, actual, DEFAULT_MODE) + # ensure black can parse this when the target is 3.6 + self.invokeBlack([str(source_path), "--target-version", "py36"]) + # but not on 3.7, because async/await is no longer an identifier + self.invokeBlack([str(source_path), "--target-version", "py37"], exit_code=123) @patch("black.dump_to_file", dump_to_stderr) - def test_slices(self) -> None: - source, expected = read_data("slices") + def test_python37(self) -> None: + source_path = get_case_path("py_37", "python37") + source, expected = read_data_from_file(source_path) actual = fs(source) self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) + major, minor = sys.version_info[:2] + if major > 3 or (major == 3 and minor >= 7): + black.assert_equivalent(source, actual) black.assert_stable(source, actual, DEFAULT_MODE) + # ensure black can parse this when the target is 3.7 + self.invokeBlack([str(source_path), "--target-version", "py37"]) + # but not on 3.6, because we use async as a reserved keyword + self.invokeBlack([str(source_path), "--target-version", "py36"], exit_code=123) - @patch("black.dump_to_file", dump_to_stderr) - def test_percent_precedence(self) -> None: - source, expected = read_data("percent_precedence") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + def test_tab_comment_indentation(self) -> None: + contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n" + contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" + self.assertFormatEqual(contents_spc, fs(contents_spc)) + self.assertFormatEqual(contents_spc, fs(contents_tab)) - @patch("black.dump_to_file", dump_to_stderr) - def test_comments(self) -> None: - source, expected = read_data("comments") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t\t# comment\n\tpass\n" + contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" + self.assertFormatEqual(contents_spc, fs(contents_spc)) + self.assertFormatEqual(contents_spc, fs(contents_tab)) - @patch("black.dump_to_file", dump_to_stderr) - def test_comments2(self) -> None: - source, expected = read_data("comments2") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + # mixed tabs and spaces (valid Python 2 code) + contents_tab = "if 1:\n if 2:\n\t\tpass\n\t# comment\n pass\n" + contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" + 
self.assertFormatEqual(contents_spc, fs(contents_spc)) + self.assertFormatEqual(contents_spc, fs(contents_tab)) - @patch("black.dump_to_file", dump_to_stderr) - def test_comments3(self) -> None: - source, expected = read_data("comments3") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + contents_tab = "if 1:\n if 2:\n\t\tpass\n\t\t# comment\n pass\n" + contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" + self.assertFormatEqual(contents_spc, fs(contents_spc)) + self.assertFormatEqual(contents_spc, fs(contents_tab)) - @patch("black.dump_to_file", dump_to_stderr) - def test_comments4(self) -> None: - source, expected = read_data("comments4") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + def test_report_verbose(self) -> None: + report = Report(verbose=True) + out_lines = [] + err_lines = [] - @patch("black.dump_to_file", dump_to_stderr) - def test_comments5(self) -> None: - source, expected = read_data("comments5") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + def out(msg: str, **kwargs: Any) -> None: + out_lines.append(msg) - @patch("black.dump_to_file", dump_to_stderr) - def test_comments6(self) -> None: - source, expected = read_data("comments6") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + def err(msg: str, **kwargs: Any) -> None: + err_lines.append(msg) - @patch("black.dump_to_file", dump_to_stderr) - def test_comments7(self) -> None: - source, expected = read_data("comments7") - mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY38}) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_comment_after_escaped_newline(self) -> None: - source, expected = read_data("comment_after_escaped_newline") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_cantfit(self) -> None: - source, expected = read_data("cantfit") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_import_spacing(self) -> None: - source, expected = read_data("import_spacing") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_composition(self) -> None: - source, expected = read_data("composition") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_composition_no_trailing_comma(self) -> None: - source, expected = read_data("composition_no_trailing_comma") - mode = replace(DEFAULT_MODE, 
target_versions={black.TargetVersion.PY38}) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_empty_lines(self) -> None: - source, expected = read_data("empty_lines") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_remove_parens(self) -> None: - source, expected = read_data("remove_parens") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_string_prefixes(self) -> None: - source, expected = read_data("string_prefixes") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_numeric_literals(self) -> None: - source, expected = read_data("numeric_literals") - mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) - - @patch("black.dump_to_file", dump_to_stderr) - def test_numeric_literals_ignoring_underscores(self) -> None: - source, expected = read_data("numeric_literals_skip_underscores") - mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) - - @patch("black.dump_to_file", dump_to_stderr) - def test_numeric_literals_py2(self) -> None: - source, expected = read_data("numeric_literals_py2") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_python2(self) -> None: - source, expected = read_data("python2") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_python2_print_function(self) -> None: - source, expected = read_data("python2_print_function") - mode = replace(DEFAULT_MODE, target_versions={TargetVersion.PY27}) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) - - @patch("black.dump_to_file", dump_to_stderr) - def test_python2_unicode_literals(self) -> None: - source, expected = read_data("python2_unicode_literals") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_stub(self) -> None: - mode = replace(DEFAULT_MODE, is_pyi=True) - source, expected = read_data("stub.pyi") - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, mode) - - @patch("black.dump_to_file", dump_to_stderr) - def test_async_as_identifier(self) -> None: - source_path = (THIS_DIR / 
"data" / "async_as_identifier.py").resolve() - source, expected = read_data("async_as_identifier") - actual = fs(source) - self.assertFormatEqual(expected, actual) - major, minor = sys.version_info[:2] - if major < 3 or (major <= 3 and minor < 7): - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - # ensure black can parse this when the target is 3.6 - self.invokeBlack([str(source_path), "--target-version", "py36"]) - # but not on 3.7, because async/await is no longer an identifier - self.invokeBlack([str(source_path), "--target-version", "py37"], exit_code=123) - - @patch("black.dump_to_file", dump_to_stderr) - def test_python37(self) -> None: - source_path = (THIS_DIR / "data" / "python37.py").resolve() - source, expected = read_data("python37") - actual = fs(source) - self.assertFormatEqual(expected, actual) - major, minor = sys.version_info[:2] - if major > 3 or (major == 3 and minor >= 7): - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - # ensure black can parse this when the target is 3.7 - self.invokeBlack([str(source_path), "--target-version", "py37"]) - # but not on 3.6, because we use async as a reserved keyword - self.invokeBlack([str(source_path), "--target-version", "py36"], exit_code=123) - - @patch("black.dump_to_file", dump_to_stderr) - def test_python38(self) -> None: - source, expected = read_data("python38") - actual = fs(source) - self.assertFormatEqual(expected, actual) - major, minor = sys.version_info[:2] - if major > 3 or (major == 3 and minor >= 8): - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_fmtonoff(self) -> None: - source, expected = read_data("fmtonoff") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_fmtonoff2(self) -> None: - source, expected = read_data("fmtonoff2") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_fmtonoff3(self) -> None: - source, expected = read_data("fmtonoff3") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_fmtonoff4(self) -> None: - source, expected = read_data("fmtonoff4") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_remove_empty_parentheses_after_class(self) -> None: - source, expected = read_data("class_blank_parentheses") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_new_line_between_class_and_code(self) -> None: - source, expected = read_data("class_methods_new_line") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def 
test_bracket_match(self) -> None: - source, expected = read_data("bracketmatch") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_tuple_assign(self) -> None: - source, expected = read_data("tupleassign") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_beginning_backslash(self) -> None: - source, expected = read_data("beginning_backslash") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - def test_tab_comment_indentation(self) -> None: - contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n" - contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" - self.assertFormatEqual(contents_spc, fs(contents_spc)) - self.assertFormatEqual(contents_spc, fs(contents_tab)) - - contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t\t# comment\n\tpass\n" - contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" - self.assertFormatEqual(contents_spc, fs(contents_spc)) - self.assertFormatEqual(contents_spc, fs(contents_tab)) - - # mixed tabs and spaces (valid Python 2 code) - contents_tab = "if 1:\n if 2:\n\t\tpass\n\t# comment\n pass\n" - contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" - self.assertFormatEqual(contents_spc, fs(contents_spc)) - self.assertFormatEqual(contents_spc, fs(contents_tab)) - - contents_tab = "if 1:\n if 2:\n\t\tpass\n\t\t# comment\n pass\n" - contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" - self.assertFormatEqual(contents_spc, fs(contents_spc)) - self.assertFormatEqual(contents_spc, fs(contents_tab)) - - def test_report_verbose(self) -> None: - report = black.Report(verbose=True) - out_lines = [] - err_lines = [] - - def out(msg: str, **kwargs: Any) -> None: - out_lines.append(msg) - - def err(msg: str, **kwargs: Any) -> None: - err_lines.append(msg) - - with patch("black.out", out), patch("black.err", err): + with patch("black.output._out", out), patch("black.output._err", err): report.done(Path("f1"), black.Changed.NO) self.assertEqual(len(out_lines), 1) self.assertEqual(len(err_lines), 0) @@ -978,19 +513,19 @@ def err(msg: str, **kwargs: Any) -> None: report.check = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) report.check = False report.diff = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) def test_report_quiet(self) -> None: - report = black.Report(quiet=True) + report = Report(quiet=True) out_lines = [] err_lines = [] @@ -1000,7 +535,7 @@ def out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): + with patch("black.output._out", out), patch("black.output._err", err): report.done(Path("f1"), black.Changed.NO) self.assertEqual(len(out_lines), 0) self.assertEqual(len(err_lines), 0) @@ 
-1072,15 +607,15 @@ def err(msg: str, **kwargs: Any) -> None:
         report.check = True
         self.assertEqual(
             unstyle(str(report)),
-            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
-            " would fail to reformat.",
+            "2 files would be reformatted, 3 files would be left unchanged, 2"
+            " files would fail to reformat.",
         )
         report.check = False
         report.diff = True
         self.assertEqual(
             unstyle(str(report)),
-            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
-            " would fail to reformat.",
+            "2 files would be reformatted, 3 files would be left unchanged, 2"
+            " files would fail to reformat.",
         )

     def test_report_normal(self) -> None:
@@ -1094,7 +629,7 @@ def out(msg: str, **kwargs: Any) -> None:
         def err(msg: str, **kwargs: Any) -> None:
             err_lines.append(msg)

-        with patch("black.out", out), patch("black.err", err):
+        with patch("black.output._out", out), patch("black.output._err", err):
             report.done(Path("f1"), black.Changed.NO)
             self.assertEqual(len(out_lines), 0)
             self.assertEqual(len(err_lines), 0)
@@ -1169,15 +704,15 @@ def err(msg: str, **kwargs: Any) -> None:
         report.check = True
         self.assertEqual(
             unstyle(str(report)),
-            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
-            " would fail to reformat.",
+            "2 files would be reformatted, 3 files would be left unchanged, 2"
+            " files would fail to reformat.",
         )
         report.check = False
         report.diff = True
         self.assertEqual(
             unstyle(str(report)),
-            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
-            " would fail to reformat.",
+            "2 files would be reformatted, 3 files would be left unchanged, 2"
+            " files would fail to reformat.",
         )

     def test_lib2to3_parse(self) -> None:
@@ -1186,24 +721,48 @@ def test_lib2to3_parse(self) -> None:
         straddling = "x + y"
         black.lib2to3_parse(straddling)
-        black.lib2to3_parse(straddling, {TargetVersion.PY27})
         black.lib2to3_parse(straddling, {TargetVersion.PY36})
-        black.lib2to3_parse(straddling, {TargetVersion.PY27, TargetVersion.PY36})

         py2_only = "print x"
-        black.lib2to3_parse(py2_only)
-        black.lib2to3_parse(py2_only, {TargetVersion.PY27})
         with self.assertRaises(black.InvalidInput):
             black.lib2to3_parse(py2_only, {TargetVersion.PY36})
-        with self.assertRaises(black.InvalidInput):
-            black.lib2to3_parse(py2_only, {TargetVersion.PY27, TargetVersion.PY36})

         py3_only = "exec(x, end=y)"
         black.lib2to3_parse(py3_only)
-        with self.assertRaises(black.InvalidInput):
-            black.lib2to3_parse(py3_only, {TargetVersion.PY27})
         black.lib2to3_parse(py3_only, {TargetVersion.PY36})
-        black.lib2to3_parse(py3_only, {TargetVersion.PY27, TargetVersion.PY36})
+
+    def test_get_features_used_decorator(self) -> None:
+        # Test the feature detection of new decorator syntax.
+        # Since a failure here makes some test cases of
+        # test_get_features_used() fail as well, this is tested first
+        # so that a useful cause is identified.
+        simples, relaxed = read_data("miscellaneous", "decorators")
+        # skip explanation comments at the top of the file
+        for simple_test in simples.split("##")[1:]:
+            node = black.lib2to3_parse(simple_test)
+            decorator = str(node.children[0].children[0]).strip()
+            self.assertNotIn(
+                Feature.RELAXED_DECORATORS,
+                black.get_features_used(node),
+                msg=(
+                    f"decorator '{decorator}' follows python<=3.8 syntax "
+                    "but is detected as 3.9+"
+                    # f"The full node is\n{node!r}"
+                ),
+            )
+        # skip the '# output' comment at the top of the output part
+        for relaxed_test in relaxed.split("##")[1:]:
+            node = black.lib2to3_parse(relaxed_test)
+            decorator = str(node.children[0].children[0]).strip()
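+            # The relaxed cases use the PEP 614 decorator grammar, so each
+            # one should be detected as needing the RELAXED_DECORATORS feature.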
+            self.assertIn(
+                Feature.RELAXED_DECORATORS,
+                black.get_features_used(node),
+                msg=(
+                    f"decorator '{decorator}' uses python3.9+ syntax "
+                    "but is detected as python<=3.8"
+                    # f"The full node is\n{node!r}"
+                ),
+            )

     def test_get_features_used(self) -> None:
         node = black.lib2to3_parse("def f(*, arg): ...\n")
@@ -1220,7 +779,7 @@ def test_get_features_used(self) -> None:
         self.assertEqual(black.get_features_used(node), {Feature.NUMERIC_UNDERSCORES})
         node = black.lib2to3_parse("123456\n")
         self.assertEqual(black.get_features_used(node), set())
-        source, expected = read_data("function")
+        source, expected = read_data("simple_cases", "function")
         node = black.lib2to3_parse(source)
         expected_features = {
             Feature.TRAILING_COMMA_IN_CALL,
@@ -1230,11 +789,65 @@ def test_get_features_used(self) -> None:
         self.assertEqual(black.get_features_used(node), expected_features)
         node = black.lib2to3_parse(expected)
         self.assertEqual(black.get_features_used(node), expected_features)
-        source, expected = read_data("expression")
+        source, expected = read_data("simple_cases", "expression")
         node = black.lib2to3_parse(source)
         self.assertEqual(black.get_features_used(node), set())
         node = black.lib2to3_parse(expected)
         self.assertEqual(black.get_features_used(node), set())
+        node = black.lib2to3_parse("lambda a, /, b: ...")
+        self.assertEqual(black.get_features_used(node), {Feature.POS_ONLY_ARGUMENTS})
+        node = black.lib2to3_parse("def fn(a, /, b): ...")
+        self.assertEqual(black.get_features_used(node), {Feature.POS_ONLY_ARGUMENTS})
+        node = black.lib2to3_parse("def fn(): yield a, b")
+        self.assertEqual(black.get_features_used(node), set())
+        node = black.lib2to3_parse("def fn(): return a, b")
+        self.assertEqual(black.get_features_used(node), set())
+        node = black.lib2to3_parse("def fn(): yield *b, c")
+        self.assertEqual(black.get_features_used(node), {Feature.UNPACKING_ON_FLOW})
+        node = black.lib2to3_parse("def fn(): return a, *b, c")
+        self.assertEqual(black.get_features_used(node), {Feature.UNPACKING_ON_FLOW})
+        node = black.lib2to3_parse("x = a, *b, c")
+        self.assertEqual(black.get_features_used(node), set())
+        node = black.lib2to3_parse("x: Any = regular")
+        self.assertEqual(black.get_features_used(node), set())
+        node = black.lib2to3_parse("x: Any = (regular, regular)")
+        self.assertEqual(black.get_features_used(node), set())
+        node = black.lib2to3_parse("x: Any = Complex(Type(1))[something]")
+        self.assertEqual(black.get_features_used(node), set())
+        node = black.lib2to3_parse("x: Tuple[int, ...] 
= a, b, c") + self.assertEqual( + black.get_features_used(node), {Feature.ANN_ASSIGN_EXTENDED_RHS} + ) + node = black.lib2to3_parse("try: pass\nexcept Something: pass") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("try: pass\nexcept (*Something,): pass") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("try: pass\nexcept *Group: pass") + self.assertEqual(black.get_features_used(node), {Feature.EXCEPT_STAR}) + node = black.lib2to3_parse("a[*b]") + self.assertEqual(black.get_features_used(node), {Feature.VARIADIC_GENERICS}) + node = black.lib2to3_parse("a[x, *y(), z] = t") + self.assertEqual(black.get_features_used(node), {Feature.VARIADIC_GENERICS}) + node = black.lib2to3_parse("def fn(*args: *T): pass") + self.assertEqual(black.get_features_used(node), {Feature.VARIADIC_GENERICS}) + + def test_get_features_used_for_future_flags(self) -> None: + for src, features in [ + ("from __future__ import annotations", {Feature.FUTURE_ANNOTATIONS}), + ( + "from __future__ import (other, annotations)", + {Feature.FUTURE_ANNOTATIONS}, + ), + ("a = 1 + 2\nfrom something import annotations", set()), + ("from __future__ import x, y", set()), + ]: + with self.subTest(src=src, features=features): + node = black.lib2to3_parse(src) + future_imports = black.get_future_imports(node) + self.assertEqual( + black.get_features_used(node, future_imports=future_imports), + features, + ) def test_get_future_imports(self) -> None: node = black.lib2to3_parse("\n") @@ -1266,9 +879,10 @@ def test_get_future_imports(self) -> None: ) self.assertEqual({"unicode_literals", "print"}, black.get_future_imports(node)) + @pytest.mark.incompatible_with_mypyc def test_debug_visitor(self) -> None: - source, _ = read_data("debug_visitor.py") - expected, _ = read_data("debug_visitor.out") + source, _ = read_data("miscellaneous", "debug_visitor") + expected, _ = read_data("miscellaneous", "debug_visitor.out") out_lines = [] err_lines = [] @@ -1278,8 +892,8 @@ def out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): - black.DebugVisitor.show(source) + with patch("black.debug.out", out): + DebugVisitor.show(source) actual = "\n".join(out_lines) + "\n" log_name = "" if expected != actual: @@ -1316,6 +930,7 @@ def test_endmarker(self) -> None: self.assertEqual(len(n.children), 1) self.assertEqual(n.children[0].type, black.token.ENDMARKER) + @pytest.mark.incompatible_with_mypyc @unittest.skipIf(os.environ.get("SKIP_AST_PRINT"), "user set SKIP_AST_PRINT") def test_assertFormatEqual(self) -> None: out_lines = [] @@ -1327,203 +942,58 @@ def out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): + with patch("black.output._out", out), patch("black.output._err", err): with self.assertRaises(AssertionError): self.assertFormatEqual("j = [1, 2, 3]", "j = [1, 2, 3,]") out_str = "".join(out_lines) - self.assertTrue("Expected tree:" in out_str) - self.assertTrue("Actual tree:" in out_str) + self.assertIn("Expected tree:", out_str) + self.assertIn("Actual tree:", out_str) self.assertEqual("".join(err_lines), "") - def test_cache_broken_file(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace: - cache_file = black.get_cache_file(mode) - with cache_file.open("w") as fobj: - fobj.write("this is not a pickle") - self.assertEqual(black.read_cache(mode), {}) - 
src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - self.invokeBlack([str(src)]) - cache = black.read_cache(mode) - self.assertIn(src, cache) - - def test_cache_single_file_already_cached(self) -> None: - mode = DEFAULT_MODE + @event_loop() + @patch("concurrent.futures.ProcessPoolExecutor", MagicMock(side_effect=OSError)) + def test_works_in_mono_process_only_environment(self) -> None: with cache_dir() as workspace: - src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - black.write_cache({}, [src], mode) - self.invokeBlack([str(src)]) - with src.open("r") as fobj: - self.assertEqual(fobj.read(), "print('hello')") - - @event_loop() - def test_cache_multiple_files(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace, patch( - "black.ProcessPoolExecutor", new=ThreadPoolExecutor - ): - one = (workspace / "one.py").resolve() - with one.open("w") as fobj: - fobj.write("print('hello')") - two = (workspace / "two.py").resolve() - with two.open("w") as fobj: - fobj.write("print('hello')") - black.write_cache({}, [one], mode) - self.invokeBlack([str(workspace)]) - with one.open("r") as fobj: - self.assertEqual(fobj.read(), "print('hello')") - with two.open("r") as fobj: - self.assertEqual(fobj.read(), 'print("hello")\n') - cache = black.read_cache(mode) - self.assertIn(one, cache) - self.assertIn(two, cache) - - def test_no_cache_when_writeback_diff(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace: - src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - self.invokeBlack([str(src), "--diff"]) - cache_file = black.get_cache_file(mode) - self.assertFalse(cache_file.exists()) - - def test_no_cache_when_stdin(self) -> None: - mode = DEFAULT_MODE - with cache_dir(): - result = CliRunner().invoke( - black.main, ["-"], input=BytesIO(b"print('hello')") - ) - self.assertEqual(result.exit_code, 0) - cache_file = black.get_cache_file(mode) - self.assertFalse(cache_file.exists()) - - def test_read_cache_no_cachefile(self) -> None: - mode = DEFAULT_MODE - with cache_dir(): - self.assertEqual(black.read_cache(mode), {}) - - def test_write_cache_read_cache(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace: - src = (workspace / "test.py").resolve() - src.touch() - black.write_cache({}, [src], mode) - cache = black.read_cache(mode) - self.assertIn(src, cache) - self.assertEqual(cache[src], black.get_cache_info(src)) - - def test_filter_cached(self) -> None: - with TemporaryDirectory() as workspace: - path = Path(workspace) - uncached = (path / "uncached").resolve() - cached = (path / "cached").resolve() - cached_but_changed = (path / "changed").resolve() - uncached.touch() - cached.touch() - cached_but_changed.touch() - cache = {cached: black.get_cache_info(cached), cached_but_changed: (0.0, 0)} - todo, done = black.filter_cached( - cache, {uncached, cached, cached_but_changed} - ) - self.assertEqual(todo, {uncached, cached_but_changed}) - self.assertEqual(done, {cached}) - - def test_write_cache_creates_directory_if_needed(self) -> None: - mode = DEFAULT_MODE - with cache_dir(exists=False) as workspace: - self.assertFalse(workspace.exists()) - black.write_cache({}, [], mode) - self.assertTrue(workspace.exists()) - - @event_loop() - def test_failed_formatting_does_not_get_cached(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace, patch( - "black.ProcessPoolExecutor", new=ThreadPoolExecutor - ): - 
failing = (workspace / "failing.py").resolve() - with failing.open("w") as fobj: - fobj.write("not actually python") - clean = (workspace / "clean.py").resolve() - with clean.open("w") as fobj: - fobj.write('print("hello")\n') - self.invokeBlack([str(workspace)], exit_code=123) - cache = black.read_cache(mode) - self.assertNotIn(failing, cache) - self.assertIn(clean, cache) - - def test_write_cache_write_fail(self) -> None: - mode = DEFAULT_MODE - with cache_dir(), patch.object(Path, "open") as mock: - mock.side_effect = OSError - black.write_cache({}, [], mode) - - @event_loop() - @patch("black.ProcessPoolExecutor", MagicMock(side_effect=OSError)) - def test_works_in_mono_process_only_environment(self) -> None: - with cache_dir() as workspace: - for f in [ - (workspace / "one.py").resolve(), - (workspace / "two.py").resolve(), - ]: - f.write_text('print("hello")\n') - self.invokeBlack([str(workspace)]) + for f in [ + (workspace / "one.py").resolve(), + (workspace / "two.py").resolve(), + ]: + f.write_text('print("hello")\n') + self.invokeBlack([str(workspace)]) @event_loop() def test_check_diff_use_together(self) -> None: with cache_dir(): # Files which will be reformatted. - src1 = (THIS_DIR / "data" / "string_quotes.py").resolve() + src1 = get_case_path("miscellaneous", "string_quotes") self.invokeBlack([str(src1), "--diff", "--check"], exit_code=1) # Files which will not be reformatted. - src2 = (THIS_DIR / "data" / "composition.py").resolve() + src2 = get_case_path("simple_cases", "composition") self.invokeBlack([str(src2), "--diff", "--check"]) # Multi file command. self.invokeBlack([str(src1), str(src2), "--diff", "--check"], exit_code=1) - def test_no_files(self) -> None: + def test_no_src_fails(self) -> None: with cache_dir(): - # Without an argument, black exits with error code 0. 
- self.invokeBlack([]) + self.invokeBlack([], exit_code=1) + + def test_src_and_code_fails(self) -> None: + with cache_dir(): + self.invokeBlack([".", "-c", "0"], exit_code=1) def test_broken_symlink(self) -> None: with cache_dir() as workspace: symlink = workspace / "broken_link.py" try: symlink.symlink_to("nonexistent.py") - except OSError as e: + except (OSError, NotImplementedError) as e: self.skipTest(f"Can't create symlinks: {e}") self.invokeBlack([str(workspace.resolve())]) - def test_read_cache_line_lengths(self) -> None: - mode = DEFAULT_MODE - short_mode = replace(DEFAULT_MODE, line_length=1) - with cache_dir() as workspace: - path = (workspace / "file.py").resolve() - path.touch() - black.write_cache({}, [path], mode) - one = black.read_cache(mode) - self.assertIn(path, one) - two = black.read_cache(short_mode) - self.assertNotIn(path, two) - - def test_tricky_unicode_symbols(self) -> None: - source, expected = read_data("tricky_unicode_symbols") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - def test_single_file_force_pyi(self) -> None: - reg_mode = DEFAULT_MODE pyi_mode = replace(DEFAULT_MODE, is_pyi=True) - contents, expected = read_data("force_pyi") + contents, expected = read_data("miscellaneous", "force_pyi") with cache_dir() as workspace: path = (workspace / "file.py").resolve() with open(path, "w") as fh: @@ -1533,16 +1003,18 @@ def test_single_file_force_pyi(self) -> None: actual = fh.read() # verify cache with --pyi is separate pyi_cache = black.read_cache(pyi_mode) - self.assertIn(path, pyi_cache) - normal_cache = black.read_cache(reg_mode) - self.assertNotIn(path, normal_cache) - self.assertEqual(actual, expected) + self.assertIn(str(path), pyi_cache) + normal_cache = black.read_cache(DEFAULT_MODE) + self.assertNotIn(str(path), normal_cache) + self.assertFormatEqual(expected, actual) + black.assert_equivalent(contents, actual) + black.assert_stable(contents, actual, pyi_mode) @event_loop() def test_multi_file_force_pyi(self) -> None: reg_mode = DEFAULT_MODE pyi_mode = replace(DEFAULT_MODE, is_pyi=True) - contents, expected = read_data("force_pyi") + contents, expected = read_data("miscellaneous", "force_pyi") with cache_dir() as workspace: paths = [ (workspace / "file1.py").resolve(), @@ -1560,11 +1032,11 @@ def test_multi_file_force_pyi(self) -> None: pyi_cache = black.read_cache(pyi_mode) normal_cache = black.read_cache(reg_mode) for path in paths: - self.assertIn(path, pyi_cache) - self.assertNotIn(path, normal_cache) + self.assertIn(str(path), pyi_cache) + self.assertNotIn(str(path), normal_cache) def test_pipe_force_pyi(self) -> None: - source, expected = read_data("force_pyi") + source, expected = read_data("miscellaneous", "force_pyi") result = CliRunner().invoke( black.main, ["-", "-q", "--pyi"], input=BytesIO(source.encode("utf8")) ) @@ -1574,8 +1046,8 @@ def test_pipe_force_pyi(self) -> None: def test_single_file_force_py36(self) -> None: reg_mode = DEFAULT_MODE - py36_mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) - source, expected = read_data("force_py36") + py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS) + source, expected = read_data("miscellaneous", "force_py36") with cache_dir() as workspace: path = (workspace / "file.py").resolve() with open(path, "w") as fh: @@ -1585,16 +1057,16 @@ def test_single_file_force_py36(self) -> None: actual = fh.read() # verify cache with --target-version is separate 
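+            # Cache entries are keyed by str(path) rather than the Path
+            # object itself, so the assertions below convert before looking up.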
py36_cache = black.read_cache(py36_mode) - self.assertIn(path, py36_cache) + self.assertIn(str(path), py36_cache) normal_cache = black.read_cache(reg_mode) - self.assertNotIn(path, normal_cache) + self.assertNotIn(str(path), normal_cache) self.assertEqual(actual, expected) @event_loop() def test_multi_file_force_py36(self) -> None: reg_mode = DEFAULT_MODE - py36_mode = replace(DEFAULT_MODE, target_versions=black.PY36_VERSIONS) - source, expected = read_data("force_py36") + py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS) + source, expected = read_data("miscellaneous", "force_py36") with cache_dir() as workspace: paths = [ (workspace / "file1.py").resolve(), @@ -1612,18 +1084,11 @@ def test_multi_file_force_py36(self) -> None: pyi_cache = black.read_cache(py36_mode) normal_cache = black.read_cache(reg_mode) for path in paths: - self.assertIn(path, pyi_cache) - self.assertNotIn(path, normal_cache) - - def test_collections(self) -> None: - source, expected = read_data("collections") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + self.assertIn(str(path), pyi_cache) + self.assertNotIn(str(path), normal_cache) def test_pipe_force_py36(self) -> None: - source, expected = read_data("force_py36") + source, expected = read_data("miscellaneous", "force_py36") result = CliRunner().invoke( black.main, ["-", "-q", "--target-version=py36"], @@ -1633,133 +1098,168 @@ def test_pipe_force_py36(self) -> None: actual = result.output self.assertFormatEqual(actual, expected) - def test_include_exclude(self) -> None: - path = THIS_DIR / "data" / "include_exclude_tests" - include = re.compile(r"\.pyi?$") - exclude = re.compile(r"/exclude/|/\.definitely_exclude/") - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - sources: List[Path] = [] - expected = [ - Path(path / "b/dont_exclude/a.py"), - Path(path / "b/dont_exclude/a.pyi"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), this_abs, include, exclude, None, report, gitignore + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin(self) -> None: + with patch( + "black.format_stdin_to_stdout", + return_value=lambda *args, **kwargs: black.Changed.YES, + ) as fsts: + report = MagicMock() + path = Path("-") + black.reformat_one( + path, + fast=True, + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + report=report, ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) - def test_exclude_for_issue_1572(self) -> None: - # Exclude shouldn't touch files that were explicitly given to Black through the - # CLI. Exclude is supposed to only apply to the recursive discovery of files. 
- # https://github.com/psf/black/issues/1572 - path = THIS_DIR / "data" / "include_exclude_tests" - include = "" - exclude = r"/exclude/|a\.py" - src = str(path / "b/exclude/a.py") - report = black.Report() - expected = [Path(path / "b/exclude/a.py")] - sources = list( - black.get_sources( - ctx=FakeContext(), - src=(src,), - quiet=True, - verbose=False, - include=include, - exclude=exclude, - force_exclude=None, + fsts.assert_called_once() + report.done.assert_called_with(path, black.Changed.YES) + + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin_filename(self) -> None: + with patch( + "black.format_stdin_to_stdout", + return_value=lambda *args, **kwargs: black.Changed.YES, + ) as fsts: + report = MagicMock() + p = "foo.py" + path = Path(f"__BLACK_STDIN_FILENAME__{p}") + expected = Path(p) + black.reformat_one( + path, + fast=True, + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, report=report, ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - def test_gitignore_exclude(self) -> None: - path = THIS_DIR / "data" / "include_exclude_tests" - include = re.compile(r"\.pyi?$") - exclude = re.compile(r"") - report = black.Report() - gitignore = PathSpec.from_lines( - "gitwildmatch", ["exclude/", ".definitely_exclude"] - ) - sources: List[Path] = [] - expected = [ - Path(path / "b/dont_exclude/a.py"), - Path(path / "b/dont_exclude/a.pyi"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), this_abs, include, exclude, None, report, gitignore + fsts.assert_called_once_with( + fast=True, write_back=black.WriteBack.YES, mode=DEFAULT_MODE + ) + # __BLACK_STDIN_FILENAME__ should have been stripped + report.done.assert_called_with(expected, black.Changed.YES) + + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin_filename_pyi(self) -> None: + with patch( + "black.format_stdin_to_stdout", + return_value=lambda *args, **kwargs: black.Changed.YES, + ) as fsts: + report = MagicMock() + p = "foo.pyi" + path = Path(f"__BLACK_STDIN_FILENAME__{p}") + expected = Path(p) + black.reformat_one( + path, + fast=True, + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + report=report, + ) + fsts.assert_called_once_with( + fast=True, + write_back=black.WriteBack.YES, + mode=replace(DEFAULT_MODE, is_pyi=True), ) + # __BLACK_STDIN_FILENAME__ should have been stripped + report.done.assert_called_with(expected, black.Changed.YES) + + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin_filename_ipynb(self) -> None: + with patch( + "black.format_stdin_to_stdout", + return_value=lambda *args, **kwargs: black.Changed.YES, + ) as fsts: + report = MagicMock() + p = "foo.ipynb" + path = Path(f"__BLACK_STDIN_FILENAME__{p}") + expected = Path(p) + black.reformat_one( + path, + fast=True, + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + report=report, + ) + fsts.assert_called_once_with( + fast=True, + write_back=black.WriteBack.YES, + mode=replace(DEFAULT_MODE, is_ipynb=True), + ) + # __BLACK_STDIN_FILENAME__ should have been stripped + report.done.assert_called_with(expected, black.Changed.YES) + + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin_and_existing_path(self) -> None: + with patch( + "black.format_stdin_to_stdout", + return_value=lambda *args, **kwargs: black.Changed.YES, + ) as fsts: + report = MagicMock() + # Even with an existing file, since we are forcing stdin, black + # should output to stdout and not modify the file inplace + p = THIS_DIR / "data" / 
"simple_cases" / "collections.py" + # Make sure is_file actually returns True + self.assertTrue(p.is_file()) + path = Path(f"__BLACK_STDIN_FILENAME__{p}") + expected = Path(p) + black.reformat_one( + path, + fast=True, + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + report=report, + ) + fsts.assert_called_once() + # __BLACK_STDIN_FILENAME__ should have been stripped + report.done.assert_called_with(expected, black.Changed.YES) + + def test_reformat_one_with_stdin_empty(self) -> None: + output = io.StringIO() + with patch("io.TextIOWrapper", lambda *args, **kwargs: output): + try: + black.format_stdin_to_stdout( + fast=True, + content="", + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + ) + except io.UnsupportedOperation: + pass # StringIO does not support detach + assert output.getvalue() == "" + + def test_invalid_cli_regex(self) -> None: + for option in ["--include", "--exclude", "--extend-exclude", "--force-exclude"]: + self.invokeBlack(["-", option, "**()(!!*)"], exit_code=2) + + def test_required_version_matches_version(self) -> None: + self.invokeBlack( + ["--required-version", black.__version__, "-c", "0"], + exit_code=0, + ignore_config=True, ) - self.assertEqual(sorted(expected), sorted(sources)) - def test_empty_include(self) -> None: - path = THIS_DIR / "data" / "include_exclude_tests" - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - empty = re.compile(r"") - sources: List[Path] = [] - expected = [ - Path(path / "b/exclude/a.pie"), - Path(path / "b/exclude/a.py"), - Path(path / "b/exclude/a.pyi"), - Path(path / "b/dont_exclude/a.pie"), - Path(path / "b/dont_exclude/a.py"), - Path(path / "b/dont_exclude/a.pyi"), - Path(path / "b/.definitely_exclude/a.pie"), - Path(path / "b/.definitely_exclude/a.py"), - Path(path / "b/.definitely_exclude/a.pyi"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), - this_abs, - empty, - re.compile(black.DEFAULT_EXCLUDES), - None, - report, - gitignore, - ) + def test_required_version_matches_partial_version(self) -> None: + self.invokeBlack( + ["--required-version", black.__version__.split(".")[0], "-c", "0"], + exit_code=0, + ignore_config=True, ) - self.assertEqual(sorted(expected), sorted(sources)) - def test_empty_exclude(self) -> None: - path = THIS_DIR / "data" / "include_exclude_tests" - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - empty = re.compile(r"") - sources: List[Path] = [] - expected = [ - Path(path / "b/dont_exclude/a.py"), - Path(path / "b/dont_exclude/a.pyi"), - Path(path / "b/exclude/a.py"), - Path(path / "b/exclude/a.pyi"), - Path(path / "b/.definitely_exclude/a.py"), - Path(path / "b/.definitely_exclude/a.pyi"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), - this_abs, - re.compile(black.DEFAULT_INCLUDES), - empty, - None, - report, - gitignore, - ) + def test_required_version_does_not_match_on_minor_version(self) -> None: + self.invokeBlack( + ["--required-version", black.__version__.split(".")[0] + ".999", "-c", "0"], + exit_code=1, + ignore_config=True, ) - self.assertEqual(sorted(expected), sorted(sources)) - def test_invalid_include_exclude(self) -> None: - for option in ["--include", "--exclude"]: - self.invokeBlack(["-", option, "**()(!!*)"], exit_code=2) + def test_required_version_does_not_match_version(self) -> None: + result = BlackRunner().invoke( + black.main, + ["--required-version", "20.99b", "-c", "0"], + ) + 
self.assertEqual(result.exit_code, 1) + self.assertIn("required version", result.stderr) def test_preserves_line_endings(self) -> None: with TemporaryDirectory() as workspace: @@ -1781,75 +1281,47 @@ def test_preserves_line_endings_via_stdin(self) -> None: black.main, ["-", "--fast"], input=BytesIO(contents.encode("utf8")) ) self.assertEqual(result.exit_code, 0) - output = runner.stdout_bytes + output = result.stdout_bytes self.assertIn(nl.encode("utf8"), output) if nl == "\n": self.assertNotIn(b"\r\n", output) + def test_normalize_line_endings(self) -> None: + with TemporaryDirectory() as workspace: + test_file = Path(workspace) / "test.py" + for data, expected in ( + (b"c\r\nc\n ", b"c\r\nc\r\n"), + (b"l\nl\r\n ", b"l\nl\n"), + ): + test_file.write_bytes(data) + ff(test_file, write_back=black.WriteBack.YES) + self.assertEqual(test_file.read_bytes(), expected) + def test_assert_equivalent_different_asts(self) -> None: with self.assertRaises(AssertionError): black.assert_equivalent("{}", "None") - def test_symlink_out_of_root_directory(self) -> None: - path = MagicMock() - root = THIS_DIR.resolve() - child = MagicMock() - include = re.compile(black.DEFAULT_INCLUDES) - exclude = re.compile(black.DEFAULT_EXCLUDES) - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - # `child` should behave like a symlink which resolved path is clearly - # outside of the `root` directory. - path.iterdir.return_value = [child] - child.resolve.return_value = Path("/a/b/c") - child.as_posix.return_value = "/a/b/c" - child.is_symlink.return_value = True - try: - list( - black.gen_python_files( - path.iterdir(), root, include, exclude, None, report, gitignore - ) - ) - except ValueError as ve: - self.fail(f"`get_python_files_in_dir()` failed: {ve}") - path.iterdir.assert_called_once() - child.resolve.assert_called_once() - child.is_symlink.assert_called_once() - # `child` should behave like a strange file which resolved path is clearly - # outside of the `root` directory. - child.is_symlink.return_value = False - with self.assertRaises(ValueError): - list( - black.gen_python_files( - path.iterdir(), root, include, exclude, None, report, gitignore - ) - ) - path.iterdir.assert_called() - self.assertEqual(path.iterdir.call_count, 2) - child.resolve.assert_called() - self.assertEqual(child.resolve.call_count, 2) - child.is_symlink.assert_called() - self.assertEqual(child.is_symlink.call_count, 2) - def test_shhh_click(self) -> None: try: from click import _unicodefun # type: ignore - except ModuleNotFoundError: + except ImportError: self.skipTest("Incompatible Click version") - if not hasattr(_unicodefun, "_verify_python3_env"): + + if not hasattr(_unicodefun, "_verify_python_env"): self.skipTest("Incompatible Click version") + # First, let's see if Click is crashing with a preferred ASCII charset. with patch("locale.getpreferredencoding") as gpe: gpe.return_value = "ASCII" with self.assertRaises(RuntimeError): - _unicodefun._verify_python3_env() + _unicodefun._verify_python_env() # Now, let's silence Click... black.patch_click() # ...and confirm it's silent. 
with patch("locale.getpreferredencoding") as gpe: gpe.return_value = "ASCII" try: - _unicodefun._verify_python3_env() + _unicodefun._verify_python_env() except RuntimeError as re: self.fail(f"`patch_click()` failed, exception still raised: {re}") @@ -1866,15 +1338,7 @@ def fail(*args: Any, **kwargs: Any) -> None: critical=fail, log=fail, ): - ff(THIS_FILE) - - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - def test_blackd_main(self) -> None: - with patch("blackd.web.run_app"): - result = CliRunner().invoke(blackd.main, []) - if result.exception is not None: - raise result.exception - self.assertEqual(result.exit_code, 0) + ff(THIS_DIR / "util.py") def test_invalid_config_return_code(self) -> None: tmp_file = Path(black.dump_to_file()) @@ -1895,6 +1359,7 @@ def test_parse_pyproject_toml(self) -> None: self.assertEqual(config["color"], True) self.assertEqual(config["line_length"], 79) self.assertEqual(config["target_version"], ["py36", "py37", "py38"]) + self.assertEqual(config["python_cell_magics"], ["custom1", "custom2"]) self.assertEqual(config["exclude"], r"\.pyi?$") self.assertEqual(config["include"], r"\.py?$") @@ -1912,6 +1377,7 @@ def test_read_pyproject_toml(self) -> None: self.assertEqual(config["exclude"], r"\.pyi?$") self.assertEqual(config["include"], r"\.py?$") + @pytest.mark.incompatible_with_mypyc def test_find_project_root(self) -> None: with TemporaryDirectory() as workspace: root = Path(workspace) @@ -1929,185 +1395,782 @@ def test_find_project_root(self) -> None: src_python.touch() self.assertEqual( - black.find_project_root((src_dir, test_dir)), root.resolve() + black.find_project_root((src_dir, test_dir)), + (root.resolve(), "pyproject.toml"), + ) + self.assertEqual( + black.find_project_root((src_dir,)), + (src_dir.resolve(), "pyproject.toml"), + ) + self.assertEqual( + black.find_project_root((src_python,)), + (src_dir.resolve(), "pyproject.toml"), + ) + + with change_directory(test_dir): + self.assertEqual( + black.find_project_root(("-",), stdin_filename="../src/a.py"), + (src_dir.resolve(), "pyproject.toml"), + ) + + @patch( + "black.files.find_user_pyproject_toml", + ) + def test_find_pyproject_toml(self, find_user_pyproject_toml: MagicMock) -> None: + find_user_pyproject_toml.side_effect = RuntimeError() + + with redirect_stderr(io.StringIO()) as stderr: + result = black.files.find_pyproject_toml( + path_search_start=(str(Path.cwd().root),) + ) + + assert result is None + err = stderr.getvalue() + assert "Ignoring user configuration" in err + + @patch( + "black.files.find_user_pyproject_toml", + black.files.find_user_pyproject_toml.__wrapped__, + ) + def test_find_user_pyproject_toml_linux(self) -> None: + if system() == "Windows": + return + + # Test if XDG_CONFIG_HOME is checked + with TemporaryDirectory() as workspace: + tmp_user_config = Path(workspace) / "black" + with patch.dict("os.environ", {"XDG_CONFIG_HOME": workspace}): + self.assertEqual( + black.files.find_user_pyproject_toml(), tmp_user_config.resolve() + ) + + # Test fallback for XDG_CONFIG_HOME + with patch.dict("os.environ"): + os.environ.pop("XDG_CONFIG_HOME", None) + fallback_user_config = Path("~/.config").expanduser() / "black" + self.assertEqual( + black.files.find_user_pyproject_toml(), fallback_user_config.resolve() + ) + + def test_find_user_pyproject_toml_windows(self) -> None: + if system() != "Windows": + return + + user_config_path = Path.home() / ".black" + self.assertEqual( + black.files.find_user_pyproject_toml(), user_config_path.resolve() + ) + 
+ def test_bpo_33660_workaround(self) -> None: + if system() == "Windows": + return + + # https://bugs.python.org/issue33660 + root = Path("/") + with change_directory(root): + path = Path("workspace") / "project" + report = black.Report(verbose=True) + normalized_path = black.normalize_path_maybe_ignore(path, root, report) + self.assertEqual(normalized_path, "workspace/project") + + def test_normalize_path_ignore_windows_junctions_outside_of_root(self) -> None: + if system() != "Windows": + return + + with TemporaryDirectory() as workspace: + root = Path(workspace) + junction_dir = root / "junction" + junction_target_outside_of_root = root / ".." + os.system(f"mklink /J {junction_dir} {junction_target_outside_of_root}") + + report = black.Report(verbose=True) + normalized_path = black.normalize_path_maybe_ignore( + junction_dir, root, report ) - self.assertEqual(black.find_project_root((src_dir,)), src_dir.resolve()) - self.assertEqual(black.find_project_root((src_python,)), src_dir.resolve()) - - -class BlackDTestCase(AioHTTPTestCase): - async def get_application(self) -> web.Application: - return blackd.make_app() - - # TODO: remove these decorators once the below is released - # https://github.com/aio-libs/aiohttp/pull/3727 - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_request_needs_formatting(self) -> None: - response = await self.client.post("/", data=b"print('hello world')") - self.assertEqual(response.status, 200) - self.assertEqual(response.charset, "utf8") - self.assertEqual(await response.read(), b'print("hello world")\n') - - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_request_no_change(self) -> None: - response = await self.client.post("/", data=b'print("hello world")\n') - self.assertEqual(response.status, 204) - self.assertEqual(await response.read(), b"") - - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_request_syntax_error(self) -> None: - response = await self.client.post("/", data=b"what even ( is") - self.assertEqual(response.status, 400) - content = await response.text() - self.assertTrue( - content.startswith("Cannot parse"), - msg=f"Expected error to start with 'Cannot parse', got {repr(content)}", + # Manually delete for Python < 3.8 + os.system(f"rmdir {junction_dir}") + + self.assertEqual(normalized_path, None) + + def test_newline_comment_interaction(self) -> None: + source = "class A:\\\r\n# type: ignore\n pass\n" + output = black.format_str(source, mode=DEFAULT_MODE) + black.assert_stable(source, output, mode=DEFAULT_MODE) + + def test_bpo_2142_workaround(self) -> None: + # https://bugs.python.org/issue2142 + + source, _ = read_data("miscellaneous", "missing_final_newline") + # read_data adds a trailing newline + source = source.rstrip() + expected, _ = read_data("miscellaneous", "missing_final_newline.diff") + tmp_file = Path(black.dump_to_file(source, ensure_final_newline=False)) + diff_header = re.compile( + rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " + r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" ) + try: + result = BlackRunner().invoke(black.main, ["--diff", str(tmp_file)]) + self.assertEqual(result.exit_code, 0) + finally: + os.unlink(tmp_file) + actual = result.output + actual = 
diff_header.sub(DETERMINISTIC_HEADER, actual) + self.assertEqual(actual, expected) + + @staticmethod + def compare_results( + result: click.testing.Result, expected_value: str, expected_exit_code: int + ) -> None: + """Helper method to test the value and exit code of a click Result.""" + assert ( + result.output == expected_value + ), "The output did not match the expected value." + assert result.exit_code == expected_exit_code, "The exit code is incorrect." + + def test_code_option(self) -> None: + """Test the code option with no changes.""" + code = 'print("Hello world")\n' + args = ["--code", code] + result = CliRunner().invoke(black.main, args) + + self.compare_results(result, code, 0) + + def test_code_option_changed(self) -> None: + """Test the code option when changes are required.""" + code = "print('hello world')" + formatted = black.format_str(code, mode=DEFAULT_MODE) + + args = ["--code", code] + result = CliRunner().invoke(black.main, args) + + self.compare_results(result, formatted, 0) + + def test_code_option_check(self) -> None: + """Test the code option when check is passed.""" + args = ["--check", "--code", 'print("Hello world")\n'] + result = CliRunner().invoke(black.main, args) + self.compare_results(result, "", 0) + + def test_code_option_check_changed(self) -> None: + """Test the code option when changes are required, and check is passed.""" + args = ["--check", "--code", "print('hello world')"] + result = CliRunner().invoke(black.main, args) + self.compare_results(result, "", 1) + + def test_code_option_diff(self) -> None: + """Test the code option when diff is passed.""" + code = "print('hello world')" + formatted = black.format_str(code, mode=DEFAULT_MODE) + result_diff = diff(code, formatted, "STDIN", "STDOUT") + + args = ["--diff", "--code", code] + result = CliRunner().invoke(black.main, args) + + # Remove time from diff + output = DIFF_TIME.sub("", result.output) + + assert output == result_diff, "The output did not match the expected value." + assert result.exit_code == 0, "The exit code is incorrect." + + def test_code_option_color_diff(self) -> None: + """Test the code option when color and diff are passed.""" + code = "print('hello world')" + formatted = black.format_str(code, mode=DEFAULT_MODE) + + result_diff = diff(code, formatted, "STDIN", "STDOUT") + result_diff = color_diff(result_diff) + + args = ["--diff", "--color", "--code", code] + result = CliRunner().invoke(black.main, args) + + # Remove time from diff + output = DIFF_TIME.sub("", result.output) + + assert output == result_diff, "The output did not match the expected value." + assert result.exit_code == 0, "The exit code is incorrect." 
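+
+    # --safe turns failed sanity checks into an error (exit code 123), while
+    # --fast skips those checks entirely; the next two tests cover both paths.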
+
+    @pytest.mark.incompatible_with_mypyc
+    def test_code_option_safe(self) -> None:
+        """Test that the code option throws an error when the sanity checks fail."""
+        # Patch black.assert_equivalent to ensure the sanity checks fail
+        with patch.object(black, "assert_equivalent", side_effect=AssertionError):
+            code = 'print("Hello world")'
+            error_msg = f"{code}\nerror: cannot format <string>: \n"
+
+            args = ["--safe", "--code", code]
+            result = CliRunner().invoke(black.main, args)
+
+            self.compare_results(result, error_msg, 123)
+
+    def test_code_option_fast(self) -> None:
+        """Test that the code option ignores errors when the sanity checks fail."""
+        # Patch black.assert_equivalent to ensure the sanity checks fail
+        with patch.object(black, "assert_equivalent", side_effect=AssertionError):
+            code = 'print("Hello world")'
+            formatted = black.format_str(code, mode=DEFAULT_MODE)
+
+            args = ["--fast", "--code", code]
+            result = CliRunner().invoke(black.main, args)
+
+            self.compare_results(result, formatted, 0)
+
+    @pytest.mark.incompatible_with_mypyc
+    def test_code_option_config(self) -> None:
+        """
+        Test that the code option finds the pyproject.toml in the current directory.
+        """
+        with patch.object(black, "parse_pyproject_toml", return_value={}) as parse:
+            args = ["--code", "print"]
+            # This is the only directory known to contain a pyproject.toml
+            with change_directory(PROJECT_ROOT):
+                CliRunner().invoke(black.main, args)
+                pyproject_path = Path(Path.cwd(), "pyproject.toml").resolve()
+
+            assert (
+                len(parse.mock_calls) >= 1
+            ), "Expected config parse to be called with the current directory."
+
+            _, call_args, _ = parse.mock_calls[0]
+            assert (
+                call_args[0].lower() == str(pyproject_path).lower()
+            ), "Incorrect config loaded."
+
+    @pytest.mark.incompatible_with_mypyc
+    def test_code_option_parent_config(self) -> None:
+        """
+        Test that the code option finds the pyproject.toml in the parent directory.
+        """
+        with patch.object(black, "parse_pyproject_toml", return_value={}) as parse:
+            with change_directory(THIS_DIR):
+                args = ["--code", "print"]
+                CliRunner().invoke(black.main, args)
+
+                pyproject_path = Path(Path().cwd().parent, "pyproject.toml").resolve()
+                assert (
+                    len(parse.mock_calls) >= 1
+                ), "Expected config parse to be called with the current directory."
+
+                _, call_args, _ = parse.mock_calls[0]
+                assert (
+                    call_args[0].lower() == str(pyproject_path).lower()
+                ), "Incorrect config loaded."
+
+    def test_for_handled_unexpected_eof_error(self) -> None:
+        """
+        Test that an unexpected EOF SyntaxError is nicely presented.
+        """
+        with pytest.raises(black.parsing.InvalidInput) as exc_info:
+            black.lib2to3_parse("print(", {})
+
+        exc_info.match("Cannot parse: 2:0: EOF in multi-line statement")
+
+    def test_equivalency_ast_parse_failure_includes_error(self) -> None:
+        with pytest.raises(AssertionError) as err:
+            black.assert_equivalent("a«»a = 1", "a«»a = 1")
+
+        err.match("--safe")
+        # Unfortunately the SyntaxError message has changed in newer versions so we
+        # can't match it directly.
+ err.match("invalid character") + err.match(r"\(, line 1\)") + + +class TestCaching: + def test_get_cache_dir( + self, + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + # Create multiple cache directories + workspace1 = tmp_path / "ws1" + workspace1.mkdir() + workspace2 = tmp_path / "ws2" + workspace2.mkdir() + + # Force user_cache_dir to use the temporary directory for easier assertions + patch_user_cache_dir = patch( + target="black.cache.user_cache_dir", + autospec=True, + return_value=str(workspace1), + ) + + # If BLACK_CACHE_DIR is not set, use user_cache_dir + monkeypatch.delenv("BLACK_CACHE_DIR", raising=False) + with patch_user_cache_dir: + assert get_cache_dir() == workspace1 + + # If it is set, use the path provided in the env var. + monkeypatch.setenv("BLACK_CACHE_DIR", str(workspace2)) + assert get_cache_dir() == workspace2 + + def test_cache_broken_file(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + cache_file = get_cache_file(mode) + cache_file.write_text("this is not a pickle") + assert black.read_cache(mode) == {} + src = (workspace / "test.py").resolve() + src.write_text("print('hello')") + invokeBlack([str(src)]) + cache = black.read_cache(mode) + assert str(src) in cache + + def test_cache_single_file_already_cached(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + src = (workspace / "test.py").resolve() + src.write_text("print('hello')") + black.write_cache({}, [src], mode) + invokeBlack([str(src)]) + assert src.read_text() == "print('hello')" + + @event_loop() + def test_cache_multiple_files(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace, patch( + "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor + ): + one = (workspace / "one.py").resolve() + with one.open("w") as fobj: + fobj.write("print('hello')") + two = (workspace / "two.py").resolve() + with two.open("w") as fobj: + fobj.write("print('hello')") + black.write_cache({}, [one], mode) + invokeBlack([str(workspace)]) + with one.open("r") as fobj: + assert fobj.read() == "print('hello')" + with two.open("r") as fobj: + assert fobj.read() == 'print("hello")\n' + cache = black.read_cache(mode) + assert str(one) in cache + assert str(two) in cache + + @pytest.mark.parametrize("color", [False, True], ids=["no-color", "with-color"]) + def test_no_cache_when_writeback_diff(self, color: bool) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + src = (workspace / "test.py").resolve() + with src.open("w") as fobj: + fobj.write("print('hello')") + with patch("black.read_cache") as read_cache, patch( + "black.write_cache" + ) as write_cache: + cmd = [str(src), "--diff"] + if color: + cmd.append("--color") + invokeBlack(cmd) + cache_file = get_cache_file(mode) + assert cache_file.exists() is False + write_cache.assert_not_called() + read_cache.assert_not_called() + + @pytest.mark.parametrize("color", [False, True], ids=["no-color", "with-color"]) + @event_loop() + def test_output_locking_when_writeback_diff(self, color: bool) -> None: + with cache_dir() as workspace: + for tag in range(0, 4): + src = (workspace / f"test{tag}.py").resolve() + with src.open("w") as fobj: + fobj.write("print('hello')") + with patch( + "black.concurrency.Manager", wraps=multiprocessing.Manager + ) as mgr: + cmd = ["--diff", str(workspace)] + if color: + cmd.append("--color") + invokeBlack(cmd, exit_code=0) + # this isn't quite doing what we want, but if it _isn't_ + # called then we cannot be using the lock it provides + 
mgr.assert_called() + + def test_no_cache_when_stdin(self) -> None: + mode = DEFAULT_MODE + with cache_dir(): + result = CliRunner().invoke( + black.main, ["-"], input=BytesIO(b"print('hello')") + ) + assert not result.exit_code + cache_file = get_cache_file(mode) + assert not cache_file.exists() + + def test_read_cache_no_cachefile(self) -> None: + mode = DEFAULT_MODE + with cache_dir(): + assert black.read_cache(mode) == {} + + def test_write_cache_read_cache(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + src = (workspace / "test.py").resolve() + src.touch() + black.write_cache({}, [src], mode) + cache = black.read_cache(mode) + assert str(src) in cache + assert cache[str(src)] == black.get_cache_info(src) + + def test_filter_cached(self) -> None: + with TemporaryDirectory() as workspace: + path = Path(workspace) + uncached = (path / "uncached").resolve() + cached = (path / "cached").resolve() + cached_but_changed = (path / "changed").resolve() + uncached.touch() + cached.touch() + cached_but_changed.touch() + cache = { + str(cached): black.get_cache_info(cached), + str(cached_but_changed): (0.0, 0), + } + todo, done = black.cache.filter_cached( + cache, {uncached, cached, cached_but_changed} + ) + assert todo == {uncached, cached_but_changed} + assert done == {cached} + + def test_write_cache_creates_directory_if_needed(self) -> None: + mode = DEFAULT_MODE + with cache_dir(exists=False) as workspace: + assert not workspace.exists() + black.write_cache({}, [], mode) + assert workspace.exists() + + @event_loop() + def test_failed_formatting_does_not_get_cached(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace, patch( + "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor + ): + failing = (workspace / "failing.py").resolve() + with failing.open("w") as fobj: + fobj.write("not actually python") + clean = (workspace / "clean.py").resolve() + with clean.open("w") as fobj: + fobj.write('print("hello")\n') + invokeBlack([str(workspace)], exit_code=123) + cache = black.read_cache(mode) + assert str(failing) not in cache + assert str(clean) in cache + + def test_write_cache_write_fail(self) -> None: + mode = DEFAULT_MODE + with cache_dir(), patch.object(Path, "open") as mock: + mock.side_effect = OSError + black.write_cache({}, [], mode) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_unsupported_version(self) -> None: - response = await self.client.post( - "/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "2"} + def test_read_cache_line_lengths(self) -> None: + mode = DEFAULT_MODE + short_mode = replace(DEFAULT_MODE, line_length=1) + with cache_dir() as workspace: + path = (workspace / "file.py").resolve() + path.touch() + black.write_cache({}, [path], mode) + one = black.read_cache(mode) + assert str(path) in one + two = black.read_cache(short_mode) + assert str(path) not in two + + +def assert_collected_sources( + src: Sequence[Union[str, Path]], + expected: Sequence[Union[str, Path]], + *, + ctx: Optional[FakeContext] = None, + exclude: Optional[str] = None, + include: Optional[str] = None, + extend_exclude: Optional[str] = None, + force_exclude: Optional[str] = None, + stdin_filename: Optional[str] = None, +) -> None: + gs_src = tuple(str(Path(s)) for s in src) + gs_expected = [Path(s) for s in expected] + gs_exclude = None if exclude is None else compile_pattern(exclude) + gs_include = DEFAULT_INCLUDE if 
include is None else compile_pattern(include) + gs_extend_exclude = ( + None if extend_exclude is None else compile_pattern(extend_exclude) + ) + gs_force_exclude = None if force_exclude is None else compile_pattern(force_exclude) + collected = black.get_sources( + ctx=ctx or FakeContext(), + src=gs_src, + quiet=False, + verbose=False, + include=gs_include, + exclude=gs_exclude, + extend_exclude=gs_extend_exclude, + force_exclude=gs_force_exclude, + report=black.Report(), + stdin_filename=stdin_filename, + ) + assert sorted(collected) == sorted(gs_expected) + + +class TestFileCollection: + def test_include_exclude(self) -> None: + path = THIS_DIR / "data" / "include_exclude_tests" + src = [path] + expected = [ + Path(path / "b/dont_exclude/a.py"), + Path(path / "b/dont_exclude/a.pyi"), + ] + assert_collected_sources( + src, + expected, + include=r"\.pyi?$", + exclude=r"/exclude/|/\.definitely_exclude/", ) - self.assertEqual(response.status, 501) - - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_supported_version(self) -> None: - response = await self.client.post( - "/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "1"} + + def test_gitignore_used_as_default(self) -> None: + base = Path(DATA_DIR / "include_exclude_tests") + expected = [ + base / "b/.definitely_exclude/a.py", + base / "b/.definitely_exclude/a.pyi", + ] + src = [base / "b/"] + ctx = FakeContext() + ctx.obj["root"] = base + assert_collected_sources(src, expected, ctx=ctx, extend_exclude=r"/exclude/") + + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_exclude_for_issue_1572(self) -> None: + # Exclude shouldn't touch files that were explicitly given to Black through the + # CLI. Exclude is supposed to only apply to the recursive discovery of files. 
+ # https://github.com/psf/black/issues/1572 + path = DATA_DIR / "include_exclude_tests" + src = [path / "b/exclude/a.py"] + expected = [path / "b/exclude/a.py"] + assert_collected_sources(src, expected, include="", exclude=r"/exclude/|a\.py") + + def test_gitignore_exclude(self) -> None: + path = THIS_DIR / "data" / "include_exclude_tests" + include = re.compile(r"\.pyi?$") + exclude = re.compile(r"") + report = black.Report() + gitignore = PathSpec.from_lines( + "gitwildmatch", ["exclude/", ".definitely_exclude"] ) - self.assertEqual(response.status, 200) - - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_invalid_python_variant(self) -> None: - async def check(header_value: str, expected_status: int = 400) -> None: - response = await self.client.post( - "/", data=b"what", headers={blackd.PYTHON_VARIANT_HEADER: header_value} + sources: List[Path] = [] + expected = [ + Path(path / "b/dont_exclude/a.py"), + Path(path / "b/dont_exclude/a.pyi"), + ] + this_abs = THIS_DIR.resolve() + sources.extend( + black.gen_python_files( + path.iterdir(), + this_abs, + include, + exclude, + None, + None, + report, + gitignore, + verbose=False, + quiet=False, ) - self.assertEqual(response.status, expected_status) - - await check("lol") - await check("ruby3.5") - await check("pyi3.6") - await check("py1.5") - await check("2.8") - await check("py2.8") - await check("3.0") - await check("pypy3.0") - await check("jython3.4") - - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_pyi(self) -> None: - source, expected = read_data("stub.pyi") - response = await self.client.post( - "/", data=source, headers={blackd.PYTHON_VARIANT_HEADER: "pyi"} ) - self.assertEqual(response.status, 200) - self.assertEqual(await response.text(), expected) + assert sorted(expected) == sorted(sources) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_diff(self) -> None: - diff_header = re.compile( - r"(In|Out)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" + def test_nested_gitignore(self) -> None: + path = Path(THIS_DIR / "data" / "nested_gitignore_tests") + include = re.compile(r"\.pyi?$") + exclude = re.compile(r"") + root_gitignore = black.files.get_gitignore(path) + report = black.Report() + expected: List[Path] = [ + Path(path / "x.py"), + Path(path / "root/b.py"), + Path(path / "root/c.py"), + Path(path / "root/child/c.py"), + ] + this_abs = THIS_DIR.resolve() + sources = list( + black.gen_python_files( + path.iterdir(), + this_abs, + include, + exclude, + None, + None, + report, + root_gitignore, + verbose=False, + quiet=False, + ) + ) + assert sorted(expected) == sorted(sources) + + def test_nested_gitignore_directly_in_source_directory(self) -> None: + # https://github.com/psf/black/issues/2598 + path = Path(DATA_DIR / "nested_gitignore_tests") + src = Path(path / "root" / "child") + expected = [src / "a.py", src / "c.py"] + assert_collected_sources([src], expected) + + def test_invalid_gitignore(self) -> None: + path = THIS_DIR / "data" / "invalid_gitignore_tests" + empty_config = path / "pyproject.toml" + result = BlackRunner().invoke( + black.main, ["--verbose", "--config", str(empty_config), str(path)] ) + assert result.exit_code == 1 + assert result.stderr_bytes is 
not None
-        source, _ = read_data("blackd_diff.py")
-        expected, _ = read_data("blackd_diff.diff")
+        gitignore = path / ".gitignore"
+        assert f"Could not parse {gitignore}" in result.stderr_bytes.decode()

-        response = await self.client.post(
-            "/", data=source, headers={blackd.DIFF_HEADER: "true"}
+    def test_invalid_nested_gitignore(self) -> None:
+        path = THIS_DIR / "data" / "invalid_nested_gitignore_tests"
+        empty_config = path / "pyproject.toml"
+        result = BlackRunner().invoke(
+            black.main, ["--verbose", "--config", str(empty_config), str(path)]
         )
-        self.assertEqual(response.status, 200)
+        assert result.exit_code == 1
+        assert result.stderr_bytes is not None

-        actual = await response.text()
-        actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
-        self.assertEqual(actual, expected)
+        gitignore = path / "a" / ".gitignore"
+        assert f"Could not parse {gitignore}" in result.stderr_bytes.decode()
+
+    def test_empty_include(self) -> None:
+        path = DATA_DIR / "include_exclude_tests"
+        src = [path]
+        expected = [
+            Path(path / "b/exclude/a.pie"),
+            Path(path / "b/exclude/a.py"),
+            Path(path / "b/exclude/a.pyi"),
+            Path(path / "b/dont_exclude/a.pie"),
+            Path(path / "b/dont_exclude/a.py"),
+            Path(path / "b/dont_exclude/a.pyi"),
+            Path(path / "b/.definitely_exclude/a.pie"),
+            Path(path / "b/.definitely_exclude/a.py"),
+            Path(path / "b/.definitely_exclude/a.pyi"),
+            Path(path / ".gitignore"),
+            Path(path / "pyproject.toml"),
+        ]
+        # Setting exclude explicitly to an empty string to block .gitignore usage.
+        assert_collected_sources(src, expected, include="", exclude="")

-    @skip_if_exception("ClientOSError")
-    @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
-    @unittest_run_loop
-    async def test_blackd_python_variant(self) -> None:
-        code = (
-            "def f(\n"
-            "    and_has_a_bunch_of,\n"
-            "    very_long_arguments_too,\n"
-            "    and_lots_of_them_as_well_lol,\n"
-            "    **and_very_long_keyword_arguments\n"
-            "):\n"
-            "    pass\n"
+    def test_extend_exclude(self) -> None:
+        path = DATA_DIR / "include_exclude_tests"
+        src = [path]
+        expected = [
+            Path(path / "b/exclude/a.py"),
+            Path(path / "b/dont_exclude/a.py"),
+        ]
+        assert_collected_sources(
+            src, expected, exclude=r"\.pyi$", extend_exclude=r"\.definitely_exclude"
         )

-        async def check(header_value: str, expected_status: int) -> None:
-            response = await self.client.post(
-                "/", data=code, headers={blackd.PYTHON_VARIANT_HEADER: header_value}
-            )
-            self.assertEqual(
-                response.status, expected_status, msg=await response.text()
+    @pytest.mark.incompatible_with_mypyc
+    def test_symlink_out_of_root_directory(self) -> None:
+        path = MagicMock()
+        root = THIS_DIR.resolve()
+        child = MagicMock()
+        include = re.compile(black.DEFAULT_INCLUDES)
+        exclude = re.compile(black.DEFAULT_EXCLUDES)
+        report = black.Report()
+        gitignore = PathSpec.from_lines("gitwildmatch", [])
+        # `child` should behave like a symlink whose resolved path is clearly
+        # outside of the `root` directory.
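+        # The mocks below stand in for such an entry: `child` resolves to
+        # /a/b/c, which is not under `root`, and gen_python_files() is
+        # expected to skip it quietly rather than let a ValueError escape.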
+ path.iterdir.return_value = [child] + child.resolve.return_value = Path("/a/b/c") + child.as_posix.return_value = "/a/b/c" + try: + list( + black.gen_python_files( + path.iterdir(), + root, + include, + exclude, + None, + None, + report, + gitignore, + verbose=False, + quiet=False, + ) ) + except ValueError as ve: + pytest.fail(f"`get_python_files_in_dir()` failed: {ve}") + path.iterdir.assert_called_once() + child.resolve.assert_called_once() - await check("3.6", 200) - await check("py3.6", 200) - await check("3.6,3.7", 200) - await check("3.6,py3.7", 200) - await check("py36,py37", 200) - await check("36", 200) - await check("3.6.4", 200) - - await check("2", 204) - await check("2.7", 204) - await check("py2.7", 204) - await check("3.4", 204) - await check("py3.4", 204) - await check("py34,py36", 204) - await check("34", 204) - - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_line_length(self) -> None: - response = await self.client.post( - "/", data=b'print("hello")\n', headers={blackd.LINE_LENGTH_HEADER: "7"} + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_get_sources_with_stdin(self) -> None: + src = ["-"] + expected = ["-"] + assert_collected_sources(src, expected, include="", exclude=r"/exclude/|a\.py") + + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_get_sources_with_stdin_filename(self) -> None: + src = ["-"] + stdin_filename = str(THIS_DIR / "data/collections.py") + expected = [f"__BLACK_STDIN_FILENAME__{stdin_filename}"] + assert_collected_sources( + src, + expected, + exclude=r"/exclude/a\.py", + stdin_filename=stdin_filename, ) - self.assertEqual(response.status, 200) - - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_invalid_line_length(self) -> None: - response = await self.client.post( - "/", data=b'print("hello")\n', headers={blackd.LINE_LENGTH_HEADER: "NaN"} + + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_get_sources_with_stdin_filename_and_exclude(self) -> None: + # Exclude shouldn't exclude stdin_filename since it is mimicking the + # file being passed directly. This is the same as + # test_exclude_for_issue_1572 + path = DATA_DIR / "include_exclude_tests" + src = ["-"] + stdin_filename = str(path / "b/exclude/a.py") + expected = [f"__BLACK_STDIN_FILENAME__{stdin_filename}"] + assert_collected_sources( + src, + expected, + exclude=r"/exclude/|a\.py", + stdin_filename=stdin_filename, ) - self.assertEqual(response.status, 400) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") - @unittest_run_loop - async def test_blackd_response_black_version_header(self) -> None: - response = await self.client.post("/") - self.assertIsNotNone(response.headers.get(blackd.BLACK_VERSION_HEADER)) + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_get_sources_with_stdin_filename_and_extend_exclude(self) -> None: + # Extend exclude shouldn't exclude stdin_filename since it is mimicking the + # file being passed directly. 
This is the same as + # test_exclude_for_issue_1572 + src = ["-"] + path = THIS_DIR / "data" / "include_exclude_tests" + stdin_filename = str(path / "b/exclude/a.py") + expected = [f"__BLACK_STDIN_FILENAME__{stdin_filename}"] + assert_collected_sources( + src, + expected, + extend_exclude=r"/exclude/|a\.py", + stdin_filename=stdin_filename, + ) + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_get_sources_with_stdin_filename_and_force_exclude(self) -> None: + # Force exclude should exclude the file when passing it through + # stdin_filename + path = THIS_DIR / "data" / "include_exclude_tests" + stdin_filename = str(path / "b/exclude/a.py") + assert_collected_sources( + src=["-"], + expected=[], + force_exclude=r"/exclude/|a\.py", + stdin_filename=stdin_filename, + ) -with open(black.__file__, "r", encoding="utf-8") as _bf: - black_source_lines = _bf.readlines() +try: + with open(black.__file__, "r", encoding="utf-8") as _bf: + black_source_lines = _bf.readlines() +except UnicodeDecodeError: + if not black.COMPILED: + raise -def tracefunc(frame: types.FrameType, event: str, arg: Any) -> Callable: + +def tracefunc( + frame: types.FrameType, event: str, arg: Any +) -> Callable[[types.FrameType, str, Any], Any]: """Show function calls `from black/__init__.py` as they happen. Register this with `sys.settrace()` in a test you're debugging. @@ -2127,7 +2190,3 @@ def tracefunc(frame: types.FrameType, event: str, arg: Any) -> Callable: if "black/__init__.py" in filename: print(f"{' ' * stack}{lineno}:{funcname}") return tracefunc - - -if __name__ == "__main__": - unittest.main(module="test_black") diff --git a/tests/test_blackd.py b/tests/test_blackd.py new file mode 100644 index 00000000000..db9a1652f8c --- /dev/null +++ b/tests/test_blackd.py @@ -0,0 +1,228 @@ +import re +from typing import TYPE_CHECKING, Any, Callable, TypeVar +from unittest.mock import patch + +import pytest +from click.testing import CliRunner + +from tests.util import DETERMINISTIC_HEADER, read_data + +try: + from aiohttp import web + from aiohttp.test_utils import AioHTTPTestCase + + import blackd +except ImportError as e: + raise RuntimeError("Please install Black with the 'd' extra") from e + +if TYPE_CHECKING: + F = TypeVar("F", bound=Callable[..., Any]) + + unittest_run_loop: Callable[[F], F] = lambda x: x +else: + try: + from aiohttp.test_utils import unittest_run_loop + except ImportError: + # unittest_run_loop is unnecessary and a no-op since aiohttp 3.8, and + # aiohttp 4 removed it. To maintain compatibility we can make our own + # no-op decorator. 
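+        # The shim mirrors the decorator's call signature but simply hands
+        # the test function back unchanged.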
+ def unittest_run_loop(func, *args, **kwargs): + return func + + +@pytest.mark.blackd +class BlackDTestCase(AioHTTPTestCase): # type: ignore[misc] + def test_blackd_main(self) -> None: + with patch("blackd.web.run_app"): + result = CliRunner().invoke(blackd.main, []) + if result.exception is not None: + raise result.exception + self.assertEqual(result.exit_code, 0) + + async def get_application(self) -> web.Application: + return blackd.make_app() + + @unittest_run_loop + async def test_blackd_request_needs_formatting(self) -> None: + response = await self.client.post("/", data=b"print('hello world')") + self.assertEqual(response.status, 200) + self.assertEqual(response.charset, "utf8") + self.assertEqual(await response.read(), b'print("hello world")\n') + + @unittest_run_loop + async def test_blackd_request_no_change(self) -> None: + response = await self.client.post("/", data=b'print("hello world")\n') + self.assertEqual(response.status, 204) + self.assertEqual(await response.read(), b"") + + @unittest_run_loop + async def test_blackd_request_syntax_error(self) -> None: + response = await self.client.post("/", data=b"what even ( is") + self.assertEqual(response.status, 400) + content = await response.text() + self.assertTrue( + content.startswith("Cannot parse"), + msg=f"Expected error to start with 'Cannot parse', got {repr(content)}", + ) + + @unittest_run_loop + async def test_blackd_unsupported_version(self) -> None: + response = await self.client.post( + "/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "2"} + ) + self.assertEqual(response.status, 501) + + @unittest_run_loop + async def test_blackd_supported_version(self) -> None: + response = await self.client.post( + "/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "1"} + ) + self.assertEqual(response.status, 200) + + @unittest_run_loop + async def test_blackd_invalid_python_variant(self) -> None: + async def check(header_value: str, expected_status: int = 400) -> None: + response = await self.client.post( + "/", + data=b"what", + headers={blackd.PYTHON_VARIANT_HEADER: header_value}, + ) + self.assertEqual(response.status, expected_status) + + await check("lol") + await check("ruby3.5") + await check("pyi3.6") + await check("py1.5") + await check("2") + await check("2.7") + await check("py2.7") + await check("2.8") + await check("py2.8") + await check("3.0") + await check("pypy3.0") + await check("jython3.4") + + @unittest_run_loop + async def test_blackd_pyi(self) -> None: + source, expected = read_data("miscellaneous", "stub.pyi") + response = await self.client.post( + "/", data=source, headers={blackd.PYTHON_VARIANT_HEADER: "pyi"} + ) + self.assertEqual(response.status, 200) + self.assertEqual(await response.text(), expected) + + @unittest_run_loop + async def test_blackd_diff(self) -> None: + diff_header = re.compile( + r"(In|Out)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" + ) + + source, _ = read_data("miscellaneous", "blackd_diff") + expected, _ = read_data("miscellaneous", "blackd_diff.diff") + + response = await self.client.post( + "/", data=source, headers={blackd.DIFF_HEADER: "true"} + ) + self.assertEqual(response.status, 200) + + actual = await response.text() + actual = diff_header.sub(DETERMINISTIC_HEADER, actual) + self.assertEqual(actual, expected) + + @unittest_run_loop + async def test_blackd_python_variant(self) -> None: + code = ( + "def f(\n" + " and_has_a_bunch_of,\n" + " very_long_arguments_too,\n" + " and_lots_of_them_as_well_lol,\n" + " 
**and_very_long_keyword_arguments\n" + "):\n" + " pass\n" + ) + + async def check(header_value: str, expected_status: int) -> None: + response = await self.client.post( + "/", data=code, headers={blackd.PYTHON_VARIANT_HEADER: header_value} + ) + self.assertEqual( + response.status, expected_status, msg=await response.text() + ) + + await check("3.6", 200) + await check("py3.6", 200) + await check("3.6,3.7", 200) + await check("3.6,py3.7", 200) + await check("py36,py37", 200) + await check("36", 200) + await check("3.6.4", 200) + await check("3.4", 204) + await check("py3.4", 204) + await check("py34,py36", 204) + await check("34", 204) + + @unittest_run_loop + async def test_blackd_line_length(self) -> None: + response = await self.client.post( + "/", data=b'print("hello")\n', headers={blackd.LINE_LENGTH_HEADER: "7"} + ) + self.assertEqual(response.status, 200) + + @unittest_run_loop + async def test_blackd_invalid_line_length(self) -> None: + response = await self.client.post( + "/", + data=b'print("hello")\n', + headers={blackd.LINE_LENGTH_HEADER: "NaN"}, + ) + self.assertEqual(response.status, 400) + + @unittest_run_loop + async def test_blackd_preview(self) -> None: + response = await self.client.post( + "/", data=b'print("hello")\n', headers={blackd.PREVIEW: "true"} + ) + self.assertEqual(response.status, 204) + + @unittest_run_loop + async def test_blackd_response_black_version_header(self) -> None: + response = await self.client.post("/") + self.assertIsNotNone(response.headers.get(blackd.BLACK_VERSION_HEADER)) + + @unittest_run_loop + async def test_cors_preflight(self) -> None: + response = await self.client.options( + "/", + headers={ + "Access-Control-Request-Method": "POST", + "Origin": "*", + "Access-Control-Request-Headers": "Content-Type", + }, + ) + self.assertEqual(response.status, 200) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Origin")) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Headers")) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Methods")) + + @unittest_run_loop + async def test_cors_headers_present(self) -> None: + response = await self.client.post("/", headers={"Origin": "*"}) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Origin")) + self.assertIsNotNone(response.headers.get("Access-Control-Expose-Headers")) + + @unittest_run_loop + async def test_preserves_line_endings(self) -> None: + for data in (b"c\r\nc\r\n", b"l\nl\n"): + # test preserved newlines when reformatted + response = await self.client.post("/", data=data + b" ") + self.assertEqual(await response.text(), data.decode()) + # test 204 when no change + response = await self.client.post("/", data=data) + self.assertEqual(response.status, 204) + + @unittest_run_loop + async def test_normalizes_line_endings(self) -> None: + for data, expected in ((b"c\r\nc\n", "c\r\nc\r\n"), (b"l\nl\r\n", "l\nl\n")): + response = await self.client.post("/", data=data) + self.assertEqual(await response.text(), expected) + self.assertEqual(response.status, 200) diff --git a/tests/test_format.py b/tests/test_format.py new file mode 100644 index 00000000000..01cd61eef63 --- /dev/null +++ b/tests/test_format.py @@ -0,0 +1,177 @@ +from dataclasses import replace +from typing import Any, Iterator +from unittest.mock import patch + +import pytest + +import black +from tests.util import ( + DEFAULT_MODE, + PY36_VERSIONS, + all_data_cases, + assert_format, + dump_to_stderr, + read_data, +) + + +@pytest.fixture(autouse=True) +def 
patch_dump_to_file(request: Any) -> Iterator[None]: + with patch("black.dump_to_file", dump_to_stderr): + yield + + +def check_file( + subdir: str, filename: str, mode: black.Mode, *, data: bool = True +) -> None: + source, expected = read_data(subdir, filename, data=data) + assert_format(source, expected, mode, fast=False) + + +@pytest.mark.filterwarnings("ignore:invalid escape sequence.*:DeprecationWarning") +@pytest.mark.parametrize("filename", all_data_cases("simple_cases")) +def test_simple_format(filename: str) -> None: + check_file("simple_cases", filename, DEFAULT_MODE) + + +@pytest.mark.parametrize("filename", all_data_cases("preview")) +def test_preview_format(filename: str) -> None: + magic_trailing_comma = filename != "skip_magic_trailing_comma" + check_file( + "preview", + filename, + black.Mode(preview=True, magic_trailing_comma=magic_trailing_comma), + ) + + +@pytest.mark.parametrize("filename", all_data_cases("preview_39")) +def test_preview_minimum_python_39_format(filename: str) -> None: + source, expected = read_data("preview_39", filename) + mode = black.Mode(preview=True) + assert_format(source, expected, mode, minimum_version=(3, 9)) + + +@pytest.mark.parametrize("filename", all_data_cases("preview_310")) +def test_preview_minimum_python_310_format(filename: str) -> None: + source, expected = read_data("preview_310", filename) + mode = black.Mode(preview=True) + assert_format(source, expected, mode, minimum_version=(3, 10)) + + +# =============== # +# Complex cases +# ============= # + + +def test_empty() -> None: + source = expected = "" + assert_format(source, expected) + + +@pytest.mark.parametrize("filename", all_data_cases("py_36")) +def test_python_36(filename: str) -> None: + source, expected = read_data("py_36", filename) + mode = black.Mode(target_versions=PY36_VERSIONS) + assert_format(source, expected, mode, minimum_version=(3, 6)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_37")) +def test_python_37(filename: str) -> None: + source, expected = read_data("py_37", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY37}) + assert_format(source, expected, mode, minimum_version=(3, 7)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_38")) +def test_python_38(filename: str) -> None: + source, expected = read_data("py_38", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY38}) + assert_format(source, expected, mode, minimum_version=(3, 8)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_39")) +def test_python_39(filename: str) -> None: + source, expected = read_data("py_39", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY39}) + assert_format(source, expected, mode, minimum_version=(3, 9)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_310")) +def test_python_310(filename: str) -> None: + source, expected = read_data("py_310", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY310}) + assert_format(source, expected, mode, minimum_version=(3, 10)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_310")) +def test_python_310_without_target_version(filename: str) -> None: + source, expected = read_data("py_310", filename) + mode = black.Mode() + assert_format(source, expected, mode, minimum_version=(3, 10)) + + +def test_patma_invalid() -> None: + source, expected = read_data("miscellaneous", "pattern_matching_invalid") + mode = black.Mode(target_versions={black.TargetVersion.PY310}) + with 
pytest.raises(black.parsing.InvalidInput) as exc_info: + assert_format(source, expected, mode, minimum_version=(3, 10)) + + exc_info.match("Cannot parse: 10:11") + + +@pytest.mark.parametrize("filename", all_data_cases("py_311")) +def test_python_311(filename: str) -> None: + source, expected = read_data("py_311", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY311}) + assert_format(source, expected, mode, minimum_version=(3, 11)) + + +@pytest.mark.parametrize("filename", all_data_cases("fast")) +def test_fast_cases(filename: str) -> None: + source, expected = read_data("fast", filename) + assert_format(source, expected, fast=True) + + +def test_python_2_hint() -> None: + with pytest.raises(black.parsing.InvalidInput) as exc_info: + assert_format("print 'daylily'", "print 'daylily'") + exc_info.match(black.parsing.PY2_HINT) + + +@pytest.mark.filterwarnings("ignore:invalid escape sequence.*:DeprecationWarning") +def test_docstring_no_string_normalization() -> None: + """Like test_docstring but with string normalization off.""" + source, expected = read_data("miscellaneous", "docstring_no_string_normalization") + mode = replace(DEFAULT_MODE, string_normalization=False) + assert_format(source, expected, mode) + + +def test_preview_docstring_no_string_normalization() -> None: + """ + Like test_docstring but with string normalization off *and* the preview style + enabled. + """ + source, expected = read_data( + "miscellaneous", "docstring_preview_no_string_normalization" + ) + mode = replace(DEFAULT_MODE, string_normalization=False, preview=True) + assert_format(source, expected, mode) + + +def test_long_strings_flag_disabled() -> None: + """Tests for turning off the string processing logic.""" + source, expected = read_data("miscellaneous", "long_strings_flag_disabled") + mode = replace(DEFAULT_MODE, experimental_string_processing=False) + assert_format(source, expected, mode) + + +def test_stub() -> None: + mode = replace(DEFAULT_MODE, is_pyi=True) + source, expected = read_data("miscellaneous", "stub.pyi") + assert_format(source, expected, mode) + + +def test_power_op_newline() -> None: + # requires line_length=0 + source, expected = read_data("miscellaneous", "power_op_newline") + assert_format(source, expected, mode=black.Mode(line_length=0)) diff --git a/tests/test_ipynb.py b/tests/test_ipynb.py new file mode 100644 index 00000000000..7aa2e91dd00 --- /dev/null +++ b/tests/test_ipynb.py @@ -0,0 +1,524 @@ +import contextlib +import pathlib +import re +from contextlib import ExitStack as does_not_raise +from dataclasses import replace +from typing import ContextManager + +import pytest +from _pytest.monkeypatch import MonkeyPatch +from click.testing import CliRunner + +from black import ( + Mode, + NothingChanged, + format_cell, + format_file_contents, + format_file_in_place, + main, +) +from black.handle_ipynb_magics import jupyter_dependencies_are_installed +from tests.util import DATA_DIR, get_case_path, read_jupyter_notebook + +with contextlib.suppress(ModuleNotFoundError): + import IPython +pytestmark = pytest.mark.jupyter +pytest.importorskip("IPython", reason="IPython is an optional dependency") +pytest.importorskip("tokenize_rt", reason="tokenize-rt is an optional dependency") + +JUPYTER_MODE = Mode(is_ipynb=True) + +EMPTY_CONFIG = DATA_DIR / "empty_pyproject.toml" + +runner = CliRunner() + + +def test_noop() -> None: + src = 'foo = "a"' + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.parametrize("fast", 
[True, False]) +def test_trailing_semicolon(fast: bool) -> None: + src = 'foo = "a" ;' + result = format_cell(src, fast=fast, mode=JUPYTER_MODE) + expected = 'foo = "a";' + assert result == expected + + +def test_trailing_semicolon_with_comment() -> None: + src = 'foo = "a" ; # bar' + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = 'foo = "a"; # bar' + assert result == expected + + +def test_trailing_semicolon_with_comment_on_next_line() -> None: + src = "import black;\n\n# this is a comment" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_trailing_semicolon_indented() -> None: + src = "with foo:\n plot_bar();" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_trailing_semicolon_noop() -> None: + src = 'foo = "a";' + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.parametrize( + "mode", + [ + pytest.param(JUPYTER_MODE, id="default mode"), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"cust1", "cust1"}), + id="custom cell magics mode", + ), + ], +) +def test_cell_magic(mode: Mode) -> None: + src = "%%time\nfoo =bar" + result = format_cell(src, fast=True, mode=mode) + expected = "%%time\nfoo = bar" + assert result == expected + + +def test_cell_magic_noop() -> None: + src = "%%time\n2 + 2" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.parametrize( + "mode", + [ + pytest.param(JUPYTER_MODE, id="default mode"), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"cust1", "cust1"}), + id="custom cell magics mode", + ), + ], +) +@pytest.mark.parametrize( + "src, expected", + ( + pytest.param("ls =!ls", "ls = !ls", id="System assignment"), + pytest.param("!ls\n'foo'", '!ls\n"foo"', id="System call"), + pytest.param("!!ls\n'foo'", '!!ls\n"foo"', id="Other system call"), + pytest.param("?str\n'foo'", '?str\n"foo"', id="Help"), + pytest.param("??str\n'foo'", '??str\n"foo"', id="Other help"), + pytest.param( + "%matplotlib inline\n'foo'", + '%matplotlib inline\n"foo"', + id="Line magic with argument", + ), + pytest.param("%time\n'foo'", '%time\n"foo"', id="Line magic without argument"), + pytest.param( + "env = %env var", "env = %env var", id="Assignment to environment variable" + ), + pytest.param("env = %env", "env = %env", id="Assignment to magic"), + ), +) +def test_magic(src: str, expected: str, mode: Mode) -> None: + result = format_cell(src, fast=True, mode=mode) + assert result == expected + + +@pytest.mark.parametrize( + "src", + ( + "%%bash\n2+2", + "%%html --isolated\n2+2", + "%%writefile e.txt\n meh\n meh", + ), +) +def test_non_python_magics(src: str) -> None: + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.skipif( + IPython.version_info < (8, 3), + reason="Change in how TransformerManager transforms this input", +) +def test_set_input() -> None: + src = "a = b??" 
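+    # See the skipif above: on IPython >= 8.3 the transformed cell reduces to
+    # the `??` help lookup, which is why `??b` is the expected result.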
+ expected = "??b" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + assert result == expected + + +def test_input_already_contains_transformed_magic() -> None: + src = '%time foo()\nget_ipython().run_cell_magic("time", "", "foo()\\n")' + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_magic_noop() -> None: + src = "ls = !ls" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_cell_magic_with_magic() -> None: + src = "%%timeit -n1\nls =!ls" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = "%%timeit -n1\nls = !ls" + assert result == expected + + +@pytest.mark.parametrize( + "mode, expected_output, expectation", + [ + pytest.param( + JUPYTER_MODE, + "%%custom_python_magic -n1 -n2\nx=2", + pytest.raises(NothingChanged), + id="No change when cell magic not registered", + ), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"cust1", "cust1"}), + "%%custom_python_magic -n1 -n2\nx=2", + pytest.raises(NothingChanged), + id="No change when other cell magics registered", + ), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"custom_python_magic", "cust1"}), + "%%custom_python_magic -n1 -n2\nx = 2", + does_not_raise(), + id="Correctly change when cell magic registered", + ), + ], +) +def test_cell_magic_with_custom_python_magic( + mode: Mode, expected_output: str, expectation: ContextManager[object] +) -> None: + with expectation: + result = format_cell( + "%%custom_python_magic -n1 -n2\nx=2", + fast=True, + mode=mode, + ) + assert result == expected_output + + +def test_cell_magic_nested() -> None: + src = "%%time\n%%time\n2+2" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = "%%time\n%%time\n2 + 2" + assert result == expected + + +def test_cell_magic_with_magic_noop() -> None: + src = "%%t -n1\nls = !ls" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_automagic() -> None: + src = "pip install black" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_multiline_magic() -> None: + src = "%time 1 + \\\n2" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_multiline_no_magic() -> None: + src = "1 + \\\n2" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = "1 + 2" + assert result == expected + + +def test_cell_magic_with_invalid_body() -> None: + src = "%%time\nif True" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_empty_cell() -> None: + src = "" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_entire_notebook_empty_metadata() -> None: + content = read_jupyter_notebook("jupyter", "notebook_empty_metadata") + result = format_file_contents(content, fast=True, mode=JUPYTER_MODE) + expected = ( + "{\n" + ' "cells": [\n' + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {\n' + ' "tags": []\n' + " },\n" + ' "outputs": [],\n' + ' "source": [\n' + ' "%%time\\n",\n' + ' "\\n",\n' + ' "print(\\"foo\\")"\n' + " ]\n" + " },\n" + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {},\n' + ' "outputs": [],\n' + ' "source": []\n' + " }\n" + " ],\n" + ' "metadata": {},\n' + ' "nbformat": 4,\n' + ' "nbformat_minor": 4\n' + "}\n" + ) + assert result == expected + + +def test_entire_notebook_trailing_newline() 
-> None: + content = read_jupyter_notebook("jupyter", "notebook_trailing_newline") + result = format_file_contents(content, fast=True, mode=JUPYTER_MODE) + expected = ( + "{\n" + ' "cells": [\n' + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {\n' + ' "tags": []\n' + " },\n" + ' "outputs": [],\n' + ' "source": [\n' + ' "%%time\\n",\n' + ' "\\n",\n' + ' "print(\\"foo\\")"\n' + " ]\n" + " },\n" + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {},\n' + ' "outputs": [],\n' + ' "source": []\n' + " }\n" + " ],\n" + ' "metadata": {\n' + ' "interpreter": {\n' + ' "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8"\n' # noqa:B950 + " },\n" + ' "kernelspec": {\n' + ' "display_name": "Python 3.8.10 64-bit (\'black\': venv)",\n' + ' "name": "python3"\n' + " },\n" + ' "language_info": {\n' + ' "name": "python",\n' + ' "version": ""\n' + " }\n" + " },\n" + ' "nbformat": 4,\n' + ' "nbformat_minor": 4\n' + "}\n" + ) + assert result == expected + + +def test_entire_notebook_no_trailing_newline() -> None: + content = read_jupyter_notebook("jupyter", "notebook_no_trailing_newline") + result = format_file_contents(content, fast=True, mode=JUPYTER_MODE) + expected = ( + "{\n" + ' "cells": [\n' + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {\n' + ' "tags": []\n' + " },\n" + ' "outputs": [],\n' + ' "source": [\n' + ' "%%time\\n",\n' + ' "\\n",\n' + ' "print(\\"foo\\")"\n' + " ]\n" + " },\n" + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {},\n' + ' "outputs": [],\n' + ' "source": []\n' + " }\n" + " ],\n" + ' "metadata": {\n' + ' "interpreter": {\n' + ' "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8"\n' # noqa: B950 + " },\n" + ' "kernelspec": {\n' + ' "display_name": "Python 3.8.10 64-bit (\'black\': venv)",\n' + ' "name": "python3"\n' + " },\n" + ' "language_info": {\n' + ' "name": "python",\n' + ' "version": ""\n' + " }\n" + " },\n" + ' "nbformat": 4,\n' + ' "nbformat_minor": 4\n' + "}" + ) + assert result == expected + + +def test_entire_notebook_without_changes() -> None: + content = read_jupyter_notebook("jupyter", "notebook_without_changes") + with pytest.raises(NothingChanged): + format_file_contents(content, fast=True, mode=JUPYTER_MODE) + + +def test_non_python_notebook() -> None: + content = read_jupyter_notebook("jupyter", "non_python_notebook") + + with pytest.raises(NothingChanged): + format_file_contents(content, fast=True, mode=JUPYTER_MODE) + + +def test_empty_string() -> None: + with pytest.raises(NothingChanged): + format_file_contents("", fast=True, mode=JUPYTER_MODE) + + +def test_unparseable_notebook() -> None: + path = get_case_path("jupyter", "notebook_which_cant_be_parsed.ipynb") + msg = rf"File '{re.escape(str(path))}' cannot be parsed as valid Jupyter notebook\." 
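+    # The path is interpolated into a regex (hence the re.escape() above),
+    # which pytest.raises() matches against the exception message.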
+ with pytest.raises(ValueError, match=msg): + format_file_in_place(path, fast=True, mode=JUPYTER_MODE) + + +def test_ipynb_diff_with_change() -> None: + result = runner.invoke( + main, + [ + str(get_case_path("jupyter", "notebook_trailing_newline.ipynb")), + "--diff", + f"--config={EMPTY_CONFIG}", + ], + ) + expected = "@@ -1,3 +1,3 @@\n %%time\n \n-print('foo')\n+print(\"foo\")\n" + assert expected in result.output + + +def test_ipynb_diff_with_no_change() -> None: + result = runner.invoke( + main, + [ + str(get_case_path("jupyter", "notebook_without_changes.ipynb")), + "--diff", + f"--config={EMPTY_CONFIG}", + ], + ) + expected = "1 file would be left unchanged." + assert expected in result.output + + +def test_cache_isnt_written_if_no_jupyter_deps_single( + monkeypatch: MonkeyPatch, tmp_path: pathlib.Path +) -> None: + # Check that the cache isn't written to if Jupyter dependencies aren't installed. + jupyter_dependencies_are_installed.cache_clear() + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + monkeypatch.setattr( + "black.jupyter_dependencies_are_installed", lambda verbose, quiet: False + ) + result = runner.invoke( + main, [str(tmp_path / "notebook.ipynb"), f"--config={EMPTY_CONFIG}"] + ) + assert "No Python files are present to be formatted. Nothing to do" in result.output + jupyter_dependencies_are_installed.cache_clear() + monkeypatch.setattr( + "black.jupyter_dependencies_are_installed", lambda verbose, quiet: True + ) + result = runner.invoke( + main, [str(tmp_path / "notebook.ipynb"), f"--config={EMPTY_CONFIG}"] + ) + assert "reformatted" in result.output + + +def test_cache_isnt_written_if_no_jupyter_deps_dir( + monkeypatch: MonkeyPatch, tmp_path: pathlib.Path +) -> None: + # Check that the cache isn't written to if Jupyter dependencies aren't installed. + jupyter_dependencies_are_installed.cache_clear() + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + monkeypatch.setattr( + "black.files.jupyter_dependencies_are_installed", lambda verbose, quiet: False + ) + result = runner.invoke(main, [str(tmp_path), f"--config={EMPTY_CONFIG}"]) + assert "No Python files are present to be formatted. 
Nothing to do" in result.output + jupyter_dependencies_are_installed.cache_clear() + monkeypatch.setattr( + "black.files.jupyter_dependencies_are_installed", lambda verbose, quiet: True + ) + result = runner.invoke(main, [str(tmp_path), f"--config={EMPTY_CONFIG}"]) + assert "reformatted" in result.output + + +def test_ipynb_flag(tmp_path: pathlib.Path) -> None: + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.a_file_extension_which_is_definitely_not_ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + result = runner.invoke( + main, + [ + str(tmp_nb), + "--diff", + "--ipynb", + f"--config={EMPTY_CONFIG}", + ], + ) + expected = "@@ -1,3 +1,3 @@\n %%time\n \n-print('foo')\n+print(\"foo\")\n" + assert expected in result.output + + +def test_ipynb_and_pyi_flags() -> None: + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + result = runner.invoke( + main, + [ + str(nb), + "--pyi", + "--ipynb", + "--diff", + f"--config={EMPTY_CONFIG}", + ], + ) + assert isinstance(result.exception, SystemExit) + expected = "Cannot pass both `pyi` and `ipynb` flags!\n" + assert result.output == expected + + +def test_unable_to_replace_magics(monkeypatch: MonkeyPatch) -> None: + src = "%%time\na = 'foo'" + monkeypatch.setattr("black.handle_ipynb_magics.TOKEN_HEX", lambda _: "foo") + with pytest.raises( + AssertionError, match="Black was not able to replace IPython magic" + ): + format_cell(src, fast=True, mode=JUPYTER_MODE) diff --git a/tests/test_no_ipynb.py b/tests/test_no_ipynb.py new file mode 100644 index 00000000000..3e0b1593bf0 --- /dev/null +++ b/tests/test_no_ipynb.py @@ -0,0 +1,37 @@ +import pathlib + +import pytest +from click.testing import CliRunner + +from black import jupyter_dependencies_are_installed, main +from tests.util import get_case_path + +pytestmark = pytest.mark.no_jupyter + +runner = CliRunner() + + +def test_ipynb_diff_with_no_change_single() -> None: + jupyter_dependencies_are_installed.cache_clear() + path = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + result = runner.invoke(main, [str(path)]) + expected_output = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + "You can fix this by running ``pip install black[jupyter]``\n" + ) + assert expected_output in result.output + + +def test_ipynb_diff_with_no_change_dir(tmp_path: pathlib.Path) -> None: + jupyter_dependencies_are_installed.cache_clear() + runner = CliRunner() + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + result = runner.invoke(main, [str(tmp_path)]) + expected_output = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + "You can fix this by running ``pip install black[jupyter]``\n" + ) + assert expected_output in result.output diff --git a/tests/test_primer.py b/tests/test_primer.py deleted file mode 100644 index a8ad8a7c5af..00000000000 --- a/tests/test_primer.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env python3 - -import asyncio -import sys -import unittest -from contextlib import contextmanager -from copy import deepcopy -from io import StringIO -from os import getpid -from pathlib import Path -from platform import system -from subprocess import CalledProcessError -from tempfile import TemporaryDirectory, gettempdir -from typing import Any, Callable, Generator, Iterator, Tuple -from unittest.mock import Mock, patch - -from 
click.testing import CliRunner - -from black_primer import cli, lib - - -EXPECTED_ANALYSIS_OUTPUT = """\ --- primer results 📊 -- - -68 / 69 succeeded (98.55%) ✅ -1 / 69 FAILED (1.45%) 💩 - - 0 projects disabled by config - - 0 projects skipped due to Python version - - 0 skipped due to long checkout - -Failed projects: - -## black: - - Returned 69 - - stdout: -Black didn't work - -""" -FAKE_PROJECT_CONFIG = { - "cli_arguments": ["--unittest"], - "expect_formatting_changes": False, - "git_clone_url": "https://github.com/psf/black.git", -} - - -@contextmanager -def capture_stdout(command: Callable, *args: Any, **kwargs: Any) -> Generator: - old_stdout, sys.stdout = sys.stdout, StringIO() - try: - command(*args, **kwargs) - sys.stdout.seek(0) - yield sys.stdout.read() - finally: - sys.stdout = old_stdout - - -@contextmanager -def event_loop() -> Iterator[None]: - policy = asyncio.get_event_loop_policy() - loop = policy.new_event_loop() - asyncio.set_event_loop(loop) - if sys.platform == "win32": - asyncio.set_event_loop(asyncio.ProactorEventLoop()) - try: - yield - finally: - loop.close() - - -async def raise_subprocess_error_1(*args: Any, **kwargs: Any) -> None: - raise CalledProcessError(1, ["unittest", "error"], b"", b"") - - -async def raise_subprocess_error_123(*args: Any, **kwargs: Any) -> None: - raise CalledProcessError(123, ["unittest", "error"], b"", b"") - - -async def return_false(*args: Any, **kwargs: Any) -> bool: - return False - - -async def return_subproccess_output(*args: Any, **kwargs: Any) -> Tuple[bytes, bytes]: - return (b"stdout", b"stderr") - - -async def return_zero(*args: Any, **kwargs: Any) -> int: - return 0 - - -class PrimerLibTests(unittest.TestCase): - def test_analyze_results(self) -> None: - fake_results = lib.Results( - { - "disabled": 0, - "failed": 1, - "skipped_long_checkout": 0, - "success": 68, - "wrong_py_ver": 0, - }, - {"black": CalledProcessError(69, ["black"], b"Black didn't work", b"")}, - ) - with capture_stdout(lib.analyze_results, 69, fake_results) as analyze_stdout: - self.assertEqual(EXPECTED_ANALYSIS_OUTPUT, analyze_stdout) - - @event_loop() - def test_black_run(self) -> None: - """Pretend to run Black to ensure we cater for all scenarios""" - loop = asyncio.get_event_loop() - repo_path = Path(gettempdir()) - project_config = deepcopy(FAKE_PROJECT_CONFIG) - results = lib.Results({"failed": 0, "success": 0}, {}) - - # Test a successful Black run - with patch("black_primer.lib._gen_check_output", return_subproccess_output): - loop.run_until_complete(lib.black_run(repo_path, project_config, results)) - self.assertEqual(1, results.stats["success"]) - self.assertFalse(results.failed_projects) - - # Test a fail based on expecting formatting changes but not getting any - project_config["expect_formatting_changes"] = True - results = lib.Results({"failed": 0, "success": 0}, {}) - with patch("black_primer.lib._gen_check_output", return_subproccess_output): - loop.run_until_complete(lib.black_run(repo_path, project_config, results)) - self.assertEqual(1, results.stats["failed"]) - self.assertTrue(results.failed_projects) - - # Test a fail based on returning 1 and not expecting formatting changes - project_config["expect_formatting_changes"] = False - results = lib.Results({"failed": 0, "success": 0}, {}) - with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1): - loop.run_until_complete(lib.black_run(repo_path, project_config, results)) - self.assertEqual(1, results.stats["failed"]) - self.assertTrue(results.failed_projects) - - # 
Test a formatting error based on returning 123 - with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123): - loop.run_until_complete(lib.black_run(repo_path, project_config, results)) - self.assertEqual(2, results.stats["failed"]) - - @event_loop() - def test_gen_check_output(self) -> None: - loop = asyncio.get_event_loop() - stdout, stderr = loop.run_until_complete( - lib._gen_check_output([lib.BLACK_BINARY, "--help"]) - ) - self.assertTrue("The uncompromising code formatter" in stdout.decode("utf8")) - self.assertEqual(None, stderr) - - # TODO: Add a test to see failure works on Windows - if lib.WINDOWS: - return - - false_bin = "/usr/bin/false" if system() == "Darwin" else "/bin/false" - with self.assertRaises(CalledProcessError): - loop.run_until_complete(lib._gen_check_output([false_bin])) - - with self.assertRaises(asyncio.TimeoutError): - loop.run_until_complete( - lib._gen_check_output(["/bin/sleep", "2"], timeout=0.1) - ) - - @event_loop() - def test_git_checkout_or_rebase(self) -> None: - loop = asyncio.get_event_loop() - project_config = deepcopy(FAKE_PROJECT_CONFIG) - work_path = Path(gettempdir()) - - expected_repo_path = work_path / "black" - with patch("black_primer.lib._gen_check_output", return_subproccess_output): - returned_repo_path = loop.run_until_complete( - lib.git_checkout_or_rebase(work_path, project_config) - ) - self.assertEqual(expected_repo_path, returned_repo_path) - - @patch("sys.stdout", new_callable=StringIO) - @event_loop() - def test_process_queue(self, mock_stdout: Mock) -> None: - loop = asyncio.get_event_loop() - config_path = Path(lib.__file__).parent / "primer.json" - with patch("black_primer.lib.git_checkout_or_rebase", return_false): - with TemporaryDirectory() as td: - return_val = loop.run_until_complete( - lib.process_queue(str(config_path), td, 2) - ) - self.assertEqual(0, return_val) - - -class PrimerCLITests(unittest.TestCase): - @event_loop() - def test_async_main(self) -> None: - loop = asyncio.get_event_loop() - work_dir = Path(gettempdir()) / f"primer_ut_{getpid()}" - args = { - "config": "/config", - "debug": False, - "keep": False, - "long_checkouts": False, - "rebase": False, - "workdir": str(work_dir), - "workers": 69, - } - with patch("black_primer.cli.lib.process_queue", return_zero): - return_val = loop.run_until_complete(cli.async_main(**args)) - self.assertEqual(0, return_val) - - def test_handle_debug(self) -> None: - self.assertTrue(cli._handle_debug(None, None, True)) - - def test_help_output(self) -> None: - runner = CliRunner() - result = runner.invoke(cli.main, ["--help"]) - self.assertEqual(result.exit_code, 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_trans.py b/tests/test_trans.py new file mode 100644 index 00000000000..dce8a939677 --- /dev/null +++ b/tests/test_trans.py @@ -0,0 +1,51 @@ +from typing import List, Tuple + +from black.trans import iter_fexpr_spans + + +def test_fexpr_spans() -> None: + def check( + string: str, expected_spans: List[Tuple[int, int]], expected_slices: List[str] + ) -> None: + spans = list(iter_fexpr_spans(string)) + + # Checking slices isn't strictly necessary, but it's easier to verify at + # a glance than only spans + assert len(spans) == len(expected_slices) + for (i, j), slice in zip(spans, expected_slices): + assert len(string[i:j]) == j - i + assert string[i:j] == slice + + assert spans == expected_spans + + # Most of these test cases omit the leading 'f' and leading / closing quotes + # for convenience + # Some additional 
property-based tests can be found in + # https://github.com/psf/black/pull/2654#issuecomment-981411748 + check("""{var}""", [(0, 5)], ["{var}"]) + check("""f'{var}'""", [(2, 7)], ["{var}"]) + check("""f'{1 + f() + 2 + "asdf"}'""", [(2, 24)], ["""{1 + f() + 2 + "asdf"}"""]) + check("""text {var} text""", [(5, 10)], ["{var}"]) + check("""text {{ {var} }} text""", [(8, 13)], ["{var}"]) + check("""{a} {b} {c}""", [(0, 3), (4, 7), (8, 11)], ["{a}", "{b}", "{c}"]) + check("""f'{a} {b} {c}'""", [(2, 5), (6, 9), (10, 13)], ["{a}", "{b}", "{c}"]) + check("""{ {} }""", [(0, 6)], ["{ {} }"]) + check("""{ {{}} }""", [(0, 8)], ["{ {{}} }"]) + check("""{ {{{}}} }""", [(0, 10)], ["{ {{{}}} }"]) + check("""{{ {{{}}} }}""", [(5, 7)], ["{}"]) + check("""{{ {{{var}}} }}""", [(5, 10)], ["{var}"]) + check("""{f"{0}"}""", [(0, 8)], ["""{f"{0}"}"""]) + check("""{"'"}""", [(0, 5)], ["""{"'"}"""]) + check("""{"{"}""", [(0, 5)], ["""{"{"}"""]) + check("""{"}"}""", [(0, 5)], ["""{"}"}"""]) + check("""{"{{"}""", [(0, 6)], ["""{"{{"}"""]) + check("""{''' '''}""", [(0, 9)], ["""{''' '''}"""]) + check("""{'''{'''}""", [(0, 9)], ["""{'''{'''}"""]) + check("""{''' {'{ '''}""", [(0, 13)], ["""{''' {'{ '''}"""]) + check( + '''f\'\'\'-{f"""*{f"+{f'.{x}.'}+"}*"""}-'y\\'\'\'\'''', + [(5, 33)], + ['''{f"""*{f"+{f'.{x}.'}+"}*"""}'''], + ) + check(r"""{}{""", [(0, 2)], ["{}"]) + check("""f"{'{'''''''''}\"""", [(2, 15)], ["{'{'''''''''}"]) diff --git a/tests/util.py b/tests/util.py new file mode 100644 index 00000000000..d65c2e651ae --- /dev/null +++ b/tests/util.py @@ -0,0 +1,161 @@ +import os +import sys +import unittest +from contextlib import contextmanager +from functools import partial +from pathlib import Path +from typing import Any, Iterator, List, Optional, Tuple + +import black +from black.debug import DebugVisitor +from black.mode import TargetVersion +from black.output import diff, err, out + +PYTHON_SUFFIX = ".py" +ALLOWED_SUFFIXES = (PYTHON_SUFFIX, ".pyi", ".out", ".diff", ".ipynb") + +THIS_DIR = Path(__file__).parent +DATA_DIR = THIS_DIR / "data" +PROJECT_ROOT = THIS_DIR.parent +EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)" +DETERMINISTIC_HEADER = "[Deterministic header]" + +PY36_VERSIONS = { + TargetVersion.PY36, + TargetVersion.PY37, + TargetVersion.PY38, + TargetVersion.PY39, +} + +DEFAULT_MODE = black.Mode() +ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True) +fs = partial(black.format_str, mode=DEFAULT_MODE) + + +def _assert_format_equal(expected: str, actual: str) -> None: + if actual != expected and not os.environ.get("SKIP_AST_PRINT"): + bdv: DebugVisitor[Any] + out("Expected tree:", fg="green") + try: + exp_node = black.lib2to3_parse(expected) + bdv = DebugVisitor() + list(bdv.visit(exp_node)) + except Exception as ve: + err(str(ve)) + out("Actual tree:", fg="red") + try: + exp_node = black.lib2to3_parse(actual) + bdv = DebugVisitor() + list(bdv.visit(exp_node)) + except Exception as ve: + err(str(ve)) + + if actual != expected: + out(diff(expected, actual, "expected", "actual")) + + assert actual == expected + + +def assert_format( + source: str, + expected: str, + mode: black.Mode = DEFAULT_MODE, + *, + fast: bool = False, + minimum_version: Optional[Tuple[int, int]] = None, +) -> None: + """Convenience function to check that Black formats as expected. + + You can pass @minimum_version if you're passing code with newer syntax to guard + safety guards so they don't just crash with a SyntaxError. 
Please note this is + separate from TargetVerson Mode configuration. + """ + actual = black.format_str(source, mode=mode) + _assert_format_equal(expected, actual) + # It's not useful to run safety checks if we're expecting no changes anyway. The + # assertion right above will raise if reality does actually make changes. This just + # avoids wasted CPU cycles. + if not fast and source != expected: + # Unfortunately the AST equivalence check relies on the built-in ast module + # being able to parse the code being formatted. This doesn't always work out + # when checking modern code on older versions. + if minimum_version is None or sys.version_info >= minimum_version: + black.assert_equivalent(source, actual) + black.assert_stable(source, actual, mode=mode) + + +def dump_to_stderr(*output: str) -> str: + return "\n" + "\n".join(output) + "\n" + + +class BlackBaseTestCase(unittest.TestCase): + def assertFormatEqual(self, expected: str, actual: str) -> None: + _assert_format_equal(expected, actual) + + +def get_base_dir(data: bool) -> Path: + return DATA_DIR if data else PROJECT_ROOT + + +def all_data_cases(subdir_name: str, data: bool = True) -> List[str]: + cases_dir = get_base_dir(data) / subdir_name + assert cases_dir.is_dir() + return [case_path.stem for case_path in cases_dir.iterdir()] + + +def get_case_path( + subdir_name: str, name: str, data: bool = True, suffix: str = PYTHON_SUFFIX +) -> Path: + """Get case path from name""" + case_path = get_base_dir(data) / subdir_name / name + if not name.endswith(ALLOWED_SUFFIXES): + case_path = case_path.with_suffix(suffix) + assert case_path.is_file(), f"{case_path} is not a file." + return case_path + + +def read_data(subdir_name: str, name: str, data: bool = True) -> Tuple[str, str]: + """read_data('test_name') -> 'input', 'output'""" + return read_data_from_file(get_case_path(subdir_name, name, data)) + + +def read_data_from_file(file_name: Path) -> Tuple[str, str]: + with open(file_name, "r", encoding="utf8") as test: + lines = test.readlines() + _input: List[str] = [] + _output: List[str] = [] + result = _input + for line in lines: + line = line.replace(EMPTY_LINE, "") + if line.rstrip() == "# output": + result = _output + continue + + result.append(line) + if _input and not _output: + # If there's no output marker, treat the entire file as already pre-formatted. + _output = _input[:] + return "".join(_input).strip() + "\n", "".join(_output).strip() + "\n" + + +def read_jupyter_notebook(subdir_name: str, name: str, data: bool = True) -> str: + return read_jupyter_notebook_from_file( + get_case_path(subdir_name, name, data, suffix=".ipynb") + ) + + +def read_jupyter_notebook_from_file(file_name: Path) -> str: + with open(file_name, mode="rb") as fd: + content_bytes = fd.read() + return content_bytes.decode() + + +@contextmanager +def change_directory(path: Path) -> Iterator[None]: + """Context manager to temporarily chdir to a different directory.""" + previous_dir = os.getcwd() + try: + os.chdir(path) + yield + finally: + os.chdir(previous_dir) diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000000..4934514264b --- /dev/null +++ b/tox.ini @@ -0,0 +1,100 @@ +[tox] +isolated_build = true +envlist = {,ci-}py{37,38,39,310,311,py3},fuzz,run_self + +[testenv] +setenv = PYTHONPATH = {toxinidir}/src +skip_install = True +# We use `recreate=True` because otherwise, on the second run of `tox -e py`, +# the `no_jupyter` tests would run with the jupyter extra dependencies installed. +# See https://github.com/psf/black/issues/2367. 
+recreate = True
+deps =
+    -r{toxinidir}/test_requirements.txt
+; parallelization is disabled on CI because pytest-dev/pytest-xdist#620 occurs too frequently
+; local runs can stay parallelized since they aren't rolling the dice as many times as CI does
+commands =
+    pip install -e .[d]
+    coverage erase
+    pytest tests --run-optional no_jupyter \
+        !ci: --numprocesses auto \
+        --cov {posargs}
+    pip install -e .[jupyter]
+    pytest tests --run-optional jupyter \
+        -m jupyter \
+        !ci: --numprocesses auto \
+        --cov --cov-append {posargs}
+    coverage report
+
+[testenv:{,ci-}pypy3]
+setenv = PYTHONPATH = {toxinidir}/src
+skip_install = True
+recreate = True
+deps =
+    -r{toxinidir}/test_requirements.txt
+; a separate worker is required in ci due to https://foss.heptapod.net/pypy/pypy/-/issues/3317
+; this seems to cause tox to wait forever
+; remove this when pypy releases the bugfix
+commands =
+    pip install -e .[d]
+    coverage erase
+    pytest tests \
+        --run-optional no_jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov {posargs}
+    pip install -e .[jupyter]
+    pytest tests --run-optional jupyter \
+        -m jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov --cov-append {posargs}
+    coverage report
+
+[testenv:{,ci-}311]
+setenv =
+    PYTHONPATH = {toxinidir}/src
+    AIOHTTP_NO_EXTENSIONS = 1
+skip_install = True
+recreate = True
+deps =
+; 3.11 currently needs aiohttp > 3.8.1, which is newer than the latest release on PyPI
+    git+https://github.com/aio-libs/aiohttp
+    -r{toxinidir}/test_requirements.txt
+; a separate worker is required in ci due to https://foss.heptapod.net/pypy/pypy/-/issues/3317
+; this seems to cause tox to wait forever
+; remove this when pypy releases the bugfix
+commands =
+    pip install -e .[d]
+    coverage erase
+    pytest tests \
+        --run-optional no_jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov {posargs}
+    pip install -e .[jupyter]
+    pytest tests --run-optional jupyter \
+        -m jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov --cov-append {posargs}
+    coverage report
+
+[testenv:fuzz]
+skip_install = True
+deps =
+    -r{toxinidir}/test_requirements.txt
+    hypothesmith
+    lark-parser
+commands =
+    pip install -e .[d]
+    coverage erase
+    coverage run {toxinidir}/scripts/fuzz.py
+    coverage report
+
+[testenv:run_self]
+setenv = PYTHONPATH = {toxinidir}/src
+skip_install = True
+commands =
+    pip install -e .[d]
+    black --check {toxinidir}/src {toxinidir}/tests