From a5c42b2db40ea06424efd4b80d4c8ea11cf5bfd8 Mon Sep 17 00:00:00 2001 From: Don Jayamanne Date: Fri, 5 Jul 2024 06:52:30 +1000 Subject: [PATCH 1/7] Support category being undefined (#23745) --- .../base/locators/common/nativePythonFinder.ts | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts index d7ed825728e8..bbd07c7763b4 100644 --- a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts +++ b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts @@ -49,7 +49,7 @@ export interface NativeEnvManagerInfo { export interface NativeGlobalPythonFinder extends Disposable { resolve(executable: string): Promise; refresh(): AsyncIterable; - categoryToKind(category: string): PythonEnvKind; + categoryToKind(category?: string): PythonEnvKind; } interface NativeLog { @@ -79,7 +79,10 @@ class NativeGlobalPythonFinderImpl extends DisposableBase implements NativeGloba return environment; } - categoryToKind(category: string): PythonEnvKind { + categoryToKind(category?: string): PythonEnvKind { + if (!category) { + return PythonEnvKind.Unknown; + } switch (category.toLowerCase()) { case 'conda': return PythonEnvKind.Conda; @@ -109,8 +112,6 @@ class NativeGlobalPythonFinderImpl extends DisposableBase implements NativeGloba return PythonEnvKind.VirtualEnvWrapper; case 'windowsstore': return PythonEnvKind.MicrosoftStore; - case 'unknown': - return PythonEnvKind.Unknown; default: { this.outputChannel.info(`Unknown Python Environment category '${category}' from Native Locator.`); return PythonEnvKind.Unknown; From 5470d60c33c7f16d3240941b6864103fbc50079a Mon Sep 17 00:00:00 2001 From: Don Jayamanne Date: Mon, 8 Jul 2024 19:34:36 +1000 Subject: [PATCH 2/7] Handle & track conda envs not found (#23753) --- .../locators/common/nativePythonFinder.ts | 2 + .../locators/common/nativePythonTelemetry.ts | 40 +++++++++++++++++++ src/client/telemetry/constants.ts | 1 + src/client/telemetry/index.ts | 40 +++++++++++++++++++ 4 files changed, 83 insertions(+) create mode 100644 src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts index bbd07c7763b4..029c131188fa 100644 --- a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts +++ b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts @@ -17,6 +17,7 @@ import { VENVFOLDERS_SETTING_KEY, VENVPATH_SETTING_KEY } from '../lowLevel/custo import { getUserHomeDir } from '../../../../common/utils/platform'; import { createLogOutputChannel } from '../../../../common/vscodeApis/windowApis'; import { PythonEnvKind } from '../../info'; +import { sendNativeTelemetry, NativePythonTelemetry } from './nativePythonTelemetry'; const untildify = require('untildify'); @@ -253,6 +254,7 @@ class NativeGlobalPythonFinderImpl extends DisposableBase implements NativeGloba this.outputChannel.trace(data.message); } }), + connection.onNotification('telemetry', (data: NativePythonTelemetry) => sendNativeTelemetry(data)), connection.onClose(() => { disposables.forEach((d) => d.dispose()); }), diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts new file mode 100644 
index 000000000000..3634ef5008f4 --- /dev/null +++ b/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { traceError } from '../../../../logging'; +import { sendTelemetryEvent } from '../../../../telemetry'; +import { EventName } from '../../../../telemetry/constants'; + +export type NativePythonTelemetry = MissingCondaEnvironments; + +export type MissingCondaEnvironments = { + event: 'MissingCondaEnvironments'; + data: { + missing: number; + userProvidedCondaExe?: boolean; + rootPrefixNotFound?: boolean; + condaPrefixNotFound?: boolean; + condaManagerNotFound?: boolean; + sysRcNotFound?: boolean; + userRcNotFound?: boolean; + otherRcNotFound?: boolean; + missingEnvDirsFromSysRc?: number; + missingEnvDirsFromUserRc?: number; + missingEnvDirsFromOtherRc?: number; + missingFromSysRcEnvDirs?: number; + missingFromUserRcEnvDirs?: number; + missingFromOtherRcEnvDirs?: number; + }; +}; + +export function sendNativeTelemetry(data: NativePythonTelemetry): void { + switch (data.event) { + case 'MissingCondaEnvironments': { + sendTelemetryEvent(EventName.NATIVE_FINDER_MISSING_CONDA_ENVS, undefined, data.data); + break; + } + default: { + traceError(`Unhandled Telemetry Event type ${data.event}`); + } + } +} diff --git a/src/client/telemetry/constants.ts b/src/client/telemetry/constants.ts index c24f179baed1..48ed3195d4e4 100644 --- a/src/client/telemetry/constants.ts +++ b/src/client/telemetry/constants.ts @@ -19,6 +19,7 @@ export enum EventName { ENVIRONMENT_WITHOUT_PYTHON_SELECTED = 'ENVIRONMENT_WITHOUT_PYTHON_SELECTED', PYTHON_ENVIRONMENTS_API = 'PYTHON_ENVIRONMENTS_API', PYTHON_INTERPRETER_DISCOVERY = 'PYTHON_INTERPRETER_DISCOVERY', + NATIVE_FINDER_MISSING_CONDA_ENVS = 'NATIVE_FINDER_MISSING_CONDA_ENVS', PYTHON_INTERPRETER_DISCOVERY_INVALID_NATIVE = 'PYTHON_INTERPRETER_DISCOVERY_INVALID_NATIVE', PYTHON_INTERPRETER_AUTO_SELECTION = 'PYTHON_INTERPRETER_AUTO_SELECTION', PYTHON_INTERPRETER_ACTIVATION_ENVIRONMENT_VARIABLES = 'PYTHON_INTERPRETER.ACTIVATION_ENVIRONMENT_VARIABLES', diff --git a/src/client/telemetry/index.ts b/src/client/telemetry/index.ts index 4dcffdfcc81e..e0a90e9192e6 100644 --- a/src/client/telemetry/index.ts +++ b/src/client/telemetry/index.ts @@ -1396,6 +1396,46 @@ export interface IEventNamePropertyMapping { */ missingNativeOtherGlobalEnvs?: number; }; + /** + * Telemetry event sent when Native finder fails to find some conda envs. 
+ */ + /* __GDPR__ + "native_finder_missing_conda_envs" : { + "missing" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + "userProvidedCondaExe" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" }, + "rootPrefixNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" }, + "condaPrefixNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" }, + "condaManagerNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" }, + "missingEnvDirsFromSysRc" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + "missingEnvDirsFromUserRc" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + "missingEnvDirsFromOtherRc" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + "missingFromSysRcEnvDirs" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + "missingFromUserRcEnvDirs" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + "missingFromOtherRcEnvDirs" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + } + */ + [EventName.NATIVE_FINDER_MISSING_CONDA_ENVS]: { + /** + * Number of missing conda environments. + */ + missing: number; + /** + * Whether a conda exe was provided by the user. + */ + userProvidedCondaExe?: boolean; + rootPrefixNotFound?: boolean; + condaPrefixNotFound?: boolean; + condaManagerNotFound?: boolean; + sysRcNotFound?: boolean; + userRcNotFound?: boolean; + otherRcNotFound?: boolean; + missingEnvDirsFromSysRc?: number; + missingEnvDirsFromUserRc?: number; + missingEnvDirsFromOtherRc?: number; + missingFromSysRcEnvDirs?: number; + missingFromUserRcEnvDirs?: number; + missingFromOtherRcEnvDirs?: number; + }; /** * Telemetry event sent when discovery of all python environments using the native locator(virtualenv, conda, pipenv etc.) finishes. 
*/ From d8ae5750d402cdffacbdfc07010eb98184ccd6b2 Mon Sep 17 00:00:00 2001 From: Don Jayamanne Date: Tue, 9 Jul 2024 08:32:05 +1000 Subject: [PATCH 3/7] Add more data for conda envs not found (#23770) --- .../locators/common/nativePythonTelemetry.ts | 37 +++++++++------- src/client/telemetry/index.ts | 43 +++++++++++++++++++ 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts index 3634ef5008f4..b693f81e7e38 100644 --- a/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts +++ b/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts @@ -10,27 +10,34 @@ export type NativePythonTelemetry = MissingCondaEnvironments; export type MissingCondaEnvironments = { event: 'MissingCondaEnvironments'; data: { - missing: number; - userProvidedCondaExe?: boolean; - rootPrefixNotFound?: boolean; - condaPrefixNotFound?: boolean; - condaManagerNotFound?: boolean; - sysRcNotFound?: boolean; - userRcNotFound?: boolean; - otherRcNotFound?: boolean; - missingEnvDirsFromSysRc?: number; - missingEnvDirsFromUserRc?: number; - missingEnvDirsFromOtherRc?: number; - missingFromSysRcEnvDirs?: number; - missingFromUserRcEnvDirs?: number; - missingFromOtherRcEnvDirs?: number; + missingCondaEnvironments: { + missing: number; + envDirsNotFound?: number; + userProvidedCondaExe?: boolean; + rootPrefixNotFound?: boolean; + condaPrefixNotFound?: boolean; + condaManagerNotFound?: boolean; + sysRcNotFound?: boolean; + userRcNotFound?: boolean; + otherRcNotFound?: boolean; + missingEnvDirsFromSysRc?: number; + missingEnvDirsFromUserRc?: number; + missingEnvDirsFromOtherRc?: number; + missingFromSysRcEnvDirs?: number; + missingFromUserRcEnvDirs?: number; + missingFromOtherRcEnvDirs?: number; + }; }; }; export function sendNativeTelemetry(data: NativePythonTelemetry): void { switch (data.event) { case 'MissingCondaEnvironments': { - sendTelemetryEvent(EventName.NATIVE_FINDER_MISSING_CONDA_ENVS, undefined, data.data); + sendTelemetryEvent( + EventName.NATIVE_FINDER_MISSING_CONDA_ENVS, + undefined, + data.data.missingCondaEnvironments, + ); break; } default: { diff --git a/src/client/telemetry/index.ts b/src/client/telemetry/index.ts index e0a90e9192e6..ade7ec8a8c15 100644 --- a/src/client/telemetry/index.ts +++ b/src/client/telemetry/index.ts @@ -1402,6 +1402,7 @@ export interface IEventNamePropertyMapping { /* __GDPR__ "native_finder_missing_conda_envs" : { "missing" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, + "envDirsNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" }, "userProvidedCondaExe" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" }, "rootPrefixNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" }, "condaPrefixNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" }, @@ -1419,21 +1420,63 @@ export interface IEventNamePropertyMapping { * Number of missing conda environments. */ missing: number; + /** + * Total number of env_dirs not found even after parsing the conda_rc files. + * This will tell us that we are either unable to parse some of the conda_rc files or there are other + * env_dirs that we are not able to find. 
+         */
+        envDirsNotFound?: number;
        /**
         * Whether a conda exe was provided by the user.
         */
        userProvidedCondaExe?: boolean;
+        /**
+         * Whether the root prefix returned by conda was not found by us.
+         */
        rootPrefixNotFound?: boolean;
+        /**
+         * Whether the conda prefix returned by conda was not found by us.
+         */
        condaPrefixNotFound?: boolean;
+        /**
+         * Whether we found a conda manager or not.
+         */
        condaManagerNotFound?: boolean;
+        /**
+         * Whether we failed to find the system rc path.
+         */
        sysRcNotFound?: boolean;
+        /**
+         * Whether we failed to find the user rc path.
+         */
        userRcNotFound?: boolean;
+        /**
+         * Whether config files other than the sys and user rc files were not found.
+         */
        otherRcNotFound?: boolean;
+        /**
+         * Number of conda envs that were not found by us, and the envs belong to env_dirs in the sys config rc.
+         */
        missingEnvDirsFromSysRc?: number;
+        /**
+         * Number of conda envs that were not found by us, and the envs belong to env_dirs in the user config rc.
+         */
        missingEnvDirsFromUserRc?: number;
+        /**
+         * Number of conda envs that were not found by us, and the envs belong to env_dirs in the other config rc.
+         */
        missingEnvDirsFromOtherRc?: number;
+        /**
+         * Number of conda envs that were not found by us, and the envs belong to env_dirs in the sys config rc.
+         */
        missingFromSysRcEnvDirs?: number;
+        /**
+         * Number of conda envs that were not found by us, and the envs belong to env_dirs in the user config rc.
+         */
        missingFromUserRcEnvDirs?: number;
+        /**
+         * Number of conda envs that were not found by us, and the envs belong to env_dirs in the other config rc.
+         */
        missingFromOtherRcEnvDirs?: number;
    };
    /**
     * Telemetry event sent when discovery of all python environments using the native locator(virtualenv, conda, pipenv etc.) finishes.
     */

From a5c539d7074d9328ff847f7f29475c2cab35acb6 Mon Sep 17 00:00:00 2001
From: Don Jayamanne
Date: Tue, 9 Jul 2024 12:36:35 +1000
Subject: [PATCH 4/7] Determine reasons for Poetry find failures (#23771)

---
 .../locators/common/nativePythonTelemetry.ts | 30 +++++++++-
 src/client/telemetry/constants.ts | 1 +
 src/client/telemetry/index.ts | 59 +++++++++++++++++++
 3 files changed, 88 insertions(+), 2 deletions(-)

diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts
index b693f81e7e38..1bedbaf23699 100644
--- a/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts
+++ b/src/client/pythonEnvironments/base/locators/common/nativePythonTelemetry.ts
@@ -5,7 +5,7 @@ import { traceError } from '../../../../logging';
import { sendTelemetryEvent } from '../../../../telemetry';
import { EventName } from '../../../../telemetry/constants';
-export type NativePythonTelemetry = MissingCondaEnvironments;
+export type NativePythonTelemetry = MissingCondaEnvironments | MissingPoetryEnvironments;
export type MissingCondaEnvironments = {
    event: 'MissingCondaEnvironments';
    data: {
@@ -30,6 +30,24 @@ export type MissingCondaEnvironments = {
    };
};
+export type MissingPoetryEnvironments = {
+    event: 'MissingPoetryEnvironments';
+    data: {
+        missingPoetryEnvironments: {
+            missing: number;
+            missingInPath: number;
+            userProvidedPoetryExe?: boolean;
+            poetryExeNotFound?: boolean;
+            globalConfigNotFound?: boolean;
+            cacheDirNotFound?: boolean;
+            cacheDirIsDifferent?: boolean;
+            virtualenvsPathNotFound?: boolean;
+            virtualenvsPathIsDifferent?: boolean;
+            inProjectIsDifferent?: boolean;
+        };
+    };
+};
+
export function sendNativeTelemetry(data: NativePythonTelemetry):
void {
    switch (data.event) {
        case 'MissingCondaEnvironments': {
@@ -40,8 +58,16 @@ export function sendNativeTelemetry(data: NativePythonTelemetry): void {
            );
            break;
        }
+        case 'MissingPoetryEnvironments': {
+            sendTelemetryEvent(
+                EventName.NATIVE_FINDER_MISSING_POETRY_ENVS,
+                undefined,
+                data.data.missingPoetryEnvironments,
+            );
+            break;
+        }
        default: {
-            traceError(`Unhandled Telemetry Event type ${data.event}`);
+            traceError(`Unhandled Telemetry Event type ${JSON.stringify(data)}`);
        }
    }
}
diff --git a/src/client/telemetry/constants.ts b/src/client/telemetry/constants.ts
index 48ed3195d4e4..69c3a58385d0 100644
--- a/src/client/telemetry/constants.ts
+++ b/src/client/telemetry/constants.ts
@@ -20,6 +20,7 @@ export enum EventName {
    PYTHON_ENVIRONMENTS_API = 'PYTHON_ENVIRONMENTS_API',
    PYTHON_INTERPRETER_DISCOVERY = 'PYTHON_INTERPRETER_DISCOVERY',
    NATIVE_FINDER_MISSING_CONDA_ENVS = 'NATIVE_FINDER_MISSING_CONDA_ENVS',
+    NATIVE_FINDER_MISSING_POETRY_ENVS = 'NATIVE_FINDER_MISSING_POETRY_ENVS',
    PYTHON_INTERPRETER_DISCOVERY_INVALID_NATIVE = 'PYTHON_INTERPRETER_DISCOVERY_INVALID_NATIVE',
    PYTHON_INTERPRETER_AUTO_SELECTION = 'PYTHON_INTERPRETER_AUTO_SELECTION',
    PYTHON_INTERPRETER_ACTIVATION_ENVIRONMENT_VARIABLES = 'PYTHON_INTERPRETER.ACTIVATION_ENVIRONMENT_VARIABLES',
diff --git a/src/client/telemetry/index.ts b/src/client/telemetry/index.ts
index ade7ec8a8c15..e8e26f884dfe 100644
--- a/src/client/telemetry/index.ts
+++ b/src/client/telemetry/index.ts
@@ -1479,6 +1479,65 @@ export interface IEventNamePropertyMapping {
         */
        missingFromOtherRcEnvDirs?: number;
    };
+    /**
+     * Telemetry event sent when Native finder fails to find some poetry envs.
+     */
+    /* __GDPR__
+       "native_finder_missing_poetry_envs" : {
+          "missing" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" },
+          "missingInPath" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true, "owner": "donjayamanne" },
+          "userProvidedPoetryExe" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+          "poetryExeNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+          "globalConfigNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+          "cacheDirNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+          "cacheDirIsDifferent" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+          "virtualenvsPathNotFound" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+          "virtualenvsPathIsDifferent" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+          "inProjectIsDifferent" : { "classification": "SystemMetaData", "purpose": "FeatureInsight", "owner": "donjayamanne" },
+       }
+     */
+    [EventName.NATIVE_FINDER_MISSING_POETRY_ENVS]: {
+        /**
+         * Number of missing poetry environments.
+         */
+        missing: number;
+        /**
+         * Total number of missing envs, where the envs are created in the virtualenvs_path directory.
+         */
+        missingInPath: number;
+        /**
+         * Whether a poetry exe was provided by the user.
+         */
+        userProvidedPoetryExe?: boolean;
+        /**
+         * Whether poetry exe was not found.
+         */
+        poetryExeNotFound?: boolean;
+        /**
+         * Whether poetry config was not found.
+         */
+        globalConfigNotFound?: boolean;
+        /**
+         * Whether cache_dir was not found.
+         */
+        cacheDirNotFound?: boolean;
+        /**
+         * Whether cache_dir found was different from that returned by poetry exe.
+ */ + cacheDirIsDifferent?: boolean; + /** + * Whether virtualenvs.path was not found. + */ + virtualenvsPathNotFound?: boolean; + /** + * Whether virtualenvs.path found was different from that returned by poetry exe. + */ + virtualenvsPathIsDifferent?: boolean; + /** + * Whether virtualenvs.in-project found was different from that returned by poetry exe. + */ + inProjectIsDifferent?: boolean; + }; /** * Telemetry event sent when discovery of all python environments using the native locator(virtualenv, conda, pipenv etc.) finishes. */ From 462b9bf2cbb98450f9a47e4627cd39d400968405 Mon Sep 17 00:00:00 2001 From: Brett Cannon Date: Tue, 9 Jul 2024 13:01:15 -0700 Subject: [PATCH 5/7] Enable explicit Ruff check rules (#23741) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: RafaƂ <23004737+rafrafek@users.noreply.github.com> --- build/ci/addEnvPath.py | 3 +- build/update_ext_version.py | 2 +- python_files/create_conda.py | 19 +- python_files/create_microvenv.py | 6 +- python_files/create_venv.py | 24 +- python_files/download_get_pip.py | 8 +- python_files/get_output_via_markers.py | 4 +- python_files/installed_check.py | 4 +- python_files/linter.py | 1 - python_files/normalizeSelection.py | 42 +-- python_files/printEnvVariables.py | 2 +- python_files/printEnvVariablesToFile.py | 2 +- python_files/pyproject.toml | 63 +++- python_files/python_server.py | 45 +-- python_files/pythonrc.py | 18 +- python_files/run-jedi-language-server.py | 7 +- python_files/shell_exec.py | 6 +- python_files/tensorboard_launcher.py | 18 +- .../testing_tools/adapter/__main__.py | 9 +- .../testing_tools/adapter/discovery.py | 6 +- python_files/testing_tools/adapter/errors.py | 8 +- python_files/testing_tools/adapter/info.py | 30 +- .../testing_tools/adapter/pytest/__init__.py | 1 - .../testing_tools/adapter/pytest/_cli.py | 1 - .../adapter/pytest/_discovery.py | 27 +- .../adapter/pytest/_pytest_item.py | 41 +-- python_files/testing_tools/adapter/report.py | 15 +- python_files/testing_tools/adapter/util.py | 18 +- .../testing_tools/process_json_util.py | 2 +- python_files/testing_tools/run_adapter.py | 12 +- python_files/testing_tools/socket_manager.py | 11 +- .../testing_tools/unittest_discovery.py | 15 +- python_files/testlauncher.py | 19 +- python_files/tests/__init__.py | 1 + python_files/tests/__main__.py | 5 +- .../expected_discovery_test_output.py | 13 +- python_files/tests/pytestadapter/helpers.py | 37 +- .../tests/pytestadapter/test_discovery.py | 66 ++-- .../tests/pytestadapter/test_execution.py | 36 +- .../tests/pytestadapter/test_utils.py | 6 +- python_files/tests/run_all.py | 5 +- python_files/tests/test_create_conda.py | 3 +- python_files/tests/test_create_venv.py | 4 +- python_files/tests/test_dynamic_cursor.py | 43 +-- python_files/tests/test_installed_check.py | 9 +- .../tests/test_normalize_selection.py | 20 +- python_files/tests/test_shell_integration.py | 13 +- python_files/tests/test_smart_selection.py | 66 +--- .../testing_tools/adapter/pytest/test_cli.py | 8 +- .../adapter/pytest/test_discovery.py | 346 ++++++++---------- .../testing_tools/adapter/test___main__.py | 5 +- .../testing_tools/adapter/test_discovery.py | 33 +- .../testing_tools/adapter/test_functional.py | 57 +-- .../testing_tools/adapter/test_report.py | 92 ++--- .../tests/testing_tools/adapter/test_util.py | 23 +- .../expected_discovery_test_output.py | 3 +- .../tests/unittestadapter/test_discovery.py | 23 +- .../tests/unittestadapter/test_execution.py | 57 ++- 
.../tests/unittestadapter/test_utils.py | 23 +- python_files/tests/util.py | 6 +- python_files/unittestadapter/discovery.py | 10 +- python_files/unittestadapter/execution.py | 89 +++-- python_files/unittestadapter/pvsc_utils.py | 29 +- python_files/visualstudio_py_testlauncher.py | 84 ++--- .../vscode_datascience_helpers/__init__.py | 0 .../tests/__init__.py | 0 .../tests/logParser.py | 79 ++-- python_files/vscode_pytest/__init__.py | 139 ++++--- .../vscode_pytest/run_pytest_script.py | 17 +- 69 files changed, 893 insertions(+), 1046 deletions(-) create mode 100644 python_files/vscode_datascience_helpers/__init__.py create mode 100644 python_files/vscode_datascience_helpers/tests/__init__.py diff --git a/build/ci/addEnvPath.py b/build/ci/addEnvPath.py index abad9ec3b5c9..66eff2a7b25d 100644 --- a/build/ci/addEnvPath.py +++ b/build/ci/addEnvPath.py @@ -3,7 +3,8 @@ #Adds the virtual environment's executable path to json file -import json,sys +import json +import sys import os.path jsonPath = sys.argv[1] key = sys.argv[2] diff --git a/build/update_ext_version.py b/build/update_ext_version.py index fe2b6ae0b81c..6d709ae05f7f 100644 --- a/build/update_ext_version.py +++ b/build/update_ext_version.py @@ -86,7 +86,7 @@ def main(package_json: pathlib.Path, argv: Sequence[str]) -> None: raise ValueError( f"Major version [{major}] must be the current year [{current_year}].", f"If changing major version after new year's, change to {current_year}.1.0", - f"Minor version must be updated based on release or pre-release channel.", + "Minor version must be updated based on release or pre-release channel.", ) if args.release and not is_even(minor): diff --git a/python_files/create_conda.py b/python_files/create_conda.py index 15320a8a1ce6..284f734081b2 100644 --- a/python_files/create_conda.py +++ b/python_files/create_conda.py @@ -48,19 +48,19 @@ def parse_args(argv: Sequence[str]) -> argparse.Namespace: def file_exists(path: Union[str, pathlib.PurePath]) -> bool: - return os.path.exists(path) + return os.path.exists(path) # noqa: PTH110 def conda_env_exists(name: Union[str, pathlib.PurePath]) -> bool: - return os.path.exists(CWD / name) + return os.path.exists(CWD / name) # noqa: PTH110 def run_process(args: Sequence[str], error_message: str) -> None: try: print("Running: " + " ".join(args)) - subprocess.run(args, cwd=os.getcwd(), check=True) - except subprocess.CalledProcessError: - raise VenvError(error_message) + subprocess.run(args, cwd=os.getcwd(), check=True) # noqa: PTH109 + except subprocess.CalledProcessError as exc: + raise VenvError(error_message) from exc def get_conda_env_path(name: str) -> str: @@ -89,11 +89,10 @@ def install_packages(env_path: str) -> None: def add_gitignore(name: str) -> None: - git_ignore = os.fspath(CWD / name / ".gitignore") - if not file_exists(git_ignore): - print(f"Creating: {git_ignore}") - with open(git_ignore, "w") as f: - f.write("*") + git_ignore = CWD / name / ".gitignore" + if not git_ignore.is_file(): + print(f"Creating: {os.fsdecode(git_ignore)}") + git_ignore.write_text("*") def main(argv: Optional[Sequence[str]] = None) -> None: diff --git a/python_files/create_microvenv.py b/python_files/create_microvenv.py index 10eae38ab977..2f2135444bc1 100644 --- a/python_files/create_microvenv.py +++ b/python_files/create_microvenv.py @@ -20,9 +20,9 @@ class MicroVenvError(Exception): def run_process(args: Sequence[str], error_message: str) -> None: try: print("Running: " + " ".join(args)) - subprocess.run(args, cwd=os.getcwd(), check=True) - except 
subprocess.CalledProcessError:
-        raise MicroVenvError(error_message)
+        subprocess.run(args, cwd=os.getcwd(), check=True)  # noqa: PTH109
+    except subprocess.CalledProcessError as exc:
+        raise MicroVenvError(error_message) from exc
def parse_args(argv: Sequence[str]) -> argparse.Namespace:
diff --git a/python_files/create_venv.py b/python_files/create_venv.py
index 94724923cda5..020c119fc1d5 100644
--- a/python_files/create_venv.py
+++ b/python_files/create_venv.py
@@ -89,9 +89,9 @@ def venv_exists(name: str) -> bool:
def run_process(args: Sequence[str], error_message: str) -> None:
    try:
        print("Running: " + " ".join(args))
-        subprocess.run(args, cwd=os.getcwd(), check=True)
-    except subprocess.CalledProcessError:
-        raise VenvError(error_message)
+        subprocess.run(args, cwd=os.getcwd(), check=True)  # noqa: PTH109
+    except subprocess.CalledProcessError as exc:
+        raise VenvError(error_message) from exc
def get_venv_path(name: str) -> str:
@@ -136,10 +136,9 @@ def upgrade_pip(venv_path: str) -> None:
def add_gitignore(name: str) -> None:
    git_ignore = CWD / name / ".gitignore"
-    if not file_exists(git_ignore):
-        print("Creating: " + os.fspath(git_ignore))
-        with open(git_ignore, "w") as f:
-            f.write("*")
+    if not git_ignore.is_file():
+        print("Creating:", os.fspath(git_ignore))
+        git_ignore.write_text("*")
def download_pip_pyz(name: str):
@@ -148,13 +147,10 @@ def download_pip_pyz(name: str):
    try:
        with url_lib.urlopen(url) as response:
-            pip_pyz_path = os.fspath(CWD / name / "pip.pyz")
-            with open(pip_pyz_path, "wb") as out_file:
-                data = response.read()
-                out_file.write(data)
-                out_file.flush()
+            pip_pyz_path = CWD / name / "pip.pyz"
+            pip_pyz_path.write_bytes(data=response.read())
-    except Exception:
-        raise VenvError("CREATE_VENV.DOWNLOAD_PIP_FAILED")
+    except Exception as exc:
+        raise VenvError("CREATE_VENV.DOWNLOAD_PIP_FAILED") from exc
def install_pip(name: str):
diff --git a/python_files/download_get_pip.py b/python_files/download_get_pip.py
index 0df610ef3547..91ab107760d8 100644
--- a/python_files/download_get_pip.py
+++ b/python_files/download_get_pip.py
@@ -2,9 +2,9 @@
# Licensed under the MIT License.
import json
-import os
import pathlib
import urllib.request as url_lib
+
from packaging.version import parse as version_parser
EXTENSION_ROOT = pathlib.Path(__file__).parent.parent
@@ -14,7 +14,7 @@
def _get_package_data():
-    json_uri = "https://pypi.org/pypi/{0}/json".format(PIP_PACKAGE)
+    json_uri = f"https://pypi.org/pypi/{PIP_PACKAGE}/json"
    # Response format: https://warehouse.readthedocs.io/api-reference/json/#project
    # Release metadata format: https://github.com/pypa/interoperability-peps/blob/master/pep-0426-core-metadata.rst
    with url_lib.urlopen(json_uri) as response:
@@ -22,12 +22,12 @@ def _get_package_data():
def _download_and_save(root, version):
-    root = os.getcwd() if root is None or root == "." else root
+    root = pathlib.Path.cwd() if root is None or root == "."
else pathlib.Path(root) url = f"https://raw.githubusercontent.com/pypa/get-pip/{version}/public/get-pip.py" print(url) with url_lib.urlopen(url) as response: data = response.read() - get_pip_file = pathlib.Path(root) / "get-pip.py" + get_pip_file = root / "get-pip.py" get_pip_file.write_bytes(data) diff --git a/python_files/get_output_via_markers.py b/python_files/get_output_via_markers.py index 00dd57065b3c..e37f7f8c5df0 100644 --- a/python_files/get_output_via_markers.py +++ b/python_files/get_output_via_markers.py @@ -18,9 +18,9 @@ del sys.argv[0] exec(code, ns, ns) elif module.startswith("-m"): - moduleName = sys.argv[2] + module_name = sys.argv[2] sys.argv = sys.argv[2:] # It should begin with the module name. - runpy.run_module(moduleName, run_name="__main__", alter_sys=True) + runpy.run_module(module_name, run_name="__main__", alter_sys=True) elif module.endswith(".py"): sys.argv = sys.argv[1:] runpy.run_path(module, run_name="__main__") diff --git a/python_files/installed_check.py b/python_files/installed_check.py index 6dafe23b5121..4fa3cdbb2385 100644 --- a/python_files/installed_check.py +++ b/python_files/installed_check.py @@ -36,9 +36,7 @@ def parse_args(argv: Optional[Sequence[str]] = None): def parse_requirements(line: str) -> Optional[Requirement]: try: req = Requirement(line.strip("\\")) - if req.marker is None: - return req - elif req.marker.evaluate(): + if req.marker is None or req.marker.evaluate(): return req except Exception: pass diff --git a/python_files/linter.py b/python_files/linter.py index af9634f83f4b..edbbe9dfafe5 100644 --- a/python_files/linter.py +++ b/python_files/linter.py @@ -1,7 +1,6 @@ import subprocess import sys - linter_settings = { "pylint": { "args": ["--reports=n", "--output-format=json"], diff --git a/python_files/normalizeSelection.py b/python_files/normalizeSelection.py index 71d28bb9c35c..981251289e57 100644 --- a/python_files/normalizeSelection.py +++ b/python_files/normalizeSelection.py @@ -21,12 +21,11 @@ def split_lines(source): def _get_statements(selection): - """ - Process a multiline selection into a list of its top-level statements. + """Process a multiline selection into a list of its top-level statements. + This will remove empty newlines around and within the selection, dedent it, and split it using the result of `ast.parse()`. """ - # Remove blank lines within the selection to prevent the REPL from thinking the block is finished. lines = (line for line in split_lines(selection) if line.strip() != "") @@ -57,7 +56,7 @@ def _get_statements(selection): # Also, not all AST objects can have decorators. if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): # Using getattr instead of node.decorator_list or pyright will complain about an unknown member. - line_end -= len(getattr(node, "decorator_list")) + line_end -= len(getattr(node, "decorator_list")) # noqa: B009 ends.append(line_end) ends.append(len(lines)) @@ -74,7 +73,7 @@ def _get_statements(selection): # Special handling of decorators similar to what's above. if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): # Using getattr instead of node.decorator_list or pyright will complain about an unknown member. - start -= len(getattr(node, "decorator_list")) + start -= len(getattr(node, "decorator_list")) # noqa: B009 block = "\n".join(lines[start:end]) # If the block is multiline, add an extra newline character at its end. 
@@ -134,18 +133,16 @@ def normalize_lines(selection): def check_exact_exist(top_level_nodes, start_line, end_line): - exact_nodes = [] - for node in top_level_nodes: - if node.lineno == start_line and node.end_lineno == end_line: - exact_nodes.append(node) + return [ + node + for node in top_level_nodes + if node.lineno == start_line and node.end_lineno == end_line + ] - return exact_nodes +def traverse_file(whole_file_content, start_line, end_line, was_highlighted): # noqa: ARG001 + """Intended to traverse through a user's given file content and find, collect all appropriate lines that should be sent to the REPL in case of smart selection. -def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): - """ - Intended to traverse through a user's given file content and find, collect all appropriate lines - that should be sent to the REPL in case of smart selection. This could be exact statement such as just a single line print statement, or a multiline dictionary, or differently styled multi-line list comprehension, etc. Then call the normalize_lines function to normalize our smartly selected code block. @@ -153,7 +150,7 @@ def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): parsed_file_content = None try: - parsed_file_content = ast.parse(wholeFileContent) + parsed_file_content = ast.parse(whole_file_content) except Exception: # Handle case where user is attempting to run code where file contains deprecated Python code. # Let typescript side know and show warning message. @@ -192,8 +189,7 @@ def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): ast.ExceptHandler, ) if isinstance(node, ast_types_with_nodebody) and isinstance(node.body, Iterable): - for child_nodes in node.body: - top_level_nodes.append(child_nodes) + top_level_nodes.extend(node.body) exact_nodes = check_exact_exist(top_level_nodes, start_line, end_line) @@ -202,7 +198,7 @@ def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): which_line_next = 0 for same_line_node in exact_nodes: should_run_top_blocks.append(same_line_node) - smart_code += f"{ast.get_source_segment(wholeFileContent, same_line_node)}\n" + smart_code += f"{ast.get_source_segment(whole_file_content, same_line_node)}\n" which_line_next = get_next_block_lineno(should_run_top_blocks) return { "normalized_smart_result": smart_code, @@ -216,7 +212,7 @@ def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): if start_line == top_node.lineno and end_line == top_node.end_lineno: should_run_top_blocks.append(top_node) - smart_code += f"{ast.get_source_segment(wholeFileContent, top_node)}\n" + smart_code += f"{ast.get_source_segment(whole_file_content, top_node)}\n" break # If we found exact match, don't waste computation in parsing extra nodes. elif start_line >= top_node.lineno and end_line <= top_node.end_lineno: # Case to apply smart selection for multiple line. @@ -231,7 +227,7 @@ def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): should_run_top_blocks.append(top_node) - smart_code += str(ast.get_source_segment(wholeFileContent, top_node)) + smart_code += str(ast.get_source_segment(whole_file_content, top_node)) smart_code += "\n" normalized_smart_result = normalize_lines(smart_code) @@ -262,7 +258,7 @@ def get_next_block_lineno(which_line_next): raw = stdin.read() contents = json.loads(raw.decode("utf-8")) # Empty highlight means user has not explicitly selected specific text. 
- empty_Highlight = contents.get("emptyHighlight", False) + empty_highlight = contents.get("emptyHighlight", False) # We also get the activeEditor selection start line and end line from the typescript VS Code side. # Remember to add 1 to each of the received since vscode starts line counting from 0 . @@ -273,12 +269,12 @@ def get_next_block_lineno(which_line_next): data = None which_line_next = 0 - if empty_Highlight and contents.get("smartSendSettingsEnabled"): + if empty_highlight and contents.get("smartSendSettingsEnabled"): result = traverse_file( contents["wholeFileContent"], vscode_start_line, vscode_end_line, - not empty_Highlight, + not empty_highlight, ) normalized = result["normalized_smart_result"] which_line_next = result["which_line_next"] diff --git a/python_files/printEnvVariables.py b/python_files/printEnvVariables.py index 353149f237de..bf2cfd80e666 100644 --- a/python_files/printEnvVariables.py +++ b/python_files/printEnvVariables.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -import os import json +import os print(json.dumps(dict(os.environ))) diff --git a/python_files/printEnvVariablesToFile.py b/python_files/printEnvVariablesToFile.py index a4e0d24abbe0..c7ec70dd9684 100644 --- a/python_files/printEnvVariablesToFile.py +++ b/python_files/printEnvVariablesToFile.py @@ -7,6 +7,6 @@ # Last argument is the target file into which we'll write the env variables line by line. output_file = sys.argv[-1] -with open(output_file, "w") as outfile: +with open(output_file, "w") as outfile: # noqa: PTH123 for key, val in os.environ.items(): outfile.write(f"{key}={val}\n") diff --git a/python_files/pyproject.toml b/python_files/pyproject.toml index 0f1b0f466940..afb9d372285c 100644 --- a/python_files/pyproject.toml +++ b/python_files/pyproject.toml @@ -1,15 +1,3 @@ -[tool.black] -exclude = ''' - -( - /( - .data - | .vscode - | lib - )/ -) -''' - [tool.pyright] exclude = ['lib'] extraPaths = ['lib/python', 'lib/jedilsp'] @@ -36,12 +24,59 @@ ignore = [ [tool.ruff] line-length = 100 exclude = [ - "tests/testing_tools/adapter/.data", - "tests/unittestadapter/.data" + "**/.data", + "lib", ] [tool.ruff.format] docstring-code-format = true +[tool.ruff.lint] +# Ruff's defaults are F and a subset of E. +# https://docs.astral.sh/ruff/rules/#rules +# Compatible w/ ruff formatter. https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules +# Up-to-date as of Ruff 0.5.0. 
+select = [ + "A", # flake8-builtins + "ARG", # flake8-unused-argument + "ASYNC", # flake8-async + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "D2", "D400", "D403", "D419", # pydocstyle + "DJ", # flake8-django + "DTZ", # flake8-dasetimez + "E4", "E7", "E9", # pycodestyle (errors) + "EXE", # flake8-executable + "F", # Pyflakes + "FBT", # flake8-boolean-trap + "FLY", # flynt + "FURB", # refurb + "I", # isort + "INP", # flake8-no-pep420 + "INT", # flake8-gettext + "LOG", # flake8-logging + "N", # pep8-naming + "NPY", # NumPy-specific rules + "PD", # pandas-vet + "PERF", # Perflint + "PIE", # flake8-pie + "PTH", # flake8-pathlib + # flake8-pytest-style + "PT006", "PT007", "PT009", "PT012", "PT014", "PT015", "PT016", "PT017", "PT018", "PT019", + "PT020", "PT021", "PT022", "PT024", "PT025", "PT026", "PT027", + "PYI", # flake8-pyi + "Q", # flake8-quotes + "RET502", "RET503", "RET504", # flake8-return + "RSE", # flake8-raise + "RUF", # Ruff-specific rules + "SIM", # flake8-simplify + "SLF", # flake8-self + "SLOT", # flake8-slots + "TCH", # flake8-type-checking + "UP", # pyupgrade + "W", # pycodestyle (warnings) + "YTT", # flake8-2020 +] + [tool.ruff.lint.pydocstyle] convention = "pep257" diff --git a/python_files/python_server.py b/python_files/python_server.py index 30be834631c6..a4b15f2cbaae 100644 --- a/python_files/python_server.py +++ b/python_files/python_server.py @@ -1,11 +1,11 @@ -from typing import Dict, List, Optional, Union -import sys -import json +import ast import contextlib import io +import json +import sys import traceback import uuid -import ast +from typing import Dict, List, Optional, Union STDIN = sys.stdin STDOUT = sys.stdout @@ -15,7 +15,7 @@ def send_message(msg: str): length_msg = len(msg) - STDOUT.buffer.write(f"Content-Length: {length_msg}\r\n\r\n{msg}".encode(encoding="utf-8")) + STDOUT.buffer.write(f"Content-Length: {length_msg}\r\n\r\n{msg}".encode()) STDOUT.buffer.flush() @@ -50,15 +50,14 @@ def custom_input(prompt=""): if content_length: message_text = STDIN.read(content_length) message_json = json.loads(message_text) - our_user_input = message_json["result"]["userInput"] - return our_user_input + return message_json["result"]["userInput"] except Exception: print_log(traceback.format_exc()) # Set input to our custom input USER_GLOBALS["input"] = custom_input -input = custom_input +input = custom_input # noqa: A001 def handle_response(request_id): @@ -76,7 +75,7 @@ def handle_response(request_id): elif message_json["method"] == "exit": sys.exit(0) - except Exception: + except Exception: # noqa: PERF203 print_log(traceback.format_exc()) @@ -100,12 +99,15 @@ def check_valid_command(request): def execute(request, user_globals): str_output = CustomIO("", encoding="utf-8") str_error = CustomIO("", encoding="utf-8") + str_input = CustomIO("", encoding="utf-8", newline="\n") - with redirect_io("stdout", str_output): - with redirect_io("stderr", str_error): - str_input = CustomIO("", encoding="utf-8", newline="\n") - with redirect_io("stdin", str_input): - exec_user_input(request["params"], user_globals) + with contextlib.redirect_stdout(str_output), contextlib.redirect_stderr(str_error): + original_stdin = sys.stdin + try: + sys.stdin = str_input + exec_user_input(request["params"], user_globals) + finally: + sys.stdin = original_stdin send_response(str_output.get_value(), request["id"]) @@ -113,8 +115,8 @@ def exec_user_input(user_input, user_globals): user_input = user_input[0] if isinstance(user_input, list) else user_input try: - callable = 
exec_function(user_input) - retval = callable(user_input, user_globals) + callable_ = exec_function(user_input) + retval = callable_(user_input, user_globals) if retval is not None: print(retval) except KeyboardInterrupt: @@ -141,15 +143,6 @@ def get_value(self) -> str: return self.read() -@contextlib.contextmanager -def redirect_io(stream: str, new_stream): - """Redirect stdio streams to a custom stream.""" - old_stream = getattr(sys, stream) - setattr(sys, stream, new_stream) - yield - setattr(sys, stream, old_stream) - - def get_headers(): headers = {} while line := STDIN.readline().strip(): @@ -174,5 +167,5 @@ def get_headers(): elif request_json["method"] == "exit": sys.exit(0) - except Exception: + except Exception: # noqa: PERF203 print_log(traceback.format_exc()) diff --git a/python_files/pythonrc.py b/python_files/pythonrc.py index 2edd88874674..2595143feade 100644 --- a/python_files/pythonrc.py +++ b/python_files/pythonrc.py @@ -6,7 +6,7 @@ original_ps1 = ">>> " -class repl_hooks: +class REPLHooks: def __init__(self): self.global_exit = None self.failure_flag = False @@ -21,11 +21,11 @@ def my_displayhook(self, value): self.original_displayhook(value) - def my_excepthook(self, type, value, traceback): + def my_excepthook(self, type_, value, traceback): self.global_exit = value self.failure_flag = True - self.original_excepthook(type, value, traceback) + self.original_excepthook(type_, value, traceback) def get_last_command(): @@ -37,18 +37,14 @@ def get_last_command(): return last_command -class ps1: - hooks = repl_hooks() +class PS1: + hooks = REPLHooks() sys.excepthook = hooks.my_excepthook sys.displayhook = hooks.my_displayhook # str will get called for every prompt with exit code to show success/failure def __str__(self): - exit_code = 0 - if self.hooks.failure_flag: - exit_code = 1 - else: - exit_code = 0 + exit_code = int(bool(self.hooks.failure_flag)) self.hooks.failure_flag = False # Guide following official VS Code doc for shell integration sequence: result = "" @@ -77,4 +73,4 @@ def __str__(self): if sys.platform != "win32": - sys.ps1 = ps1() + sys.ps1 = PS1() diff --git a/python_files/run-jedi-language-server.py b/python_files/run-jedi-language-server.py index 5a972799bc33..47bf503d596c 100644 --- a/python_files/run-jedi-language-server.py +++ b/python_files/run-jedi-language-server.py @@ -1,9 +1,12 @@ import os +import pathlib import sys # Add the lib path to our sys path so jedi_language_server can find its references -EXTENSION_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, os.path.join(EXTENSION_ROOT, "python_files", "lib", "jedilsp")) +extension_dir = pathlib.Path(__file__).parent.parent +EXTENSION_ROOT = os.fsdecode(extension_dir) +sys.path.insert(0, os.fsdecode(extension_dir / "python_files" / "lib" / "jedilsp")) +del extension_dir from jedi_language_server.cli import cli # noqa: E402 diff --git a/python_files/shell_exec.py b/python_files/shell_exec.py index 4987399a53ea..62b6b28af6cd 100644 --- a/python_files/shell_exec.py +++ b/python_files/shell_exec.py @@ -16,7 +16,7 @@ print("Executing command in shell >> " + " ".join(shell_args)) -with open(lock_file, "w") as fp: +with open(lock_file, "w") as fp: # noqa: PTH123 try: # Signal start of execution. fp.write("START\n") @@ -36,7 +36,7 @@ fp.flush() try: # ALso log the error for use from the other side. 
- with open(lock_file + ".error", "w") as fpError: - fpError.write(traceback.format_exc()) + with open(lock_file + ".error", "w") as fp_error: # noqa: PTH123 + fp_error.write(traceback.format_exc()) except Exception: pass diff --git a/python_files/tensorboard_launcher.py b/python_files/tensorboard_launcher.py index bad1ef09fc6e..a04d51e7eb74 100644 --- a/python_files/tensorboard_launcher.py +++ b/python_files/tensorboard_launcher.py @@ -1,7 +1,9 @@ -import time -import sys -import os +import contextlib import mimetypes +import os +import sys +import time + from tensorboard import program @@ -17,14 +19,12 @@ def main(logdir): tb = program.TensorBoard() tb.configure(bind_all=False, logdir=logdir) url = tb.launch() - sys.stdout.write("TensorBoard started at %s\n" % (url)) + sys.stdout.write(f"TensorBoard started at {url}\n") sys.stdout.flush() - while True: - try: + with contextlib.suppress(KeyboardInterrupt): + while True: time.sleep(60) - except KeyboardInterrupt: - break sys.stdout.write("TensorBoard is shutting down") sys.stdout.flush() @@ -32,5 +32,5 @@ def main(logdir): if __name__ == "__main__": if len(sys.argv) == 2: logdir = str(sys.argv[1]) - sys.stdout.write("Starting TensorBoard with logdir %s" % (logdir)) + sys.stdout.write(f"Starting TensorBoard with logdir {logdir}") main(logdir) diff --git a/python_files/testing_tools/adapter/__main__.py b/python_files/testing_tools/adapter/__main__.py index cc7084eb9439..c4d5c10c95ab 100644 --- a/python_files/testing_tools/adapter/__main__.py +++ b/python_files/testing_tools/adapter/__main__.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -from __future__ import absolute_import import argparse import sys @@ -85,14 +84,14 @@ def main( ): try: tool = _tools[toolname] - except KeyError: - raise UnsupportedToolError(toolname) + except KeyError as exc: + raise UnsupportedToolError(toolname) from exc try: run = tool[cmdname] report_result = _reporters[cmdname] - except KeyError: - raise UnsupportedCommandError(cmdname) + except KeyError as exc: + raise UnsupportedCommandError(cmdname) from exc parents, result = run(toolargs, **subargs) report_result(result, parents, **subargs) diff --git a/python_files/testing_tools/adapter/discovery.py b/python_files/testing_tools/adapter/discovery.py index 798aea1e93f1..a5fa2e0d6888 100644 --- a/python_files/testing_tools/adapter/discovery.py +++ b/python_files/testing_tools/adapter/discovery.py @@ -1,13 +1,11 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
-from __future__ import absolute_import, print_function import re -from .util import fix_fileid, DIRNAME, NORMCASE from .info import ParentInfo - +from .util import DIRNAME, NORMCASE, fix_fileid FILE_ID_RE = re.compile( r""" @@ -47,7 +45,7 @@ def fix_nodeid( return fileid + (remainder or "") -class DiscoveredTests(object): +class DiscoveredTests: """A container for the discovered tests and their parents.""" def __init__(self): diff --git a/python_files/testing_tools/adapter/errors.py b/python_files/testing_tools/adapter/errors.py index 3e6ae5189cb8..aa6febe315fc 100644 --- a/python_files/testing_tools/adapter/errors.py +++ b/python_files/testing_tools/adapter/errors.py @@ -4,13 +4,13 @@ class UnsupportedToolError(ValueError): def __init__(self, tool): - msg = "unsupported tool {!r}".format(tool) - super(UnsupportedToolError, self).__init__(msg) + msg = f"unsupported tool {tool!r}" + super().__init__(msg) self.tool = tool class UnsupportedCommandError(ValueError): def __init__(self, cmd): - msg = "unsupported cmd {!r}".format(cmd) - super(UnsupportedCommandError, self).__init__(msg) + msg = f"unsupported cmd {cmd!r}" + super().__init__(msg) self.cmd = cmd diff --git a/python_files/testing_tools/adapter/info.py b/python_files/testing_tools/adapter/info.py index 8e5d0442ce15..1e84ee7961f5 100644 --- a/python_files/testing_tools/adapter/info.py +++ b/python_files/testing_tools/adapter/info.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +# ruff:noqa: PYI024, SLOT002 from collections import namedtuple @@ -8,16 +9,15 @@ class SingleTestPath(namedtuple("TestPath", "root relfile func sub")): """Where to find a single test.""" def __new__(cls, root, relfile, func, sub=None): - self = super(SingleTestPath, cls).__new__( + return super().__new__( cls, str(root) if root else None, str(relfile) if relfile else None, str(func) if func else None, [str(s) for s in sub] if sub else None, ) - return self - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs): # noqa: ARG002 if self.root is None: raise TypeError("missing id") if self.relfile is None: @@ -29,8 +29,8 @@ def __init__(self, *args, **kwargs): class ParentInfo(namedtuple("ParentInfo", "id kind name root relpath parentid")): KINDS = ("folder", "file", "suite", "function", "subtest") - def __new__(cls, id, kind, name, root=None, relpath=None, parentid=None): - self = super(ParentInfo, cls).__new__( + def __new__(cls, id, kind, name, root=None, relpath=None, parentid=None): # noqa: A002 + return super().__new__( cls, id=str(id) if id else None, kind=str(kind) if kind else None, @@ -39,22 +39,21 @@ def __new__(cls, id, kind, name, root=None, relpath=None, parentid=None): relpath=str(relpath) if relpath else None, parentid=str(parentid) if parentid else None, ) - return self - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs): # noqa: ARG002 if self.id is None: raise TypeError("missing id") if self.kind is None: raise TypeError("missing kind") if self.kind not in self.KINDS: - raise ValueError("unsupported kind {!r}".format(self.kind)) + raise ValueError(f"unsupported kind {self.kind!r}") if self.name is None: raise TypeError("missing name") if self.root is None: if self.parentid is not None or self.kind != "folder": raise TypeError("missing root") if self.relpath is not None: - raise TypeError("unexpected relpath {}".format(self.relpath)) + raise TypeError(f"unexpected relpath {self.relpath}") elif self.parentid is None: raise 
TypeError("missing parentid") elif self.relpath is None and self.kind in ("folder", "file"): @@ -67,8 +66,8 @@ class SingleTestInfo(namedtuple("TestInfo", "id name path source markers parenti MARKERS = ("skip", "skip-if", "expected-failure") KINDS = ("function", "doctest") - def __new__(cls, id, name, path, source, markers, parentid, kind="function"): - self = super(SingleTestInfo, cls).__new__( + def __new__(cls, id, name, path, source, markers, parentid, kind="function"): # noqa: A002 + return super().__new__( cls, str(id) if id else None, str(name) if name else None, @@ -78,9 +77,8 @@ def __new__(cls, id, name, path, source, markers, parentid, kind="function"): str(parentid) if parentid else None, str(kind) if kind else None, ) - return self - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs): # noqa: ARG002 if self.id is None: raise TypeError("missing id") if self.name is None: @@ -92,17 +90,17 @@ def __init__(self, *args, **kwargs): else: srcfile, _, lineno = self.source.rpartition(":") if not srcfile or not lineno or int(lineno) < 0: - raise ValueError("bad source {!r}".format(self.source)) + raise ValueError(f"bad source {self.source!r}") if self.markers: badmarkers = [m for m in self.markers if m not in self.MARKERS] if badmarkers: - raise ValueError("unsupported markers {!r}".format(badmarkers)) + raise ValueError(f"unsupported markers {badmarkers!r}") if self.parentid is None: raise TypeError("missing parentid") if self.kind is None: raise TypeError("missing kind") elif self.kind not in self.KINDS: - raise ValueError("unsupported kind {!r}".format(self.kind)) + raise ValueError(f"unsupported kind {self.kind!r}") @property def root(self): diff --git a/python_files/testing_tools/adapter/pytest/__init__.py b/python_files/testing_tools/adapter/pytest/__init__.py index 89b7c066a459..ce1a1c4d694a 100644 --- a/python_files/testing_tools/adapter/pytest/__init__.py +++ b/python_files/testing_tools/adapter/pytest/__init__.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -from __future__ import absolute_import from ._cli import add_subparser as add_cli_subparser # noqa: F401 from ._discovery import discover # noqa: F401 diff --git a/python_files/testing_tools/adapter/pytest/_cli.py b/python_files/testing_tools/adapter/pytest/_cli.py index 3d3eec09a199..1556b9ac754c 100644 --- a/python_files/testing_tools/adapter/pytest/_cli.py +++ b/python_files/testing_tools/adapter/pytest/_cli.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -from __future__ import absolute_import from ..errors import UnsupportedCommandError diff --git a/python_files/testing_tools/adapter/pytest/_discovery.py b/python_files/testing_tools/adapter/pytest/_discovery.py index bbe5ae9856c8..c1cfc9e7cbbd 100644 --- a/python_files/testing_tools/adapter/pytest/_discovery.py +++ b/python_files/testing_tools/adapter/pytest/_discovery.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -from __future__ import absolute_import, print_function import sys @@ -13,7 +12,7 @@ def discover( pytestargs=None, - hidestdio=False, + hidestdio=False, # noqa: FBT002 # *, _pytest_main=pytest.main, _plugin=None, @@ -36,28 +35,20 @@ def discover( # Some tests where collected but with errors. 
pass elif ec != 0: - print( - "equivalent command: {} -m pytest {}".format( - sys.executable, util.shlex_unsplit(pytestargs) - ) - ) + print(f"equivalent command: {sys.executable} -m pytest {util.shlex_unsplit(pytestargs)}") if hidestdio: print(stdio.getvalue(), file=sys.stderr) sys.stdout.flush() - raise Exception("pytest discovery failed (exit code {})".format(ec)) - if not _plugin._started: - print( - "equivalent command: {} -m pytest {}".format( - sys.executable, util.shlex_unsplit(pytestargs) - ) - ) + raise Exception(f"pytest discovery failed (exit code {ec})") + if not _plugin._started: # noqa: SLF001 + print(f"equivalent command: {sys.executable} -m pytest {util.shlex_unsplit(pytestargs)}") if hidestdio: print(stdio.getvalue(), file=sys.stderr) sys.stdout.flush() raise Exception("pytest discovery did not start") return ( - _plugin._tests.parents, - list(_plugin._tests), + _plugin._tests.parents, # noqa: SLF001 + list(_plugin._tests), # noqa: SLF001 ) @@ -72,7 +63,7 @@ def _adjust_pytest_args(pytestargs): return pytestargs -class TestCollector(object): +class TestCollector: """This is a pytest plugin that collects the discovered tests.""" @classmethod @@ -88,7 +79,7 @@ def __init__(self, tests=None): # Relevant plugin hooks: # https://docs.pytest.org/en/latest/reference.html#collection-hooks - def pytest_collection_modifyitems(self, session, config, items): + def pytest_collection_modifyitems(self, session, config, items): # noqa: ARG002 self._started = True self._tests.reset() for item in items: diff --git a/python_files/testing_tools/adapter/pytest/_pytest_item.py b/python_files/testing_tools/adapter/pytest/_pytest_item.py index 724b71a1ac44..c7cbbe5684a6 100644 --- a/python_files/testing_tools/adapter/pytest/_pytest_item.py +++ b/python_files/testing_tools/adapter/pytest/_pytest_item.py @@ -89,9 +89,7 @@ + __code__ + __closure__ * own_markers -""" - -from __future__ import absolute_import, print_function +""" # noqa: D205 import sys @@ -112,7 +110,7 @@ def should_never_reach_here(item, **extra): print("and paste the following output there.") print() for field, info in _summarize_item(item): - print("{}: {}".format(field, info)) + print(f"{field}: {info}") if extra: print() print("extra info:") @@ -166,8 +164,8 @@ def parse_item( (parentid, parents, fileid, testfunc, _) = _parse_node_id( item.nodeid[: -len(parameterized)], kind ) - nodeid = "{}{}".format(parentid, parameterized) - parents = [(parentid, item.originalname, kind)] + parents + nodeid = f"{parentid}{parameterized}" + parents = [(parentid, item.originalname, kind), *parents] name = parameterized[1:-1] or "" else: (nodeid, parents, fileid, testfunc, parameterized) = _parse_node_id(item.nodeid, kind) @@ -311,7 +309,7 @@ def _get_location( lineno = -1 # i.e.
"unknown" # from pytest, line numbers are 0-based - location = "{}:{}".format(srcfile, int(lineno) + 1) + location = f"{srcfile}:{int(lineno) + 1}" return location, fullname @@ -327,14 +325,11 @@ def _matches_relfile( testroot = _normcase(testroot) srcfile = _normcase(srcfile) relfile = _normcase(relfile) - if srcfile == relfile: - return True - elif srcfile == relfile[len(_pathsep) + 1 :]: - return True - elif srcfile == testroot + relfile[1:]: - return True - else: - return False + return bool( + srcfile == relfile + or srcfile == relfile[len(_pathsep) + 1 :] + or srcfile == testroot + relfile[1:] + ) def _is_legacy_wrapper( @@ -350,9 +345,7 @@ """ if _pyversion > (3,): return False - if (_pathsep + "unittest" + _pathsep + "case.py") not in srcfile: - return False - return True + return (_pathsep + "unittest" + _pathsep + "case.py") in srcfile def _unwrap_decorator(func): @@ -579,16 +572,16 @@ def _summarize_item(item): yield field, dir(item) else: yield field, getattr(item, field, "") - except Exception as exc: - yield field, "<error {}>".format(exc) + except Exception as exc: # noqa: PERF203 + yield field, f"<error {exc}>" -def _debug_item(item, showsummary=False): - item._debugging = True +def _debug_item(item, showsummary=False): # noqa: FBT002 + item._debugging = True # noqa: SLF001 try: summary = dict(_summarize_item(item)) finally: - item._debugging = False + item._debugging = False # noqa: SLF001 if showsummary: print(item.nodeid) @@ -602,7 +595,7 @@ def _debug_item(item, showsummary=False): "markers", "props", ): - print(" {:12} {}".format(key, summary[key])) + print(f" {key:12} {summary[key]}") print() return summary diff --git a/python_files/testing_tools/adapter/report.py b/python_files/testing_tools/adapter/report.py index 1ad02fe7bcd4..3fe2fe48c26c 100644 --- a/python_files/testing_tools/adapter/report.py +++ b/python_files/testing_tools/adapter/report.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -from __future__ import print_function import json @@ -10,8 +9,8 @@ def report_discovered( tests, parents, # *, - pretty=False, - simple=False, + pretty=False, # noqa: FBT002 + simple=False, # noqa: FBT002 _send=print, **_ignored, ): @@ -83,12 +82,12 @@ def report_discovered( kwargs = {} if pretty: # human-formatted - kwargs = dict( - sort_keys=True, - indent=4, - separators=(",", ": "), + kwargs = { + "sort_keys": True, + "indent": 4, + "separators": (",", ": "), # ... - ) + } serialized = json.dumps(data, **kwargs) _send(serialized) diff --git a/python_files/testing_tools/adapter/util.py b/python_files/testing_tools/adapter/util.py index 9f3089fb29d0..52c0fac757f8 100644 --- a/python_files/testing_tools/adapter/util.py +++ b/python_files/testing_tools/adapter/util.py @@ -83,9 +83,8 @@ def fix_relpath( path = _fix_path(path) if path in (".", ".."): return path - if not _path_isabs(path): - if not path.startswith("." + _pathsep): - path = "." + _pathsep + path + if not _path_isabs(path) and not path.startswith("." + _pathsep): + path = "." + _pathsep + path return path @@ -125,7 +124,7 @@ def fix_fileid( fileid, rootdir=None, # *, - normalize=False, + normalize=False, # noqa: FBT002 strictpathsep=None, _pathsep=PATH_SEP, **kwargs, ) @@ -171,10 +170,7 @@ def fix_fileid( @contextlib.contextmanager def _replace_fd(file, target): - """ - Temporarily replace the file descriptor for `file`, - for which sys.stdout or sys.stderr is passed.
- """ + """Temporarily replace the file descriptor for `file`, for which sys.stdout or sys.stderr is passed.""" try: fd = file.fileno() except (AttributeError, io.UnsupportedOperation): @@ -233,7 +229,7 @@ def _temp_io(): @contextlib.contextmanager def hide_stdio(): """Swallow stdout and stderr.""" - with _temp_io() as (sio, fileobj): + with _temp_io() as (sio, fileobj): # noqa: SIM117 with _replace_fd(sys.stdout, fileobj): with _replace_stdout(fileobj): with _replace_fd(sys.stderr, fileobj): @@ -261,9 +257,7 @@ def shlex_unsplit(argv): def _quote_arg(arg): parts = None for i, c in enumerate(arg): - if c.isspace(): - pass - elif c == '"': + if c.isspace() or c == '"': pass elif c == "'": c = "'\"'\"'" diff --git a/python_files/testing_tools/process_json_util.py b/python_files/testing_tools/process_json_util.py index 36067521ea27..8ca9f7261d9e 100644 --- a/python_files/testing_tools/process_json_util.py +++ b/python_files/testing_tools/process_json_util.py @@ -2,7 +2,7 @@ # Licensed under the MIT License. import io import json -from typing import List, Dict +from typing import Dict, List CONTENT_LENGTH: str = "Content-Length:" diff --git a/python_files/testing_tools/run_adapter.py b/python_files/testing_tools/run_adapter.py index 8af4e49dd31c..af3c8ce87479 100644 --- a/python_files/testing_tools/run_adapter.py +++ b/python_files/testing_tools/run_adapter.py @@ -2,20 +2,16 @@ # Licensed under the MIT License. # Replace the "." entry. -import os.path +import os +import pathlib import sys sys.path.insert( 1, - os.path.dirname( # python_files - os.path.dirname( # python_files/testing_tools - os.path.abspath(__file__) # this file - ) - ), + os.fsdecode(pathlib.Path(__file__).parent.parent), ) -from testing_tools.adapter.__main__ import parse_args, main - +from testing_tools.adapter.__main__ import main, parse_args if __name__ == "__main__": tool, cmd, subargs, toolargs = parse_args() diff --git a/python_files/testing_tools/socket_manager.py b/python_files/testing_tools/socket_manager.py index 31b78b254bba..347453a6ca1a 100644 --- a/python_files/testing_tools/socket_manager.py +++ b/python_files/testing_tools/socket_manager.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +import contextlib import socket import sys @@ -20,7 +21,7 @@ def __exit__(self, *_): def connect(self): if sys.platform == "win32": - self._writer = open(self.name, "wt", encoding="utf-8") + self._writer = open(self.name, "w", encoding="utf-8") # noqa: SIM115, PTH123 # reader created in read method else: self._socket = _SOCKET(socket.AF_UNIX, socket.SOCK_STREAM) @@ -65,7 +66,7 @@ def read(self, bufsize=1024) -> str: if sys.platform == "win32": # returns a string automatically from read if not hasattr(self, "_reader"): - self._reader = open(self.name, "rt", encoding="utf-8") + self._reader = open(self.name, encoding="utf-8") # noqa: SIM115, PTH123 return self._reader.read(bufsize) else: # receive bytes and convert to string @@ -75,7 +76,7 @@ def read(self, bufsize=1024) -> str: return data -class SocketManager(object): +class SocketManager: """Create a socket and connect to the given address. The address is a (host: str, port: int) tuple. 
@@ -111,8 +112,6 @@ def connect(self): def close(self): if self.socket: - try: + with contextlib.suppress(Exception): self.socket.shutdown(socket.SHUT_RDWR) - except Exception: - pass self.socket.close() diff --git a/python_files/testing_tools/unittest_discovery.py b/python_files/testing_tools/unittest_discovery.py index 5d5e9bcc6601..9b792d8e5102 100644 --- a/python_files/testing_tools/unittest_discovery.py +++ b/python_files/testing_tools/unittest_discovery.py @@ -8,7 +8,7 @@ start_dir = sys.argv[1] pattern = sys.argv[2] top_level_dir = sys.argv[3] if len(sys.argv) >= 4 else None -sys.path.insert(0, os.getcwd()) +sys.path.insert(0, os.getcwd()) # noqa: PTH109 def get_sourceline(obj): @@ -34,8 +34,7 @@ def generate_test_cases(suite): if isinstance(test, unittest.TestCase): yield test else: - for test_case in generate_test_cases(test): - yield test_case + yield from generate_test_cases(test) try: @@ -45,12 +44,12 @@ def generate_test_cases(suite): print("start") # Don't remove this line loader_errors = [] for s in generate_test_cases(suite): - tm = getattr(s, s._testMethodName) - testId = s.id() - if testId.startswith("unittest.loader._FailedTest"): - loader_errors.append(s._exception) + tm = getattr(s, s._testMethodName) # noqa: SLF001 + test_id = s.id() + if test_id.startswith("unittest.loader._FailedTest"): + loader_errors.append(s._exception) # noqa: SLF001 else: - print(testId.replace(".", ":") + ":" + get_sourceline(tm)) + print(test_id.replace(".", ":") + ":" + get_sourceline(tm)) except Exception: print("=== exception start ===") traceback.print_exc() diff --git a/python_files/testlauncher.py b/python_files/testlauncher.py index 3278815b380c..2309a203363b 100644 --- a/python_files/testlauncher.py +++ b/python_files/testlauncher.py @@ -7,30 +7,31 @@ def parse_argv(): """Parses arguments for use with the test launcher. + Arguments are: 1. Working directory. 2. Test runner `pytest` 3. Rest of the arguments are passed into the test runner. """ cwd = sys.argv[1] - testRunner = sys.argv[2] + test_runner = sys.argv[2] args = sys.argv[3:] - return (cwd, testRunner, args) + return (cwd, test_runner, args) + +def run(cwd, test_runner, args): + """Runs the test. -def run(cwd, testRunner, args): - """Runs the test cwd -- the current directory to be set testRunner -- test runner to be used `pytest` args -- arguments passed into the test runner """ - - sys.path[0] = os.getcwd() + sys.path[0] = os.getcwd() # noqa: PTH109 os.chdir(cwd) try: - if testRunner == "pytest": + if test_runner == "pytest": import pytest pytest.main(args) @@ -40,5 +41,5 @@ def run(cwd, testRunner, args): if __name__ == "__main__": - cwd, testRunner, args = parse_argv() - run(cwd, testRunner, args) + cwd, test_runner, args = parse_argv() + run(cwd, test_runner, args) diff --git a/python_files/tests/__init__.py b/python_files/tests/__init__.py index 4f762cd1f81a..86bc29ff33e8 100644 --- a/python_files/tests/__init__.py +++ b/python_files/tests/__init__.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
+# ruff:noqa: PTH118, PTH120 import os.path TEST_ROOT = os.path.dirname(__file__) diff --git a/python_files/tests/__main__.py b/python_files/tests/__main__.py index 347222bd85db..2595fce358e4 100644 --- a/python_files/tests/__main__.py +++ b/python_files/tests/__main__.py @@ -34,7 +34,7 @@ def parse_args(): return ns, remainder -def main(pytestargs, markers=None, specific=False): +def main(pytestargs, markers=None, specific=False): # noqa: FBT002 sys.path.insert(1, TESTING_TOOLS_ROOT) sys.path.insert(1, DEBUG_ADAPTER_ROOT) @@ -46,8 +46,7 @@ def main(pytestargs, markers=None, specific=False): pytestargs.insert(0, marker) pytestargs.insert(0, "-m") - ec = pytest.main(pytestargs) - return ec + return pytest.main(pytestargs) if __name__ == "__main__": diff --git a/python_files/tests/pytestadapter/expected_discovery_test_output.py b/python_files/tests/pytestadapter/expected_discovery_test_output.py index 723adaabc3e5..56b116e7dfd5 100644 --- a/python_files/tests/pytestadapter/expected_discovery_test_output.py +++ b/python_files/tests/pytestadapter/expected_discovery_test_output.py @@ -1,6 +1,5 @@ import os - from .helpers import TEST_DATA_PATH, find_test_line_number, get_absolute_test_id # This file contains the expected output dictionaries for tests discovery and is used in test_discovery.py. @@ -850,10 +849,10 @@ "children": [ { "name": "test_a_function", - "path": os.fspath(os.path.join(tests_path, "test_a.py")), + "path": os.fspath(os.path.join(tests_path, "test_a.py")), # noqa: PTH118 "lineno": find_test_line_number( "test_a_function", - os.path.join(tests_path, "test_a.py"), + os.path.join(tests_path, "test_a.py"), # noqa: PTH118 ), "type_": "test", "id_": get_absolute_test_id("tests/test_a.py::test_a_function", tests_a_path), @@ -869,10 +868,10 @@ "children": [ { "name": "test_b_function", - "path": os.fspath(os.path.join(tests_path, "test_b.py")), + "path": os.fspath(os.path.join(tests_path, "test_b.py")), # noqa: PTH118 "lineno": find_test_line_number( "test_b_function", - os.path.join(tests_path, "test_b.py"), + os.path.join(tests_path, "test_b.py"), # noqa: PTH118 ), "type_": "test", "id_": get_absolute_test_id("tests/test_b.py::test_b_function", tests_b_path), @@ -1033,7 +1032,7 @@ "path": str(SYMLINK_FOLDER_PATH_TESTS_TEST_A), "lineno": find_test_line_number( "test_a_function", - os.path.join(tests_path, "test_a.py"), + os.path.join(tests_path, "test_a.py"), # noqa: PTH118 ), "type_": "test", "id_": get_absolute_test_id( @@ -1058,7 +1057,7 @@ "path": str(SYMLINK_FOLDER_PATH_TESTS_TEST_B), "lineno": find_test_line_number( "test_b_function", - os.path.join(tests_path, "test_b.py"), + os.path.join(tests_path, "test_b.py"), # noqa: PTH118 ), "type_": "test", "id_": get_absolute_test_id( diff --git a/python_files/tests/pytestadapter/helpers.py b/python_files/tests/pytestadapter/helpers.py index 978fd7f9ce08..9ec0550fb4b9 100644 --- a/python_files/tests/pytestadapter/helpers.py +++ b/python_files/tests/pytestadapter/helpers.py @@ -11,8 +11,8 @@ import sys import tempfile import threading -from typing import Any, Dict, List, Optional, Tuple import uuid +from typing import Any, Dict, List, Optional, Tuple if sys.platform == "win32": from namedpipe import NPopen @@ -41,7 +41,7 @@ def text_to_python_file(text_file_path: pathlib.Path): yield python_file finally: if python_file: - os.unlink(os.fspath(python_file)) + python_file.unlink() @contextlib.contextmanager @@ -64,13 +64,14 @@ def create_symlink(root: pathlib.Path, target_ext: str, destination_ext: str): def process_data_received(data: 
str) -> List[Dict[str, Any]]: - """Process the all JSON data which comes from the server. After listen is finished, this function will be called. + """Process the all JSON data which comes from the server. + + After listen is finished, this function will be called. Here the data must be split into individual JSON messages and then parsed. This function also: - Checks that the jsonrpc value is 2.0 - Checks that the last JSON message contains the `eot` token. - """ json_messages = [] remaining = data @@ -99,7 +100,8 @@ def parse_rpc_message(data: str) -> Tuple[Dict[str, str], str]: returns: json_data: A single rpc payload of JSON data from the server. - remaining: The remaining data after the JSON data.""" + remaining: The remaining data after the JSON data. + """ str_stream: io.StringIO = io.StringIO(data) length: int = 0 @@ -133,6 +135,7 @@ def parse_rpc_message(data: str) -> Tuple[Dict[str, str], str]: def _listen_on_pipe_new(listener, result: List[str], completed: threading.Event): """Listen on the named pipe or Unix domain socket for JSON data from the server. + Created as a separate function for clarity in threading context. """ # Windows design @@ -197,14 +200,7 @@ def runner(args: List[str]) -> Optional[List[Dict[str, Any]]]: def runner_with_cwd(args: List[str], path: pathlib.Path) -> Optional[List[Dict[str, Any]]]: """Run the pytest discovery and return the JSON data from the server.""" - process_args: List[str] = [ - sys.executable, - "-m", - "pytest", - "-p", - "vscode_pytest", - "-s", - ] + args + process_args: List[str] = [sys.executable, "-m", "pytest", "-p", "vscode_pytest", "-s", *args] # Generate pipe name, pipe name specific per OS type. pipe_name = generate_random_pipe_name("pytest-discovery-test") @@ -281,7 +277,7 @@ def find_test_line_number(test_name: str, test_file_path) -> str: test_file_path: The path to the test file where the test is located. """ test_file_unique_id: str = "test_marker--" + test_name.split("[")[0] - with open(test_file_path) as f: + with open(test_file_path) as f: # noqa: PTH123 for i, line in enumerate(f): if test_file_unique_id in line: return str(i + 1) @@ -289,11 +285,10 @@ def find_test_line_number(test_name: str, test_file_path) -> str: raise ValueError(error_str) -def get_absolute_test_id(test_id: str, testPath: pathlib.Path) -> str: +def get_absolute_test_id(test_id: str, test_path: pathlib.Path) -> str: """Get the absolute test id by joining the testPath with the test_id.""" split_id = test_id.split("::")[1:] - absolute_test_id = "::".join([str(testPath), *split_id]) - return absolute_test_id + return "::".join([str(test_path), *split_id]) def generate_random_pipe_name(prefix=""): @@ -310,9 +305,9 @@ def generate_random_pipe_name(prefix=""): # For Unix-like systems, use either the XDG_RUNTIME_DIR or a temporary directory. 
xdg_runtime_dir = os.getenv("XDG_RUNTIME_DIR") if xdg_runtime_dir: - return os.path.join(xdg_runtime_dir, f"{prefix}-{random_suffix}.sock") + return os.path.join(xdg_runtime_dir, f"{prefix}-{random_suffix}.sock") # noqa: PTH118 else: - return os.path.join(tempfile.gettempdir(), f"{prefix}-{random_suffix}.sock") + return os.path.join(tempfile.gettempdir(), f"{prefix}-{random_suffix}.sock") # noqa: PTH118 class UnixPipeServer: @@ -328,9 +323,9 @@ def __init__(self, name): self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) # Ensure the socket does not already exist try: - os.unlink(self.name) + os.unlink(self.name) # noqa: PTH108 except OSError: - if os.path.exists(self.name): + if os.path.exists(self.name): # noqa: PTH110 raise def start(self): diff --git a/python_files/tests/pytestadapter/test_discovery.py b/python_files/tests/pytestadapter/test_discovery.py index f8c4890658c9..c7752cf490ca 100644 --- a/python_files/tests/pytestadapter/test_discovery.py +++ b/python_files/tests/pytestadapter/test_discovery.py @@ -7,9 +7,9 @@ import pytest -from tests.tree_comparison_helper import is_same_tree # noqa: E402 +from tests.tree_comparison_helper import is_same_tree -from . import expected_discovery_test_output, helpers # noqa: E402 +from . import expected_discovery_test_output, helpers def test_import_error(): @@ -31,7 +31,7 @@ def test_import_error(): actual_list: List[Dict[str, Any]] = actual if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "error" assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH) @@ -42,10 +42,10 @@ def test_import_error(): ): # You can add other types if needed assert len(error_content) == 2 else: - assert False + pytest.fail(f"{error_content} is None or not a list, str, or tuple") -def test_syntax_error(tmp_path): +def test_syntax_error(tmp_path): # noqa: ARG001 """Test pytest discovery on a file that has a syntax error. 
Copies the contents of a .txt file to a .py file in the temporary directory @@ -67,7 +67,7 @@ def test_syntax_error(tmp_path): actual_list: List[Dict[str, Any]] = actual if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "error" assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH) @@ -78,7 +78,7 @@ def test_syntax_error(tmp_path): ): # You can add other types if needed assert len(error_content) == 2 else: - assert False + pytest.fail(f"{error_content} is None or not a list, str, or tuple") def test_parameterized_error_collect(): @@ -92,7 +92,7 @@ def test_parameterized_error_collect(): actual_list: List[Dict[str, Any]] = actual if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "error" assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH) @@ -103,11 +103,11 @@ def test_parameterized_error_collect(): ): # You can add other types if needed assert len(error_content) == 2 else: - assert False + pytest.fail(f"{error_content} is None or not a list, str, or tuple") @pytest.mark.parametrize( - "file, expected_const", + ("file", "expected_const"), [ ( "test_param_span_class.py", @@ -121,10 +121,6 @@ def test_parameterized_error_collect(): "same_function_new_class_param.py", expected_discovery_test_output.same_function_new_class_param_expected_output, ), - ( - "test_multi_class_nest.py", - expected_discovery_test_output.nested_classes_expected_test_output, - ), ( "unittest_skiptest_file_level.py", expected_discovery_test_output.unittest_skip_file_level_expected_output, @@ -168,11 +164,12 @@ def test_parameterized_error_collect(): ], ) def test_pytest_collect(file, expected_const): - """ - Test to test pytest discovery on a variety of test files/ folder structures. - Uses variables from expected_discovery_test_output.py to store the expected dictionary return. - Only handles discovery and therefore already contains the arg --collect-only. - All test discovery will succeed, be in the correct cwd, and match expected test output. + """Test to test pytest discovery on a variety of test files/ folder structures. + + Uses variables from expected_discovery_test_output.py to store the expected + dictionary return. Only handles discovery and therefore already contains the arg + --collect-only. All test discovery will succeed, be in the correct cwd, and match + expected test output. Keyword arguments: file -- a string with the file or folder to run pytest discovery on. 
@@ -189,7 +186,7 @@ def test_pytest_collect(file, expected_const): actual_list: List[Dict[str, Any]] = actual if actual_list is not None: actual_item = actual_list.pop(0) - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert ( actual_item.get("status") == "success" ), f"Status is not 'success', error is: {actual_item.get('error')}" @@ -206,8 +203,8 @@ def test_pytest_collect(file, expected_const): reason="See https://stackoverflow.com/questions/32877260/privlege-error-trying-to-create-symlink-using-python-on-windows-10", ) def test_symlink_root_dir(): - """ - Test to test pytest discovery with the command line arg --rootdir specified as a symlink path. + """Test to test pytest discovery with the command line arg --rootdir specified as a symlink path. + Discovery should succeed and testids should be relative to the symlinked root directory. """ with helpers.create_symlink(helpers.TEST_DATA_PATH, "root", "symlink_folder") as ( @@ -228,7 +225,7 @@ def test_symlink_root_dir(): try: # Check if all requirements assert all( - item in actual_item.keys() for item in ("status", "cwd", "error") + item in actual_item for item in ("status", "cwd", "error") ), "Required keys are missing" assert actual_item.get("status") == "success", "Status is not 'success'" assert actual_item.get("cwd") == os.fspath( @@ -242,9 +239,9 @@ def test_symlink_root_dir(): def test_pytest_root_dir(): - """ - Test to test pytest discovery with the command line arg --rootdir specified to be a subfolder - of the workspace root. Discovery should succeed and testids should be relative to workspace root. + """Test to test pytest discovery with the command line arg --rootdir specified to be a subfolder of the workspace root. + + Discovery should succeed and testids should be relative to workspace root. """ rd = f"--rootdir={helpers.TEST_DATA_PATH / 'root' / 'tests'}" actual = helpers.runner_with_cwd( @@ -259,7 +256,7 @@ def test_pytest_root_dir(): if actual_list is not None: actual_item = actual_list.pop(0) - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "success" assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH / "root") assert is_same_tree( @@ -270,9 +267,9 @@ def test_pytest_root_dir(): def test_pytest_config_file(): - """ - Test to test pytest discovery with the command line arg -c with a specified config file which - changes the workspace root. Discovery should succeed and testids should be relative to workspace root. + """Test to test pytest discovery with the command line arg -c with a specified config file which changes the workspace root. + + Discovery should succeed and testids should be relative to workspace root. """ actual = helpers.runner_with_cwd( [ @@ -286,7 +283,7 @@ def test_pytest_config_file(): if actual_list is not None: actual_item = actual_list.pop(0) - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "success" assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH / "root") assert is_same_tree( @@ -298,7 +295,10 @@ def test_pytest_config_file(): def test_config_sub_folder(): """Here the session node will be a subfolder of the workspace root and the test are in another subfolder. 
- This tests checks to see if test node path are under the session node and if so the session node is correctly updated to the common path.""" + + This tests checks to see if test node path are under the session node and if so the + session node is correctly updated to the common path. + """ folder_path = helpers.TEST_DATA_PATH / "config_sub_folder" actual = helpers.runner_with_cwd( [ @@ -314,7 +314,7 @@ def test_config_sub_folder(): actual_list: List[Dict[str, Any]] = actual if actual_list is not None: actual_item = actual_list.pop(0) - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "success" assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH / "config_sub_folder") assert actual_item.get("tests") is not None diff --git a/python_files/tests/pytestadapter/test_execution.py b/python_files/tests/pytestadapter/test_execution.py index 98ed00954d60..3ea8c685a9fe 100644 --- a/python_files/tests/pytestadapter/test_execution.py +++ b/python_files/tests/pytestadapter/test_execution.py @@ -36,10 +36,10 @@ def test_config_file(): assert actual actual_list: List[Dict[str, Any]] = actual assert len(actual_list) == len(expected_const) - actual_result_dict = dict() + actual_result_dict = {} if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "result")) + assert all(item in actual_item for item in ("status", "cwd", "result")) assert actual_item.get("status") == "success" assert actual_item.get("cwd") == os.fspath(new_cwd) actual_result_dict.update(actual_item["result"]) @@ -56,10 +56,10 @@ def test_rootdir_specified(): assert actual actual_list: List[Dict[str, Dict[str, Any]]] = actual assert len(actual_list) == len(expected_const) - actual_result_dict = dict() + actual_result_dict = {} if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "result")) + assert all(item in actual_item for item in ("status", "cwd", "result")) assert actual_item.get("status") == "success" assert actual_item.get("cwd") == os.fspath(new_cwd) actual_result_dict.update(actual_item["result"]) @@ -95,7 +95,7 @@ def test_syntax_error_execution(tmp_path): if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "error" assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) error_content = actual_item.get("error") @@ -104,7 +104,7 @@ def test_syntax_error_execution(tmp_path): ): # You can add other types if needed assert len(error_content) == 1 else: - assert False + pytest.fail(f"{error_content!r} is None or not a list, str, or tuple") def test_bad_id_error_execution(): @@ -117,20 +117,20 @@ def test_bad_id_error_execution(): actual_list: List[Dict[str, Dict[str, Any]]] = actual if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert all(item in actual_item for item in ("status", "cwd", "error")) assert actual_item.get("status") == "error" assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) error_content = actual_item.get("error") if error_content is not None and isinstance( error_content, (list, tuple, str) - ): # You can add other types if 
needed + ): # You can add other types if needed. assert len(error_content) == 1 else: - assert False + pytest.fail(f"{error_content!r} is None or not a list, str, or tuple") @pytest.mark.parametrize( - "test_ids, expected_const", + ("test_ids", "expected_const"), [ ( [ @@ -231,8 +231,8 @@ def test_bad_id_error_execution(): ) def test_pytest_execution(test_ids, expected_const): """ - Test that pytest discovery works as expected where run pytest is always successful - but the actual test results are both successes and failures.: + Test that pytest discovery works as expected where run pytest is always successful, but the actual test results are both successes and failures. + 1: skip_tests_execution_expected_output: test run on a file with skipped tests. 2. error_raised_exception_execution_expected_output: test run on a file that raises an exception. 3. uf_execution_expected_output: unittest tests run on multiple files. @@ -258,10 +258,10 @@ def test_pytest_execution(test_ids, expected_const): assert actual actual_list: List[Dict[str, Dict[str, Any]]] = actual assert len(actual_list) == len(expected_const) - actual_result_dict = dict() + actual_result_dict = {} if actual_list is not None: for actual_item in actual_list: - assert all(item in actual_item.keys() for item in ("status", "cwd", "result")) + assert all(item in actual_item for item in ("status", "cwd", "result")) assert actual_item.get("status") == "success" assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) actual_result_dict.update(actual_item["result"]) @@ -277,8 +277,8 @@ def test_pytest_execution(test_ids, expected_const): def test_symlink_run(): - """ - Test to test pytest discovery with the command line arg --rootdir specified as a symlink path. + """Test to test pytest discovery with the command line arg --rootdir specified as a symlink path. + Discovery should succeed and testids should be relative to the symlinked root directory. """ with create_symlink(TEST_DATA_PATH, "root", "symlink_folder") as ( @@ -303,13 +303,13 @@ def test_symlink_run(): try: # Check if all requirements assert all( - item in actual_item.keys() for item in ("status", "cwd", "result") + item in actual_item for item in ("status", "cwd", "result") ), "Required keys are missing" assert actual_item.get("status") == "success", "Status is not 'success'" assert actual_item.get("cwd") == os.fspath( destination ), f"CWD does not match: {os.fspath(destination)}" - actual_result_dict = dict() + actual_result_dict = {} actual_result_dict.update(actual_item["result"]) assert actual_result_dict == expected_const except AssertionError as e: diff --git a/python_files/tests/pytestadapter/test_utils.py b/python_files/tests/pytestadapter/test_utils.py index 9a1a58376ad8..ef0ed2daf4e9 100644 --- a/python_files/tests/pytestadapter/test_utils.py +++ b/python_files/tests/pytestadapter/test_utils.py @@ -1,12 +1,12 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -import pathlib -import tempfile import os +import pathlib import sys +import tempfile -from .helpers import ( # noqa: E402 +from .helpers import ( TEST_DATA_PATH, ) diff --git a/python_files/tests/run_all.py b/python_files/tests/run_all.py index 7c864ba7c5c1..3edb3cd3440c 100644 --- a/python_files/tests/run_all.py +++ b/python_files/tests/run_all.py @@ -2,10 +2,11 @@ # Licensed under the MIT License. # Replace the "." entry. 
-import os.path +import os +import pathlib import sys -sys.path[0] = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path[0] = os.fsdecode(pathlib.Path(__file__).parent.parent) from tests.__main__ import main, parse_args # noqa: E402 diff --git a/python_files/tests/test_create_conda.py b/python_files/tests/test_create_conda.py index 8681184ba821..82daafbea9dc 100644 --- a/python_files/tests/test_create_conda.py +++ b/python_files/tests/test_create_conda.py @@ -4,9 +4,10 @@ import importlib import sys -import create_conda import pytest +import create_conda + @pytest.mark.parametrize("env_exists", [True, False]) @pytest.mark.parametrize("git_ignore", [True, False]) diff --git a/python_files/tests/test_create_venv.py b/python_files/tests/test_create_venv.py index 1539f1d9b44e..2387f099140f 100644 --- a/python_files/tests/test_create_venv.py +++ b/python_files/tests/test_create_venv.py @@ -168,7 +168,7 @@ def test_toml_args(extras, expected): actual = [] - def run_process(args, error_message): + def run_process(args, error_message): # noqa: ARG001 nonlocal actual actual = args[1:] @@ -201,7 +201,7 @@ def test_requirements_args(extras, expected): actual = [] - def run_process(args, error_message): + def run_process(args, error_message): # noqa: ARG001 nonlocal actual actual.append(args) diff --git a/python_files/tests/test_dynamic_cursor.py b/python_files/tests/test_dynamic_cursor.py index 7aea59427aa6..d30887c24d5b 100644 --- a/python_files/tests/test_dynamic_cursor.py +++ b/python_files/tests/test_dynamic_cursor.py @@ -5,13 +5,7 @@ def test_dictionary_mouse_mover(): - """ - Having the mouse cursor on second line, - 'my_dict = {' - and pressing shift+enter should bring the - mouse cursor to line 6, on and to be able to run - 'print('only send the dictionary')' - """ + """Having the mouse cursor on second line, 'my_dict = {' and pressing shift+enter should bring the mouse cursor to line 6, on and to be able to run 'print('only send the dictionary')'.""" importlib.reload(normalizeSelection) src = textwrap.dedent( """\ @@ -24,18 +18,16 @@ def test_dictionary_mouse_mover(): """ ) - result = normalizeSelection.traverse_file(src, 2, 2, False) + result = normalizeSelection.traverse_file(src, 2, 2, was_highlighted=False) assert result["which_line_next"] == 6 def test_beginning_func(): - """ - Pressing shift+enter on the very first line, - of function definition, such as 'my_func():' - It should properly skip the comment and assert the - next executable line to be executed is line 5 at - 'my_dict = {' + """Pressing shift+enter on the very first line, of function definition, such as 'my_func():'. + + It should properly skip the comment and assert the next executable line to be + executed is line 5 at 'my_dict = {'. 
""" importlib.reload(normalizeSelection) src = textwrap.dedent( @@ -51,7 +43,7 @@ def my_func(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["which_line_next"] == 5 @@ -69,7 +61,7 @@ def test_cursor_forloop(): """ ) - result = normalizeSelection.traverse_file(src, 2, 2, False) + result = normalizeSelection.traverse_file(src, 2, 2, was_highlighted=False) assert result["which_line_next"] == 6 @@ -85,7 +77,7 @@ def test_inside_forloop(): """ ) - result = normalizeSelection.traverse_file(src, 2, 2, False) + result = normalizeSelection.traverse_file(src, 2, 2, was_highlighted=False) assert result["which_line_next"] == 3 @@ -98,7 +90,7 @@ def test_skip_sameline_statements(): print("Next line to be run is here!") """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["which_line_next"] == 2 @@ -119,17 +111,14 @@ def test_skip_multi_comp_lambda(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) # Shift enter from the very first ( should make # next executable statement as the lambda expression assert result["which_line_next"] == 7 def test_move_whole_class(): - """ - Shift+enter on a class definition - should move the cursor after running whole class. - """ + """Shift+enter on a class definition should move the cursor after running whole class.""" importlib.reload(normalizeSelection) src = textwrap.dedent( """\ @@ -142,7 +131,7 @@ def add_call(self, name, args=None, kwargs=None): print("We should be here after running whole class") """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["which_line_next"] == 7 @@ -163,7 +152,7 @@ def next_func(): print("Not here but above") """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["which_line_next"] == 9 @@ -181,7 +170,7 @@ def test_try_catch_move(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["which_line_next"] == 6 @@ -199,5 +188,5 @@ def test_skip_nested(): print("Cursor should be here after running line 1") """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["which_line_next"] == 8 diff --git a/python_files/tests/test_installed_check.py b/python_files/tests/test_installed_check.py index dae019359e08..607e02f34abd 100644 --- a/python_files/tests/test_installed_check.py +++ b/python_files/tests/test_installed_check.py @@ -7,9 +7,9 @@ import pathlib import subprocess import sys +from typing import Dict, List, Optional, Union import pytest -from typing import Dict, List, Optional, Union SCRIPT_PATH = pathlib.Path(__file__).parent.parent / "installed_check.py" TEST_DATA = pathlib.Path(__file__).parent / "test_data" @@ -21,12 +21,12 @@ def generate_file(base_file: pathlib.Path): basename = "pyproject.toml" if "pyproject" in base_file.name else "requirements.txt" fullpath = base_file.parent / basename if fullpath.exists(): - os.unlink(os.fspath(fullpath)) + fullpath.unlink() 
fullpath.write_text(base_file.read_text(encoding="utf-8")) try: yield fullpath finally: - os.unlink(str(fullpath)) + fullpath.unlink() def run_on_file( @@ -41,8 +41,7 @@ def run_on_file( os.fspath(SCRIPT_PATH), os.fspath(file_path), ], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + capture_output=True, check=True, env=env, ) diff --git a/python_files/tests/test_normalize_selection.py b/python_files/tests/test_normalize_selection.py index 60dfddb11e2d..e16eb118db12 100644 --- a/python_files/tests/test_normalize_selection.py +++ b/python_files/tests/test_normalize_selection.py @@ -10,16 +10,16 @@ import normalizeSelection -class TestNormalizationScript(object): +class TestNormalizationScript: """Unit tests for the normalization script.""" - def test_basicNormalization(self): + def test_basic_normalization(self): src = 'print("this is a test")' expected = src + "\n" result = normalizeSelection.normalize_lines(src) assert result == expected - def test_moreThanOneLine(self): + def test_more_than_one_line(self): src = textwrap.dedent( """\ # Some rando comment @@ -38,7 +38,7 @@ def show_something(): result = normalizeSelection.normalize_lines(src) assert result == expected - def test_withHangingIndent(self): + def test_with_hanging_indent(self): src = textwrap.dedent( """\ x = 22 @@ -64,7 +64,7 @@ def test_withHangingIndent(self): result = normalizeSelection.normalize_lines(src) assert result == expected - def test_clearOutExtraneousNewlines(self): + def test_clear_out_extraneous_newlines(self): src = textwrap.dedent( """\ value_x = 22 @@ -88,7 +88,7 @@ def test_clearOutExtraneousNewlines(self): result = normalizeSelection.normalize_lines(src) assert result == expected - def test_clearOutExtraLinesAndWhitespace(self): + def test_clear_out_extra_lines_and_whitespace(self): src = textwrap.dedent( """\ if True: @@ -115,13 +115,13 @@ def test_clearOutExtraLinesAndWhitespace(self): result = normalizeSelection.normalize_lines(src) assert result == expected - def test_partialSingleLine(self): + def test_partial_single_line(self): src = " print('foo')" expected = textwrap.dedent(src) + "\n" result = normalizeSelection.normalize_lines(src) assert result == expected - def test_multiLineWithIndent(self): + def test_multiline_with_indent(self): src = """\ if (x > 0 @@ -146,7 +146,7 @@ def test_multiLineWithIndent(self): result = normalizeSelection.normalize_lines(src) assert result == expected - def test_multiLineWithComment(self): + def test_multiline_with_comment(self): src = textwrap.dedent( """\ @@ -172,7 +172,7 @@ def test_exception(self): result = normalizeSelection.normalize_lines(src) assert result == expected - def test_multilineException(self): + def test_multiline_exception(self): src = textwrap.dedent( """\ diff --git a/python_files/tests/test_shell_integration.py b/python_files/tests/test_shell_integration.py index c5911aad2d1d..ea7ea4099bb2 100644 --- a/python_files/tests/test_shell_integration.py +++ b/python_files/tests/test_shell_integration.py @@ -1,12 +1,13 @@ import importlib import sys from unittest.mock import Mock + import pythonrc def test_decoration_success(): importlib.reload(pythonrc) - ps1 = pythonrc.ps1() + ps1 = pythonrc.PS1() ps1.hooks.failure_flag = False result = str(ps1) @@ -21,7 +22,7 @@ def test_decoration_success(): def test_decoration_failure(): importlib.reload(pythonrc) - ps1 = pythonrc.ps1() + ps1 = pythonrc.PS1() ps1.hooks.failure_flag = True result = str(ps1) @@ -36,10 +37,10 @@ def test_decoration_failure(): def test_displayhook_call(): 
importlib.reload(pythonrc) - pythonrc.ps1() + pythonrc.PS1() mock_displayhook = Mock() - hooks = pythonrc.repl_hooks() + hooks = pythonrc.REPLHooks() hooks.original_displayhook = mock_displayhook hooks.my_displayhook("mock_value") @@ -49,10 +50,10 @@ def test_displayhook_call(): def test_excepthook_call(): importlib.reload(pythonrc) - pythonrc.ps1() + pythonrc.PS1() mock_excepthook = Mock() - hooks = pythonrc.repl_hooks() + hooks = pythonrc.REPLHooks() hooks.original_excepthook = mock_excepthook hooks.my_excepthook("mock_type", "mock_value", "mock_traceback") diff --git a/python_files/tests/test_smart_selection.py b/python_files/tests/test_smart_selection.py index b86e6f9dc82e..15b1b1a3ec02 100644 --- a/python_files/tests/test_smart_selection.py +++ b/python_files/tests/test_smart_selection.py @@ -26,7 +26,7 @@ def test_part_dictionary(): """ ) - result = normalizeSelection.traverse_file(src, 3, 3, False) + result = normalizeSelection.traverse_file(src, 3, 3, was_highlighted=False) assert result["normalized_smart_result"] == expected @@ -53,7 +53,7 @@ def test_nested_loop(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["normalized_smart_result"] == expected @@ -84,7 +84,7 @@ def test_smart_shift_enter_multiple_statements(): print("Mercedes") """ ) - result = normalizeSelection.traverse_file(src, 8, 8, False) + result = normalizeSelection.traverse_file(src, 8, 8, was_highlighted=False) assert result["normalized_smart_result"] == expected @@ -128,7 +128,7 @@ def test_two_layer_dictionary(): } """ ) - result = normalizeSelection.traverse_file(src, 6, 7, False) + result = normalizeSelection.traverse_file(src, 6, 7, was_highlighted=False) assert result["normalized_smart_result"] == expected @@ -158,7 +158,7 @@ def my_dogs(): """ ) - result = normalizeSelection.traverse_file(src, 2, 2, False) + result = normalizeSelection.traverse_file(src, 2, 2, was_highlighted=False) assert result["normalized_smart_result"] == expected @@ -183,18 +183,13 @@ def test_small_forloop(): # Cover the whole for loop block with multiple inner statements # Make sure to contain all of the print statements included. - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["normalized_smart_result"] == expected def inner_for_loop_component(): - """ - Pressing shift+enter inside a for loop, - specifically on a viable expression - by itself, such as print(i) - should only return that exact expression - """ + """Pressing shift+enter inside a for loop, specifically on a viable expression by itself, such as print(i) should only return that exact expression.""" importlib.reload(normalizeSelection) src = textwrap.dedent( """\ @@ -203,7 +198,7 @@ def inner_for_loop_component(): print("Please also send this print statement") """ ) - result = normalizeSelection.traverse_file(src, 2, 2, False) + result = normalizeSelection.traverse_file(src, 2, 2, was_highlighted=False) expected = textwrap.dedent( """\ print(i) @@ -214,13 +209,7 @@ def inner_for_loop_component(): def test_dict_comprehension(): - """ - Having the mouse cursor on the first line, - and pressing shift+enter should return the - whole dictionary comp, respecting user's code style. 
- """ - - importlib.reload + """Having the mouse cursor on the first line, and pressing shift+enter should return the whole dictionary comp, respecting user's code style.""" src = textwrap.dedent( """\ my_dict_comp = {temp_mover: @@ -235,17 +224,13 @@ def test_dict_comprehension(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["normalized_smart_result"] == expected def test_send_whole_generator(): - """ - Pressing shift+enter on the first line, which is the '(' - should be returning the whole generator expression instead of just the '(' - """ - + """Pressing shift+enter on the first line, which is the '(' should be returning the whole generator expression instead of just the '('.""" importlib.reload(normalizeSelection) src = textwrap.dedent( """\ @@ -268,19 +253,13 @@ def test_send_whole_generator(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["normalized_smart_result"] == expected def test_multiline_lambda(): - """ - Shift+enter on part of the lambda expression - should return the whole lambda expression, - regardless of whether all the component of - lambda expression is on the same or not. - """ - + """Shift+enter on part of the lambda expression should return the whole lambda expression, regardless of whether all the component of lambda expression is on the same or not.""" importlib.reload(normalizeSelection) src = textwrap.dedent( """\ @@ -298,15 +277,12 @@ def test_multiline_lambda(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["normalized_smart_result"] == expected def test_send_whole_class(): - """ - Shift+enter on a class definition - should send the whole class definition - """ + """Shift+enter on a class definition should send the whole class definition.""" importlib.reload(normalizeSelection) src = textwrap.dedent( """\ @@ -319,7 +295,7 @@ def add_call(self, name, args=None, kwargs=None): print("We should be here after running whole class") """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) expected = textwrap.dedent( """\ class Stub(object): @@ -334,11 +310,7 @@ def add_call(self, name, args=None, kwargs=None): def test_send_whole_if_statement(): - """ - Shift+enter on an if statement - should send the whole if statement - including statements inside and else. 
- """ + """Shift+enter on an if statement should send the whole if statement including statements inside and else.""" importlib.reload(normalizeSelection) src = textwrap.dedent( """\ @@ -359,7 +331,7 @@ def test_send_whole_if_statement(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["normalized_smart_result"] == expected @@ -384,5 +356,5 @@ def test_send_try(): """ ) - result = normalizeSelection.traverse_file(src, 1, 1, False) + result = normalizeSelection.traverse_file(src, 1, 1, was_highlighted=False) assert result["normalized_smart_result"] == expected diff --git a/python_files/tests/testing_tools/adapter/pytest/test_cli.py b/python_files/tests/testing_tools/adapter/pytest/test_cli.py index 6f590a31fa56..b1d9196cd50d 100644 --- a/python_files/tests/testing_tools/adapter/pytest/test_cli.py +++ b/python_files/tests/testing_tools/adapter/pytest/test_cli.py @@ -1,16 +1,18 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +# ruff:noqa: PT009, PT027 import unittest -from ....util import Stub, StubProxy from testing_tools.adapter.errors import UnsupportedCommandError from testing_tools.adapter.pytest._cli import add_subparser +from ....util import Stub, StubProxy + class StubSubparsers(StubProxy): def __init__(self, stub=None, name="subparsers"): - super(StubSubparsers, self).__init__(stub, name) + super().__init__(stub, name) def add_parser(self, name): self.add_call("add_parser", None, {"name": name}) @@ -19,7 +21,7 @@ def add_parser(self, name): class StubArgParser(StubProxy): def __init__(self, stub=None): - super(StubArgParser, self).__init__(stub, "argparser") + super().__init__(stub, "argparser") def add_argument(self, *args, **kwargs): self.add_call("add_argument", args, kwargs) diff --git a/python_files/tests/testing_tools/adapter/pytest/test_discovery.py b/python_files/tests/testing_tools/adapter/pytest/test_discovery.py index 55a0e65102ae..c8658ad2d89e 100644 --- a/python_files/tests/testing_tools/adapter/pytest/test_discovery.py +++ b/python_files/tests/testing_tools/adapter/pytest/test_discovery.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
- -from __future__ import print_function, unicode_literals +# ruff:noqa: PT009, PT027, SLF001 try: from io import StringIO @@ -15,6 +14,7 @@ import _pytest.doctest import pytest + from testing_tools.adapter import info from testing_tools.adapter import util as adapter_util from testing_tools.adapter.pytest import _discovery @@ -37,7 +37,7 @@ def unique(collection, key): class StubPyTest(util.StubProxy): def __init__(self, stub=None): - super(StubPyTest, self).__init__(stub, "pytest") + super().__init__(stub, "pytest") self.return_main = 0 def main(self, args, plugins): @@ -49,7 +49,7 @@ class StubPlugin(util.StubProxy): _started = True def __init__(self, stub=None, tests=None): - super(StubPlugin, self).__init__(stub, "plugin") + super().__init__(stub, "plugin") if tests is None: tests = StubDiscoveredTests(self.stub) self._tests = tests @@ -68,7 +68,7 @@ class StubDiscoveredTests(util.StubProxy): NOT_FOUND = object() def __init__(self, stub=None): - super(StubDiscoveredTests, self).__init__(stub, "discovered") + super().__init__(stub, "discovered") self.return_items = [] self.return_parents = [] @@ -92,12 +92,12 @@ def add_test(self, test, parents): self.add_call("add_test", None, {"test": test, "parents": parents}) -class FakeFunc(object): +class FakeFunc: def __init__(self, name): self.__name__ = name -class FakeMarker(object): +class FakeMarker: def __init__(self, name): self.name = name @@ -107,7 +107,7 @@ class StubPytestItem(util.StubProxy): _hasfunc = True def __init__(self, stub=None, **attrs): - super(StubPytestItem, self).__init__(stub, "pytest.Item") + super().__init__(stub, "pytest.Item") if attrs.get("function") is None: attrs.pop("function", None) self._hasfunc = False @@ -133,9 +133,8 @@ def __repr__(self): def __getattr__(self, name): if not self._debugging: self.add_call(name + " (attr)", None, None) - if name == "function": - if not self._hasfunc: - raise AttributeError(name) + if name == "function" and not self._hasfunc: + raise AttributeError(name) def func(*args, **kwargs): self.add_call(name, args or None, kwargs or None) @@ -153,7 +152,7 @@ def from_args(cls, *args, **kwargs): return self def __init__(self, *args, **kwargs): - super(StubSubtypedItem, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if "nodeid" in self.__dict__: self._nodeid = self.__dict__.pop("nodeid") @@ -182,7 +181,7 @@ def create_stub_doctest_item(*args, **kwargs): class StubPytestSession(util.StubProxy): def __init__(self, stub=None): - super(StubPytestSession, self).__init__(stub, "pytest.Session") + super().__init__(stub, "pytest.Session") def __getattr__(self, name): self.add_call(name + " (attr)", None, None) @@ -195,7 +194,7 @@ def func(*args, **kwargs): class StubPytestConfig(util.StubProxy): def __init__(self, stub=None): - super(StubPytestConfig, self).__init__(stub, "pytest.Config") + super().__init__(stub, "pytest.Config") def __getattr__(self, name): self.add_call(name + " (attr)", None, None) @@ -220,94 +219,67 @@ def normcase(path): def _fix_fileid(*args): return adapter_util.fix_fileid( *args, - **dict( - # dependency injection - _normcase=normcase, - _pathsep=pathsep, - ), + _normcase=normcase, + _pathsep=pathsep, ) def _normalize_test_id(*args): return pytest_item._normalize_test_id( *args, - **dict( - # dependency injection - _fix_fileid=_fix_fileid, - _pathsep=pathsep, - ), + _fix_fileid=_fix_fileid, + _pathsep=pathsep, ) def _iter_nodes(*args): return pytest_item._iter_nodes( *args, - **dict( - # dependency injection - _normalize_test_id=_normalize_test_id, 
- _normcase=normcase, - _pathsep=pathsep, - ), + _normalize_test_id=_normalize_test_id, + _normcase=normcase, + _pathsep=pathsep, ) def _parse_node_id(*args): return pytest_item._parse_node_id( *args, - **dict( - # dependency injection - _iter_nodes=_iter_nodes, - ), + _iter_nodes=_iter_nodes, ) ########## def _split_fspath(*args): return pytest_item._split_fspath( *args, - **dict( - # dependency injection - _normcase=normcase, - ), + _normcase=normcase, ) ########## def _matches_relfile(*args): return pytest_item._matches_relfile( *args, - **dict( - # dependency injection - _normcase=normcase, - _pathsep=pathsep, - ), + _normcase=normcase, + _pathsep=pathsep, ) def _is_legacy_wrapper(*args): return pytest_item._is_legacy_wrapper( *args, - **dict( - # dependency injection - _pathsep=pathsep, - ), + _pathsep=pathsep, ) def _get_location(*args): return pytest_item._get_location( *args, - **dict( - # dependency injection - _matches_relfile=_matches_relfile, - _is_legacy_wrapper=_is_legacy_wrapper, - _pathsep=pathsep, - ), + _matches_relfile=_matches_relfile, + _is_legacy_wrapper=_is_legacy_wrapper, + _pathsep=pathsep, ) ########## def _parse_item(item): return pytest_item.parse_item( item, - **dict( - # dependency injection - _parse_node_id=_parse_node_id, - _split_fspath=_split_fspath, - _get_location=_get_location, - ), + _parse_node_id=_parse_node_id, + _split_fspath=_split_fspath, + _get_location=_get_location, ) return _parse_item @@ -330,9 +302,7 @@ def ret(args, plugins): class DiscoverTests(unittest.TestCase): - DEFAULT_ARGS = [ - "--collect-only", - ] + DEFAULT_ARGS = ["--collect-only"] # noqa: RUF012 def test_basic(self): stub = util.Stub() @@ -362,7 +332,7 @@ def test_failure(self): pytest.return_main = 2 plugin = StubPlugin(stub) - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 _discovery.discover([], _pytest_main=pytest.main, _plugin=plugin) self.assertEqual( @@ -440,7 +410,7 @@ def test_stdio_hidden_file(self): _discovery.discover( [], hidestdio=True, - _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), # noqa: FBT003 _plugin=plugin, ) finally: @@ -468,7 +438,7 @@ def test_stdio_hidden_fd(self): _discovery.discover( [], hidestdio=True, - _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), # noqa: FBT003 _plugin=plugin, ) captured = sys.stdout.read() @@ -496,7 +466,7 @@ def test_stdio_not_hidden_file(self): _discovery.discover( [], hidestdio=False, - _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), # noqa: FBT003 _plugin=plugin, ) finally: @@ -522,7 +492,7 @@ def test_stdio_not_hidden_fd(self): _discovery.discover( [], hidestdio=False, - _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), # noqa: FBT003 _plugin=plugin, ) finally: @@ -628,13 +598,13 @@ def test_modifyitems(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./test_spam.py::SpamTests", "SpamTests", "suite"), ("./test_spam.py", "test_spam.py", "file"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./test_spam.py::SpamTests::test_one", name="test_one", path=info.SingleTestPath( @@ -643,22 +613,22 @@ def test_modifyitems(self): func="SpamTests.test_one", sub=None, ), - source="{}:{}".format(relfile1, 13), + source=f"{relfile1}:{13}", 
markers=None, parentid="./test_spam.py::SpamTests", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./test_spam.py::SpamTests", "SpamTests", "suite"), ("./test_spam.py", "test_spam.py", "file"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./test_spam.py::SpamTests::test_other", name="test_other", path=info.SingleTestPath( @@ -667,21 +637,21 @@ def test_modifyitems(self): func="SpamTests.test_other", sub=None, ), - source="{}:{}".format(relfile1, 20), + source=f"{relfile1}:{20}", markers=None, parentid="./test_spam.py::SpamTests", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./test_spam.py", "test_spam.py", "file"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./test_spam.py::test_all", name="test_all", path=info.SingleTestPath( @@ -690,22 +660,22 @@ def test_modifyitems(self): func="test_all", sub=None, ), - source="{}:{}".format(relfile1, 145), + source=f"{relfile1}:{145}", markers=None, parentid="./test_spam.py", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./test_spam.py::test_each", "test_each", "function"), ("./test_spam.py", "test_spam.py", "file"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./test_spam.py::test_each[10-10]", name="10-10", path=info.SingleTestPath( @@ -714,17 +684,17 @@ def test_modifyitems(self): func="test_each", sub=["[10-10]"], ), - source="{}:{}".format(relfile1, 274), + source=f"{relfile1}:{274}", markers=None, parentid="./test_spam.py::test_each", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ( "./x/y/z/test_eggs.py::All::BasicTests", "BasicTests", @@ -737,7 +707,7 @@ def test_modifyitems(self): ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::All::BasicTests::test_first", name="test_first", path=info.SingleTestPath( @@ -746,17 +716,17 @@ def test_modifyitems(self): func="All.BasicTests.test_first", sub=None, ), - source="{}:{}".format(adapter_util.fix_relpath(relfile2), 32), + source=f"{adapter_util.fix_relpath(relfile2)}:{32}", markers=None, parentid="./x/y/z/test_eggs.py::All::BasicTests", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ( "./x/y/z/test_eggs.py::All::BasicTests::test_each", "test_each", @@ -774,7 +744,7 @@ def test_modifyitems(self): ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::All::BasicTests::test_each[1+2-3]", name="1+2-3", path=info.SingleTestPath( @@ -783,11 +753,11 @@ def test_modifyitems(self): func="All.BasicTests.test_each", sub=["[1+2-3]"], ), - source="{}:{}".format(adapter_util.fix_relpath(relfile2), 63), + source=f"{adapter_util.fix_relpath(relfile2)}:{63}", markers=["expected-failure", "skip", "skip-if"], parentid="./x/y/z/test_eggs.py::All::BasicTests::test_each", ), - ), + }, ), ] self.assertEqual(stub.calls, expected) @@ -821,8 +791,8 @@ def test_finish(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), @@ -830,7 +800,7 @@ def test_finish(self): ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": 
info.SingleTestInfo( id="./x/y/z/test_eggs.py::SpamTests::test_spam", name="test_spam", path=info.SingleTestPath( @@ -839,11 +809,11 @@ def test_finish(self): func="SpamTests.test_spam", sub=None, ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 13), + source=f"{adapter_util.fix_relpath(relfile)}:{13}", markers=None, parentid="./x/y/z/test_eggs.py::SpamTests", ), - ), + }, ), ], ) @@ -898,13 +868,13 @@ def test_doctest(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/test_doctest.txt", "test_doctest.txt", "file"), ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/test_doctest.txt::test_doctest.txt", name="test_doctest.txt", path=info.SingleTestPath( @@ -912,24 +882,24 @@ def test_doctest(self): relfile=adapter_util.fix_relpath(doctestfile), func=None, ), - source="{}:{}".format(adapter_util.fix_relpath(doctestfile), 1), + source=f"{adapter_util.fix_relpath(doctestfile)}:{1}", markers=[], parentid="./x/test_doctest.txt", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), ("./x/y", "y", "folder"), ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::test_eggs", name="test_eggs", path=info.SingleTestPath( @@ -937,24 +907,24 @@ def test_doctest(self): relfile=adapter_util.fix_relpath(relfile), func=None, ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 1), + source=f"{adapter_util.fix_relpath(relfile)}:{1}", markers=[], parentid="./x/y/z/test_eggs.py", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), ("./x/y", "y", "folder"), ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::test_eggs.TestSpam", name="test_eggs.TestSpam", path=info.SingleTestPath( @@ -962,24 +932,24 @@ def test_doctest(self): relfile=adapter_util.fix_relpath(relfile), func=None, ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 13), + source=f"{adapter_util.fix_relpath(relfile)}:{13}", markers=[], parentid="./x/y/z/test_eggs.py", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), ("./x/y", "y", "folder"), ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::test_eggs.TestSpam.TestEggs", name="test_eggs.TestSpam.TestEggs", path=info.SingleTestPath( @@ -987,11 +957,11 @@ def test_doctest(self): relfile=adapter_util.fix_relpath(relfile), func=None, ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 28), + source=f"{adapter_util.fix_relpath(relfile)}:{28}", markers=[], parentid="./x/y/z/test_eggs.py", ), - ), + }, ), ], ) @@ -1025,8 +995,8 @@ def test_nested_brackets(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ( "./x/y/z/test_eggs.py::SpamTests::test_spam", "test_spam", @@ -1039,7 +1009,7 @@ def test_nested_brackets(self): ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::SpamTests::test_spam[a-[b]-c]", name="a-[b]-c", path=info.SingleTestPath( @@ -1048,11 +1018,11 @@ 
def test_nested_brackets(self): func="SpamTests.test_spam", sub=["[a-[b]-c]"], ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 13), + source=f"{adapter_util.fix_relpath(relfile)}:{13}", markers=None, parentid="./x/y/z/test_eggs.py::SpamTests::test_spam", ), - ), + }, ), ], ) @@ -1086,8 +1056,8 @@ def test_nested_suite(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ( "./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", "Eggs", @@ -1101,7 +1071,7 @@ def test_nested_suite(self): ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1110,11 +1080,11 @@ def test_nested_suite(self): func="SpamTests.Ham.Eggs.test_spam", sub=None, ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 13), + source=f"{adapter_util.fix_relpath(relfile)}:{13}", markers=None, parentid="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", ), - ), + }, ), ], ) @@ -1184,8 +1154,8 @@ def test_windows(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/Y/Z/test_Eggs.py::SpamTests", "SpamTests", "suite"), (r"./X/Y/Z/test_Eggs.py", "test_Eggs.py", "file"), (r"./X/Y/Z", "Z", "folder"), @@ -1193,7 +1163,7 @@ def test_windows(self): (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/Y/Z/test_Eggs.py::SpamTests::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1206,7 +1176,7 @@ def test_windows(self): markers=None, parentid=r"./X/Y/Z/test_Eggs.py::SpamTests", ), - ), + }, ), # permutations # (*all* the IDs use "/") @@ -1215,13 +1185,13 @@ def test_windows(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_a.py", "test_a.py", "file"), (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_a.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1234,19 +1204,19 @@ def test_windows(self): markers=None, parentid=r"./X/test_a.py", ), - ), + }, ), # /, \, / ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_b.py", "test_b.py", "file"), (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_b.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1259,19 +1229,19 @@ def test_windows(self): markers=None, parentid=r"./X/test_b.py", ), - ), + }, ), # /, /, \ ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_c.py", "test_c.py", "file"), (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_c.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1284,19 +1254,19 @@ def test_windows(self): markers=None, parentid=r"./X/test_c.py", ), - ), + }, ), # /, /, / ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_d.py", "test_d.py", "file"), (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_d.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1309,19 +1279,19 @@ def test_windows(self): markers=None, parentid=r"./X/test_d.py", ), - ), + }, ), # \, \, \ ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_e.py", "test_e.py", "file"), (r"./X", "X", 
"folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_e.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1334,19 +1304,19 @@ def test_windows(self): markers=None, parentid=r"./X/test_e.py", ), - ), + }, ), # \, \, / ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_f.py", "test_f.py", "file"), (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_f.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1359,19 +1329,19 @@ def test_windows(self): markers=None, parentid=r"./X/test_f.py", ), - ), + }, ), # \, /, \ ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_g.py", "test_g.py", "file"), (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_g.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1384,19 +1354,19 @@ def test_windows(self): markers=None, parentid=r"./X/test_g.py", ), - ), + }, ), # \, /, / ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ (r"./X/test_h.py", "test_h.py", "file"), (r"./X", "X", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id=r"./X/test_h.py::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1409,7 +1379,7 @@ def test_windows(self): markers=None, parentid=r"./X/test_h.py", ), - ), + }, ), ] self.assertEqual(stub.calls, expected) @@ -1443,8 +1413,8 @@ def test_mysterious_parens(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), @@ -1452,7 +1422,7 @@ def test_mysterious_parens(self): ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::SpamTests::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1461,11 +1431,11 @@ def test_mysterious_parens(self): func="SpamTests.test_spam", sub=[], ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 13), + source=f"{adapter_util.fix_relpath(relfile)}:{13}", markers=None, parentid="./x/y/z/test_eggs.py::SpamTests", ), - ), + }, ), ], ) @@ -1499,8 +1469,8 @@ def test_mysterious_colons(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), @@ -1508,7 +1478,7 @@ def test_mysterious_colons(self): ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::SpamTests::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1517,11 +1487,11 @@ def test_mysterious_colons(self): func="SpamTests.test_spam", sub=[], ), - source="{}:{}".format(adapter_util.fix_relpath(relfile), 13), + source=f"{adapter_util.fix_relpath(relfile)}:{13}", markers=None, parentid="./x/y/z/test_eggs.py::SpamTests", ), - ), + }, ), ], ) @@ -1567,8 +1537,8 @@ def test_imported_test(self): ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), @@ -1576,7 +1546,7 @@ def test_imported_test(self): ("./x", "x", "folder"), (".", 
testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::SpamTests::test_spam", name="test_spam", path=info.SingleTestPath( @@ -1585,24 +1555,24 @@ def test_imported_test(self): func="SpamTests.test_spam", sub=None, ), - source="{}:{}".format(adapter_util.fix_relpath(srcfile), 13), + source=f"{adapter_util.fix_relpath(srcfile)}:{13}", markers=None, parentid="./x/y/z/test_eggs.py::SpamTests", ), - ), + }, ), ( "discovered.add_test", None, - dict( - parents=[ + { + "parents": [ ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), ("./x/y/z", "z", "folder"), ("./x/y", "y", "folder"), ("./x", "x", "folder"), (".", testroot, "folder"), ], - test=info.SingleTestInfo( + "test": info.SingleTestInfo( id="./x/y/z/test_eggs.py::test_ham", name="test_ham", path=info.SingleTestPath( @@ -1611,11 +1581,11 @@ def test_imported_test(self): func="test_ham", sub=None, ), - source="{}:{}".format(adapter_util.fix_relpath(srcfile), 4), + source=f"{adapter_util.fix_relpath(srcfile)}:{4}", markers=None, parentid="./x/y/z/test_eggs.py", ), - ), + }, ), ], ) diff --git a/python_files/tests/testing_tools/adapter/test___main__.py b/python_files/tests/testing_tools/adapter/test___main__.py index 5ff0ec30c947..8028db530012 100644 --- a/python_files/tests/testing_tools/adapter/test___main__.py +++ b/python_files/tests/testing_tools/adapter/test___main__.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +# ruff:noqa: PT009, PT027 import unittest @@ -15,7 +16,7 @@ class StubTool(StubProxy): def __init__(self, name, stub=None): - super(StubTool, self).__init__(stub, name) + super().__init__(stub, name) self.return_discover = None def discover(self, args, **kwargs): @@ -27,7 +28,7 @@ def discover(self, args, **kwargs): class StubReporter(StubProxy): def __init__(self, stub=None): - super(StubReporter, self).__init__(stub, "reporter") + super().__init__(stub, "reporter") def report(self, tests, parents, **kwargs): self.add_call("report", (tests, parents), kwargs or None) diff --git a/python_files/tests/testing_tools/adapter/test_discovery.py b/python_files/tests/testing_tools/adapter/test_discovery.py index 2fe4db7caa37..ea9a5cdfd38e 100644 --- a/python_files/tests/testing_tools/adapter/test_discovery.py +++ b/python_files/tests/testing_tools/adapter/test_discovery.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
- -from __future__ import absolute_import, print_function +# ruff:noqa: PT009, PT027 import unittest @@ -32,7 +31,7 @@ def test_list(self): func="test_each", sub=["[10-10]"], ), - source="{}:{}".format(relfile, 10), + source=f"{relfile}:{10}", markers=None, # missing "./": parentid="test_spam.py::test_each", @@ -46,7 +45,7 @@ def test_list(self): func="All.BasicTests.test_first", sub=None, ), - source="{}:{}".format(relfile, 62), + source=f"{relfile}:{62}", markers=None, parentid="test_spam.py::All::BasicTests", ), @@ -123,7 +122,7 @@ def test_parents(self): func="test_each", sub=["[10-10]"], ), - source="{}:{}".format(relfile, 10), + source=f"{relfile}:{10}", markers=None, # missing "./", using pathsep: parentid=relfile + "::test_each", @@ -138,7 +137,7 @@ def test_parents(self): func="All.BasicTests.test_first", sub=None, ), - source="{}:{}".format(relfile, 61), + source=f"{relfile}:{61}", markers=None, # missing "./", using pathsep: parentid=relfile + "::All::BasicTests", @@ -247,7 +246,7 @@ def test_add_test_simple(self): func="test_spam", ), # missing "./": - source="{}:{}".format(relfile, 11), + source=f"{relfile}:{11}", markers=[], # missing "./": parentid=relfile, @@ -303,7 +302,7 @@ def test_multiroot(self): relfile=fix_relpath(relfile1), func="test_spam", ), - source="{}:{}".format(relfile1, 10), + source=f"{relfile1}:{10}", markers=[], # missing "./": parentid=relfile1, @@ -329,7 +328,7 @@ def test_multiroot(self): relfile=fix_relpath(relfile2), func="BasicTests.test_first", ), - source="{}:{}".format(relfile2, 61), + source=f"{relfile2}:{61}", markers=[], parentid=relfile2 + "::BasicTests", ), @@ -366,7 +365,7 @@ def test_multiroot(self): relfile=fix_relpath(relfile1), func="test_spam", ), - source="{}:{}".format(relfile1, 10), + source=f"{relfile1}:{10}", markers=[], parentid="./test_spam.py", ), @@ -379,7 +378,7 @@ def test_multiroot(self): relfile=fix_relpath(relfile2), func="BasicTests.test_first", ), - source="{}:{}".format(relfile2, 61), + source=f"{relfile2}:{61}", markers=[], parentid="./w/test_eggs.py::BasicTests", ), @@ -447,7 +446,7 @@ def test_doctest(self): relfile=doctestfile, func=None, ), - source="{}:{}".format(doctestfile, 0), + source=f"{doctestfile}:{0}", markers=[], parentid=doctestfile, ), @@ -460,7 +459,7 @@ def test_doctest(self): relfile=relfile, func=None, ), - source="{}:{}".format(relfile, 0), + source=f"{relfile}:{0}", markers=[], parentid=relfile, ), @@ -472,7 +471,7 @@ def test_doctest(self): relfile=relfile, func=None, ), - source="{}:{}".format(relfile, 12), + source=f"{relfile}:{12}", markers=[], parentid=relfile, ), @@ -484,7 +483,7 @@ def test_doctest(self): relfile=relfile, func=None, ), - source="{}:{}".format(relfile, 27), + source=f"{relfile}:{27}", markers=[], parentid=relfile, ), @@ -594,7 +593,7 @@ def test_nested_suite_simple(self): relfile=relfile, func="TestOuter.TestInner.test_spam", ), - source="{}:{}".format(relfile, 10), + source=f"{relfile}:{10}", markers=None, parentid=relfile + "::TestOuter::TestInner", ), @@ -606,7 +605,7 @@ def test_nested_suite_simple(self): relfile=relfile, func="TestOuter.TestInner.test_eggs", ), - source="{}:{}".format(relfile, 21), + source=f"{relfile}:{21}", markers=None, parentid=relfile + "::TestOuter::TestInner", ), diff --git a/python_files/tests/testing_tools/adapter/test_functional.py b/python_files/tests/testing_tools/adapter/test_functional.py index 45c85ee951dc..17c36ba743da 100644 --- a/python_files/tests/testing_tools/adapter/test_functional.py +++ 
b/python_files/tests/testing_tools/adapter/test_functional.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. - -from __future__ import absolute_import, unicode_literals +# ruff:noqa: PT009, PT027, PTH109, PTH118, PTH120 import json import os @@ -43,30 +42,14 @@ def _run_adapter(cmd, tool, *cliargs, **kwargs): hidestdio = kwargs.pop("hidestdio", True) assert not kwargs or tuple(kwargs) == ("stderr",) kwds = kwargs - argv = [sys.executable, SCRIPT, cmd, tool, "--"] + list(cliargs) + argv = [sys.executable, SCRIPT, cmd, tool, "--", *cliargs] if not hidestdio: argv.insert(4, "--no-hide-stdio") kwds["stderr"] = subprocess.STDOUT argv.append("--cache-clear") print("running {!r}".format(" ".join(arg.rpartition(CWD + "/")[-1] for arg in argv))) - output = subprocess.check_output(argv, universal_newlines=True, **kwds) - return output - -def fix_test_order(tests): - if sys.version_info >= (3, 6): - return tests - fixed = [] - curfile = None - group = [] - for test in tests: - if (curfile or "???") not in test["id"]: - fixed.extend(sorted(group, key=lambda t: t["id"])) - group = [] - curfile = test["id"].partition(".py::")[0] + ".py" - group.append(test) - fixed.extend(sorted(group, key=lambda t: t["id"])) - return fixed + return subprocess.check_output(argv, universal_newlines=True, **kwds) def fix_source(tests, testid, srcfile, lineno): @@ -74,17 +57,17 @@ def fix_source(tests, testid, srcfile, lineno): if test["id"] == testid: break else: - raise KeyError("test {!r} not found".format(testid)) + raise KeyError(f"test {testid!r} not found") if not srcfile: srcfile = test["source"].rpartition(":")[0] - test["source"] = fix_path("{}:{}".format(srcfile, lineno)) + test["source"] = fix_path(f"{srcfile}:{lineno}") def sorted_object(obj): if isinstance(obj, dict): - return sorted((key, sorted_object(obj[key])) for key in obj.keys()) + return sorted((key, sorted_object(obj[key])) for key in obj) if isinstance(obj, list): - return sorted((sorted_object(x) for x in obj)) + return sorted(sorted_object(x) for x in obj) else: return obj @@ -98,7 +81,7 @@ class PytestTests(unittest.TestCase): def setUp(self): if PATH_SEP is not os.path.sep: raise unittest.SkipTest("functional tests require unmodified env") - super(PytestTests, self).setUp() + super().setUp() def complex(self, testroot): results = COMPLEX.copy() @@ -150,19 +133,11 @@ def test_discover_simple(self): def test_discover_complex_default(self): projroot, testroot = resolve_testroot("complex") expected = self.complex(projroot) - expected[0]["tests"] = fix_test_order(expected[0]["tests"]) - if sys.version_info < (3,): - decorated = [ - "./tests/test_unittest.py::MyTests::test_skipped", - "./tests/test_unittest.py::MyTests::test_maybe_skipped", - "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", - ] - for testid in decorated: - fix_source(expected[0]["tests"], testid, None, 0) + expected[0]["tests"] = expected[0]["tests"] out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) result = json.loads(out) - result[0]["tests"] = fix_test_order(result[0]["tests"]) + result[0]["tests"] = result[0]["tests"] self.maxDiff = None self.assertEqual(sorted_object(result), sorted_object(expected)) @@ -232,21 +207,13 @@ def test_discover_complex_doctest(self): "parentid": "./mod.py", }, ] + expected[0]["tests"] - expected[0]["tests"] = fix_test_order(expected[0]["tests"]) - if sys.version_info < (3,): - decorated = [ - "./tests/test_unittest.py::MyTests::test_skipped", - 
"./tests/test_unittest.py::MyTests::test_maybe_skipped", - "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", - ] - for testid in decorated: - fix_source(expected[0]["tests"], testid, None, 0) + expected[0]["tests"] = expected[0]["tests"] out = run_adapter( "discover", "pytest", "--rootdir", projroot, "--doctest-modules", projroot ) result = json.loads(out) - result[0]["tests"] = fix_test_order(result[0]["tests"]) + result[0]["tests"] = result[0]["tests"] self.maxDiff = None self.assertEqual(sorted_object(result), sorted_object(expected)) diff --git a/python_files/tests/testing_tools/adapter/test_report.py b/python_files/tests/testing_tools/adapter/test_report.py index bb68c8a65e79..8fe7d764cca3 100644 --- a/python_files/tests/testing_tools/adapter/test_report.py +++ b/python_files/tests/testing_tools/adapter/test_report.py @@ -1,13 +1,15 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +# ruff:noqa: PT009 import json import unittest -from ...util import StubProxy -from testing_tools.adapter.util import fix_path, fix_relpath -from testing_tools.adapter.info import SingleTestInfo, SingleTestPath, ParentInfo +from testing_tools.adapter.info import ParentInfo, SingleTestInfo, SingleTestPath from testing_tools.adapter.report import report_discovered +from testing_tools.adapter.util import fix_path, fix_relpath + +from ...util import StubProxy class StubSender(StubProxy): @@ -34,7 +36,7 @@ def test_basic(self): relfile=relfile, func="test_spam", ), - source="{}:{}".format(relfile, 10), + source=f"{relfile}:{10}", markers=[], parentid="file#1", ), @@ -71,7 +73,7 @@ def test_basic(self): { "id": "test#1", "name": "test_spam", - "source": "{}:{}".format(relfile, 10), + "source": f"{relfile}:{10}", "markers": [], "parentid": "file#1", } @@ -105,7 +107,7 @@ def test_multiroot(self): relfile=relfile1, func="test_spam", ), - source="{}:{}".format(relfile1, 10), + source=f"{relfile1}:{10}", markers=[], parentid=relfileid1, ), @@ -142,7 +144,7 @@ def test_multiroot(self): { "id": relfileid1 + "::test_spam", "name": "test_spam", - "source": "{}:{}".format(relfile1, 10), + "source": f"{relfile1}:{10}", "markers": [], "parentid": relfileid1, } @@ -164,7 +166,7 @@ def test_multiroot(self): relfile=relfile2, func="BasicTests.test_first", ), - source="{}:{}".format(relfile2, 61), + source=f"{relfile2}:{61}", markers=[], parentid=relfileid2 + "::BasicTests", ), @@ -233,7 +235,7 @@ def test_multiroot(self): { "id": relfileid2 + "::BasicTests::test_first", "name": "test_first", - "source": "{}:{}".format(relfile2, 61), + "source": f"{relfile2}:{61}", "markers": [], "parentid": relfileid2 + "::BasicTests", } @@ -287,7 +289,7 @@ def test_complex(self): test_spam.py SpamTests test_okay - """ + """ # noqa: D205, D400 stub = StubSender() testroot = fix_path("/a/b/c") relfileid1 = "./test_ham.py" @@ -305,7 +307,7 @@ def test_complex(self): relfile=fix_path(relfileid1), func="MySuite.test_x1", ), - source="{}:{}".format(fix_path(relfileid1), 10), + source=f"{fix_path(relfileid1)}:{10}", markers=None, parentid=relfileid1 + "::MySuite", ), @@ -317,7 +319,7 @@ def test_complex(self): relfile=fix_path(relfileid1), func="MySuite.test_x2", ), - source="{}:{}".format(fix_path(relfileid1), 21), + source=f"{fix_path(relfileid1)}:{21}", markers=None, parentid=relfileid1 + "::MySuite", ), @@ -329,7 +331,7 @@ def test_complex(self): relfile=fix_path(relfileid2), func="SpamTests.test_okay", ), - source="{}:{}".format(fix_path(relfileid2), 17), + 
source=f"{fix_path(relfileid2)}:{17}", markers=None, parentid=relfileid2 + "::SpamTests", ), @@ -341,7 +343,7 @@ def test_complex(self): relfile=fix_path(relfileid3), func="test_ham1", ), - source="{}:{}".format(fix_path(relfileid3), 8), + source=f"{fix_path(relfileid3)}:{8}", markers=None, parentid=relfileid3, ), @@ -353,7 +355,7 @@ def test_complex(self): relfile=fix_path(relfileid3), func="HamTests.test_uh_oh", ), - source="{}:{}".format(fix_path(relfileid3), 19), + source=f"{fix_path(relfileid3)}:{19}", markers=["expected-failure"], parentid=relfileid3 + "::HamTests", ), @@ -365,7 +367,7 @@ def test_complex(self): relfile=fix_path(relfileid3), func="HamTests.test_whoa", ), - source="{}:{}".format(fix_path(relfileid3), 35), + source=f"{fix_path(relfileid3)}:{35}", markers=None, parentid=relfileid3 + "::HamTests", ), @@ -378,7 +380,7 @@ def test_complex(self): func="MoreHam.test_yay", sub=["[1-2]"], ), - source="{}:{}".format(fix_path(relfileid3), 57), + source=f"{fix_path(relfileid3)}:{57}", markers=None, parentid=relfileid3 + "::MoreHam::test_yay", ), @@ -391,7 +393,7 @@ def test_complex(self): func="MoreHam.test_yay", sub=["[1-2]", "[3=4]"], ), - source="{}:{}".format(fix_path(relfileid3), 72), + source=f"{fix_path(relfileid3)}:{72}", markers=None, parentid=relfileid3 + "::MoreHam::test_yay[1-2]", ), @@ -403,7 +405,7 @@ def test_complex(self): relfile=fix_path(relfileid4), func="SpamTests.test_okay", ), - source="{}:{}".format(fix_path(relfileid4), 15), + source=f"{fix_path(relfileid4)}:{15}", markers=None, parentid=relfileid4 + "::SpamTests", ), @@ -415,7 +417,7 @@ def test_complex(self): relfile=fix_path(relfileid5), func="SpamTests.test_okay", ), - source="{}:{}".format(fix_path(relfileid5), 12), + source=f"{fix_path(relfileid5)}:{12}", markers=None, parentid=relfileid5 + "::SpamTests", ), @@ -427,7 +429,7 @@ def test_complex(self): relfile=fix_path(relfileid6), func="SpamTests.test_okay", ), - source="{}:{}".format(fix_path(relfileid6), 27), + source=f"{fix_path(relfileid6)}:{27}", markers=None, parentid=relfileid6 + "::SpamTests", ), @@ -731,77 +733,77 @@ def test_complex(self): { "id": relfileid1 + "::MySuite::test_x1", "name": "test_x1", - "source": "{}:{}".format(fix_path(relfileid1), 10), + "source": f"{fix_path(relfileid1)}:{10}", "markers": [], "parentid": relfileid1 + "::MySuite", }, { "id": relfileid1 + "::MySuite::test_x2", "name": "test_x2", - "source": "{}:{}".format(fix_path(relfileid1), 21), + "source": f"{fix_path(relfileid1)}:{21}", "markers": [], "parentid": relfileid1 + "::MySuite", }, { "id": relfileid2 + "::SpamTests::test_okay", "name": "test_okay", - "source": "{}:{}".format(fix_path(relfileid2), 17), + "source": f"{fix_path(relfileid2)}:{17}", "markers": [], "parentid": relfileid2 + "::SpamTests", }, { "id": relfileid3 + "::test_ham1", "name": "test_ham1", - "source": "{}:{}".format(fix_path(relfileid3), 8), + "source": f"{fix_path(relfileid3)}:{8}", "markers": [], "parentid": relfileid3, }, { "id": relfileid3 + "::HamTests::test_uh_oh", "name": "test_uh_oh", - "source": "{}:{}".format(fix_path(relfileid3), 19), + "source": f"{fix_path(relfileid3)}:{19}", "markers": ["expected-failure"], "parentid": relfileid3 + "::HamTests", }, { "id": relfileid3 + "::HamTests::test_whoa", "name": "test_whoa", - "source": "{}:{}".format(fix_path(relfileid3), 35), + "source": f"{fix_path(relfileid3)}:{35}", "markers": [], "parentid": relfileid3 + "::HamTests", }, { "id": relfileid3 + "::MoreHam::test_yay[1-2]", "name": "test_yay[1-2]", - "source": 
"{}:{}".format(fix_path(relfileid3), 57), + "source": f"{fix_path(relfileid3)}:{57}", "markers": [], "parentid": relfileid3 + "::MoreHam::test_yay", }, { "id": relfileid3 + "::MoreHam::test_yay[1-2][3-4]", "name": "test_yay[1-2][3-4]", - "source": "{}:{}".format(fix_path(relfileid3), 72), + "source": f"{fix_path(relfileid3)}:{72}", "markers": [], "parentid": relfileid3 + "::MoreHam::test_yay[1-2]", }, { "id": relfileid4 + "::SpamTests::test_okay", "name": "test_okay", - "source": "{}:{}".format(fix_path(relfileid4), 15), + "source": f"{fix_path(relfileid4)}:{15}", "markers": [], "parentid": relfileid4 + "::SpamTests", }, { "id": relfileid5 + "::SpamTests::test_okay", "name": "test_okay", - "source": "{}:{}".format(fix_path(relfileid5), 12), + "source": f"{fix_path(relfileid5)}:{12}", "markers": [], "parentid": relfileid5 + "::SpamTests", }, { "id": relfileid6 + "::SpamTests::test_okay", "name": "test_okay", - "source": "{}:{}".format(fix_path(relfileid6), 27), + "source": f"{fix_path(relfileid6)}:{27}", "markers": [], "parentid": relfileid6 + "::SpamTests", }, @@ -833,7 +835,7 @@ def test_simple_basic(self): func="MySuite.test_spam_1", sub=None, ), - source="{}:{}".format(relfile, 10), + source=f"{relfile}:{10}", markers=None, parentid="suite#1", ), @@ -897,7 +899,7 @@ def test_simple_complex(self): test_spam.py SpamTests test_okay - """ + """ # noqa: D205, D400 stub = StubSender() testroot1 = fix_path("/a/b/c") relfile1 = fix_path("./test_ham.py") @@ -918,7 +920,7 @@ def test_simple_complex(self): func="MySuite.test_x1", sub=None, ), - source="{}:{}".format(relfile1, 10), + source=f"{relfile1}:{10}", markers=None, parentid="suite#1", ), @@ -931,7 +933,7 @@ def test_simple_complex(self): func="MySuite.test_x2", sub=None, ), - source="{}:{}".format(relfile1, 21), + source=f"{relfile1}:{21}", markers=None, parentid="suite#1", ), @@ -945,7 +947,7 @@ def test_simple_complex(self): func="SpamTests.test_okay", sub=None, ), - source="{}:{}".format(relfile2, 17), + source=f"{relfile2}:{17}", markers=None, parentid="suite#2", ), @@ -958,7 +960,7 @@ def test_simple_complex(self): func="test_ham1", sub=None, ), - source="{}:{}".format(relfile3, 8), + source=f"{relfile3}:{8}", markers=None, parentid="file#3", ), @@ -971,7 +973,7 @@ def test_simple_complex(self): func="HamTests.test_uh_oh", sub=None, ), - source="{}:{}".format(relfile3, 19), + source=f"{relfile3}:{19}", markers=["expected-failure"], parentid="suite#3", ), @@ -984,7 +986,7 @@ def test_simple_complex(self): func="HamTests.test_whoa", sub=None, ), - source="{}:{}".format(relfile3, 35), + source=f"{relfile3}:{35}", markers=None, parentid="suite#3", ), @@ -997,7 +999,7 @@ def test_simple_complex(self): func="MoreHam.test_yay", sub=["sub1"], ), - source="{}:{}".format(relfile3, 57), + source=f"{relfile3}:{57}", markers=None, parentid="suite#4", ), @@ -1010,7 +1012,7 @@ def test_simple_complex(self): func="MoreHam.test_yay", sub=["sub2", "sub3"], ), - source="{}:{}".format(relfile3, 72), + source=f"{relfile3}:{72}", markers=None, parentid="suite#3", ), @@ -1023,7 +1025,7 @@ def test_simple_complex(self): func="SpamTests.test_okay", sub=None, ), - source="{}:{}".format(relfile4, 15), + source=f"{relfile4}:{15}", markers=None, parentid="suite#5", ), @@ -1036,7 +1038,7 @@ def test_simple_complex(self): func="SpamTests.test_okay", sub=None, ), - source="{}:{}".format(relfile5, 12), + source=f"{relfile5}:{12}", markers=None, parentid="suite#6", ), @@ -1049,7 +1051,7 @@ def test_simple_complex(self): func="SpamTests.test_okay", sub=None, ), - 
source="{}:{}".format(relfile6, 27), + source=f"{relfile6}:{27}", markers=None, parentid="suite#7", ), diff --git a/python_files/tests/testing_tools/adapter/test_util.py b/python_files/tests/testing_tools/adapter/test_util.py index 8a7cd475a1c7..36df55a1d0f3 100644 --- a/python_files/tests/testing_tools/adapter/test_util.py +++ b/python_files/tests/testing_tools/adapter/test_util.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. - -from __future__ import absolute_import, print_function +# ruff:noqa: PT009, PTH100, PTH118, PTH120, PTH123 import ntpath import os @@ -11,7 +10,6 @@ import sys import unittest - # Pytest 3.7 and later uses pathlib/pathlib2 for path resolution. try: from pathlib import Path @@ -19,9 +17,9 @@ from pathlib2 import Path # type: ignore (for Pylance) from testing_tools.adapter.util import ( + fix_fileid, fix_path, fix_relpath, - fix_fileid, shlex_unsplit, ) @@ -31,6 +29,7 @@ class FilePathTests(unittest.TestCase): def test_isolated_imports(self): import testing_tools.adapter from testing_tools.adapter import util + from . import test_functional ignored = { @@ -88,19 +87,19 @@ def test_fix_path(self): ] for path, expected in tests: pathsep = ntpath.sep - with self.subTest(r"fixed for \: {!r}".format(path)): + with self.subTest(rf"fixed for \: {path!r}"): fixed = fix_path(path, _pathsep=pathsep) self.assertEqual(fixed, expected) pathsep = posixpath.sep - with self.subTest("unchanged for /: {!r}".format(path)): + with self.subTest(f"unchanged for /: {path!r}"): unchanged = fix_path(path, _pathsep=pathsep) self.assertEqual(unchanged, path) # no path -> "." for path in ["", None]: for pathsep in [ntpath.sep, posixpath.sep]: - with self.subTest(r"fixed for {}: {!r}".format(pathsep, path)): + with self.subTest(rf"fixed for {pathsep}: {path!r}"): fixed = fix_path(path, _pathsep=pathsep) self.assertEqual(fixed, ".") @@ -116,7 +115,7 @@ def test_fix_path(self): ) for path in paths: for pathsep in [ntpath.sep, posixpath.sep]: - with self.subTest(r"unchanged for {}: {!r}".format(pathsep, path)): + with self.subTest(rf"unchanged for {pathsep}: {path!r}"): unchanged = fix_path(path, _pathsep=pathsep) self.assertEqual(unchanged, path) @@ -152,7 +151,9 @@ def test_fix_relpath(self): with self.subTest((path, _os_path.sep)): fixed = fix_relpath( path, - _fix_path=(lambda p: fix_path(p, _pathsep=_os_path.sep)), + # Capture the loop variants as default parameters to make sure they + # don't change between iterations. 
+ _fix_path=(lambda p, _sep=_os_path.sep: fix_path(p, _pathsep=_sep)), _path_isabs=_os_path.isabs, _pathsep=_os_path.sep, ) @@ -200,7 +201,7 @@ def test_fix_fileid(self): ) for fileid, _os_path, expected in tests: pathsep = _os_path.sep - with self.subTest(r"for {}: {!r}".format(pathsep, fileid)): + with self.subTest(rf"for {pathsep}: {fileid!r}"): fixed = fix_fileid( fileid, _path_isabs=_os_path.isabs, @@ -259,7 +260,7 @@ def test_fix_fileid(self): ) for fileid, rootdir, _os_path, expected in tests: pathsep = _os_path.sep - with self.subTest(r"for {} (with rootdir {!r}): {!r}".format(pathsep, rootdir, fileid)): + with self.subTest(rf"for {pathsep} (with rootdir {rootdir!r}): {fileid!r}"): fixed = fix_fileid( fileid, rootdir, diff --git a/python_files/tests/unittestadapter/expected_discovery_test_output.py b/python_files/tests/unittestadapter/expected_discovery_test_output.py index 9fca67a3a574..9de0eff8238c 100644 --- a/python_files/tests/unittestadapter/expected_discovery_test_output.py +++ b/python_files/tests/unittestadapter/expected_discovery_test_output.py @@ -2,9 +2,10 @@ # Licensed under the MIT License. import os -from unittestadapter.pvsc_utils import TestNodeTypeEnum import pathlib +from unittestadapter.pvsc_utils import TestNodeTypeEnum + TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" diff --git a/python_files/tests/unittestadapter/test_discovery.py b/python_files/tests/unittestadapter/test_discovery.py index 94d0bb89c62a..9afff6762fcc 100644 --- a/python_files/tests/unittestadapter/test_discovery.py +++ b/python_files/tests/unittestadapter/test_discovery.py @@ -23,7 +23,7 @@ @pytest.mark.parametrize( - "args, expected", + ("args", "expected"), [ ( ["-s", "something", "-p", "other*", "-t", "else"], @@ -71,18 +71,14 @@ ], ) def test_parse_unittest_args(args: List[str], expected: List[str]) -> None: - """The parse_unittest_args function should return values for the start_dir, pattern, and top_level_dir arguments - when passed as command-line options, and ignore unrecognized arguments. - """ + """The parse_unittest_args function should return values for the start_dir, pattern, and top_level_dir arguments when passed as command-line options, and ignore unrecognized arguments.""" actual = parse_unittest_args(args) assert actual == expected def test_simple_discovery() -> None: - """The discover_tests function should return a dictionary with a "success" status, no errors, and a test tree - if unittest discovery was performed successfully. - """ + """The discover_tests function should return a dictionary with a "success" status, no errors, and a test tree if unittest discovery was performed successfully.""" start_dir = os.fsdecode(TEST_DATA_PATH) pattern = "discovery_simple*" file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH / "discovery_simple.py")) @@ -134,9 +130,7 @@ def test_simple_discovery() -> None: def test_simple_discovery_with_top_dir_calculated() -> None: - """The discover_tests function should return a dictionary with a "success" status, no errors, and a test tree - if unittest discovery was performed successfully. - """ + """The discover_tests function should return a dictionary with a "success" status, no errors, and a test tree if unittest discovery was performed successfully.""" start_dir = "." 
pattern = "discovery_simple*" file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH / "discovery_simple.py")) @@ -190,9 +184,7 @@ def test_simple_discovery_with_top_dir_calculated() -> None: def test_empty_discovery() -> None: - """The discover_tests function should return a dictionary with a "success" status, no errors, and no test tree - if unittest discovery was performed successfully but no tests were found. - """ + """The discover_tests function should return a dictionary with a "success" status, no errors, and no test tree if unittest discovery was performed successfully but no tests were found.""" start_dir = os.fsdecode(TEST_DATA_PATH) pattern = "discovery_empty*" @@ -204,9 +196,7 @@ def test_empty_discovery() -> None: def test_error_discovery() -> None: - """The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors - if unittest discovery failed at some point. - """ + """The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors if unittest discovery failed at some point.""" # Discover tests in .data/discovery_error/. start_path = pathlib.PurePath(TEST_DATA_PATH / "discovery_error") start_dir = os.fsdecode(start_path) @@ -262,6 +252,7 @@ def test_error_discovery() -> None: def test_unit_skip() -> None: """The discover_tests function should return a dictionary with a "success" status, no errors, and test tree. + if unittest discovery was performed and found a test in one file marked as skipped and another file marked as skipped. """ start_dir = os.fsdecode(TEST_DATA_PATH / "unittest_skip") diff --git a/python_files/tests/unittestadapter/test_execution.py b/python_files/tests/unittestadapter/test_execution.py index 44610d5bf6fa..71f1ca1ec73b 100644 --- a/python_files/tests/unittestadapter/test_execution.py +++ b/python_files/tests/unittestadapter/test_execution.py @@ -4,24 +4,24 @@ import os import pathlib import sys +from typing import TYPE_CHECKING, Dict, Optional from unittest.mock import patch -from typing import Dict, Optional import pytest script_dir = pathlib.Path(__file__).parent.parent.parent sys.path.insert(0, os.fspath(script_dir / "lib" / "python")) -from unittestadapter.pvsc_utils import ExecutionPayloadDict # noqa: E402 from unittestadapter.execution import run_tests # noqa: E402 +if TYPE_CHECKING: + from unittestadapter.pvsc_utils import ExecutionPayloadDict + TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" def test_no_ids_run() -> None: - """This test runs on an empty array of test_ids, therefore it should return - an empty dict for the result. - """ + """This test runs on an empty array of test_ids, therefore it should return an empty dict for the result.""" start_dir: str = os.fspath(TEST_DATA_PATH) testids = [] pattern = "discovery_simple*" @@ -43,16 +43,15 @@ def mock_send_run_data(): def test_single_ids_run(mock_send_run_data): - """This test runs on a single test_id, therefore it should return - a dict with a single key-value pair for the result. + """This test runs on a single test_id, therefore it should return a dict with a single key-value pair for the result. This single test passes so the outcome should be 'success'. 
""" - id = "discovery_simple.DiscoverySimple.test_one" + id_ = "discovery_simple.DiscoverySimple.test_one" os.environ["TEST_RUN_PIPE"] = "fake" actual: ExecutionPayloadDict = run_tests( os.fspath(TEST_DATA_PATH), - [id], + [id_], "discovery_simple*", None, 1, @@ -71,24 +70,23 @@ def test_single_ids_run(mock_send_run_data): if not isinstance(actual_result, Dict): raise AssertionError("actual_result is not a Dict") assert len(actual_result) == 1 - assert id in actual_result - id_result = actual_result[id] + assert id_ in actual_result + id_result = actual_result[id_] assert id_result is not None assert "outcome" in id_result assert id_result["outcome"] == "success" -def test_subtest_run(mock_send_run_data) -> None: - """This test runs on a the test_subtest which has a single method, test_even, - that uses unittest subtest. +def test_subtest_run(mock_send_run_data) -> None: # noqa: ARG001 + """This test runs on a the test_subtest which has a single method, test_even, that uses unittest subtest. The actual result of run should return a dict payload with 6 entry for the 6 subtests. """ - id = "test_subtest.NumbersTest.test_even" + id_ = "test_subtest.NumbersTest.test_even" os.environ["TEST_RUN_PIPE"] = "fake" actual = run_tests( os.fspath(TEST_DATA_PATH), - [id], + [id_], "test_subtest.py", None, 1, @@ -109,12 +107,12 @@ def test_subtest_run(mock_send_run_data) -> None: assert actual["result"] is not None result = actual["result"] assert len(result) == 6 - for id in subtests_ids: - assert id in result + for id_ in subtests_ids: + assert id_ in result @pytest.mark.parametrize( - "test_ids, pattern, cwd, expected_outcome", + ("test_ids", "pattern", "cwd", "expected_outcome"), [ ( [ @@ -186,7 +184,7 @@ def test_subtest_run(mock_send_run_data) -> None: ), ], ) -def test_multiple_ids_run(mock_send_run_data, test_ids, pattern, cwd, expected_outcome) -> None: +def test_multiple_ids_run(mock_send_run_data, test_ids, pattern, cwd, expected_outcome) -> None: # noqa: ARG001 """ The following are all successful tests of different formats. @@ -217,9 +215,8 @@ def test_multiple_ids_run(mock_send_run_data, test_ids, pattern, cwd, expected_o assert True -def test_failed_tests(mock_send_run_data): +def test_failed_tests(mock_send_run_data): # noqa: ARG001 """This test runs on a single file `test_fail` with two tests that fail.""" - os.environ["TEST_RUN_PIPE"] = "fake" test_ids = [ "test_fail_simple.RunFailSimple.test_one_fail", @@ -246,17 +243,16 @@ def test_failed_tests(mock_send_run_data): assert id_result is not None assert "outcome" in id_result assert id_result["outcome"] == "failure" - assert "message" and "traceback" in id_result + assert "message" in id_result + assert "traceback" in id_result assert "2 not greater than 3" in str(id_result["message"]) or "1 == 1" in str( id_result["traceback"] ) assert True -def test_unknown_id(mock_send_run_data): - """This test runs on a unknown test_id, therefore it should return - an error as the outcome as it attempts to find the given test. 
- """ +def test_unknown_id(mock_send_run_data): # noqa: ARG001 + """This test runs on a unknown test_id, therefore it should return an error as the outcome as it attempts to find the given test.""" os.environ["TEST_RUN_PIPE"] = "fake" test_ids = ["unknown_id"] actual = run_tests( @@ -279,13 +275,12 @@ def test_unknown_id(mock_send_run_data): assert id_result is not None assert "outcome" in id_result assert id_result["outcome"] == "error" - assert "message" and "traceback" in id_result + assert "message" in id_result + assert "traceback" in id_result def test_incorrect_path(): - """This test runs on a non existent path, therefore it should return - an error as the outcome as it attempts to find the given folder. - """ + """This test runs on a non existent path, therefore it should return an error as the outcome as it attempts to find the given folder.""" test_ids = ["unknown_id"] os.environ["TEST_RUN_PIPE"] = "fake" diff --git a/python_files/tests/unittestadapter/test_utils.py b/python_files/tests/unittestadapter/test_utils.py index 1cb9a4686399..b0341ce37b63 100644 --- a/python_files/tests/unittestadapter/test_utils.py +++ b/python_files/tests/unittestadapter/test_utils.py @@ -25,7 +25,7 @@ @pytest.mark.parametrize( - "directory, pattern, expected", + ("directory", "pattern", "expected"), [ ( ".", @@ -49,7 +49,6 @@ ) def test_simple_test_cases(directory, pattern, expected) -> None: """The get_test_case fuction should return tests from all test suites.""" - actual = [] # Discover tests in .data/. @@ -59,15 +58,13 @@ def test_simple_test_cases(directory, pattern, expected) -> None: suite = loader.discover(start_dir, pattern) # Iterate on get_test_case and save the test id. - for test in get_test_case(suite): - actual.append(test.id()) + actual = [test.id() for test in get_test_case(suite)] assert expected == actual def test_get_existing_child_node() -> None: """The get_child_node fuction should return the child node of a test tree if it exists.""" - tree: TestNode = { "name": "root", "path": "foo", @@ -115,7 +112,6 @@ def test_get_existing_child_node() -> None: def test_no_existing_child_node() -> None: """The get_child_node fuction should add a child node to a test tree and return it if it does not exist.""" - tree: TestNode = { "name": "root", "path": "foo", @@ -172,10 +168,7 @@ def test_no_existing_child_node() -> None: def test_build_simple_tree() -> None: - """The build_test_tree function should build and return a test tree from discovered test suites, - and an empty list of errors if there are none in the discovered data. - """ - + """The build_test_tree function should build and return a test tree from discovered test suites, and an empty list of errors if there are none in the discovered data.""" # Discovery tests in utils_simple_tree.py. start_dir = os.fsdecode(TEST_DATA_PATH) pattern = "utils_simple_tree*" @@ -231,11 +224,7 @@ def test_build_simple_tree() -> None: def test_build_decorated_tree() -> None: - """The build_test_tree function should build and return a test tree from discovered test suites, - with correct line numbers for decorated test, - and an empty list of errors if there are none in the discovered data. - """ - + """The build_test_tree function should build and return a test tree from discovered test suites, with correct line numbers for decorated test, and an empty list of errors if there are none in the discovered data.""" # Discovery tests in utils_decorated_tree.py. 
start_dir = os.fsdecode(TEST_DATA_PATH) pattern = "utils_decorated_tree*" @@ -291,9 +280,7 @@ def test_build_decorated_tree() -> None: def test_build_empty_tree() -> None: - """The build_test_tree function should return None if there are no discovered test suites, - and an empty list of errors if there are none in the discovered data.""" - + """The build_test_tree function should return None if there are no discovered test suites, and an empty list of errors if there are none in the discovered data.""" start_dir = os.fsdecode(TEST_DATA_PATH) pattern = "does_not_exist*" diff --git a/python_files/tests/util.py b/python_files/tests/util.py index 45c3536145cf..ee240cd95202 100644 --- a/python_files/tests/util.py +++ b/python_files/tests/util.py @@ -2,7 +2,7 @@ # Licensed under the MIT License. -class Stub(object): +class Stub: def __init__(self): self.calls = [] @@ -10,7 +10,7 @@ def add_call(self, name, args=None, kwargs=None): self.calls.append((name, args, kwargs)) -class StubProxy(object): +class StubProxy: def __init__(self, stub=None, name=None): self.name = name self.stub = stub if stub is not None else Stub() @@ -22,5 +22,5 @@ def calls(self): def add_call(self, funcname, *args, **kwargs): callname = funcname if self.name: - callname = "{}.{}".format(self.name, funcname) + callname = f"{self.name}.{funcname}" return self.stub.add_call(callname, *args, **kwargs) diff --git a/python_files/unittestadapter/discovery.py b/python_files/unittestadapter/discovery.py index 58ab8ca1a651..604fe7beaeb1 100644 --- a/python_files/unittestadapter/discovery.py +++ b/python_files/unittestadapter/discovery.py @@ -13,12 +13,12 @@ # If I use from utils then there will be an import error in test_discovery.py. from unittestadapter.pvsc_utils import ( # noqa: E402 + DiscoveryPayloadDict, + EOTPayloadDict, VSCodeUnittestError, build_test_tree, parse_unittest_args, send_post_request, - DiscoveryPayloadDict, - EOTPayloadDict, ) @@ -56,9 +56,9 @@ def discover_tests( "status": "error", } """ - cwd = os.path.abspath(start_dir) + cwd = os.path.abspath(start_dir) # noqa: PTH100 if "/" in start_dir: # is a subdir - parent_dir = os.path.dirname(start_dir) + parent_dir = os.path.dirname(start_dir) # noqa: PTH120 sys.path.insert(0, parent_dir) else: sys.path.insert(0, cwd) @@ -75,7 +75,7 @@ def discover_tests( top_level_dir = start_dir # Get abspath of top level directory for build_test_tree. - top_level_dir = os.path.abspath(top_level_dir) + top_level_dir = os.path.abspath(top_level_dir) # noqa: PTH100 tests, error = build_test_tree(suite, top_level_dir) # test tree built successfully here. 
diff --git a/python_files/unittestadapter/execution.py b/python_files/unittestadapter/execution.py index 84cc10c4fb1f..e81407e1e83c 100644 --- a/python_files/unittestadapter/execution.py +++ b/python_files/unittestadapter/execution.py @@ -6,10 +6,9 @@ import json import os import pathlib -import socket import sys -import traceback import sysconfig +import traceback import unittest from types import TracebackType from typing import Dict, List, Optional, Tuple, Type, Union @@ -27,12 +26,12 @@ from testing_tools import process_json_util, socket_manager # noqa: E402 from unittestadapter.pvsc_utils import ( # noqa: E402 + EOTPayloadDict, + ExecutionPayloadDict, + TestExecutionStatus, VSCodeUnittestError, parse_unittest_args, send_post_request, - ExecutionPayloadDict, - EOTPayloadDict, - TestExecutionStatus, ) ErrorType = Union[Tuple[Type[BaseException], BaseException, TracebackType], Tuple[None, None, None]] @@ -53,51 +52,51 @@ class TestOutcomeEnum(str, enum.Enum): class UnittestTestResult(unittest.TextTestResult): def __init__(self, *args, **kwargs): - self.formatted: Dict[str, Dict[str, Union[str, None]]] = dict() - super(UnittestTestResult, self).__init__(*args, **kwargs) + self.formatted: Dict[str, Dict[str, Union[str, None]]] = {} + super().__init__(*args, **kwargs) - def startTest(self, test: unittest.TestCase): - super(UnittestTestResult, self).startTest(test) + def startTest(self, test: unittest.TestCase): # noqa: N802 + super().startTest(test) - def addError( + def addError( # noqa: N802 self, test: unittest.TestCase, err: ErrorType, ): - super(UnittestTestResult, self).addError(test, err) + super().addError(test, err) self.formatResult(test, TestOutcomeEnum.error, err) - def addFailure( + def addFailure( # noqa: N802 self, test: unittest.TestCase, err: ErrorType, ): - super(UnittestTestResult, self).addFailure(test, err) + super().addFailure(test, err) self.formatResult(test, TestOutcomeEnum.failure, err) - def addSuccess(self, test: unittest.TestCase): - super(UnittestTestResult, self).addSuccess(test) + def addSuccess(self, test: unittest.TestCase): # noqa: N802 + super().addSuccess(test) self.formatResult(test, TestOutcomeEnum.success) - def addSkip(self, test: unittest.TestCase, reason: str): - super(UnittestTestResult, self).addSkip(test, reason) + def addSkip(self, test: unittest.TestCase, reason: str): # noqa: N802 + super().addSkip(test, reason) self.formatResult(test, TestOutcomeEnum.skipped) - def addExpectedFailure(self, test: unittest.TestCase, err: ErrorType): - super(UnittestTestResult, self).addExpectedFailure(test, err) + def addExpectedFailure(self, test: unittest.TestCase, err: ErrorType): # noqa: N802 + super().addExpectedFailure(test, err) self.formatResult(test, TestOutcomeEnum.expected_failure, err) - def addUnexpectedSuccess(self, test: unittest.TestCase): - super(UnittestTestResult, self).addUnexpectedSuccess(test) + def addUnexpectedSuccess(self, test: unittest.TestCase): # noqa: N802 + super().addUnexpectedSuccess(test) self.formatResult(test, TestOutcomeEnum.unexpected_success) - def addSubTest( + def addSubTest( # noqa: N802 self, test: unittest.TestCase, subtest: unittest.TestCase, err: Union[ErrorType, None], ): - super(UnittestTestResult, self).addSubTest(test, subtest, err) + super().addSubTest(test, subtest, err) self.formatResult( test, TestOutcomeEnum.subtest_failure if err else TestOutcomeEnum.subtest_success, @@ -105,7 +104,7 @@ def addSubTest( subtest, ) - def formatResult( + def formatResult( # noqa: N802 self, test: unittest.TestCase, outcome: 
str, @@ -125,10 +124,7 @@ def formatResult( tb = "".join(formatted) # Remove the 'Traceback (most recent call last)' formatted = formatted[1:] - if subtest: - test_id = subtest.id() - else: - test_id = test.id() + test_id = subtest.id() if subtest else test.id() result = { "test": test.id(), @@ -192,11 +188,11 @@ def run_tests( top_level_dir: Optional[str], verbosity: int, failfast: Optional[bool], - locals: Optional[bool] = None, + locals_: Optional[bool] = None, ) -> ExecutionPayloadDict: - cwd = os.path.abspath(start_dir) + cwd = os.path.abspath(start_dir) # noqa: PTH100 if "/" in start_dir: # is a subdir - parent_dir = os.path.dirname(start_dir) + parent_dir = os.path.dirname(start_dir) # noqa: PTH120 sys.path.insert(0, parent_dir) else: sys.path.insert(0, cwd) @@ -208,18 +204,18 @@ def run_tests( # If it's a file, split path and file name. start_dir = cwd if cwd.endswith(".py"): - start_dir = os.path.dirname(cwd) - pattern = os.path.basename(cwd) + start_dir = os.path.dirname(cwd) # noqa: PTH120 + pattern = os.path.basename(cwd) # noqa: PTH119 if failfast is None: failfast = False - if locals is None: - locals = False + if locals_ is None: + locals_ = False if verbosity is None: verbosity = 1 runner = unittest.TextTestRunner( resultclass=UnittestTestResult, - tb_locals=locals, + tb_locals=locals_, failfast=failfast, verbosity=verbosity, ) @@ -261,11 +257,8 @@ def run_tests( def send_run_data(raw_data, test_run_pipe): status = raw_data["outcome"] - cwd = os.path.abspath(START_DIR) - if raw_data["subtest"]: - test_id = raw_data["subtest"] - else: - test_id = raw_data["test"] + cwd = os.path.abspath(START_DIR) # noqa: PTH100 + test_id = raw_data["subtest"] or raw_data["test"] test_dict = {} test_dict[test_id] = raw_data payload: ExecutionPayloadDict = {"cwd": cwd, "status": status, "result": test_dict} @@ -283,7 +276,7 @@ def send_run_data(raw_data, test_run_pipe): top_level_dir, verbosity, failfast, - locals, + locals_, ) = parse_unittest_args(argv[index + 1 :]) run_test_ids_pipe = os.environ.get("RUN_TEST_IDS_PIPE") @@ -319,10 +312,10 @@ def send_run_data(raw_data, test_run_pipe): except json.JSONDecodeError: # JSON decoding error, the complete JSON object is not yet received continue - except socket.error as e: + except OSError as e: msg = f"Error: Could not connect to RUN_TEST_IDS_PIPE: {e}" print(msg) - raise VSCodeUnittestError(msg) + raise VSCodeUnittestError(msg) from e try: if raw_json and "params" in raw_json: @@ -336,11 +329,11 @@ def send_run_data(raw_data, test_run_pipe): top_level_dir, verbosity, failfast, - locals, + locals_, ) else: # No test ids received from buffer - cwd = os.path.abspath(start_dir) + cwd = os.path.abspath(start_dir) # noqa: PTH100 status = TestExecutionStatus.error payload: ExecutionPayloadDict = { "cwd": cwd, @@ -349,9 +342,9 @@ def send_run_data(raw_data, test_run_pipe): "result": None, } send_post_request(payload, test_run_pipe) - except json.JSONDecodeError: + except json.JSONDecodeError as exc: msg = "Error: Could not parse test ids from stdin" print(msg) - raise VSCodeUnittestError(msg) + raise VSCodeUnittestError(msg) from exc eot_payload: EOTPayloadDict = {"command_type": "execution", "eot": True} send_post_request(eot_payload, test_run_pipe) diff --git a/python_files/unittestadapter/pvsc_utils.py b/python_files/unittestadapter/pvsc_utils.py index 2eba987603c0..99577fc8e9c5 100644 --- a/python_files/unittestadapter/pvsc_utils.py +++ b/python_files/unittestadapter/pvsc_utils.py @@ -10,16 +10,16 @@ import pathlib import sys import unittest -from 
typing import List, Optional, Tuple, Union, Dict, Literal, TypedDict - +from typing import Dict, List, Literal, Optional, Tuple, TypedDict, Union script_dir = pathlib.Path(__file__).parent.parent sys.path.append(os.fspath(script_dir)) sys.path.append(os.fspath(script_dir / "lib" / "python")) -from testing_tools import socket_manager # noqa: E402 from typing_extensions import NotRequired # noqa: E402 +from testing_tools import socket_manager # noqa: E402 + # Types @@ -77,7 +77,7 @@ class ExecutionPayloadDict(TypedDict): class EOTPayloadDict(TypedDict): """A dictionary that is used to send a end of transmission post request to the server.""" - command_type: Union[Literal["discovery"], Literal["execution"]] + command_type: Literal["discovery", "execution"] eot: bool @@ -90,8 +90,7 @@ def get_test_case(suite): if isinstance(test, unittest.TestCase): yield test else: - for test_case in get_test_case(test): - yield test_case + yield from get_test_case(test) def get_source_line(obj) -> str: @@ -130,8 +129,11 @@ def build_test_node(path: str, name: str, type_: TestNodeTypeEnum) -> TestNode: def get_child_node(name: str, path: str, type_: TestNodeTypeEnum, root: TestNode) -> TestNode: - """Find a child node in a test tree given its name, type and path. If the node doesn't exist, create it. - Path is required to distinguish between nodes with the same name and type.""" + """Find a child node in a test tree given its name, type and path. + + If the node doesn't exist, create it. + Path is required to distinguish between nodes with the same name and type. + """ try: result = next( node @@ -195,12 +197,12 @@ def build_test_tree( for test_case in get_test_case(suite): test_id = test_case.id() if test_id.startswith("unittest.loader._FailedTest"): - error.append(str(test_case._exception)) # type: ignore + error.append(str(test_case._exception)) # type: ignore # noqa: SLF001 elif test_id.startswith("unittest.loader.ModuleSkipped"): components = test_id.split(".") class_name = f"{components[-1]}.py" # Find/build class node. - file_path = os.fsdecode(os.path.join(directory_path, class_name)) + file_path = os.fsdecode(directory_path / class_name) current_node = get_child_node(class_name, file_path, TestNodeTypeEnum.file, root) else: # Get the static test path components: filename, class name and function name. @@ -220,7 +222,7 @@ def build_test_tree( ) # Find/build file node. - path_components = [top_level_directory] + folders + [py_filename] + path_components = [top_level_directory, *folders, py_filename] file_path = os.fsdecode(pathlib.PurePath("/".join(path_components))) current_node = get_child_node( py_filename, file_path, TestNodeTypeEnum.file, current_node @@ -232,7 +234,7 @@ def build_test_tree( ) # Get test line number. - test_method = getattr(test_case, test_case._testMethodName) + test_method = getattr(test_case, test_case._testMethodName) # noqa: SLF001 lineno = get_source_line(test_method) # Add test node. @@ -266,7 +268,6 @@ def parse_unittest_args( - top_level_directory: The top-level directory of the project, defaults to None, and unittest will use start_directory behind the scenes. 
""" - arg_parser = argparse.ArgumentParser() arg_parser.add_argument("--start-directory", "-s", default=".") arg_parser.add_argument("--pattern", "-p", default="test*.py") @@ -328,7 +329,7 @@ def send_post_request( except Exception as error: error_msg = f"Error attempting to connect to extension named pipe {test_run_pipe}[vscode-unittest]: {error}" __writer = None - raise VSCodeUnittestError(error_msg) + raise VSCodeUnittestError(error_msg) from error rpc = { "jsonrpc": "2.0", diff --git a/python_files/visualstudio_py_testlauncher.py b/python_files/visualstudio_py_testlauncher.py index b085d5ce4e6f..575f9d4fefc2 100644 --- a/python_files/visualstudio_py_testlauncher.py +++ b/python_files/visualstudio_py_testlauncher.py @@ -32,7 +32,7 @@ import _thread as thread -class _TestOutput(object): +class _TestOutput: """file like object which redirects output to the repl window.""" errors = "strict" @@ -40,7 +40,7 @@ class _TestOutput(object): def __init__(self, old_out, is_stdout): self.is_stdout = is_stdout self.old_out = old_out - if sys.version >= "3." and hasattr(old_out, "buffer"): + if sys.version_info[0] >= 3 and hasattr(old_out, "buffer"): self.buffer = _TestOutputBuffer(old_out.buffer, is_stdout) def flush(self): @@ -79,7 +79,7 @@ def __getattr__(self, name): return getattr(self.old_out, name) -class _TestOutputBuffer(object): +class _TestOutputBuffer: def __init__(self, old_buffer, is_stdout): self.buffer = old_buffer self.is_stdout = is_stdout @@ -101,7 +101,7 @@ def seek(self, pos, whence=0): return self.buffer.seek(pos, whence) -class _IpcChannel(object): +class _IpcChannel: def __init__(self, socket, callback): self.socket = socket self.seq = 0 @@ -109,12 +109,12 @@ def __init__(self, socket, callback): self.lock = thread.allocate_lock() self._closed = False # start the testing reader thread loop - self.test_thread_id = thread.start_new_thread(self.readSocket, ()) + self.test_thread_id = thread.start_new_thread(self.read_socket, ()) def close(self): self._closed = True - def readSocket(self): + def read_socket(self): try: self.socket.recv(1024) self.callback() @@ -139,40 +139,40 @@ def send_event(self, name, **args): class VsTestResult(unittest.TextTestResult): - def startTest(self, test): - super(VsTestResult, self).startTest(test) + def startTest(self, test): # noqa: N802 + super().startTest(test) if _channel is not None: _channel.send_event(name="start", test=test.id()) - def addError(self, test, err): - super(VsTestResult, self).addError(test, err) + def addError(self, test, err): # noqa: N802 + super().addError(test, err) self.sendResult(test, "error", err) - def addFailure(self, test, err): - super(VsTestResult, self).addFailure(test, err) + def addFailure(self, test, err): # noqa: N802 + super().addFailure(test, err) self.sendResult(test, "failed", err) - def addSuccess(self, test): - super(VsTestResult, self).addSuccess(test) + def addSuccess(self, test): # noqa: N802 + super().addSuccess(test) self.sendResult(test, "passed") - def addSkip(self, test, reason): - super(VsTestResult, self).addSkip(test, reason) + def addSkip(self, test, reason): # noqa: N802 + super().addSkip(test, reason) self.sendResult(test, "skipped") - def addExpectedFailure(self, test, err): - super(VsTestResult, self).addExpectedFailure(test, err) + def addExpectedFailure(self, test, err): # noqa: N802 + super().addExpectedFailure(test, err) self.sendResult(test, "failed-expected", err) - def addUnexpectedSuccess(self, test): - super(VsTestResult, self).addUnexpectedSuccess(test) + def 
addUnexpectedSuccess(self, test): # noqa: N802 + super().addUnexpectedSuccess(test) self.sendResult(test, "passed-unexpected") - def addSubTest(self, test, subtest, err): - super(VsTestResult, self).addSubTest(test, subtest, err) + def addSubTest(self, test, subtest, err): # noqa: N802 + super().addSubTest(test, subtest, err) self.sendResult(test, "subtest-passed" if err is None else "subtest-failed", err, subtest) - def sendResult(self, test, outcome, trace=None, subtest=None): + def sendResult(self, test, outcome, trace=None, subtest=None): # noqa: N802 if _channel is not None: tb = None message = None @@ -195,19 +195,19 @@ def sendResult(self, test, outcome, trace=None, subtest=None): _channel.send_event("result", **result) -def stopTests(): +def stop_tests(): try: os.kill(os.getpid(), signal.SIGUSR1) except Exception: os.kill(os.getpid(), signal.SIGTERM) -class ExitCommand(Exception): +class ExitCommand(Exception): # noqa: N818 pass -def signal_handler(signal, frame): - raise ExitCommand() +def signal_handler(signal, frame): # noqa: ARG001 + raise ExitCommand def main(): @@ -248,9 +248,7 @@ def main(): help="connect to port on localhost and send test results", ) parser.add_option("--us", type="str", help="Directory to start discovery") - parser.add_option( - "--up", type="str", help="Pattern to match test files (" "test*.py" " default)" - ) + parser.add_option("--up", type="str", help="Pattern to match test files (test*.py default)") parser.add_option( "--ut", type="str", @@ -266,14 +264,16 @@ def main(): parser.add_option("--uc", "--catch", type="str", help="Catch control-C and display results") (opts, _) = parser.parse_args() - sys.path[0] = os.getcwd() + sys.path[0] = os.getcwd() # noqa: PTH109 if opts.result_port: try: signal.signal(signal.SIGUSR1, signal_handler) except Exception: with contextlib.suppress(Exception): signal.signal(signal.SIGTERM, signal_handler) - _channel = _IpcChannel(socket.create_connection(("127.0.0.1", opts.result_port)), stopTests) + _channel = _IpcChannel( + socket.create_connection(("127.0.0.1", opts.result_port)), stop_tests + ) sys.stdout = _TestOutput(sys.stdout, is_stdout=True) sys.stderr = _TestOutput(sys.stderr, is_stdout=False) @@ -289,11 +289,11 @@ def main(): sleep(0.1) try: debugger_helper = windll["Microsoft.PythonTools.Debugger.Helper.x86.dll"] - except WindowsError: + except OSError: debugger_helper = windll["Microsoft.PythonTools.Debugger.Helper.x64.dll"] - isTracing = c_char.in_dll(debugger_helper, "isTracing") + is_tracing = c_char.in_dll(debugger_helper, "isTracing") while True: - if isTracing.value != 0: + if is_tracing.value != 0: break sleep(0.1) @@ -318,7 +318,9 @@ def main(): loader = unittest.TestLoader() # opts.us will be passed in suites = loader.discover( - opts.us, pattern=os.path.basename(opts.testFile), top_level_dir=opts.ut + opts.us, + pattern=os.path.basename(opts.testFile), # noqa: PTH119 + top_level_dir=opts.ut, ) suite = None tests = None @@ -327,14 +329,14 @@ def main(): tests = suites else: # Run a specific test class or test method - for test_suite in suites._tests: - for cls in test_suite._tests: + for test_suite in suites._tests: # noqa: SLF001 + for cls in test_suite._tests: # noqa: SLF001 with contextlib.suppress(Exception): for m in cls._tests: - testId = m.id() - if testId.startswith(opts.tests[0]): + test_id = m.id() + if test_id.startswith(opts.tests[0]): suite = cls - if testId in opts.tests: + if test_id in opts.tests: if tests is None: tests = unittest.TestSuite([m]) else: diff --git 
a/python_files/vscode_datascience_helpers/__init__.py b/python_files/vscode_datascience_helpers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python_files/vscode_datascience_helpers/tests/__init__.py b/python_files/vscode_datascience_helpers/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python_files/vscode_datascience_helpers/tests/logParser.py b/python_files/vscode_datascience_helpers/tests/logParser.py index e07a38e9d4d3..12c090ec581f 100644 --- a/python_files/vscode_datascience_helpers/tests/logParser.py +++ b/python_files/vscode_datascience_helpers/tests/logParser.py @@ -1,4 +1,4 @@ -import argparse +import argparse # noqa: N999 import os import re from io import TextIOWrapper @@ -20,74 +20,75 @@ timestamp_regex = re.compile(r"\d{4}-\d{2}-\d{2}T.*\dZ") -def stripTimestamp(line: str): +def strip_timestamp(line: str): match = timestamp_regex.match(line) if match: return line[match.end() :] return line -def readStripLines(f: TextIOWrapper): - return map(stripTimestamp, f.readlines()) +def read_strip_lines(f: TextIOWrapper): + return map(strip_timestamp, f.readlines()) -def printTestOutput(testlog): +def print_test_output(testlog): # Find all the lines that don't have a PID in them. These are the test output p = Path(testlog[0]) with p.open() as f: - for line in readStripLines(f): + for line in read_strip_lines(f): stripped = line.strip() if len(stripped) > 2 and stripped[0] == "\x1b" and stripped[1] == "[": print(line.rstrip()) # Should be a test line as it has color encoding -def splitByPid(testlog): +def split_by_pid(testlog): # Split testlog into prefixed logs based on pid - baseFile = os.path.splitext(testlog[0])[0] p = Path(testlog[0]) pids = set() logs = {} pid = None - with p.open() as f: - for line in readStripLines(f): - stripped = ansi_escape.sub("", line.strip()) - if len(stripped) > 0: - # Pull out the pid - match = pid_regex.match(stripped) - - # Pids are at least two digits - if match and len(match.group(1)) > 2: - # Pid is found - pid = int(match.group(1)) - - # See if we've created a log for this pid or not - if pid not in pids: - pids.add(pid) - logFile = "{}_{}.log".format(baseFile, pid) - print("Writing to new log: " + logFile) - logs[pid] = Path(logFile).open(mode="w") - - # Add this line to the log - if pid is not None: - logs[pid].write(line) - # Close all of the open logs - for key in logs: - logs[key].close() - - -def doWork(args): + try: + with p.open() as f: + for line in read_strip_lines(f): + stripped = ansi_escape.sub("", line.strip()) + if len(stripped) > 0: + # Pull out the pid + match = pid_regex.match(stripped) + + # Pids are at least two digits + if match and len(match.group(1)) > 2: + # Pid is found + pid = int(match.group(1)) + + # See if we've created a log for this pid or not + if pid not in pids: + pids.add(pid) + log_file = p.with_name(f"{p.stem}_{pid}.log") + print("Writing to new log:", os.fsdecode(log_file)) + logs[pid] = log_file.open(mode="w") + + # Add this line to the log + if pid is not None: + logs[pid].write(line) + finally: + # Close all of the open logs + for key in logs: + logs[key].close() + + +def do_work(args): if not args.testlog: print("Test log should be passed") elif args.testoutput: - printTestOutput(args.testlog) + print_test_output(args.testlog) elif args.split: - splitByPid(args.testlog) + split_by_pid(args.testlog) else: parser.print_usage() def main(): - doWork(parser.parse_args()) + do_work(parser.parse_args()) if __name__ == "__main__": diff --git 
a/python_files/vscode_pytest/__init__.py b/python_files/vscode_pytest/__init__.py index a7b197ca26a5..c3be7e53d1b6 100644 --- a/python_files/vscode_pytest/__init__.py +++ b/python_files/vscode_pytest/__init__.py @@ -7,7 +7,16 @@ import pathlib import sys import traceback - +from typing import ( + Any, + Dict, + Generator, + List, + Literal, + Optional, + TypedDict, + Union, +) import pytest @@ -15,16 +24,6 @@ sys.path.append(os.fspath(script_dir)) sys.path.append(os.fspath(script_dir / "lib" / "python")) from testing_tools import socket_manager # noqa: E402 -from typing import ( # noqa: E402 - Any, - Dict, - List, - Optional, - Union, - TypedDict, - Literal, - Generator, -) class TestData(TypedDict): @@ -58,13 +57,13 @@ def __init__(self, message): ERRORS = [] IS_DISCOVERY = False -map_id_to_path = dict() -collected_tests_so_far = list() +map_id_to_path = {} +collected_tests_so_far = [] TEST_RUN_PIPE = os.getenv("TEST_RUN_PIPE") SYMLINK_PATH = None -def pytest_load_initial_conftests(early_config, parser, args): +def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001 global TEST_RUN_PIPE TEST_RUN_PIPE = os.getenv("TEST_RUN_PIPE") error_string = ( @@ -82,32 +81,32 @@ def pytest_load_initial_conftests(early_config, parser, args): # check if --rootdir is in the args for arg in args: if "--rootdir=" in arg: - rootdir = arg.split("--rootdir=")[1] - if not os.path.exists(rootdir): + rootdir = pathlib.Path(arg.split("--rootdir=")[1]) + if not rootdir.exists(): raise VSCodePytestError( f"The path set in the argument --rootdir={rootdir} does not exist." ) # Check if the rootdir is a symlink or a child of a symlink to the current cwd. - isSymlink = False + is_symlink = False - if os.path.islink(rootdir): - isSymlink = True + if rootdir.is_symlink(): + is_symlink = True print( f"Plugin info[vscode-pytest]: rootdir argument, {rootdir}, is identified as a symlink." ) - elif pathlib.Path(os.path.realpath(rootdir)) != rootdir: + elif rootdir.resolve() != rootdir: print("Plugin info[vscode-pytest]: Checking if rootdir is a child of a symlink.") - isSymlink = has_symlink_parent(rootdir) - if isSymlink: + is_symlink = has_symlink_parent(rootdir) + if is_symlink: print( f"Plugin info[vscode-pytest]: rootdir argument, {rootdir}, is identified as a symlink or child of a symlink, adjusting pytest paths accordingly.", ) global SYMLINK_PATH - SYMLINK_PATH = pathlib.Path(rootdir) + SYMLINK_PATH = rootdir -def pytest_internalerror(excrepr, excinfo): +def pytest_internalerror(excrepr, excinfo): # noqa: ARG001 """A pytest hook that is called when an internal error occurs. Keyword arguments: @@ -150,7 +149,7 @@ def pytest_exception_interact(node, call, report): "Test failed with exception", report.longreprtext, ) - collected_test = testRunResultDict() + collected_test = TestRunResultDict() collected_test[node_id] = item_result cwd = pathlib.Path.cwd() execution_post( @@ -169,14 +168,16 @@ def has_symlink_parent(current_path): # Iterate over all parent directories for parent in curr_path.parents: # Check if the parent directory is a symlink - if os.path.islink(parent): + if parent.is_symlink(): print(f"Symlink found at: {parent}") return True return False -def get_absolute_test_id(test_id: str, testPath: pathlib.Path) -> str: - """A function that returns the absolute test id. This is necessary because testIds are relative to the rootdir. +def get_absolute_test_id(test_id: str, test_path: pathlib.Path) -> str: + """A function that returns the absolute test id. 
+ + This is necessary because testIds are relative to the rootdir. This does not work for our case since testIds when referenced during run time are relative to the instantiation location. Absolute paths for testIds are necessary for the test tree ensures configurations that change the rootdir of pytest are handled correctly. @@ -186,8 +187,7 @@ def get_absolute_test_id(test_id: str, testPath: pathlib.Path) -> str: testPath -- the path to the file the test is located in, as a pathlib.Path object. """ split_id = test_id.split("::")[1:] - absolute_test_id = "::".join([str(testPath), *split_id]) - return absolute_test_id + return "::".join([str(test_path), *split_id]) def pytest_keyboard_interrupt(excinfo): @@ -218,7 +218,7 @@ def create_test_outcome( outcome: str, message: Union[str, None], traceback: Union[str, None], - subtype: Optional[str] = None, + subtype: Optional[str] = None, # noqa: ARG001 ) -> TestOutcome: """A function that creates a TestOutcome object.""" return TestOutcome( @@ -230,7 +230,7 @@ def create_test_outcome( ) -class testRunResultDict(Dict[str, Dict[str, TestOutcome]]): +class TestRunResultDict(Dict[str, Dict[str, TestOutcome]]): """A class that stores all test run results.""" outcome: str @@ -238,10 +238,11 @@ class testRunResultDict(Dict[str, Dict[str, TestOutcome]]): @pytest.hookimpl(hookwrapper=True, trylast=True) -def pytest_report_teststatus(report, config): - """ - A pytest hook that is called when a test is called. It is called 3 times per test, - during setup, call, and teardown. +def pytest_report_teststatus(report, config): # noqa: ARG001 + """A pytest hook that is called when a test is called. + + It is called 3 times per test, during setup, call, and teardown. + Keyword arguments: report -- the report on the test setup, call, and teardown. config -- configuration object. @@ -273,7 +274,7 @@ def pytest_report_teststatus(report, config): message, traceback, ) - collected_test = testRunResultDict() + collected_test = TestRunResultDict() collected_test[absolute_node_id] = item_result execution_post( os.fsdecode(cwd), @@ -292,7 +293,7 @@ def pytest_report_teststatus(report, config): @pytest.hookimpl(hookwrapper=True, trylast=True) -def pytest_runtest_protocol(item, nextitem): +def pytest_runtest_protocol(item, nextitem): # noqa: ARG001 map_id_to_path[item.nodeid] = get_node_path(item) skipped = check_skipped_wrapper(item) if skipped: @@ -307,7 +308,7 @@ def pytest_runtest_protocol(item, nextitem): None, None, ) - collected_test = testRunResultDict() + collected_test = TestRunResultDict() collected_test[absolute_node_id] = item_result execution_post( os.fsdecode(cwd), @@ -325,14 +326,12 @@ def check_skipped_wrapper(item): Keyword arguments: item -- the pytest item object. """ - if item.own_markers: - if check_skipped_condition(item): - return True + if item.own_markers and check_skipped_condition(item): + return True parent = item.parent while isinstance(parent, pytest.Class): - if parent.own_markers: - if check_skipped_condition(parent): - return True + if parent.own_markers and check_skipped_condition(parent): + return True parent = parent.parent return False @@ -343,7 +342,6 @@ def check_skipped_condition(item): Keyword arguments: item -- the pytest item object. """ - for marker in item.own_markers: # If the test is marked with skip then it will not hit the pytest_report_teststatus hook, # therefore we need to handle it as skipped here. 
@@ -376,14 +374,14 @@ def pytest_sessionfinish(session, exitstatus): if IS_DISCOVERY: if not (exitstatus == 0 or exitstatus == 1 or exitstatus == 5): - errorNode: TestNode = { + error_node: TestNode = { "name": "", "path": cwd, "type_": "error", "children": [], "id_": "", } - post_response(os.fsdecode(cwd), errorNode) + post_response(os.fsdecode(cwd), error_node) try: session_node: Union[TestNode, None] = build_test_tree(session) if not session_node: @@ -396,14 +394,14 @@ def pytest_sessionfinish(session, exitstatus): ERRORS.append( f"Error Occurred, traceback: {(traceback.format_exc() if e.__traceback__ else '')}" ) - errorNode: TestNode = { + error_node: TestNode = { "name": "", "path": cwd, "type_": "error", "children": [], "id_": "", } - post_response(os.fsdecode(cwd), errorNode) + post_response(os.fsdecode(cwd), error_node) else: if exitstatus == 0 or exitstatus == 1: exitstatus_bool = "success" @@ -469,7 +467,9 @@ def build_test_tree(session: pytest.Session) -> TestNode: ERRORS.append( f"unable to find original name for {test_case.name} with parameterization detected." ) - raise VSCodePytestError("Unable to find original name for parameterized test case") + raise VSCodePytestError( + "Unable to find original name for parameterized test case" + ) from None except KeyError: function_test_node: TestNode = create_parameterized_function_node( function_name, get_node_path(test_case), parent_id @@ -529,7 +529,7 @@ def build_test_tree(session: pytest.Session) -> TestNode: file_nodes_dict[test_case.parent] = parent_test_case parent_test_case["children"].append(test_node) created_files_folders_dict: Dict[str, TestNode] = {} - for _, file_node in file_nodes_dict.items(): + for file_node in file_nodes_dict.values(): # Iterate through all the files that exist and construct them into nested folders. root_folder_node: TestNode try: @@ -726,13 +726,11 @@ class DiscoveryPayloadDict(TypedDict): class ExecutionPayloadDict(Dict): - """ - A dictionary that is used to send a execution post request to the server. - """ + """A dictionary that is used to send a execution post request to the server.""" cwd: str status: Literal["success", "error"] - result: Union[testRunResultDict, None] + result: Union[TestRunResultDict, None] not_found: Union[List[str], None] # Currently unused need to check error: Union[str, None] # Currently unused need to check @@ -740,13 +738,13 @@ class ExecutionPayloadDict(Dict): class EOTPayloadDict(TypedDict): """A dictionary that is used to send a end of transmission post request to the server.""" - command_type: Union[Literal["discovery"], Literal["execution"]] + command_type: Literal["discovery", "execution"] eot: bool def get_node_path(node: Any) -> pathlib.Path: - """ - A function that returns the path of a node given the switch to pathlib.Path. + """A function that returns the path of a node given the switch to pathlib.Path. + It also evaluates if the node is a symlink and returns the equivalent path. """ node_path = getattr(node, "path", None) or pathlib.Path(node.fspath) @@ -760,23 +758,22 @@ def get_node_path(node: Any) -> pathlib.Path: if SYMLINK_PATH and not isinstance(node, pytest.Session): # Get relative between the cwd (resolved path) and the node path. 
try: - # check to see if the node path contains the symlink root already + # Check to see if the node path contains the symlink root already common_path = os.path.commonpath([SYMLINK_PATH, node_path]) if common_path == os.fsdecode(SYMLINK_PATH): - # node path is already relative to the SYMLINK_PATH root therefore return + # The node path is already relative to the SYMLINK_PATH root therefore return return node_path else: - # if the node path is not a symlink, then we need to calculate the equivalent symlink path - # get the relative path between the cwd and the node path (as the node path is not a symlink) + # If the node path is not a symlink, then we need to calculate the equivalent symlink path + # get the relative path between the cwd and the node path (as the node path is not a symlink). rel_path = node_path.relative_to(pathlib.Path.cwd()) # combine the difference between the cwd and the node path with the symlink path - sym_path = pathlib.Path(os.path.join(SYMLINK_PATH, rel_path)) - return sym_path + return pathlib.Path(SYMLINK_PATH, rel_path) except Exception as e: raise VSCodePytestError( f"Error occurred while calculating symlink equivalent from node path: {e}" f"\n SYMLINK_PATH: {SYMLINK_PATH}, \n node path: {node_path}, \n cwd: {pathlib.Path.cwd()}" - ) + ) from e return node_path @@ -785,17 +782,15 @@ def get_node_path(node: Any) -> pathlib.Path: def execution_post( - cwd: str, status: Literal["success", "error"], tests: Union[testRunResultDict, None] + cwd: str, status: Literal["success", "error"], tests: Union[TestRunResultDict, None] ): - """ - Sends a POST request with execution payload details. + """Sends a POST request with execution payload details. Args: cwd (str): Current working directory. status (Literal["success", "error"]): Execution status indicating success or error. tests (Union[testRunResultDict, None]): Test run results, if available. """ - payload: ExecutionPayloadDict = ExecutionPayloadDict( cwd=cwd, status=status, result=tests, not_found=None, error=None ) @@ -869,7 +864,7 @@ def send_post_request( file=sys.stderr, ) __writer = None - raise VSCodePytestError(error_msg) + raise VSCodePytestError(error_msg) from error rpc = { "jsonrpc": "2.0", @@ -895,7 +890,7 @@ def send_post_request( class DeferPlugin: @pytest.hookimpl(wrapper=True) def pytest_xdist_auto_num_workers(self, config: pytest.Config) -> Generator[None, int, int]: - """determine how many workers to use based on how many tests were selected in the test explorer""" + """Determine how many workers to use based on how many tests were selected in the test explorer.""" return min((yield), len(config.option.file_or_dir)) diff --git a/python_files/vscode_pytest/run_pytest_script.py b/python_files/vscode_pytest/run_pytest_script.py index fae9b5e4af18..515e04d1b84d 100644 --- a/python_files/vscode_pytest/run_pytest_script.py +++ b/python_files/vscode_pytest/run_pytest_script.py @@ -3,9 +3,9 @@ import json import os import pathlib -import socket import sys import sysconfig + import pytest # Adds the scripts directory to the PATH as a workaround for enabling shell for test execution. @@ -17,9 +17,10 @@ script_dir = pathlib.Path(__file__).parent.parent sys.path.append(os.fspath(script_dir)) sys.path.append(os.fspath(script_dir / "lib" / "python")) -from testing_tools import process_json_util # noqa: E402 -from testing_tools import socket_manager # noqa: E402 - +from testing_tools import ( # noqa: E402 + process_json_util, + socket_manager, +) # This script handles running pytest via pytest.main(). 
It is called via run in the # pytest execution adapter and gets the test_ids to run via stdin and the rest of the @@ -29,7 +30,7 @@ # Add the root directory to the path so that we can import the plugin. directory_path = pathlib.Path(__file__).parent.parent sys.path.append(os.fspath(directory_path)) - sys.path.insert(0, os.getcwd()) + sys.path.insert(0, os.getcwd()) # noqa: PTH109 # Get the rest of the args to run with pytest. args = sys.argv[1:] run_test_ids_pipe = os.environ.get("RUN_TEST_IDS_PIPE") @@ -61,13 +62,13 @@ continue except UnicodeDecodeError: continue - except socket.error as e: + except OSError as e: print(f"Error: Could not connect to runTestIdsPort: {e}") print("Error: Could not connect to runTestIdsPort") try: test_ids_from_buffer = raw_json.get("params") if test_ids_from_buffer: - arg_array = ["-p", "vscode_pytest"] + args + test_ids_from_buffer + arg_array = ["-p", "vscode_pytest", *args, *test_ids_from_buffer] print("Running pytest with args: " + str(arg_array)) pytest.main(arg_array) else: @@ -75,7 +76,7 @@ "Error: No test ids received from stdin, could be an error or a run request without ids provided.", ) print("Running pytest with no test ids as args. Args being used: ", args) - arg_array = ["-p", "vscode_pytest"] + args + arg_array = ["-p", "vscode_pytest", *args] pytest.main(arg_array) except json.JSONDecodeError: print( From 7e434a7b5f9a26a7773accb0db78eb9362f8b1b7 Mon Sep 17 00:00:00 2001 From: Anthony Kim <62267334+anthonykim1@users.noreply.github.com> Date: Tue, 9 Jul 2024 14:13:18 -0700 Subject: [PATCH 6/7] Hide Run Python option when in Jupyter Notebook (#23732) Resolves: https://github.com/microsoft/vscode-python/issues/22739 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 3de58d434ec4..705569ecab5e 100644 --- a/package.json +++ b/package.json @@ -1361,7 +1361,7 @@ { "submenu": "python.run", "group": "Python", - "when": "editorLangId == python && !virtualWorkspace && shellExecutionSupported && isWorkspaceTrusted" + "when": "editorLangId == python && !virtualWorkspace && shellExecutionSupported && isWorkspaceTrusted && notebookType != jupyter-notebook" }, { "submenu": "python.runFileInteractive", From 9484737d54141cd779a97e224d46b2ddaa18597f Mon Sep 17 00:00:00 2001 From: Don Jayamanne Date: Wed, 10 Jul 2024 08:21:42 +1000 Subject: [PATCH 7/7] Add configuration request --- .../locators/common/nativePythonFinder.ts | 59 ++++++++++++++----- 1 file changed, 43 insertions(+), 16 deletions(-) diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts index 029c131188fa..b8cb5c48a157 100644 --- a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts +++ b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts @@ -68,6 +68,7 @@ class NativeGlobalPythonFinderImpl extends DisposableBase implements NativeGloba constructor() { super(); this.connection = this.start(); + void this.configure(); this.firstRefreshResults = this.refreshFirstTime(); } @@ -320,22 +321,12 @@ class NativeGlobalPythonFinderImpl extends DisposableBase implements NativeGloba ); trackPromiseAndNotifyOnCompletion( - this.connection - .sendRequest<{ duration: number }>( - 'refresh', - // Send configuration information to the Python finder. - { - // This has a special meaning in locator, its lot a low priority - // as we treat this as workspace folders that can contain a large number of files. 
-                        project_directories: getWorkspaceFolderPaths(),
-                        // We do not want to mix this with `search_paths`
-                        environment_directories: getCustomVirtualEnvDirs(),
-                        conda_executable: getPythonSettingAndUntildify(CONDAPATH_SETTING_KEY),
-                        poetry_executable: getPythonSettingAndUntildify('poetryPath'),
-                    },
-                )
-                .then(({ duration }) => this.outputChannel.info(`Refresh completed in ${duration}ms`))
-                .catch((ex) => this.outputChannel.error('Refresh error', ex)),
+            this.configure().then(() =>
+                this.connection
+                    .sendRequest<{ duration: number }>('refresh')
+                    .then(({ duration }) => this.outputChannel.info(`Refresh completed in ${duration}ms`))
+                    .catch((ex) => this.outputChannel.error('Refresh error', ex)),
+            ),
         );
 
         completed.promise.finally(() => disposable.dispose());
@@ -344,8 +335,44 @@ class NativeGlobalPythonFinderImpl extends DisposableBase implements NativeGloba
             discovered: discovered.event,
         };
     }
+
+    private lastConfiguration?: ConfigurationOptions;
+
+    /**
+     * Configuration request; this must always be invoked before any other request.
+     * Must be invoked whenever there are changes to any data related to the configuration details.
+     */
+    private async configure() {
+        const options: ConfigurationOptions = {
+            workspaceDirectories: getWorkspaceFolderPaths(),
+            // We do not want to mix this with `search_paths`
+            environmentDirectories: getCustomVirtualEnvDirs(),
+            condaExecutable: getPythonSettingAndUntildify(CONDAPATH_SETTING_KEY),
+            poetryExecutable: getPythonSettingAndUntildify('poetryPath'),
+        };
+        // No need to send a configuration request if there are no changes.
+        if (JSON.stringify(options) === JSON.stringify(this.lastConfiguration || {})) {
+            return;
+        }
+        try {
+            this.lastConfiguration = options;
+            await this.connection.sendRequest('configure', options);
+        } catch (ex) {
+            this.outputChannel.error('Refresh error', ex);
+        }
+    }
 }
 
+type ConfigurationOptions = {
+    workspaceDirectories: string[];
+    /**
+     * Place where virtual envs and the like are stored.
+     * Should not contain workspace folders.
+     */
+    environmentDirectories: string[];
+    condaExecutable: string | undefined;
+    poetryExecutable: string | undefined;
+};
 /**
  * Gets all custom virtual environment locations to look for environments.
  */