diff --git a/foo.py b/foo.py new file mode 100644 index 0000000000000..011284e7b0c30 --- /dev/null +++ b/foo.py @@ -0,0 +1 @@ +"%s" % "Hello, world!" diff --git a/pyproject.toml b/pyproject.toml index de3af0f50670c..7f622bff8cd55 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,3 +42,9 @@ license-files = [ "LICENSE", "licenses/*", ] + +[tool.isort] +add_imports = "from __future__ import annotations" + +[tool.ruff.isort] +required-imports = ["from __future__ import annotations"] diff --git a/src/checkers/imports.rs b/src/checkers/imports.rs index 26cefa6cb72bf..b43e0c9ab8f67 100644 --- a/src/checkers/imports.rs +++ b/src/checkers/imports.rs @@ -13,37 +13,6 @@ use crate::settings::{flags, Settings}; use crate::source_code_locator::SourceCodeLocator; use crate::source_code_style::SourceCodeStyleDetector; -fn organize_imports( - tracker: &ImportTracker, - locator: &SourceCodeLocator, - settings: &Settings, - stylist: &SourceCodeStyleDetector, - autofix: flags::Autofix, - package: Option<&Path>, -) -> Vec<Diagnostic> { - let mut diagnostics = vec![]; - for block in tracker.iter() { - if !block.imports.is_empty() { - if let Some(diagnostic) = - isort::rules::organize_imports(block, locator, settings, stylist, autofix, package) - { - diagnostics.push(diagnostic); - } - } - } - diagnostics -} - -fn add_required_imports( - contents: &str, - tracker: &ImportTracker, - settings: &Settings, - autofix: flags::Autofix, -) -> Vec<Diagnostic> { - let blocks: Vec<&Block> = tracker.iter().collect(); - isort::rules::add_required_imports(contents, &blocks, settings, autofix) -} - #[allow(clippy::too_many_arguments)] pub fn check_imports( contents: &str, @@ -69,16 +38,25 @@ } tracker }; + let blocks: Vec<&Block> = tracker.iter().collect(); // Enforce import rules. 
 let mut diagnostics = vec![]; if settings.enabled.contains(&RuleCode::I001) { - diagnostics.extend(organize_imports( - &tracker, locator, settings, stylist, autofix, package, - )); + for block in &blocks { + if !block.imports.is_empty() { + if let Some(diagnostic) = isort::rules::organize_imports( + block, locator, settings, stylist, autofix, package, + ) { + diagnostics.push(diagnostic); + } + } + } } if settings.enabled.contains(&RuleCode::I002) { - diagnostics.extend(add_required_imports(contents, &tracker, settings, autofix)); + diagnostics.extend(isort::rules::add_required_imports( + contents, &blocks, settings, autofix, + )); } diagnostics } diff --git a/src/directives.rs b/src/directives.rs index 4098f114cc456..33275ab3b55f4 100644 --- a/src/directives.rs +++ b/src/directives.rs @@ -90,17 +90,11 @@ pub fn extract_noqa_line_for(lxr: &[LexResult]) -> IntMap<usize, usize> { pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives { let mut exclusions: IntSet<usize> = IntSet::default(); let mut splits: Vec<usize> = Vec::default(); - let mut skip_file: bool = false; let mut off: Option<Location> = None; let mut last: Option<Location> = None; for &(start, ref tok, end) in lxr.iter().flatten() { last = Some(end); - // No need to keep processing, but we do need to determine the last token. 
- if skip_file { - continue; - } - let Tok::Comment(comment_text) = tok else { continue; }; @@ -112,7 +106,10 @@ if comment_text == "# isort: split" { splits.push(start.row()); } else if comment_text == "# isort: skip_file" || comment_text == "# isort:skip_file" { - skip_file = true; + return IsortDirectives { + skip_file: true, + ..IsortDirectives::default() + }; } else if off.is_some() { if comment_text == "# isort: on" { if let Some(start) = off { @@ -142,7 +139,7 @@ IsortDirectives { exclusions, splits, - skip_file, + ..IsortDirectives::default() } } @@ -281,10 +278,7 @@ x = 1 y = 2 z = x + 1"; let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect(); - assert_eq!( - extract_isort_directives(&lxr).exclusions, - IntSet::from_iter([1, 2, 3, 4]) - ); + assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); let contents = "# isort: off x = 1 @@ -293,10 +287,7 @@ y = 2 # isort: skip_file z = x + 1"; let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect(); - assert_eq!( - extract_isort_directives(&lxr).exclusions, - IntSet::from_iter([1, 2, 3, 4, 5, 6]) - ); + assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); } #[test] diff --git a/src/isort/rules/add_required_imports.rs b/src/isort/rules/add_required_imports.rs index 21c7a48c0be95..0009d8f098e94 100644 --- a/src/isort/rules/add_required_imports.rs +++ b/src/isort/rules/add_required_imports.rs @@ -23,6 +23,15 @@ struct ImportFrom<'a> { level: Option<&'a usize>, } +struct Import<'a> { + name: Alias<'a>, +} + +enum AnyImport<'a> { + Import(Import<'a>), + ImportFrom(ImportFrom<'a>), +} + impl fmt::Display for ImportFrom<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "from ")?; @@ -32,30 +41,63 @@ if let Some(module) = self.module { write!(f, "{module}")?; } - write!(f, " 
import {}", self.name.name) + write!(f, " import {}", self.name.name)?; + Ok(()) + } +} + +impl fmt::Display for Import<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "import {}", self.name.name)?; + if let Some(as_name) = self.name.as_name { + write!(f, " as {as_name}")?; + } + Ok(()) } } -fn has_required_import(block: &Block, required_import: &ImportFrom) -> bool { - block.imports.iter().any(|import| { - let StmtKind::ImportFrom { - module, - names, - level, - } = &import.node else { - return false; - }; - - module.as_deref() == required_import.module - && level.as_ref() == required_import.level - && names.iter().any(|alias| { +impl fmt::Display for AnyImport<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + AnyImport::Import(import) => write!(f, "{import}"), + AnyImport::ImportFrom(import_from) => write!(f, "{import_from}"), + } + } +} + +fn contains(block: &Block, required_import: &AnyImport) -> bool { + block.imports.iter().any(|import| match required_import { + AnyImport::Import(required_import) => { + let StmtKind::Import { + names, + } = &import.node else { + return false; + }; + names.iter().any(|alias| { alias.node.name == required_import.name.name && alias.node.asname.as_deref() == required_import.name.as_name }) + } + AnyImport::ImportFrom(required_import) => { + let StmtKind::ImportFrom { + module, + names, + level, + } = &import.node else { + return false; + }; + module.as_deref() == required_import.module + && level.as_ref() == required_import.level + && names.iter().any(|alias| { + alias.node.name == required_import.name.name + && alias.node.asname.as_deref() == required_import.name.as_name + }) + } }) } -/// Find the first token that isn't a docstring, comment, or whitespace. +/// Find the end of the first token that isn't a docstring, comment, or +/// whitespace. 
fn find_splice_location(contents: &str) -> Location { let mut splice = Location::default(); for (.., tok, end) in lexer::make_tokenizer(contents).flatten() { @@ -69,7 +111,7 @@ fn find_splice_location(contents: &str) -> Location { } fn add_required_import( - required_import: &ImportFrom, + required_import: &AnyImport, contents: &str, blocks: &[&Block], settings: &Settings, @@ -79,7 +121,7 @@ fn add_required_import( if blocks .iter() .filter(|block| !block.nested) - .any(|block| has_required_import(block, required_import)) + .any(|block| contains(block, required_import)) { return None; } @@ -131,26 +173,47 @@ pub fn add_required_imports( error!("Expected require import to contain a single statement: `{}`", required_import); return vec![]; } - let StmtKind::ImportFrom { module, names, level } = &body[0].node else { - error!("Expected required import to be in import-from style: `{}`", required_import); - return vec![]; - }; - names.iter().filter_map(|name| { - add_required_import( - &ImportFrom { - module: module.as_ref().map(String::as_str), - name: Alias { - name: name.node.name.as_str(), - as_name: name.node.asname.as_deref(), - }, - level: level.as_ref(), - }, - contents, - blocks, - settings, - autofix, - ) - }).collect() + + match &body[0].node { + StmtKind::ImportFrom { module, names, level } => { + names.iter().filter_map(|name| { + add_required_import( + &AnyImport::ImportFrom(ImportFrom { + module: module.as_ref().map(String::as_str), + name: Alias { + name: name.node.name.as_str(), + as_name: name.node.asname.as_deref(), + }, + level: level.as_ref(), + }), + contents, + blocks, + settings, + autofix, + ) + }).collect() + } + StmtKind::Import { names } => { + names.iter().filter_map(|name| { + add_required_import( + &AnyImport::Import(Import { + name: Alias { + name: name.node.name.as_str(), + as_name: name.node.asname.as_deref(), + }, + }), + contents, + blocks, + settings, + autofix, + ) + }).collect() + } + _ => { + error!("Expected required import to be 
in import-from style: `{}`", required_import); + vec![] + } + } }) .collect() } diff --git a/tests/black_compatibility_test.rs b/tests/black_compatibility_test.rs index 380e1899ad836..177c3e1b4919d 100644 --- a/tests/black_compatibility_test.rs +++ b/tests/black_compatibility_test.rs @@ -191,8 +191,11 @@ fn test_ruff_black_compatibility() -> Result<()> { &codes, ]; + println!("ruff args: {:?}", ruff_args); + for entry in paths { let path = entry.path(); + println!("path: {}", path.display()); run_test(path, &blackd, &ruff_args).context(format!("Testing {}", path.display()))?; }