diff --git a/pyproject.toml b/pyproject.toml
index de3af0f50670cb..7f622bff8cd55d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,3 +42,9 @@ license-files = [
   "LICENSE",
   "licenses/*",
 ]
+
+[tool.isort]
+add_imports = "from __future__ import annotations"
+
+[tool.ruff.isort]
+required-imports = ["from __future__ import annotations"]
diff --git a/src/directives.rs b/src/directives.rs
index 4098f114cc4562..33275ab3b55f42 100644
--- a/src/directives.rs
+++ b/src/directives.rs
@@ -90,17 +90,11 @@ pub fn extract_noqa_line_for(lxr: &[LexResult]) -> IntMap<usize, usize> {
 pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
     let mut exclusions: IntSet<usize> = IntSet::default();
     let mut splits: Vec<usize> = Vec::default();
-    let mut skip_file: bool = false;
     let mut off: Option<Location> = None;
     let mut last: Option<Location> = None;
     for &(start, ref tok, end) in lxr.iter().flatten() {
         last = Some(end);
 
-        // No need to keep processing, but we do need to determine the last token.
-        if skip_file {
-            continue;
-        }
-
         let Tok::Comment(comment_text) = tok else {
             continue;
         };
@@ -112,7 +106,10 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
         if comment_text == "# isort: split" {
             splits.push(start.row());
         } else if comment_text == "# isort: skip_file" || comment_text == "# isort:skip_file" {
-            skip_file = true;
+            return IsortDirectives {
+                skip_file: true,
+                ..IsortDirectives::default()
+            };
         } else if off.is_some() {
             if comment_text == "# isort: on" {
                 if let Some(start) = off {
@@ -142,7 +139,7 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
     IsortDirectives {
         exclusions,
         splits,
-        skip_file,
+        ..IsortDirectives::default()
     }
 }
 
@@ -281,10 +278,7 @@ x = 1
 y = 2
 z = x + 1";
         let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
-        assert_eq!(
-            extract_isort_directives(&lxr).exclusions,
-            IntSet::from_iter([1, 2, 3, 4])
-        );
+        assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
 
         let contents = "# isort: off
 x = 1
@@ -293,10 +287,7 @@ y = 2
 # isort: skip_file
 z = x + 1";
         let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
-        assert_eq!(
-            extract_isort_directives(&lxr).exclusions,
-            IntSet::from_iter([1, 2, 3, 4, 5, 6])
-        );
+        assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
     }
 
     #[test]
diff --git a/src/isort/rules/add_required_imports.rs b/src/isort/rules/add_required_imports.rs
index 21c7a48c0be951..9a2c463fd98081 100644
--- a/src/isort/rules/add_required_imports.rs
+++ b/src/isort/rules/add_required_imports.rs
@@ -23,6 +23,15 @@ struct ImportFrom<'a> {
     level: Option<&'a usize>,
 }
 
+struct Import<'a> {
+    name: Alias<'a>,
+}
+
+enum AnyImport<'a> {
+    Import(Import<'a>),
+    ImportFrom(ImportFrom<'a>),
+}
+
 impl fmt::Display for ImportFrom<'_> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(f, "from ")?;
@@ -55,7 +64,8 @@ fn has_required_import(block: &Block, required_import: &ImportFrom) -> bool {
     })
 }
 
-/// Find the first token that isn't a docstring, comment, or whitespace.
+/// Find the end of the first token that isn't a docstring, comment, or
+/// whitespace.
 fn find_splice_location(contents: &str) -> Location {
     let mut splice = Location::default();
     for (.., tok, end) in lexer::make_tokenizer(contents).flatten() {
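The behavioral change in `src/directives.rs` is that `extract_isort_directives` now short-circuits as soon as it sees `# isort: skip_file`: it returns `skip_file: true` with default (empty) `exclusions` and `splits` instead of continuing to track `# isort: off` ranges, which is why the test assertions above change from `IntSet::from_iter(...)` to `IntSet::default()`. Below is a minimal sketch, not part of the patch, of how that short-circuit could be exercised from the existing test module in `src/directives.rs`. It assumes the imports the surrounding tests already use (`lexer`, `LexResult`, `IntSet`, `extract_isort_directives`), and the test name is made up for illustration.

```rust
// Sketch: verify that `# isort: skip_file` makes the extractor return early.
#[test]
fn skip_file_returns_early() {
    // An `# isort: off` block is still open when `# isort: skip_file` appears,
    // but the new early return means no exclusions or splits are recorded.
    let contents = "# isort: off
x = 1
# isort: skip_file
y = 2";
    let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
    let directives = extract_isort_directives(&lxr);
    assert!(directives.skip_file);
    assert_eq!(directives.exclusions, IntSet::default());
    assert!(directives.splits.is_empty());
}
```

The pyproject.toml hunk dogfoods the new behavior on the repository's own Python files: `required-imports` under `[tool.ruff.isort]` asks the `add_required_imports` rule to splice `from __future__ import annotations` into any module that lacks it, mirroring isort's `add_imports` setting kept alongside it.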