diff --git a/crates/ruff_linter/src/checkers/physical_lines.rs b/crates/ruff_linter/src/checkers/physical_lines.rs
index e1850544ef1f1..b2ccaf6c39b43 100644
--- a/crates/ruff_linter/src/checkers/physical_lines.rs
+++ b/crates/ruff_linter/src/checkers/physical_lines.rs
@@ -92,8 +92,7 @@ pub(crate) fn check_physical_lines(
 mod tests {
     use ruff_python_codegen::Stylist;
     use ruff_python_index::Indexer;
-    use ruff_python_parser::lexer::lex;
-    use ruff_python_parser::Mode;
+    use ruff_python_parser::parse_module;
     use ruff_source_file::Locator;
 
     use crate::line_width::LineLength;
@@ -107,15 +106,16 @@ mod tests {
     fn e501_non_ascii_char() {
         let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
         let locator = Locator::new(line);
-        let tokens: Vec<_> = lex(line, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(&tokens, &locator);
-        let stylist = Stylist::from_tokens(&tokens, &locator);
+        let program = parse_module(line).unwrap();
+        let indexer = Indexer::from_tokens(program.tokens(), &locator);
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
 
         let check_with_max_line_length = |line_length: LineLength| {
             check_physical_lines(
                 &locator,
                 &stylist,
                 &indexer,
+                program.comment_ranges(),
                 &[],
                 &LinterSettings {
                     pycodestyle: pycodestyle::settings::Settings {
diff --git a/crates/ruff_linter/src/directives.rs b/crates/ruff_linter/src/directives.rs
index 4a81439042563..5d75799c6482c 100644
--- a/crates/ruff_linter/src/directives.rs
+++ b/crates/ruff_linter/src/directives.rs
@@ -6,7 +6,7 @@ use std::str::FromStr;
 use bitflags::bitflags;
 use ruff_python_ast::{ModModule, StringFlags};
 use ruff_python_parser::lexer::LexResult;
-use ruff_python_parser::{Program, Tok};
+use ruff_python_parser::{Program, Tok, TokenKind, Tokens};
 use ruff_python_trivia::CommentRanges;
 use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
 
@@ -106,22 +106,22 @@ where
 }
 
 /// Extract a mapping from logical line to noqa line.
-fn extract_noqa_line_for(lxr: &[LexResult], locator: &Locator, indexer: &Indexer) -> NoqaMapping {
+fn extract_noqa_line_for(tokens: &Tokens, locator: &Locator, indexer: &Indexer) -> NoqaMapping {
     let mut string_mappings = Vec::new();
 
-    for (tok, range) in lxr.iter().flatten() {
-        match tok {
-            Tok::EndOfFile => {
+    for token in tokens.up_to_first_unknown() {
+        match token.kind() {
+            TokenKind::EndOfFile => {
                 break;
             }
 
             // For multi-line strings, we expect `noqa` directives on the last line of the
             // string.
-            Tok::String { flags, .. } if flags.is_triple_quoted() => {
-                if locator.contains_line_break(*range) {
+            TokenKind::String if token.is_triple_quoted_string() => {
+                if locator.contains_line_break(token.range()) {
                     string_mappings.push(TextRange::new(
-                        locator.line_start(range.start()),
-                        range.end(),
+                        locator.line_start(token.start()),
+                        token.end(),
                     ));
                 }
             }
@@ -381,7 +381,7 @@ impl TodoDirectiveKind {
 #[cfg(test)]
 mod tests {
     use ruff_python_parser::lexer::LexResult;
-    use ruff_python_parser::{lexer, Mode};
+    use ruff_python_parser::{lexer, parse_module, Mode};
     use ruff_text_size::{TextLen, TextRange, TextSize};
 
     use ruff_python_index::Indexer;
@@ -392,12 +392,14 @@ mod tests {
     };
     use crate::noqa::NoqaMapping;
 
+    use super::IsortDirectives;
+
     fn noqa_mappings(contents: &str) -> NoqaMapping {
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
+        let program = parse_module(contents).unwrap();
         let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
+        let indexer = Indexer::from_tokens(program.tokens(), &locator);
 
-        extract_noqa_line_for(&lxr, &locator, &indexer)
+        extract_noqa_line_for(program.tokens(), &locator, &indexer)
     }
 
     #[test]
@@ -567,29 +569,26 @@ assert foo, \
         );
     }
 
+    fn isort_directives(contents: &str) -> IsortDirectives {
+        let program = parse_module(contents).unwrap();
+        let locator = Locator::new(contents);
+        extract_isort_directives(&locator, program.comment_ranges())
+    }
+
     #[test]
     fn isort_exclusions() {
         let contents = "x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
-            Vec::default()
-        );
+        assert_eq!(isort_directives(contents).exclusions, Vec::default());
 
         let contents = "# isort: off
 x = 1
 y = 2
 # isort: on
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
         assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
+            isort_directives(contents).exclusions,
             Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(25))])
         );
 
@@ -600,11 +599,8 @@ y = 2
 # isort: on
 z = x + 1
 # isort: on";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
         assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
+            isort_directives(contents).exclusions,
             Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(38))])
         );
 
@@ -612,11 +608,8 @@ z = x + 1
 x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
         assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
+            isort_directives(contents).exclusions,
             Vec::from_iter([TextRange::at(TextSize::from(0), contents.text_len())])
         );
 
@@ -624,13 +617,7 @@ z = x + 1";
 x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
-            Vec::default()
-        );
+        assert_eq!(isort_directives(contents).exclusions, Vec::default());
 
         let contents = "# isort: off
 x = 1
@@ -638,13 +625,7 @@ x = 1
 y = 2
 # isort: skip_file
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
-            Vec::default()
-        );
+        assert_eq!(isort_directives(contents).exclusions, Vec::default());
     }
 
     #[test]
@@ -652,36 +633,18 @@ z = x + 1";
         let contents = "x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).splits,
-            Vec::new()
-        );
+        assert_eq!(isort_directives(contents).splits, Vec::new());
 
         let contents = "x = 1
 y = 2
 # isort: split
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).splits,
-            vec![TextSize::from(12)]
-        );
+        assert_eq!(isort_directives(contents).splits, vec![TextSize::from(12)]);
 
         let contents = "x = 1
 y = 2  # isort: split
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).splits,
-            vec![TextSize::from(13)]
-        );
+        assert_eq!(isort_directives(contents).splits, vec![TextSize::from(13)]);
    }
 
     #[test]
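
The directives change above is the template for most of this diff: instead of collecting a `Vec<LexResult>` from the standalone lexer, callers borrow a `Tokens` stream from the parse result and match on `TokenKind`. A minimal sketch of that consumer pattern, assuming only the `ruff_python_parser` APIs that appear in this diff (`parse_module`, `tokens()`, `up_to_first_unknown()`):

    use ruff_python_parser::{parse_module, TokenKind};
    use ruff_text_size::{Ranged, TextRange};

    fn comment_offsets(source: &str) -> Vec<TextRange> {
        // Parse once; the program owns the AST, the tokens, and the comment ranges.
        let program = parse_module(source).expect("valid Python");
        program
            .tokens()
            // Stop at the first lexing error, like iterating only the `Ok`
            // results of the old `lex()` iterator used to do.
            .up_to_first_unknown()
            .iter()
            .filter(|token| token.kind() == TokenKind::Comment)
            .map(|token| token.range())
            .collect()
    }
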
diff --git a/crates/ruff_linter/src/doc_lines.rs b/crates/ruff_linter/src/doc_lines.rs
index eebe21cec3ca5..d1f780053db75 100644
--- a/crates/ruff_linter/src/doc_lines.rs
+++ b/crates/ruff_linter/src/doc_lines.rs
@@ -2,28 +2,29 @@
 //! standalone comment or a constant string statement.
 
 use std::iter::FusedIterator;
+use std::slice::Iter;
 
 use ruff_python_ast::{self as ast, Stmt, Suite};
-use ruff_python_parser::{TokenKind, TokenKindIter};
+use ruff_python_parser::{Token, TokenKind, Tokens};
 use ruff_text_size::{Ranged, TextSize};
 
 use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor};
 use ruff_source_file::{Locator, UniversalNewlineIterator};
 
 /// Extract doc lines (standalone comments) from a token sequence.
-pub(crate) fn doc_lines_from_tokens(tokens: TokenKindIter) -> DocLines {
+pub(crate) fn doc_lines_from_tokens(tokens: &Tokens) -> DocLines {
     DocLines::new(tokens)
 }
 
 pub(crate) struct DocLines<'a> {
-    inner: TokenKindIter<'a>,
+    inner: Iter<'a, Token>,
     prev: TextSize,
 }
 
 impl<'a> DocLines<'a> {
-    fn new(tokens: TokenKindIter<'a>) -> Self {
+    fn new(tokens: &'a Tokens) -> Self {
         Self {
-            inner: tokens,
+            inner: tokens.up_to_first_unknown().iter(),
             prev: TextSize::default(),
         }
     }
@@ -35,12 +36,12 @@ impl Iterator for DocLines<'_> {
     fn next(&mut self) -> Option<Self::Item> {
         let mut at_start_of_line = true;
         loop {
-            let (tok, range) = self.inner.next()?;
+            let token = self.inner.next()?;
 
-            match tok {
+            match token.kind() {
                 TokenKind::Comment => {
                     if at_start_of_line {
-                        break Some(range.start());
+                        break Some(token.start());
                     }
                 }
                 TokenKind::Newline | TokenKind::NonLogicalNewline => {
@@ -54,7 +55,7 @@ impl Iterator for DocLines<'_> {
                 }
             }
 
-            self.prev = range.end();
+            self.prev = token.end();
         }
     }
 }
diff --git a/crates/ruff_linter/src/fix/edits.rs b/crates/ruff_linter/src/fix/edits.rs
index 048af21a4a37a..34026e3fb6fc6 100644
--- a/crates/ruff_linter/src/fix/edits.rs
+++ b/crates/ruff_linter/src/fix/edits.rs
@@ -663,13 +663,9 @@ x = 1 \
     fn add_to_dunder_all_test(raw: &str, names: &[&str], expect: &str) -> Result<()> {
         let locator = Locator::new(raw);
         let edits = {
-            let expr = parse_expression(raw)?.expr();
-            let stylist = Stylist::from_tokens(
-                &lexer::lex(raw, Mode::Expression).collect::<Vec<_>>(),
-                &locator,
-            );
-            // SUT
-            add_to_dunder_all(names.iter().copied(), expr, &stylist)
+            let program = parse_expression(raw)?;
+            let stylist = Stylist::from_tokens(program.tokens(), &locator);
+            add_to_dunder_all(names.iter().copied(), program.expr(), &stylist)
         };
         let diag = {
             use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;
diff --git a/crates/ruff_linter/src/importer/insertion.rs b/crates/ruff_linter/src/importer/insertion.rs
index f15dffa8969a1..5805e3e0009de 100644
--- a/crates/ruff_linter/src/importer/insertion.rs
+++ b/crates/ruff_linter/src/importer/insertion.rs
@@ -328,11 +328,14 @@ mod tests {
     #[test]
     fn start_of_file() -> Result<()> {
         fn insert(contents: &str) -> Result<Insertion> {
-            let suite = parse_module(contents)?.into_suite();
-            let tokens = ruff_python_parser::tokenize(contents, Mode::Module);
+            let program = parse_module(contents)?;
             let locator = Locator::new(contents);
-            let stylist = Stylist::from_tokens(&tokens, &locator);
-            Ok(Insertion::start_of_file(&suite, &locator, &stylist))
+            let stylist = Stylist::from_tokens(program.tokens(), &locator);
+            Ok(Insertion::start_of_file(
+                program.suite(),
+                &locator,
+                &stylist,
+            ))
         }
 
         let contents = "";
@@ -440,9 +443,9 @@ x = 1
     #[test]
     fn start_of_block() {
         fn insert(contents: &str, offset: TextSize) -> Insertion {
-            let program = ruff_python_parser::parse_module(contents).unwrap();
+            let program = parse_module(contents).unwrap();
             let locator = Locator::new(contents);
-            let stylist = Stylist::from_tokens(&program, &locator);
+            let stylist = Stylist::from_tokens(program.tokens(), &locator);
             Insertion::start_of_block(offset, &locator, &stylist, program.tokens())
         }
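
`Insertion::start_of_file` now borrows the suite (`program.suite()`) instead of detaching it with `into_suite()`, which is what lets the same `program` later hand its tokens to the `Stylist`. A small sketch of that borrow-everything-from-one-parse shape, using only public APIs shown elsewhere in this diff:

    use ruff_python_codegen::Stylist;
    use ruff_python_parser::parse_module;
    use ruff_source_file::Locator;

    fn main() {
        let source = "import os\n";
        let locator = Locator::new(source);
        let program = parse_module(source).unwrap();
        // The AST and the token stream are borrowed from the same parse
        // result, so no re-lexing (and no clone of the suite) is needed.
        let suite = program.suite();
        let stylist = Stylist::from_tokens(program.tokens(), &locator);
        assert_eq!(suite.len(), 1);
        let _ = stylist;
    }
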
diff --git a/crates/ruff_linter/src/linter.rs b/crates/ruff_linter/src/linter.rs
index 926e66d6f5fd1..eabf8942e1b00 100644
--- a/crates/ruff_linter/src/linter.rs
+++ b/crates/ruff_linter/src/linter.rs
@@ -10,11 +10,10 @@ use rustc_hash::FxHashMap;
 
 use ruff_diagnostics::Diagnostic;
 use ruff_notebook::Notebook;
-use ruff_python_ast::{ModModule, PySourceType, Suite};
+use ruff_python_ast::{ModModule, PySourceType};
 use ruff_python_codegen::Stylist;
 use ruff_python_index::Indexer;
-use ruff_python_parser::lexer::LexResult;
-use ruff_python_parser::{AsMode, ParseError, Program, TokenKindIter, Tokens};
+use ruff_python_parser::{ParseError, Program};
 use ruff_source_file::{Locator, SourceFileBuilder};
 use ruff_text_size::Ranged;
 
@@ -93,7 +92,7 @@ pub fn check_path(
     let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
     let mut doc_lines = vec![];
     if use_doc_lines {
-        doc_lines.extend(doc_lines_from_tokens(program.kinds()));
+        doc_lines.extend(doc_lines_from_tokens(program.tokens()));
     }
 
     // Run the token-based rules.
@@ -378,10 +377,10 @@ pub fn add_noqa_to_path(
     let locator = Locator::new(source_kind.source_code());
 
     // Detect the current code style (lazily).
-    let stylist = Stylist::from_tokens(&program, &locator);
+    let stylist = Stylist::from_tokens(program.tokens(), &locator);
 
     // Extra indices from the code.
-    let indexer = Indexer::from_tokens(&program, &locator);
+    let indexer = Indexer::from_tokens(program.tokens(), &locator);
 
     // Extract the `# noqa` and `# isort: skip` directives from the source.
     let directives = directives::extract_directives(
@@ -452,10 +451,10 @@ pub fn lint_only(
     let locator = Locator::new(source_kind.source_code());
 
     // Detect the current code style (lazily).
-    let stylist = Stylist::from_tokens(&program, &locator);
+    let stylist = Stylist::from_tokens(program.tokens(), &locator);
 
     // Extra indices from the code.
-    let indexer = Indexer::from_tokens(&program, &locator);
+    let indexer = Indexer::from_tokens(program.tokens(), &locator);
 
     // Extract the `# noqa` and `# isort: skip` directives from the source.
     let directives = directives::extract_directives(
@@ -543,10 +542,10 @@ pub fn lint_fix<'a>(
     let locator = Locator::new(transformed.source_code());
 
     // Detect the current code style (lazily).
-    let stylist = Stylist::from_tokens(&program, &locator);
+    let stylist = Stylist::from_tokens(program.tokens(), &locator);
 
     // Extra indices from the code.
-    let indexer = Indexer::from_tokens(&program, &locator);
+    let indexer = Indexer::from_tokens(program.tokens(), &locator);
 
     // Extract the `# noqa` and `# isort: skip` directives from the source.
     let directives = directives::extract_directives(
diff --git a/crates/ruff_linter/src/rules/pyflakes/mod.rs b/crates/ruff_linter/src/rules/pyflakes/mod.rs
index 91eb1a0cc15d0..90b12e5085015 100644
--- a/crates/ruff_linter/src/rules/pyflakes/mod.rs
+++ b/crates/ruff_linter/src/rules/pyflakes/mod.rs
@@ -641,8 +641,8 @@ mod tests {
         let program =
             ruff_python_parser::parse_unchecked_source(source_kind.source_code(), source_type);
         let locator = Locator::new(&contents);
-        let stylist = Stylist::from_tokens(&program, &locator);
-        let indexer = Indexer::from_tokens(&program, &locator);
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        let indexer = Indexer::from_tokens(program.tokens(), &locator);
         let directives = directives::extract_directives(
             &program,
             directives::Flags::from_settings(&settings),
diff --git a/crates/ruff_linter/src/test.rs b/crates/ruff_linter/src/test.rs
index f227c3d8120bd..2b6d567cf2809 100644
--- a/crates/ruff_linter/src/test.rs
+++ b/crates/ruff_linter/src/test.rs
@@ -113,8 +113,8 @@ pub(crate) fn test_contents<'a>(
     let program =
         ruff_python_parser::parse_unchecked_source(source_kind.source_code(), source_type);
     let locator = Locator::new(source_kind.source_code());
-    let stylist = Stylist::from_tokens(&program, &locator);
-    let indexer = Indexer::from_tokens(&program, &locator);
+    let stylist = Stylist::from_tokens(program.tokens(), &locator);
+    let indexer = Indexer::from_tokens(program.tokens(), &locator);
     let directives = directives::extract_directives(
         &program,
         directives::Flags::from_settings(settings),
@@ -179,8 +179,8 @@ pub(crate) fn test_contents<'a>(
         let program =
             ruff_python_parser::parse_unchecked_source(source_kind.source_code(), source_type);
         let locator = Locator::new(transformed.source_code());
-        let stylist = Stylist::from_tokens(&program, &locator);
-        let indexer = Indexer::from_tokens(&program, &locator);
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        let indexer = Indexer::from_tokens(program.tokens(), &locator);
         let directives = directives::extract_directives(
             &program,
             directives::Flags::from_settings(settings),
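
All four linter entry points (`check_path`, `add_noqa_to_path`, `lint_only`, `lint_fix`) now share the same front half: one parse, then `Stylist`, `Indexer`, and directives derived from it. A sketch of that pipeline, assuming `ruff_linter` publicly exposes `directives::{extract_directives, Flags}` as the `ruff_server` hunk further below suggests:

    use ruff_linter::directives::{extract_directives, Flags};
    use ruff_python_ast::PySourceType;
    use ruff_python_codegen::Stylist;
    use ruff_python_index::Indexer;
    use ruff_python_parser::parse_unchecked_source;
    use ruff_source_file::Locator;

    fn front_half(source: &str) {
        // Error-tolerant: a syntax error no longer aborts the pipeline.
        let program = parse_unchecked_source(source, PySourceType::Python);
        let locator = Locator::new(source);
        let stylist = Stylist::from_tokens(program.tokens(), &locator);
        let indexer = Indexer::from_tokens(program.tokens(), &locator);
        // Directive extraction reads the program too (tokens plus comment ranges).
        let directives = extract_directives(&program, Flags::all(), &locator, &indexer);
        let _ = (stylist, directives);
    }
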
diff --git a/crates/ruff_python_codegen/src/lib.rs b/crates/ruff_python_codegen/src/lib.rs
index 1e94ecfa0da44..ed267dfa67646 100644
--- a/crates/ruff_python_codegen/src/lib.rs
+++ b/crates/ruff_python_codegen/src/lib.rs
@@ -2,17 +2,16 @@ mod generator;
 mod stylist;
 
 pub use generator::Generator;
-use ruff_python_parser::{lexer, parse_module, Mode, ParseError};
+use ruff_python_parser::{parse_module, ParseError};
 use ruff_source_file::Locator;
 pub use stylist::Stylist;
 
 /// Run round-trip source code generation on a given Python code.
 pub fn round_trip(code: &str) -> Result<String, ParseError> {
     let locator = Locator::new(code);
-    let stmts = parse_module(code)?.suite();
-    let tokens: Vec<_> = lexer::lex(code, Mode::Module).collect();
-    let stylist = Stylist::from_tokens(&tokens, &locator);
+    let program = parse_module(code)?;
+    let stylist = Stylist::from_tokens(program.tokens(), &locator);
     let mut generator: Generator = (&stylist).into();
-    generator.unparse_suite(&stmts);
+    generator.unparse_suite(program.suite());
     Ok(generator.generate())
 }
diff --git a/crates/ruff_python_codegen/src/stylist.rs b/crates/ruff_python_codegen/src/stylist.rs
index fc9e43bfb6926..2ddfe0aa15d39 100644
--- a/crates/ruff_python_codegen/src/stylist.rs
+++ b/crates/ruff_python_codegen/src/stylist.rs
@@ -4,10 +4,10 @@ use std::ops::Deref;
 
 use once_cell::unsync::OnceCell;
 
-use ruff_python_ast::{str::Quote, StringFlags};
-use ruff_python_parser::lexer::LexResult;
-use ruff_python_parser::{Tok, TokenKind};
+use ruff_python_ast::str::Quote;
+use ruff_python_parser::{Token, TokenKind, Tokens};
 use ruff_source_file::{find_newline, LineEnding, Locator};
+use ruff_text_size::Ranged;
 
 #[derive(Debug, Clone)]
 pub struct Stylist<'a> {
@@ -35,40 +35,42 @@ impl<'a> Stylist<'a> {
         })
     }
 
-    pub fn from_tokens(tokens: &[LexResult], locator: &'a Locator<'a>) -> Self {
-        let indentation = detect_indention(tokens, locator);
+    pub fn from_tokens(tokens: &Tokens, locator: &'a Locator<'a>) -> Self {
+        let indentation = detect_indention(tokens.up_to_first_unknown(), locator);
 
         Self {
             locator,
             indentation,
-            quote: detect_quote(tokens),
+            quote: detect_quote(tokens.up_to_first_unknown()),
             line_ending: OnceCell::default(),
         }
     }
 }
 
-fn detect_quote(tokens: &[LexResult]) -> Quote {
-    for (token, _) in tokens.iter().flatten() {
-        match token {
-            Tok::String { flags, .. } if !flags.is_triple_quoted() => return flags.quote_style(),
-            Tok::FStringStart(flags) => return flags.quote_style(),
+fn detect_quote(tokens: &[Token]) -> Quote {
+    for token in tokens {
+        match token.kind() {
+            TokenKind::String if !token.is_triple_quoted_string() => {
+                return token.string_quote_style()
+            }
+            TokenKind::FStringStart => return token.string_quote_style(),
             _ => continue,
         }
     }
     Quote::default()
 }
 
-fn detect_indention(tokens: &[LexResult], locator: &Locator) -> Indentation {
-    let indent_range = tokens.iter().flatten().find_map(|(t, range)| {
-        if matches!(t, Tok::Indent) {
-            Some(range)
+fn detect_indention(tokens: &[Token], locator: &Locator) -> Indentation {
+    let indent_range = tokens.iter().find_map(|token| {
+        if matches!(token.kind(), TokenKind::Indent) {
+            Some(token.range())
         } else {
             None
         }
     });
 
     if let Some(indent_range) = indent_range {
-        let mut whitespace = locator.slice(*indent_range);
+        let mut whitespace = locator.slice(indent_range);
         // https://docs.python.org/3/reference/lexical_analysis.html#indentation
         // > A formfeed character may be present at the start of the line; it will be ignored for
         // > the indentation calculations above. Formfeed characters occurring elsewhere in the
@@ -158,8 +160,7 @@ impl Deref for Indentation {
 
 #[cfg(test)]
 mod tests {
-    use ruff_python_parser::lexer::lex;
-    use ruff_python_parser::Mode;
+    use ruff_python_parser::parse_module;
 
     use ruff_source_file::{find_newline, LineEnding};
 
@@ -170,44 +171,36 @@ mod tests {
     fn indentation() {
         let contents = r"x = 1";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).indentation(),
-            &Indentation::default()
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.indentation(), &Indentation::default());
 
         let contents = r"
 if True:
   pass
 ";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).indentation(),
-            &Indentation("  ".to_string())
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.indentation(), &Indentation("  ".to_string()));
 
         let contents = r"
 if True:
     pass
 ";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).indentation(),
-            &Indentation("    ".to_string())
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.indentation(), &Indentation("    ".to_string()));
 
         let contents = r"
 if True:
 	pass
 ";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).indentation(),
-            &Indentation("\t".to_string())
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.indentation(), &Indentation("\t".to_string()));
 
         let contents = r"
 x = (
@@ -217,11 +210,9 @@ x = (
 )
 ";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).indentation(),
-            &Indentation("  ".to_string())
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.indentation(), &Indentation::default());
 
         let contents = r"
 x = (
@@ -244,62 +235,48 @@ class FormFeedIndent:
        print(a)
 ";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).indentation(),
-            &Indentation(" ".to_string())
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
     }
 
     #[test]
     fn quote() {
         let contents = r"x = 1";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::default()
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::default());
 
         let contents = r"x = '1'";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Single
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Single);
 
         let contents = r"x = f'1'";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Single
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Single);
 
         let contents = r#"x = "1""#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Double
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Double);
 
         let contents = r#"x = f"1""#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Double
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Double);
 
         let contents = r#"s = "It's done.""#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Double
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Double);
 
         // No style if only double quoted docstring (will take default Double)
         let contents = r#"
@@ -308,11 +285,9 @@ def f():
     pass
 "#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::default()
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::default());
 
         // Detect from string literal appearing after docstring
         let contents = r#"
@@ -321,11 +296,9 @@ def f():
 a = 'v'
 "#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Single
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Single);
 
         let contents = r#"
 '''Module docstring.'''
@@ -333,11 +306,9 @@ a = 'v'
 a = "v"
 "#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Double
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Double);
 
         // Detect from f-string appearing after docstring
        let contents = r#"
@@ -346,11 +317,9 @@ a = "v"
 a = f'v'
 "#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Single
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Single);
 
         let contents = r#"
 '''Module docstring.'''
@@ -358,21 +327,17 @@ a = f'v'
 a = f"v"
 "#;
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Double
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Double);
 
         let contents = r"
 f'''Module docstring.'''
 ";
         let locator = Locator::new(contents);
-        let tokens: Vec<_> = lex(contents, Mode::Module).collect();
-        assert_eq!(
-            Stylist::from_tokens(&tokens, &locator).quote(),
-            Quote::Single
-        );
+        let program = parse_module(contents).unwrap();
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
+        assert_eq!(stylist.quote(), Quote::Single);
     }
 
     #[test]
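
Usage of the migrated `Stylist` is unchanged apart from construction; quote preference still comes from the first non-triple-quoted string or f-string token. A quick check, using the APIs as they appear at this revision:

    use ruff_python_ast::str::Quote;
    use ruff_python_codegen::Stylist;
    use ruff_python_parser::parse_module;
    use ruff_source_file::Locator;

    fn main() {
        let source = "greeting = 'hello'\n";
        let locator = Locator::new(source);
        let program = parse_module(source).unwrap();
        let stylist = Stylist::from_tokens(program.tokens(), &locator);
        // The single-quoted literal decides the preferred quote style.
        assert_eq!(stylist.quote(), Quote::Single);
    }

Note also the behavioral tweak pinned down by the updated `indentation()` test above: indentation is detected from `Indent` tokens only, so a file whose only indentation sits inside parentheses now yields `Indentation::default()`.
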
diff --git a/crates/ruff_python_index/src/comment_ranges.rs b/crates/ruff_python_index/src/comment_ranges.rs
deleted file mode 100644
index 602446a934c47..0000000000000
--- a/crates/ruff_python_index/src/comment_ranges.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use std::fmt::Debug;
-
-use ruff_python_parser::Tok;
-use ruff_python_trivia::CommentRanges;
-use ruff_text_size::TextRange;
-
-#[derive(Debug, Clone, Default)]
-pub struct CommentRangesBuilder {
-    ranges: Vec<TextRange>,
-}
-
-impl CommentRangesBuilder {
-    pub fn visit_token(&mut self, token: &Tok, range: TextRange) {
-        if token.is_comment() {
-            self.ranges.push(range);
-        }
-    }
-
-    pub fn finish(self) -> CommentRanges {
-        CommentRanges::new(self.ranges)
-    }
-}
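
With the builder gone, comment ranges are collected during parsing and queried off the program, as the block-comment tests further below now do. A minimal sketch, assuming `comment_ranges()` is exposed on the parse result at this revision:

    use ruff_python_parser::parse_module;
    use ruff_source_file::Locator;
    use ruff_text_size::TextSize;

    fn main() {
        let source = "# line 1\n# line 2\n";
        let program = parse_module(source).unwrap();
        let locator = Locator::new(source);
        // Comment ranges now come from the parser, not from a second pass
        // over the tokens in the `Indexer`.
        let starts = program.comment_ranges().block_comments(&locator);
        assert_eq!(starts, vec![TextSize::new(0), TextSize::new(9)]);
    }
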
diff --git a/crates/ruff_python_index/src/fstring_ranges.rs b/crates/ruff_python_index/src/fstring_ranges.rs
index b92bbd382c6b6..089050334eebc 100644
--- a/crates/ruff_python_index/src/fstring_ranges.rs
+++ b/crates/ruff_python_index/src/fstring_ranges.rs
@@ -1,7 +1,7 @@
 use std::collections::BTreeMap;
 
-use ruff_python_parser::Tok;
-use ruff_text_size::{TextRange, TextSize};
+use ruff_python_parser::{Token, TokenKind};
+use ruff_text_size::{Ranged, TextRange, TextSize};
 
 /// Stores the ranges of all f-strings in a file sorted by [`TextRange::start`].
 /// There can be multiple overlapping ranges for nested f-strings.
@@ -85,14 +85,14 @@ pub(crate) struct FStringRangesBuilder {
 }
 
 impl FStringRangesBuilder {
-    pub(crate) fn visit_token(&mut self, token: &Tok, range: TextRange) {
-        match token {
-            Tok::FStringStart(_) => {
-                self.start_locations.push(range.start());
+    pub(crate) fn visit_token(&mut self, token: &Token) {
+        match token.kind() {
+            TokenKind::FStringStart => {
+                self.start_locations.push(token.start());
             }
-            Tok::FStringEnd => {
+            TokenKind::FStringEnd => {
                 if let Some(start) = self.start_locations.pop() {
-                    self.raw.insert(start, TextRange::new(start, range.end()));
+                    self.raw.insert(start, TextRange::new(start, token.end()));
                 }
             }
             _ => {}
diff --git a/crates/ruff_python_index/src/indexer.rs b/crates/ruff_python_index/src/indexer.rs
index d7f7810de6e83..9b6995062fa6d 100644
--- a/crates/ruff_python_index/src/indexer.rs
+++ b/crates/ruff_python_index/src/indexer.rs
@@ -2,21 +2,15 @@
 //! are omitted from the AST (e.g., commented lines).
 
 use ruff_python_ast::Stmt;
-use ruff_python_parser::lexer::LexResult;
-use ruff_python_parser::Tok;
-use ruff_python_trivia::{
-    has_leading_content, has_trailing_content, is_python_whitespace, CommentRanges,
-};
+use ruff_python_parser::{TokenKind, Tokens};
+use ruff_python_trivia::{has_leading_content, has_trailing_content, is_python_whitespace};
 use ruff_source_file::Locator;
 use ruff_text_size::{Ranged, TextRange, TextSize};
 
 use crate::fstring_ranges::{FStringRanges, FStringRangesBuilder};
 use crate::multiline_ranges::{MultilineRanges, MultilineRangesBuilder};
-use crate::CommentRangesBuilder;
 
 pub struct Indexer {
-    comment_ranges: CommentRanges,
-
     /// Stores the start offset of continuation lines.
     continuation_lines: Vec<TextSize>,
@@ -28,10 +22,9 @@
 }
 
 impl Indexer {
-    pub fn from_tokens(tokens: &[LexResult], locator: &Locator) -> Self {
+    pub fn from_tokens(tokens: &Tokens, locator: &Locator<'_>) -> Self {
         assert!(TextSize::try_from(locator.contents().len()).is_ok());
 
-        let mut comment_ranges_builder = CommentRangesBuilder::default();
         let mut fstring_ranges_builder = FStringRangesBuilder::default();
         let mut multiline_ranges_builder = MultilineRangesBuilder::default();
         let mut continuation_lines = Vec::new();
@@ -39,8 +32,8 @@ impl Indexer {
         let mut prev_end = TextSize::default();
         let mut line_start = TextSize::default();
 
-        for (tok, range) in tokens.iter().flatten() {
-            let trivia = locator.slice(TextRange::new(prev_end, range.start()));
+        for token in tokens.up_to_first_unknown() {
+            let trivia = locator.slice(TextRange::new(prev_end, token.start()));
 
             // Get the trivia between the previous and the current token and detect any newlines.
             // This is necessary because `RustPython` doesn't emit `[Tok::Newline]` tokens
@@ -59,38 +52,31 @@ impl Indexer {
                 }
             }
 
-            comment_ranges_builder.visit_token(tok, *range);
-            fstring_ranges_builder.visit_token(tok, *range);
-            multiline_ranges_builder.visit_token(tok, *range);
+            fstring_ranges_builder.visit_token(token);
+            multiline_ranges_builder.visit_token(token);
 
-            match tok {
-                Tok::Newline | Tok::NonLogicalNewline => {
-                    line_start = range.end();
+            match token.kind() {
+                TokenKind::Newline | TokenKind::NonLogicalNewline => {
+                    line_start = token.end();
                 }
-                Tok::String { .. } => {
+                TokenKind::String => {
                     // If the previous token was a string, find the start of the line that contains
                     // the closing delimiter, since the token itself can span multiple lines.
-                    line_start = locator.line_start(range.end());
+                    line_start = locator.line_start(token.end());
                 }
                 _ => {}
             }
 
-            prev_end = range.end();
+            prev_end = token.end();
         }
 
         Self {
-            comment_ranges: comment_ranges_builder.finish(),
             continuation_lines,
             fstring_ranges: fstring_ranges_builder.finish(),
             multiline_ranges: multiline_ranges_builder.finish(),
         }
     }
 
-    /// Returns the byte offset ranges of comments
-    pub const fn comment_ranges(&self) -> &CommentRanges {
-        &self.comment_ranges
-    }
-
     /// Returns the byte offset ranges of f-strings.
     pub const fn fstring_ranges(&self) -> &FStringRanges {
         &self.fstring_ranges
@@ -225,19 +211,22 @@ impl Indexer {
 
 #[cfg(test)]
 mod tests {
-    use ruff_python_parser::lexer::LexResult;
-    use ruff_python_parser::{lexer, Mode};
+    use ruff_python_parser::parse_module;
     use ruff_source_file::Locator;
     use ruff_text_size::{TextRange, TextSize};
 
     use crate::Indexer;
 
+    fn new_indexer(contents: &str) -> Indexer {
+        let program = parse_module(contents).unwrap();
+        let locator = Locator::new(contents);
+        Indexer::from_tokens(program.tokens(), &locator)
+    }
+
     #[test]
     fn continuation() {
         let contents = r"x = 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(&lxr, &Locator::new(contents));
-        assert_eq!(indexer.continuation_line_starts(), &[]);
+        assert_eq!(new_indexer(contents).continuation_line_starts(), &[]);
 
         let contents = r"
 # Hello, world!
@@ -248,9 +237,7 @@ y = 2
 "
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(&lxr, &Locator::new(contents));
-        assert_eq!(indexer.continuation_line_starts(), &[]);
+        assert_eq!(new_indexer(contents).continuation_line_starts(), &[]);
 
         let contents = r#"
 x = \
@@ -268,10 +255,8 @@ if True:
 )
 "#
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(lxr.as_slice(), &Locator::new(contents));
         assert_eq!(
-            indexer.continuation_line_starts(),
+            new_indexer(contents).continuation_line_starts(),
             [
                 // row 1
                 TextSize::from(0),
@@ -300,10 +285,8 @@ x = 1; \
 import os
 "
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(lxr.as_slice(), &Locator::new(contents));
         assert_eq!(
-            indexer.continuation_line_starts(),
+            new_indexer(contents).continuation_line_starts(),
             [
                 // row 9
                 TextSize::from(84),
@@ -323,10 +306,8 @@ f'foo { 'str1' \
 }'
 "
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(lxr.as_slice(), &Locator::new(contents));
         assert_eq!(
-            indexer.continuation_line_starts(),
+            new_indexer(contents).continuation_line_starts(),
             [
                 // row 1
                 TextSize::new(0),
@@ -348,10 +329,8 @@ x = (
   + 2)
 "
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(lxr.as_slice(), &Locator::new(contents));
         assert_eq!(
-            indexer.continuation_line_starts(),
+            new_indexer(contents).continuation_line_starts(),
             [
                 // row 3
                 TextSize::new(12),
@@ -373,10 +352,8 @@ f"start {f"inner {f"another"}"} end"
 f"implicit " f"concatenation"
 "#
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(lxr.as_slice(), &Locator::new(contents));
         assert_eq!(
-            indexer
+            new_indexer(contents)
                 .fstring_ranges()
                 .values()
                 .copied()
@@ -409,10 +386,8 @@ f-string"""}
 """
 "#
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(lxr.as_slice(), &Locator::new(contents));
         assert_eq!(
-            indexer
+            new_indexer(contents)
                 .fstring_ranges()
                 .values()
                 .copied()
@@ -447,8 +422,7 @@ f-string"""}
 the end"""
 "#
         .trim();
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(lxr.as_slice(), &Locator::new(contents));
+        let indexer = new_indexer(contents);
 
         // For reference, the ranges of the f-strings in the above code are as
         // follows where the ones inside parentheses are nested f-strings:
diff --git a/crates/ruff_python_index/src/lib.rs b/crates/ruff_python_index/src/lib.rs
index 7f1117de97a9d..aabdef1d482ba 100644
--- a/crates/ruff_python_index/src/lib.rs
+++ b/crates/ruff_python_index/src/lib.rs
@@ -1,7 +1,5 @@
-mod comment_ranges;
 mod fstring_ranges;
 mod indexer;
 mod multiline_ranges;
 
-pub use comment_ranges::CommentRangesBuilder;
 pub use indexer::Indexer;
diff --git a/crates/ruff_python_index/src/multiline_ranges.rs b/crates/ruff_python_index/src/multiline_ranges.rs
index 8043929aa9e6b..75fc5d90b7996 100644
--- a/crates/ruff_python_index/src/multiline_ranges.rs
+++ b/crates/ruff_python_index/src/multiline_ranges.rs
@@ -1,6 +1,5 @@
-use ruff_python_ast::StringFlags;
-use ruff_python_parser::Tok;
-use ruff_text_size::TextRange;
+use ruff_python_parser::{Token, TokenKind};
+use ruff_text_size::{Ranged, TextRange};
 
 /// Stores the range of all multiline strings in a file sorted by
 /// [`TextRange::start`].
@@ -46,10 +45,10 @@ pub(crate) struct MultilineRangesBuilder {
 }
 
 impl MultilineRangesBuilder {
-    pub(crate) fn visit_token(&mut self, token: &Tok, range: TextRange) {
-        if let Tok::String { flags, .. } | Tok::FStringMiddle { flags, .. } = token {
-            if flags.is_triple_quoted() {
-                self.ranges.push(range);
+    pub(crate) fn visit_token(&mut self, token: &Token) {
+        if matches!(token.kind(), TokenKind::String | TokenKind::FStringStart) {
+            if token.is_triple_quoted_string() {
+                self.ranges.push(token.range());
             }
         }
     }
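
The `Indexer` keeps its f-string and multiline lookups; only the input type changed. A sketch mirroring the `new_indexer` helper the tests above introduce:

    use ruff_python_index::Indexer;
    use ruff_python_parser::parse_module;
    use ruff_source_file::Locator;

    fn main() {
        let source = "f'one {x}'\n'''multi\nline'''\n";
        let locator = Locator::new(source);
        let program = parse_module(source).unwrap();
        let indexer = Indexer::from_tokens(program.tokens(), &locator);
        // One f-string range is recorded; the triple-quoted literal is
        // tracked separately as a multiline string.
        assert_eq!(indexer.fstring_ranges().values().count(), 1);
    }
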
diff --git a/crates/ruff_python_parser/src/lexer.rs b/crates/ruff_python_parser/src/lexer.rs
index d7b825e38457f..b70d2f2639d3d 100644
--- a/crates/ruff_python_parser/src/lexer.rs
+++ b/crates/ruff_python_parser/src/lexer.rs
@@ -1500,6 +1500,37 @@ impl Token {
     pub const fn is_trivia(self) -> bool {
         matches!(self.kind, TokenKind::Comment | TokenKind::NonLogicalNewline)
     }
+
+    /// Returns `true` if this is any kind of string token.
+    const fn is_any_string(self) -> bool {
+        matches!(
+            self.kind,
+            TokenKind::String
+                | TokenKind::FStringStart
+                | TokenKind::FStringMiddle
+                | TokenKind::FStringEnd
+        )
+    }
+
+    /// Returns `true` if the current token is a triple-quoted string of any kind.
+    ///
+    /// # Panics
+    ///
+    /// If it isn't a string or any f-string tokens.
+    pub fn is_triple_quoted_string(self) -> bool {
+        assert!(self.is_any_string());
+        self.flags.is_triple_quoted()
+    }
+
+    /// Returns the [`Quote`] style for the current string token of any kind.
+    ///
+    /// # Panics
+    ///
+    /// If it isn't a string or any f-string tokens.
+    pub fn string_quote_style(self) -> Quote {
+        assert!(self.is_any_string());
+        self.flags.quote_style()
+    }
 }
 
 impl Ranged for Token {
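
Both new helpers assert on the token kind, so callers are expected to check `kind()` before calling them — exactly the order the updated `detect_quote` follows. A defensive-use sketch:

    use ruff_python_parser::{parse_module, TokenKind};

    fn main() {
        let program = parse_module("s = '''text'''\n").unwrap();
        for token in program.tokens().up_to_first_unknown() {
            // Guard on the kind first: `is_triple_quoted_string` panics if
            // the token is not a string or f-string token.
            if matches!(token.kind(), TokenKind::String | TokenKind::FStringStart) {
                println!("triple-quoted: {}", token.is_triple_quoted_string());
            }
        }
    }
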
diff --git a/crates/ruff_python_trivia_integration_tests/tests/block_comments.rs b/crates/ruff_python_trivia_integration_tests/tests/block_comments.rs
index fe6cc47ac9d7d..66ae74200f9a8 100644
--- a/crates/ruff_python_trivia_integration_tests/tests/block_comments.rs
+++ b/crates/ruff_python_trivia_integration_tests/tests/block_comments.rs
@@ -1,5 +1,5 @@
 use ruff_python_index::Indexer;
-use ruff_python_parser::{tokenize, Mode};
+use ruff_python_parser::{parse_module, tokenize, Mode};
 use ruff_source_file::Locator;
 use ruff_text_size::TextSize;
 
@@ -7,12 +7,11 @@ use ruff_text_size::TextSize;
 fn block_comments_two_line_block_at_start() {
     // arrange
     let source = "# line 1\n# line 2\n";
-    let tokens = tokenize(source, Mode::Module);
+    let program = parse_module(source).unwrap();
     let locator = Locator::new(source);
-    let indexer = Indexer::from_tokens(&tokens, &locator);
 
     // act
-    let block_comments = indexer.comment_ranges().block_comments(&locator);
+    let block_comments = program.comment_ranges().block_comments(&locator);
 
     // assert
     assert_eq!(block_comments, vec![TextSize::new(0), TextSize::new(9)]);
@@ -22,12 +21,11 @@ fn block_comments_two_line_block_at_start() {
 fn block_comments_indented_block() {
     // arrange
     let source = "    # line 1\n    # line 2\n";
-    let tokens = tokenize(source, Mode::Module);
+    let program = parse_module(source).unwrap();
     let locator = Locator::new(source);
-    let indexer = Indexer::from_tokens(&tokens, &locator);
 
     // act
-    let block_comments = indexer.comment_ranges().block_comments(&locator);
+    let block_comments = program.comment_ranges().block_comments(&locator);
 
     // assert
     assert_eq!(block_comments, vec![TextSize::new(4), TextSize::new(17)]);
@@ -37,12 +35,11 @@ fn block_comments_indented_block() {
 fn block_comments_single_line_is_not_a_block() {
     // arrange
     let source = "\n";
-    let tokens = tokenize(source, Mode::Module);
+    let program = parse_module(source).unwrap();
     let locator = Locator::new(source);
-    let indexer = Indexer::from_tokens(&tokens, &locator);
 
     // act
-    let block_comments = indexer.comment_ranges().block_comments(&locator);
+    let block_comments = program.comment_ranges().block_comments(&locator);
 
     // assert
     assert_eq!(block_comments, Vec::<TextSize>::new());
@@ -52,12 +49,11 @@ fn block_comments_single_line_is_not_a_block() {
 fn block_comments_lines_with_code_not_a_block() {
     // arrange
     let source = "x = 1  # line 1\ny = 2  # line 2\n";
-    let tokens = tokenize(source, Mode::Module);
+    let program = parse_module(source).unwrap();
     let locator = Locator::new(source);
-    let indexer = Indexer::from_tokens(&tokens, &locator);
 
     // act
-    let block_comments = indexer.comment_ranges().block_comments(&locator);
+    let block_comments = program.comment_ranges().block_comments(&locator);
 
     // assert
     assert_eq!(block_comments, Vec::<TextSize>::new());
@@ -67,12 +63,11 @@ fn block_comments_lines_with_code_not_a_block() {
 fn block_comments_sequential_lines_not_in_block() {
     // arrange
     let source = "    # line 1\n        # line 2\n";
-    let tokens = tokenize(source, Mode::Module);
+    let program = parse_module(source).unwrap();
     let locator = Locator::new(source);
-    let indexer = Indexer::from_tokens(&tokens, &locator);
 
     // act
-    let block_comments = indexer.comment_ranges().block_comments(&locator);
+    let block_comments = program.comment_ranges().block_comments(&locator);
 
     // assert
     assert_eq!(block_comments, Vec::<TextSize>::new());
@@ -87,12 +82,11 @@ fn block_comments_lines_in_triple_quotes_not_a_block() {
     # line 2
     """
     "#;
-    let tokens = tokenize(source, Mode::Module);
+    let program = parse_module(source).unwrap();
     let locator = Locator::new(source);
-    let indexer = Indexer::from_tokens(&tokens, &locator);
 
     // act
-    let block_comments = indexer.comment_ranges().block_comments(&locator);
+    let block_comments = program.comment_ranges().block_comments(&locator);
 
     // assert
     assert_eq!(block_comments, Vec::<TextSize>::new());
@@ -124,12 +118,11 @@ y = 2  # do not form a block comment
     # therefore do not form a block comment
     """
     "#;
-    let tokens = tokenize(source, Mode::Module);
+    let program = parse_module(source).unwrap();
     let locator = Locator::new(source);
-    let indexer = Indexer::from_tokens(&tokens, &locator);
 
     // act
-    let block_comments = indexer.comment_ranges().block_comments(&locator);
+    let block_comments = program.comment_ranges().block_comments(&locator);
 
     // assert
     assert_eq!(
diff --git a/crates/ruff_server/src/lint.rs b/crates/ruff_server/src/lint.rs
index 21acf21359e7c..6d408b0c51cb5 100644
--- a/crates/ruff_server/src/lint.rs
+++ b/crates/ruff_server/src/lint.rs
@@ -105,10 +105,10 @@ pub(crate) fn check(query: &DocumentQuery, encoding: PositionEncoding) -> Diagno
     let locator = Locator::with_index(source_kind.source_code(), index.clone());
 
     // Detect the current code style (lazily).
-    let stylist = Stylist::from_tokens(&program, &locator);
+    let stylist = Stylist::from_tokens(program.tokens(), &locator);
 
     // Extra indices from the code.
-    let indexer = Indexer::from_tokens(&program, &locator);
+    let indexer = Indexer::from_tokens(program.tokens(), &locator);
 
     // Extract the `# noqa` and `# isort: skip` directives from the source.
     let directives = extract_directives(&program, Flags::all(), &locator, &indexer);
diff --git a/crates/ruff_wasm/src/lib.rs b/crates/ruff_wasm/src/lib.rs
index 4de2e1978c524..cd3afd2696040 100644
--- a/crates/ruff_wasm/src/lib.rs
+++ b/crates/ruff_wasm/src/lib.rs
@@ -17,7 +17,7 @@ use ruff_python_ast::{Mod, PySourceType};
 use ruff_python_codegen::Stylist;
 use ruff_python_formatter::{format_module_ast, pretty_comments, PyFormatContext, QuoteStyle};
 use ruff_python_index::Indexer;
-use ruff_python_parser::{parse, AsMode, Mode, Program};
+use ruff_python_parser::{parse, parse_unchecked, parse_unchecked_source, AsMode, Mode, Program};
 use ruff_python_trivia::CommentRanges;
 use ruff_source_file::{Locator, SourceLocation};
 use ruff_text_size::Ranged;
@@ -161,17 +161,16 @@ impl Workspace {
         let source_kind = SourceKind::Python(contents.to_string());
 
         // Parse once.
-        let program =
-            ruff_python_parser::parse_unchecked_source(source_kind.source_code(), source_type);
+        let program = parse_unchecked_source(source_kind.source_code(), source_type);
 
         // Map row and column locations to byte slices (lazily).
         let locator = Locator::new(contents);
 
         // Detect the current code style (lazily).
-        let stylist = Stylist::from_tokens(&program, &locator);
+        let stylist = Stylist::from_tokens(program.tokens(), &locator);
 
         // Extra indices from the code.
-        let indexer = Indexer::from_tokens(&program, &locator);
+        let indexer = Indexer::from_tokens(program.tokens(), &locator);
 
         // Extract the `# noqa` and `# isort: skip` directives from the source.
         let directives = directives::extract_directives(
@@ -257,9 +256,9 @@ impl Workspace {
 
     /// Parses the content and returns its AST
     pub fn parse(&self, contents: &str) -> Result<String, Error> {
-        let program = Program::parse(contents, Mode::Module);
+        let program = parse_unchecked(contents, Mode::Module);
 
-        Ok(format!("{:#?}", program.into_ast()))
+        Ok(format!("{:#?}", program.into_syntax()))
     }
 
     pub fn tokens(&self, contents: &str) -> Result<String, Error> {
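
For the playground, `parse_unchecked` plus `into_syntax` replaces `Program::parse` plus `into_ast`. A closing sketch of that flow; the `errors()` accessor used here is an assumption, not something this diff shows:

    use ruff_python_parser::{parse_unchecked, Mode};

    fn main() {
        // Error-tolerant parse: syntax errors are collected on the program
        // instead of being returned as `Err`.
        let program = parse_unchecked("x = (1,", Mode::Module);
        let had_errors = !program.errors().is_empty(); // assumed accessor
        // Consuming accessor: yields the `Mod` syntax node for debug printing.
        let ast = program.into_syntax();
        println!("had_errors: {had_errors}\n{ast:#?}");
    }
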