Update Stylist, Indexer to use tokens from parsed
dhruvmanila committed May 29, 2024
1 parent c0054de commit c48a5a1
Showing 19 changed files with 247 additions and 349 deletions.
crates/ruff_linter/src/checkers/physical_lines.rs (10 changes: 5 additions & 5 deletions)
@@ -92,8 +92,7 @@ pub(crate) fn check_physical_lines(
 mod tests {
     use ruff_python_codegen::Stylist;
     use ruff_python_index::Indexer;
-    use ruff_python_parser::lexer::lex;
-    use ruff_python_parser::Mode;
+    use ruff_python_parser::parse_module;
     use ruff_source_file::Locator;

     use crate::line_width::LineLength;
@@ -107,15 +106,16 @@ mod tests {
     fn e501_non_ascii_char() {
         let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
         let locator = Locator::new(line);
-        let tokens: Vec<_> = lex(line, Mode::Module).collect();
-        let indexer = Indexer::from_tokens(&tokens, &locator);
-        let stylist = Stylist::from_tokens(&tokens, &locator);
+        let program = parse_module(line).unwrap();
+        let indexer = Indexer::from_program(&program, &locator);
+        let stylist = Stylist::from_program(&program, &locator);

         let check_with_max_line_length = |line_length: LineLength| {
             check_physical_lines(
                 &locator,
                 &stylist,
                 &indexer,
+                program.comment_ranges(),
                 &[],
                 &LinterSettings {
                     pycodestyle: pycodestyle::settings::Settings {
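Taken together, these changes replace the standalone lexer pass with a single parse that carries tokens, comment ranges, and the AST. A minimal sketch of the resulting setup, assuming only the APIs visible in this diff (`parse_module`, `Indexer::from_program`, `Stylist::from_program`, `Program::comment_ranges`); the `analysis_context` wrapper itself is hypothetical:

    // Sketch only: `parse_module`, `from_program`, and `comment_ranges` are
    // taken from this diff; `analysis_context` is a hypothetical wrapper.
    use ruff_python_codegen::Stylist;
    use ruff_python_index::Indexer;
    use ruff_python_parser::parse_module;
    use ruff_source_file::Locator;

    fn analysis_context(source: &str) {
        let locator = Locator::new(source);
        // One parse now produces tokens, comment ranges, and the AST together,
        // replacing the old standalone `lex(source, Mode::Module)` pass.
        let program = parse_module(source).unwrap();
        let indexer = Indexer::from_program(&program, &locator);
        let stylist = Stylist::from_program(&program, &locator);
        let comment_ranges = program.comment_ranges();
        // ...hand locator/stylist/indexer/comment_ranges on to the checkers.
        let _ = (indexer, stylist, comment_ranges);
    }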
crates/ruff_linter/src/directives.rs (97 changes: 30 additions & 67 deletions)
@@ -6,7 +6,7 @@ use std::str::FromStr;
 use bitflags::bitflags;
 use ruff_python_ast::{ModModule, StringFlags};
 use ruff_python_parser::lexer::LexResult;
-use ruff_python_parser::{Program, Tok};
+use ruff_python_parser::{Program, Tok, TokenKind, Tokens};
 use ruff_python_trivia::CommentRanges;
 use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};

@@ -122,22 +122,22 @@
 }

 /// Extract a mapping from logical line to noqa line.
-fn extract_noqa_line_for(lxr: &[LexResult], locator: &Locator, indexer: &Indexer) -> NoqaMapping {
+fn extract_noqa_line_for(tokens: &Tokens, locator: &Locator, indexer: &Indexer) -> NoqaMapping {
     let mut string_mappings = Vec::new();

-    for (tok, range) in lxr.iter().flatten() {
-        match tok {
-            Tok::EndOfFile => {
+    for token in tokens.up_to_first_unknown() {
+        match token.kind() {
+            TokenKind::EndOfFile => {
                 break;
             }

             // For multi-line strings, we expect `noqa` directives on the last line of the
             // string.
-            Tok::String { flags, .. } if flags.is_triple_quoted() => {
-                if locator.contains_line_break(*range) {
+            TokenKind::String if token.is_triple_quoted_string() => {
+                if locator.contains_line_break(token.range()) {
                     string_mappings.push(TextRange::new(
-                        locator.line_start(range.start()),
-                        range.end(),
+                        locator.line_start(token.start()),
+                        token.end(),
                     ));
                 }
             }
@@ -397,7 +397,7 @@ impl TodoDirectiveKind {
 #[cfg(test)]
 mod tests {
     use ruff_python_parser::lexer::LexResult;
-    use ruff_python_parser::{lexer, Mode};
+    use ruff_python_parser::{lexer, parse_module, Mode};
     use ruff_text_size::{TextLen, TextRange, TextSize};

     use ruff_python_index::Indexer;
@@ -408,12 +408,14 @@
     };
     use crate::noqa::NoqaMapping;

+    use super::IsortDirectives;
+
     fn noqa_mappings(contents: &str) -> NoqaMapping {
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
+        let program = parse_module(contents).unwrap();
         let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
+        let indexer = Indexer::from_program(&program, &locator);

-        extract_noqa_line_for(&lxr, &locator, &indexer)
+        extract_noqa_line_for(program.tokens(), &locator, &indexer)
     }

     #[test]
@@ -583,29 +585,26 @@ assert foo, \
         );
     }

+    fn isort_directives(contents: &str) -> IsortDirectives {
+        let program = parse_module(contents).unwrap();
+        let locator = Locator::new(contents);
+        extract_isort_directives(&locator, program.comment_ranges())
+    }
+
     #[test]
     fn isort_exclusions() {
         let contents = "x = 1
y = 2
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
-            Vec::default()
-        );
+        assert_eq!(isort_directives(contents).exclusions, Vec::default());

         let contents = "# isort: off
x = 1
y = 2
# isort: on
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
         assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
+            isort_directives(contents).exclusions,
             Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(25))])
         );

@@ -616,88 +615,52 @@ y = 2
 # isort: on
 z = x + 1
 # isort: on";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
         assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
+            isort_directives(contents).exclusions,
             Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(38))])
         );

         let contents = "# isort: off
x = 1
y = 2
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
         assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
+            isort_directives(contents).exclusions,
             Vec::from_iter([TextRange::at(TextSize::from(0), contents.text_len())])
         );

         let contents = "# isort: skip_file
x = 1
y = 2
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
-            Vec::default()
-        );
+        assert_eq!(isort_directives(contents).exclusions, Vec::default());

         let contents = "# isort: off
x = 1
# isort: on
y = 2
# isort: skip_file
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).exclusions,
-            Vec::default()
-        );
+        assert_eq!(isort_directives(contents).exclusions, Vec::default());
     }

     #[test]
     fn isort_splits() {
         let contents = "x = 1
y = 2
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).splits,
-            Vec::new()
-        );
+        assert_eq!(isort_directives(contents).splits, Vec::new());

         let contents = "x = 1
y = 2
# isort: split
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).splits,
-            vec![TextSize::from(12)]
-        );
+        assert_eq!(isort_directives(contents).splits, vec![TextSize::from(12)]);

         let contents = "x = 1
y = 2 # isort: split
z = x + 1";
-        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
-        let locator = Locator::new(contents);
-        let indexer = Indexer::from_tokens(&lxr, &locator);
-        assert_eq!(
-            extract_isort_directives(&locator, &indexer).splits,
-            vec![TextSize::from(13)]
-        );
+        assert_eq!(isort_directives(contents).splits, vec![TextSize::from(13)]);
     }

     #[test]
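The same pattern repeats across this file: rather than iterating `LexResult`s and destructuring `(tok, range)` pairs, callers now walk the `Tokens` slice up to the first unknown token and query each `Token` for its kind and range. A rough sketch of that iteration style, using only accessors that appear in the diff (`up_to_first_unknown`, `kind`, `start`); the `first_comment_start` helper itself is hypothetical:

    use ruff_python_parser::{TokenKind, Tokens};
    use ruff_text_size::{Ranged, TextSize};

    // Hypothetical helper showing the new iteration style.
    fn first_comment_start(tokens: &Tokens) -> Option<TextSize> {
        tokens
            // Stop at the first lexing error instead of threading
            // `LexResult`s through every caller.
            .up_to_first_unknown()
            .iter()
            .find(|token| token.kind() == TokenKind::Comment)
            // `start()` comes from the `Ranged` impl, as in the diff above.
            .map(|token| token.start())
    }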
crates/ruff_linter/src/doc_lines.rs (19 changes: 10 additions & 9 deletions)
@@ -2,28 +2,29 @@
 //! standalone comment or a constant string statement.

 use std::iter::FusedIterator;
+use std::slice::Iter;

 use ruff_python_ast::{self as ast, Stmt, Suite};
-use ruff_python_parser::{TokenKind, TokenKindIter};
+use ruff_python_parser::{Token, TokenKind, Tokens};
 use ruff_text_size::{Ranged, TextSize};

 use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor};
 use ruff_source_file::{Locator, UniversalNewlineIterator};

 /// Extract doc lines (standalone comments) from a token sequence.
-pub(crate) fn doc_lines_from_tokens(tokens: TokenKindIter) -> DocLines {
+pub(crate) fn doc_lines_from_tokens(tokens: &Tokens) -> DocLines {
     DocLines::new(tokens)
 }

 pub(crate) struct DocLines<'a> {
-    inner: TokenKindIter<'a>,
+    inner: Iter<'a, Token>,
     prev: TextSize,
 }

 impl<'a> DocLines<'a> {
-    fn new(tokens: TokenKindIter<'a>) -> Self {
+    fn new(tokens: &'a Tokens) -> Self {
         Self {
-            inner: tokens,
+            inner: tokens.up_to_first_unknown().iter(),
             prev: TextSize::default(),
         }
     }
@@ -35,12 +36,12 @@ impl Iterator for DocLines<'_> {
     fn next(&mut self) -> Option<Self::Item> {
         let mut at_start_of_line = true;
         loop {
-            let (tok, range) = self.inner.next()?;
+            let token = self.inner.next()?;

-            match tok {
+            match token.kind() {
                 TokenKind::Comment => {
                     if at_start_of_line {
-                        break Some(range.start());
+                        break Some(token.start());
                     }
                 }
                 TokenKind::Newline | TokenKind::NonLogicalNewline => {
@@ -54,7 +55,7 @@
                 }
             }

-            self.prev = range.end();
+            self.prev = token.end();
         }
     }
 }
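As a usage sketch of the reworked iterator (hedged: `doc_lines_from_tokens` is `pub(crate)`, so this only applies from inside the crate; `parse_module` and `Program::tokens` are the APIs used elsewhere in this commit):

    use ruff_python_parser::parse_module;

    fn print_doc_lines(source: &str) {
        let program = parse_module(source).unwrap();
        // Yields the start offset of each comment that opens its line: the
        // first comment in "# standalone\nx = 1  # trailing" but not the second.
        for offset in doc_lines_from_tokens(program.tokens()) {
            println!("standalone comment at {offset:?}");
        }
    }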
crates/ruff_linter/src/fix/edits.rs (10 changes: 3 additions & 7 deletions)
@@ -663,13 +663,9 @@ x = 1 \
     fn add_to_dunder_all_test(raw: &str, names: &[&str], expect: &str) -> Result<()> {
         let locator = Locator::new(raw);
         let edits = {
-            let expr = parse_expression(raw)?.expr();
-            let stylist = Stylist::from_tokens(
-                &lexer::lex(raw, Mode::Expression).collect::<Vec<_>>(),
-                &locator,
-            );
-            // SUT
-            add_to_dunder_all(names.iter().copied(), expr, &stylist)
+            let program = parse_expression(raw)?;
+            let stylist = Stylist::from_program(&program, &locator);
+            add_to_dunder_all(names.iter().copied(), program.expr(), &stylist)
         };
         let diag = {
             use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;
crates/ruff_linter/src/importer/insertion.rs (15 changes: 9 additions & 6 deletions)
@@ -328,11 +328,14 @@ mod tests {
     #[test]
     fn start_of_file() -> Result<()> {
         fn insert(contents: &str) -> Result<Insertion> {
-            let suite = parse_module(contents)?.into_suite();
-            let tokens = ruff_python_parser::tokenize(contents, Mode::Module);
+            let program = parse_module(contents)?;
             let locator = Locator::new(contents);
-            let stylist = Stylist::from_tokens(&tokens, &locator);
-            Ok(Insertion::start_of_file(&suite, &locator, &stylist))
+            let stylist = Stylist::from_program(&program, &locator);
+            Ok(Insertion::start_of_file(
+                program.suite(),
+                &locator,
+                &stylist,
+            ))
         }

         let contents = "";
@@ -440,9 +443,9 @@ x = 1
     #[test]
     fn start_of_block() {
         fn insert(contents: &str, offset: TextSize) -> Insertion {
-            let program = ruff_python_parser::parse_module(contents).unwrap();
+            let program = parse_module(contents).unwrap();
             let locator = Locator::new(contents);
-            let stylist = Stylist::from_tokens(&program, &locator);
+            let stylist = Stylist::from_program(&program, &locator);
             Insertion::start_of_block(offset, &locator, &stylist, program.tokens())
         }

(The remaining 14 changed files are not shown here.)