
Commit 20e4ede
Address Deepsource suggestions
jmanuel1 committed Nov 9, 2024
1 parent cb6670f commit 20e4ede
Showing 3 changed files with 28 additions and 10 deletions.
4 changes: 3 additions & 1 deletion concat/error_reporting.py
@@ -18,8 +18,10 @@ def create_parsing_failure_message(
 ) -> str:
     if failure.furthest_index < len(stream):
         location = stream[failure.furthest_index].start
-    else:
+    elif stream:
         location = stream[-1].start
+    else:
+        location = (1, 0)
     line = get_line_at(file, location)
     message = f'Expected {failure.expected} at line {location[0]}, column {location[1] + 1}:\n{line.rstrip()}\n{" " * location[1] + "^"}'
     if failure.children:
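This change makes the error caret fall back to line 1, column 0 when the token stream is empty, instead of indexing into an empty list. A minimal sketch of that selection logic in isolation, under assumptions: the standalone helper and the (line, column) tuple shape are illustrative only; the real code reads failure.furthest_index and the .start positions of tokens in the stream.

# Illustrative sketch of the new location fallback, factored out of
# create_parsing_failure_message; Location shape is an assumption.
from typing import Sequence, Tuple

Location = Tuple[int, int]  # (line, column)


def failure_location(furthest_index: int, token_starts: Sequence[Location]) -> Location:
    """Pick where to point the parse-error caret."""
    if furthest_index < len(token_starts):
        # The parser stopped at a real token: report its start.
        return token_starts[furthest_index]
    elif token_starts:
        # The parser ran past the end: report the last token's start.
        return token_starts[-1]
    else:
        # Empty token stream (e.g. an empty file): default to line 1, column 0.
        return (1, 0)


assert failure_location(0, []) == (1, 0)  # previously an IndexError
assert failure_location(5, [(1, 0), (2, 4)]) == (2, 4)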
31 changes: 23 additions & 8 deletions concat/lex.py
@@ -82,7 +82,9 @@ def input(self, data: str, should_preserve_comments: bool = False) -> None:
         )
         self.lineno = 1
         self.lexpos = 0
-        self._concat_token_iterator = self._tokens_glued(self._tokens())
+        self._concat_token_iterator = self._tokens_filtering_nl_and_comments(
+            self._tokens_glued(self._tokens())
+        )
         self._should_preserve_comments = should_preserve_comments

     def token(self) -> Optional[Result]:
@@ -134,6 +136,18 @@ def _tokens_glued(self, tokens: Iterator[Result]) -> Iterator[Result]:
             self._update_position(glued_token_prefix)
             yield TokenResult(glued_token_prefix)

+    def _tokens_filtering_nl_and_comments(
+        self, tokens: Iterator[Result]
+    ) -> Iterator[Result]:
+        for r in tokens:
+            if r.type != 'token' or r.token.type not in ['NL', 'COMMENT']:
+                yield r
+                continue
+            tok = r.token
+            self._update_position(tok)
+            if self._should_preserve_comments and tok.type == 'COMMENT':
+                yield r
+
     def _tokens(self) -> Iterator[Result]:
         for token_or_error in self.tokens:
             if isinstance(
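The NL/COMMENT handling now lives in a dedicated generator that wraps the glued-token stream, rather than being interleaved with the rest of _tokens. A rough sketch of the same filtering pattern over a simplified token type (SimpleToken and the preserve_comments flag are stand-ins for the real Token/TokenResult types and lexer state; the real method also calls self._update_position on every filtered token):

# Sketch only: same drop-NL, optionally-keep-COMMENT pattern on plain objects.
from dataclasses import dataclass
from typing import Iterable, Iterator


@dataclass
class SimpleToken:
    type: str
    value: str


def filter_nl_and_comments(
    tokens: Iterable[SimpleToken], preserve_comments: bool = False
) -> Iterator[SimpleToken]:
    for tok in tokens:
        if tok.type not in ('NL', 'COMMENT'):
            yield tok
            continue
        # NL tokens are always dropped; COMMENT tokens are kept only on request.
        if preserve_comments and tok.type == 'COMMENT':
            yield tok


stream = [SimpleToken('NAME', 'x'), SimpleToken('COMMENT', '# hi'), SimpleToken('NL', '\n')]
assert [t.value for t in filter_nl_and_comments(stream)] == ['x']
assert [t.value for t in filter_nl_and_comments(stream, preserve_comments=True)] == ['x', '# hi']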
@@ -144,15 +158,10 @@ def _tokens(self) -> Iterator[Result]:
             tok = Token()
             _, tok.value, tok.start, tok.end, _ = token_or_error
             tok.type = token.tok_name[token_or_error.exact_type]
-            if tok.type in {'NL', 'COMMENT'}:
-                self._update_position(tok)
-                if self._should_preserve_comments and tok.type == 'COMMENT':
-                    yield TokenResult(tok)
-                continue
-            elif tok.type == 'ERRORTOKEN' and tok.value == ' ':
+            if tok.type == 'ERRORTOKEN' and tok.value == ' ':
                 self._update_position(tok)
                 continue
-            elif tok.value in {'def', 'import', 'from', 'as', 'class', 'cast'}:
+            if tok.value in {'def', 'import', 'from', 'as', 'class', 'cast'}:
                 tok.type = tok.value.upper()
                 tok.is_keyword = True
             elif tok.value == '$':
@@ -208,6 +217,8 @@ def __is_bytes_literal(self, literal: str) -> bool:

 @dataclasses.dataclass
 class TokenResult:
+    """Result class for successfully generated tokens."""
+
     type: Literal['token']
     token: Token

@@ -218,6 +229,8 @@ def __init__(self, token: Token) -> None:

 @dataclasses.dataclass
 class IndentationErrorResult:
+    """Result class for IndentationErrors raised by the Python tokenizer."""
+
     type: Literal['indent-err']
     err: IndentationError

@@ -228,6 +241,8 @@ def __init__(self, err: IndentationError) -> None:

 @dataclasses.dataclass
 class TokenErrorResult:
+    """Result class for TokenErrors raised by the Python tokenizer."""
+
     type: Literal['token-err']
     err: py_tokenize.TokenError
     location: Location
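Each result dataclass carries a Literal type tag ('token', 'indent-err', 'token-err'), so consumers can branch on that discriminator instead of isinstance checks. A hedged sketch of such a consumer; describe is hypothetical, and only the tag values and the token/err/location fields come from the classes above.

def describe(result) -> str:
    # Hypothetical consumer that branches on a Result's type tag.
    if result.type == 'token':
        return f'token {result.token.type} {result.token.value!r}'
    if result.type == 'indent-err':
        return f'indentation error: {result.err}'
    if result.type == 'token-err':
        return f'tokenizer error at {result.location}: {result.err}'
    raise ValueError(f'unknown result type {result.type!r}')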
3 changes: 2 additions & 1 deletion concat/tests/test_lex.py
@@ -39,7 +39,8 @@ def test_examples(self) -> None:
         for actual_token, expected_token in expectationPairs:
             self.assertEqual(actual_token, expected_token)

-    def test_indentation_error(self) -> None:
+    @staticmethod
+    def test_indentation_error() -> None:
         code = textwrap.dedent("""\
             def remove_stack_polymorphism(
                 f:forall `t *s. (*s i:`t -- *s) -- g:forall `t. (i:`t -- )
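Because the test body never uses self, it is now marked @staticmethod (presumably the DeepSource suggestion being addressed). unittest still discovers and runs staticmethod test methods by name; a tiny self-contained check of that, with illustrative class and method names:

import unittest


class ExampleTest(unittest.TestCase):
    @staticmethod
    def test_static() -> None:
        # No self here, so use a plain assert instead of self.assertEqual.
        assert 1 + 1 == 2


if __name__ == '__main__':
    unittest.main()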
