Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🔧 Move linting from flake8 to ruff #268

Merged
merged 1 commit into from
May 31, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 4 additions & 5 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,14 +33,13 @@ repos:
hooks:
- id: black

- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.0.270
hooks:
- id: flake8
additional_dependencies: [flake8-bugbear~=22.7]
- id: ruff

- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.2.0
rev: v1.3.0
hooks:
- id: mypy
additional_dependencies: [mdurl]
Expand Down
2 changes: 1 addition & 1 deletion docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ def run_apidoc(app):
shutil.rmtree(api_folder)
os.mkdir(api_folder)

argv = ["-M", "--separate", "-o", api_folder, module_path] + ignore_paths
argv = ["-M", "--separate", "-o", api_folder, module_path, *ignore_paths]

apidoc.OPTIONS.append("ignore-module-all")
apidoc.main(argv)
Expand Down
47 changes: 23 additions & 24 deletions markdown_it/common/normalize_url.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from __future__ import annotations

from collections.abc import Callable
from contextlib import suppress
import re
from urllib.parse import quote, unquote, urlparse, urlunparse # noqa: F401

Expand All @@ -21,18 +22,17 @@ def normalizeLink(url: str) -> str:
"""
parsed = mdurl.parse(url, slashes_denote_host=True)

if parsed.hostname:
# Encode hostnames in urls like:
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
#
# We don't encode unknown schemas, because it's likely that we encode
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
#
if not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR:
try:
parsed = parsed._replace(hostname=_punycode.to_ascii(parsed.hostname))
except Exception:
pass
# Encode hostnames in urls like:
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
#
# We don't encode unknown schemas, because it's likely that we encode
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
#
if parsed.hostname and (
not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR
):
with suppress(Exception):
parsed = parsed._replace(hostname=_punycode.to_ascii(parsed.hostname))

return mdurl.encode(mdurl.format(parsed))

Expand All @@ -47,18 +47,17 @@ def normalizeLinkText(url: str) -> str:
"""
parsed = mdurl.parse(url, slashes_denote_host=True)

if parsed.hostname:
# Encode hostnames in urls like:
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
#
# We don't encode unknown schemas, because it's likely that we encode
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
#
if not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR:
try:
parsed = parsed._replace(hostname=_punycode.to_unicode(parsed.hostname))
except Exception:
pass
# Encode hostnames in urls like:
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
#
# We don't encode unknown schemas, because it's likely that we encode
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
#
if parsed.hostname and (
not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR
):
with suppress(Exception):
parsed = parsed._replace(hostname=_punycode.to_unicode(parsed.hostname))

# add '%' to exclude list because of https://github.com/markdown-it/markdown-it/issues/720
return mdurl.decode(mdurl.format(parsed), mdurl.DECODE_DEFAULT_CHARS + "%")
Expand Down
10 changes: 5 additions & 5 deletions markdown_it/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,11 @@
from contextlib import contextmanager
from typing import Any, Literal, overload

from . import helpers, presets # noqa F401
from .common import normalize_url, utils # noqa F401
from .parser_block import ParserBlock # noqa F401
from .parser_core import ParserCore # noqa F401
from .parser_inline import ParserInline # noqa F401
from . import helpers, presets
from .common import normalize_url, utils
from .parser_block import ParserBlock
from .parser_core import ParserCore
from .parser_inline import ParserInline
from .renderer import RendererHTML, RendererProtocol
from .rules_core.state_core import StateCore
from .token import Token
Expand Down
2 changes: 1 addition & 1 deletion markdown_it/presets/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
js_default = default


class gfm_like:
class gfm_like: # noqa: N801
"""GitHub Flavoured Markdown (GFM) like.

This adds the linkify, table and strikethrough components to CommonMark.
Expand Down
25 changes: 12 additions & 13 deletions markdown_it/renderer.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,19 +152,18 @@ def renderToken(
if token.block:
needLf = True

if token.nesting == 1:
if idx + 1 < len(tokens):
nextToken = tokens[idx + 1]

if nextToken.type == "inline" or nextToken.hidden:
# Block-level tag containing an inline tag.
#
needLf = False

elif nextToken.nesting == -1 and nextToken.tag == token.tag:
# Opening tag + closing tag of the same type. E.g. `<li></li>`.
#
needLf = False
if token.nesting == 1 and (idx + 1 < len(tokens)):
nextToken = tokens[idx + 1]

if nextToken.type == "inline" or nextToken.hidden: # noqa: SIM114
# Block-level tag containing an inline tag.
#
needLf = False

elif nextToken.nesting == -1 and nextToken.tag == token.tag:
# Opening tag + closing tag of the same type. E.g. `<li></li>`.
#
needLf = False

result += ">\n" if needLf else ">"

Expand Down
2 changes: 1 addition & 1 deletion markdown_it/ruler.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ class Ruler


class StateBase:
srcCharCode: tuple[int, ...]
srcCharCode: tuple[int, ...] # noqa: N815

def __init__(self, src: str, md: MarkdownIt, env: EnvType):
self.src = src
Expand Down
5 changes: 2 additions & 3 deletions markdown_it/rules_block/fence.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,8 @@ def fence(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool
params = state.src[pos:maximum]

# /* ` */
if marker == 0x60:
if chr(marker) in params:
return False
if marker == 0x60 and chr(marker) in params:
return False

# Since start is found, we can report success here in validation mode
if silent:
Expand Down
34 changes: 18 additions & 16 deletions markdown_it/rules_block/list.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,14 +120,17 @@ def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) ->

# limit conditions when list can interrupt
# a paragraph (validation mode only)
if silent and state.parentType == "paragraph":
# Next list item should still terminate previous list item
#
# This code can fail if plugins use blkIndent as well as lists,
# but I hope the spec gets fixed long before that happens.
#
if state.tShift[startLine] >= state.blkIndent:
isTerminatingParagraph = True
# Next list item should still terminate previous list item
#
# This code can fail if plugins use blkIndent as well as lists,
# but I hope the spec gets fixed long before that happens.
#
if (
silent
and state.parentType == "paragraph"
and state.tShift[startLine] >= state.blkIndent
):
isTerminatingParagraph = True

# Detect list type and position after marker
posAfterMarker = skipOrderedListMarker(state, startLine)
Expand All @@ -149,9 +152,11 @@ def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) ->

# If we're starting a new unordered list right after
# a paragraph, first line should not be empty.
if isTerminatingParagraph:
if state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]:
return False
if (
isTerminatingParagraph
and state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]
):
return False

# We should terminate list on style change. Remember first one to compare.
markerCharCode = state.srcCharCode[posAfterMarker - 1]
Expand Down Expand Up @@ -209,11 +214,8 @@ def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) ->

contentStart = pos

if contentStart >= maximum:
# trimming space in "- \n 3" case, indent is 1 here
indentAfterMarker = 1
else:
indentAfterMarker = offset - initial
# trimming space in "- \n 3" case, indent is 1 here
indentAfterMarker = 1 if contentStart >= maximum else offset - initial

# If we have more than 4 spaces, the indent is 1
# (the rest is just indented code block)
Expand Down
23 changes: 11 additions & 12 deletions markdown_it/rules_block/reference.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,18 +153,17 @@ def reference(state: StateBlock, startLine: int, _endLine: int, silent: bool) ->
break
pos += 1

if pos < maximum and charCodeAt(string, pos) != 0x0A:
if title:
# garbage at the end of the line after title,
# but it could still be a valid reference if we roll back
title = ""
pos = destEndPos
lines = destEndLineNo
while pos < maximum:
ch = charCodeAt(string, pos)
if not isSpace(ch):
break
pos += 1
if pos < maximum and charCodeAt(string, pos) != 0x0A and title:
# garbage at the end of the line after title,
# but it could still be a valid reference if we roll back
title = ""
pos = destEndPos
lines = destEndLineNo
while pos < maximum:
ch = charCodeAt(string, pos)
if not isSpace(ch):
break
pos += 1

if pos < maximum and charCodeAt(string, pos) != 0x0A:
# garbage at the end of the line
Expand Down
9 changes: 5 additions & 4 deletions markdown_it/rules_block/state_block.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,10 +202,11 @@ def getLines(self, begin: int, end: int, indent: int, keepLastLF: bool) -> str:
while line < end:
lineIndent = 0
lineStart = first = self.bMarks[line]
if line + 1 < end or keepLastLF:
last = self.eMarks[line] + 1
else:
last = self.eMarks[line]
last = (
self.eMarks[line] + 1
if line + 1 < end or keepLastLF
else self.eMarks[line]
)

while (first < last) and (lineIndent < indent):
ch = self.srcCharCode[first]
Expand Down
47 changes: 24 additions & 23 deletions markdown_it/rules_core/replacements.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,29 +78,30 @@ def replace_rare(inlineTokens: list[Token]) -> None:
inside_autolink = 0

for token in inlineTokens:
if token.type == "text" and not inside_autolink:
if RARE_RE.search(token.content):
# +- -> ±
token.content = PLUS_MINUS_RE.sub("±", token.content)

# .., ..., ....... -> …
token.content = ELLIPSIS_RE.sub("…", token.content)

# but ?..... & !..... -> ?.. & !..
token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub(
"\\1..", token.content
)
token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content)

# ,, ,,, ,,,, -> ,
token.content = COMMA_RE.sub(",", token.content)

# em-dash
token.content = EM_DASH_RE.sub("\\1\u2014", token.content)

# en-dash
token.content = EN_DASH_RE.sub("\\1\u2013", token.content)
token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content)
if (
token.type == "text"
and (not inside_autolink)
and RARE_RE.search(token.content)
):
# +- -> ±
token.content = PLUS_MINUS_RE.sub("±", token.content)

# .., ..., ....... -> …
token.content = ELLIPSIS_RE.sub("…", token.content)

# but ?..... & !..... -> ?.. & !..
token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub("\\1..", token.content)
token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content)

# ,, ,,, ,,,, -> ,
token.content = COMMA_RE.sub(",", token.content)

# em-dash
token.content = EM_DASH_RE.sub("\\1\u2014", token.content)

# en-dash
token.content = EN_DASH_RE.sub("\\1\u2013", token.content)
token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content)

if token.type == "link_open" and token.info == "auto":
inside_autolink -= 1
Expand Down
16 changes: 7 additions & 9 deletions markdown_it/rules_core/smartquotes.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,19 +100,17 @@ def process_inlines(tokens: list[Token], state: StateCore) -> None:
isLastWhiteSpace = isWhiteSpace(lastChar)
isNextWhiteSpace = isWhiteSpace(nextChar)

if isNextWhiteSpace:
if isNextWhiteSpace: # noqa: SIM114
canOpen = False
elif isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar):
canOpen = False
elif isNextPunctChar:
if not (isLastWhiteSpace or isLastPunctChar):
canOpen = False

if isLastWhiteSpace:
if isLastWhiteSpace: # noqa: SIM114
canClose = False
elif isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar):
canClose = False
elif isLastPunctChar:
if not (isNextWhiteSpace or isNextPunctChar):
canClose = False

if nextChar == 0x22 and t.group(0) == '"': # 0x22: "
if nextChar == 0x22 and t.group(0) == '"': # 0x22: " # noqa: SIM102
if lastChar >= 0x30 and lastChar <= 0x39: # 0x30: 0, 0x39: 9
# special case: 1"" - count first quote as an inch
canClose = canOpen = False
Expand Down
10 changes: 6 additions & 4 deletions markdown_it/rules_inline/balance_pairs.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,12 @@ def processDelimiters(state: StateInline, delimiters: list[Delimiter]) -> None:
# closing delimiters must not be a multiple of 3 unless both lengths
# are multiples of 3.
#
if opener.close or closer.open:
if (opener.length + closer.length) % 3 == 0:
if opener.length % 3 != 0 or closer.length % 3 != 0:
isOddMatch = True
if (
(opener.close or closer.open)
and ((opener.length + closer.length) % 3 == 0)
and (opener.length % 3 != 0 or closer.length % 3 != 0)
):
isOddMatch = True

if not isOddMatch:
# If previous delimiter cannot be an opener, we can safely skip
Expand Down
11 changes: 5 additions & 6 deletions markdown_it/rules_inline/entity.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,11 @@ def entity(state: StateInline, silent: bool) -> bool:

else:
match = NAMED_RE.search(state.src[pos:])
if match:
if match.group(1) in entities:
if not silent:
state.pending += entities[match.group(1)]
state.pos += len(match.group(0))
return True
if match and match.group(1) in entities:
if not silent:
state.pending += entities[match.group(1)]
state.pos += len(match.group(0))
return True

if not silent:
state.pending += "&"
Expand Down
Loading