diff --git a/.ruff.toml b/.ruff.toml
index a511d1c242d..11edee4b8f9 100644
--- a/.ruff.toml
+++ b/.ruff.toml
@@ -470,8 +470,6 @@ exclude = [
     "sphinx/ext/inheritance_diagram.py",
     "sphinx/ext/linkcode.py",
     "sphinx/ext/mathjax.py",
-    "sphinx/ext/napoleon/__init__.py",
-    "sphinx/ext/napoleon/docstring.py",
     "sphinx/ext/todo.py",
     "sphinx/ext/viewcode.py",
     "sphinx/registry.py",
diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py
index 581f3ea4442..085ce23fb98 100644
--- a/sphinx/ext/napoleon/__init__.py
+++ b/sphinx/ext/napoleon/__init__.py
@@ -333,19 +333,26 @@ def setup(app: Sphinx) -> ExtensionMetadata:
 def _patch_python_domain() -> None:
     from sphinx.domains.python._object import PyObject, PyTypedField
     from sphinx.locale import _
+
     for doc_field in PyObject.doc_field_types:
         if doc_field.name == 'parameter':
             doc_field.names = ('param', 'parameter', 'arg', 'argument')
             break
     PyObject.doc_field_types.append(
-        PyTypedField('keyword', label=_('Keyword Arguments'),
-                     names=('keyword', 'kwarg', 'kwparam'),
-                     typerolename='class', typenames=('paramtype', 'kwtype'),
-                     can_collapse=True))
-
-
-def _process_docstring(app: Sphinx, what: str, name: str, obj: Any,
-                       options: Any, lines: list[str]) -> None:
+        PyTypedField(
+            'keyword',
+            label=_('Keyword Arguments'),
+            names=('keyword', 'kwarg', 'kwparam'),
+            typerolename='class',
+            typenames=('paramtype', 'kwtype'),
+            can_collapse=True,
+        )
+    )
+
+
+def _process_docstring(
+    app: Sphinx, what: str, name: str, obj: Any, options: Any, lines: list[str]
+) -> None:
     """Process the docstring for a given python object.
 
     Called when autodoc has read and processed a docstring. `lines` is a list
@@ -384,18 +391,21 @@ def _process_docstring(app: Sphinx, what: str, name: str, obj: Any,
     result_lines = lines
     docstring: GoogleDocstring
     if app.config.napoleon_numpy_docstring:
-        docstring = NumpyDocstring(result_lines, app.config, app, what, name,
-                                   obj, options)
+        docstring = NumpyDocstring(
+            result_lines, app.config, app, what, name, obj, options
+        )
         result_lines = docstring.lines()
     if app.config.napoleon_google_docstring:
-        docstring = GoogleDocstring(result_lines, app.config, app, what, name,
-                                    obj, options)
+        docstring = GoogleDocstring(
+            result_lines, app.config, app, what, name, obj, options
+        )
         result_lines = docstring.lines()
     lines[:] = result_lines.copy()
 
 
-def _skip_member(app: Sphinx, what: str, name: str, obj: Any,
-                 skip: bool, options: Any) -> bool | None:
+def _skip_member(
+    app: Sphinx, what: str, name: str, obj: Any, skip: bool, options: Any
+) -> bool | None:
     """Determine if private and special class members are included in docs.
 
     The following settings in conf.py determine if private and special class
@@ -458,22 +468,25 @@ def _skip_member(app: Sphinx, what: str, name: str, obj: Any,
             except Exception:
                 cls_is_owner = False
             else:
-                cls_is_owner = (cls and hasattr(cls, name) and  # type: ignore[assignment]
-                                name in cls.__dict__)
+                cls_is_owner = (
+                    cls  # type: ignore[assignment]
+                    and hasattr(cls, name)
+                    and name in cls.__dict__
+                )
         else:
             cls_is_owner = False
 
     if what == 'module' or cls_is_owner:
-        is_init = (name == '__init__')
-        is_special = (not is_init and name.startswith('__') and
-                      name.endswith('__'))
-        is_private = (not is_init and not is_special and
-                      name.startswith('_'))
+        is_init = name == '__init__'
+        is_special = not is_init and name.startswith('__') and name.endswith('__')
+        is_private = not is_init and not is_special and name.startswith('_')
         inc_init = app.config.napoleon_include_init_with_doc
         inc_special = app.config.napoleon_include_special_with_doc
         inc_private = app.config.napoleon_include_private_with_doc
-        if ((is_special and inc_special) or
-                (is_private and inc_private) or
-                (is_init and inc_init)):
+        if (
+            (is_special and inc_special)
+            or (is_private and inc_private)
+            or (is_init and inc_init)
+        ):
             return False
     return None
diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py
index 592f3a847a9..3a7f821b2b2 100644
--- a/sphinx/ext/napoleon/docstring.py
+++ b/sphinx/ext/napoleon/docstring.py
@@ -31,7 +31,8 @@
     r'((?::(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)|'
     r'(?:``.+?``)|'
     r'(?::meta .+:.*)|'
-    r'(?:`.+?\s*(?<!\x00)<.*?>`))')
+    r'(?:`.+?\s*(?<!\x00)<.*?>`))'
+)
 _xref_regex = re.compile(
     r'(?:(?::(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:)?`.+?`)',
 )
@@ -39,17 +40,18 @@ _enumerated_list_regex = re.compile(
     r'^(?P<paren>\()?'
     r'(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])'
-    r'(?(paren)\)|\.)(\s+\S|\s*$)')
+    r'(?(paren)\)|\.)(\s+\S|\s*$)'
+)
 _token_regex = re.compile(
-    r"(,\sor\s|\sor\s|\sof\s|:\s|\sto\s|,\sand\s|\sand\s|,\s"
-    r"|[{]|[}]"
+    r'(,\sor\s|\sor\s|\sof\s|:\s|\sto\s|,\sand\s|\sand\s|,\s'
+    r'|[{]|[}]'
     r'|"(?:\\"|[^"])*"'
    r"|'(?:\\'|[^'])*')",
 )
 _default_regex = re.compile(
-    r"^default[^_0-9A-Za-z].*$",
+    r'^default[^_0-9A-Za-z].*$',
 )
-_SINGLETONS = ("None", "True", "False", "Ellipsis")
+_SINGLETONS = ('None', 'True', 'False', 'Ellipsis')
 
 
 class Deque(collections.deque[Any]):
@@ -147,8 +149,11 @@ class GoogleDocstring:
 
     """
 
-    _name_rgx = re.compile(r"^\s*((?::(?P<role>\S+):)?`(?P<name>~?[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>~?[a-zA-Z0-9_.-]+))\s*", re.VERBOSE)
+    _name_rgx = re.compile(
+        r'^\s*((?::(?P<role>\S+):)?`(?P<name>~?[a-zA-Z0-9_.-]+)`|'
+        r' (?P<name2>~?[a-zA-Z0-9_.-]+))\s*',
+        re.VERBOSE,
+    )
 
     def __init__(
         self,
@@ -261,9 +266,8 @@ def lines(self) -> list[str]:
     def _consume_indented_block(self, indent: int = 1) -> list[str]:
         lines = []
         line = self._lines.get(0)
-        while (
-            not self._is_section_break() and
-            (not line or self._is_indented(line, indent))
+        while not self._is_section_break() and (
+            not line or self._is_indented(line, indent)
         ):
             lines.append(self._lines.next())
             line = self._lines.get(0)
@@ -271,9 +275,7 @@
 
     def _consume_contiguous(self) -> list[str]:
         lines = []
-        while (self._lines and
-               self._lines.get(0) and
-               not self._is_section_header()):
+        while self._lines and self._lines.get(0) and not self._is_section_header():
             lines.append(self._lines.next())
         return lines
 
@@ -285,8 +287,11 @@ def _consume_empty(self) -> list[str]:
             line = self._lines.get(0)
         return lines
 
-    def _consume_field(self, parse_type: bool = True, prefer_type: bool = False,
-                       ) -> tuple[str, str, list[str]]:
+    def _consume_field(
+        self,
+        parse_type: bool = True,
+        prefer_type: bool = False,
+    ) -> tuple[str, str, list[str]]:
         line = self._lines.next()
         before, colon, after = self._partition_field_on_colon(line)
@@ -311,14 +316,15 @@ def _consume_field(self, parse_type: bool = True, prefer_type: bool = False,
         _descs = self.__class__(_descs, self._config).lines()
         return _name, _type, _descs
 
-    def _consume_fields(self, parse_type: bool = True, prefer_type: bool = False,
-                        multiple: bool = False) -> list[tuple[str, str, list[str]]]:
+    def _consume_fields(
+        self, parse_type: bool = True, prefer_type: bool = False, multiple: bool = False
+    ) -> list[tuple[str, str, list[str]]]:
         self._consume_empty()
         fields: list[tuple[str, str, list[str]]] = []
         while not self._is_section_break():
             _name, _type, _desc = self._consume_field(parse_type, prefer_type)
             if multiple and _name:
-                fields.extend((name.strip(), _type, _desc) for name in _name.split(","))
+                fields.extend((name.strip(), _type, _desc) for name in _name.split(','))
             elif _name or _type or _desc:
                 fields.append((_name, _type, _desc))
         return fields
@@ -333,8 +339,9 @@ def _consume_inline_attribute(self) -> tuple[str, list[str]]:
         _descs = self.__class__(_descs, self._config).lines()
         return _type, _descs
 
-    def _consume_returns_section(self, preprocess_types: bool = False,
-                                 ) -> list[tuple[str, str, list[str]]]:
+    def _consume_returns_section(
+        self, preprocess_types: bool = False
+    ) -> list[tuple[str, str, list[str]]]:
         lines = self._dedent(self._consume_to_next_section())
         if lines:
             before, colon, after = self._partition_field_on_colon(lines[0])
@@ -348,9 +355,10 @@ def _consume_returns_section(self, preprocess_types: bool = False,
                 _type = before
 
-            if (_type and preprocess_types and
-                    self._config.napoleon_preprocess_types):
-                _type = _convert_type_spec(_type, self._config.napoleon_type_aliases or {})
+            if _type and preprocess_types and self._config.napoleon_preprocess_types:
+                _type = _convert_type_spec(
+                    _type, self._config.napoleon_type_aliases or {}
+                )
 
             _desc = self.__class__(_desc, self._config).lines()
             return [(_name, _type, _desc)]
@@ -389,7 +397,9 @@ def _dedent(self, lines: list[str], full: bool = False) -> list[str]:
             return [line[min_indent:] for line in lines]
 
     def _escape_args_and_kwargs(self, name: str) -> str:
-        if name.endswith('_') and getattr(self._config, 'strip_signature_backslash', False):
+        if name.endswith('_') and getattr(
+            self._config, 'strip_signature_backslash', False
+        ):
             name = name[:-1] + r'\_'
 
         if name[:2] == '**':
@@ -423,7 +433,10 @@ def _format_admonition(self, admonition: str, lines: list[str]) -> list[str]:
             return ['.. %s::' % admonition, '']
 
     def _format_block(
-        self, prefix: str, lines: list[str], padding: str | None = None,
+        self,
+        prefix: str,
+        lines: list[str],
+        padding: str | None = None,
     ) -> list[str]:
         if lines:
             if padding is None:
@@ -440,9 +453,12 @@ def _format_block(
         else:
             return [prefix]
 
-    def _format_docutils_params(self, fields: list[tuple[str, str, list[str]]],
-                                field_role: str = 'param', type_role: str = 'type',
-                                ) -> list[str]:
+    def _format_docutils_params(
+        self,
+        fields: list[tuple[str, str, list[str]]],
+        field_role: str = 'param',
+        type_role: str = 'type',
+    ) -> list[str]:
         lines = []
         for _name, _type, _desc in fields:
             _desc = self._strip_empty(_desc)
@@ -486,8 +502,11 @@ def _format_field(self, _name: str, _type: str, _desc: list[str]) -> list[str]:
         else:
             return [field]
 
-    def _format_fields(self, field_type: str, fields: list[tuple[str, str, list[str]]],
-                       ) -> list[str]:
+    def _format_fields(
+        self,
+        field_type: str,
+        fields: list[tuple[str, str, list[str]]],
+    ) -> list[str]:
         field_type = ':%s:' % field_type.strip()
         padding = ' ' * len(field_type)
         multi = len(fields) > 1
@@ -579,11 +598,15 @@ def _is_section_header(self) -> bool:
 
     def _is_section_break(self) -> bool:
         line = self._lines.get(0)
-        return (not self._lines or
-                self._is_section_header() or
-                (self._is_in_section and
-                 line and
-                 not self._is_indented(line, self._section_indent)))
+        return (
+            not self._lines
+            or self._is_section_header()
+            or (
+                self._is_in_section
+                and line
+                and not self._is_indented(line, self._section_indent)
+            )
+        )
 
     def _load_custom_sections(self) -> None:
         if self._config.napoleon_custom_sections is not None:
@@ -594,18 +617,20 @@ def _load_custom_sections(self) -> None:
                     self._sections[entry.lower()] = self._parse_custom_generic_section
                 else:
                     # otherwise, assume entry is container;
-                    if entry[1] == "params_style":
-                        self._sections[entry[0].lower()] = \
+                    if entry[1] == 'params_style':
+                        self._sections[entry[0].lower()] = (
                             self._parse_custom_params_style_section
-                    elif entry[1] == "returns_style":
-                        self._sections[entry[0].lower()] = \
+                        )
+                    elif entry[1] == 'returns_style':
+                        self._sections[entry[0].lower()] = (
                             self._parse_custom_returns_style_section
+                        )
                     else:
                         # [0] is new section, [1] is the section to alias.
                         # in the case of key mismatch, just handle as generic section.
-                        self._sections[entry[0].lower()] = \
-                            self._sections.get(entry[1].lower(),
-                                               self._parse_custom_generic_section)
+                        self._sections[entry[0].lower()] = self._sections.get(
+                            entry[1].lower(), self._parse_custom_generic_section
+                        )
 
     def _parse(self) -> None:
         self._parsed_lines = self._consume_empty()
@@ -721,9 +746,8 @@ def _parse_keyword_arguments_section(self, section: str) -> list[str]:
         fields = self._consume_fields()
         if self._config.napoleon_use_keyword:
             return self._format_docutils_params(
-                fields,
-                field_role="keyword",
-                type_role="kwtype")
+                fields, field_role='keyword', type_role='kwtype'
+            )
         else:
             return self._format_fields(_('Keyword Arguments'), fields)
@@ -770,7 +794,7 @@ def _parse_raises_section(self, section: str) -> list[str]:
                     _type = m.group('name')
                 elif _xref_regex.match(_type):
                     pos = _type.find('`')
-                    _type = _type[pos + 1:-1]
+                    _type = _type[pos + 1 : -1]
                 _type = ' ' + _type if _type else ''
             _desc = self._strip_empty(_desc)
             _descs = ' ' + '\n    '.join(_desc) if any(_desc) else ''
@@ -840,15 +864,13 @@ def _partition_field_on_colon(self, line: str) -> tuple[str, str, str]:
             m = _single_colon_regex.search(source)
             if (i % 2) == 0 and m:
                 found_colon = True
-                colon = source[m.start(): m.end()]
-                before_colon.append(source[:m.start()])
-                after_colon.append(source[m.end():])
+                colon = source[m.start() : m.end()]
+                before_colon.append(source[: m.start()])
+                after_colon.append(source[m.end() :])
             else:
                 before_colon.append(source)
 
-        return ("".join(before_colon).strip(),
-                colon,
-                "".join(after_colon).strip())
+        return ''.join(before_colon).strip(), colon, ''.join(after_colon).strip()
 
     def _strip_empty(self, lines: list[str]) -> list[str]:
         if lines:
@@ -866,29 +888,35 @@ def _strip_empty(self, lines: list[str]) -> list[str]:
                     end = i
                     break
         if start > 0 or end + 1 < len(lines):
-            lines = lines[start:end + 1]
+            lines = lines[start : end + 1]
         return lines
 
     def _lookup_annotation(self, _name: str) -> str:
         if self._config.napoleon_attr_annotations:
-            if self._what in ("module", "class", "exception") and self._obj:
+            if self._what in ('module', 'class', 'exception') and self._obj:
                 # cache the class annotations
-                if not hasattr(self, "_annotations"):
-                    localns = getattr(self._config, "autodoc_type_aliases", {})
-                    localns.update(getattr(
-                        self._config, "napoleon_type_aliases", {},
-                    ) or {})
+                if not hasattr(self, '_annotations'):
+                    localns = getattr(self._config, 'autodoc_type_aliases', {})
+                    localns.update(
+                        getattr(
+                            self._config,
+                            'napoleon_type_aliases',
+                            {},
+                        )
+                        or {}
+                    )
                     self._annotations = get_type_hints(self._obj, None, localns)
             if _name in self._annotations:
-                return stringify_annotation(self._annotations[_name],
-                                            'fully-qualified-except-typing')
+                return stringify_annotation(
+                    self._annotations[_name], 'fully-qualified-except-typing'
+                )
         # No annotation found
-        return ""
+        return ''
 
 
 def _recombine_set_tokens(tokens: list[str]) -> list[str]:
     token_queue = collections.deque(tokens)
-    keywords = ("optional", "default")
+    keywords = ('optional', 'default')
 
     def takewhile_set(tokens: collections.deque[str]) -> Iterator[str]:
         open_braces = 0
@@ -899,7 +927,7 @@ def takewhile_set(tokens: collections.deque[str]) -> Iterator[str]:
         previous_token = None
         while True:
             try:
                 token = tokens.popleft()
             except IndexError:
                 break
 
-            if token == ", ":
+            if token == ', ':
                 previous_token = token
                 continue
 
@@ -916,9 +944,9 @@ def takewhile_set(tokens: collections.deque[str]) -> Iterator[str]:
                 yield previous_token
                 previous_token = None
 
-            if token == "{":
+            if token == '{':
                 open_braces += 1
-            elif token == "}":
+            elif token == '}':
                 open_braces -= 1
 
             yield token
 
@@ -933,9 +961,9 @@ def combine_set(tokens: collections.deque[str]) -> Iterator[str]:
             except IndexError:
                 break
 
-            if token == "{":
-                tokens.appendleft("{")
-                yield "".join(takewhile_set(tokens))
+            if token == '{':
+                tokens.appendleft('{')
+                yield ''.join(takewhile_set(tokens))
             else:
                 yield token
@@ -950,7 +978,7 @@ def postprocess(item: str) -> list[str]:
             # for now
             other = item[8:]
 
-            return [default, " ", other]
+            return [default, ' ', other]
         else:
             return [item]
@@ -973,70 +1001,74 @@ def is_numeric(token: str) -> bool:
         else:
             return True
 
-    if token.startswith(" ") or token.endswith(" "):
-        type_ = "delimiter"
+    if token.startswith(' ') or token.endswith(' '):
+        type_ = 'delimiter'
     elif (
-        is_numeric(token) or
-        (token.startswith("{") and token.endswith("}")) or
-        (token.startswith('"') and token.endswith('"')) or
-        (token.startswith("'") and token.endswith("'"))
+        is_numeric(token)
+        or (token.startswith('{') and token.endswith('}'))
+        or (token.startswith('"') and token.endswith('"'))
+        or (token.startswith("'") and token.endswith("'"))
     ):
-        type_ = "literal"
-    elif token.startswith("{"):
+        type_ = 'literal'
+    elif token.startswith('{'):
         logger.warning(
-            __("invalid value set (missing closing brace): %s"),
+            __('invalid value set (missing closing brace): %s'),
             token,
             location=location,
         )
-        type_ = "literal"
-    elif token.endswith("}"):
+        type_ = 'literal'
+    elif token.endswith('}'):
         logger.warning(
-            __("invalid value set (missing opening brace): %s"),
+            __('invalid value set (missing opening brace): %s'),
             token,
             location=location,
         )
-        type_ = "literal"
+        type_ = 'literal'
     elif token.startswith(("'", '"')):
         logger.warning(
-            __("malformed string literal (missing closing quote): %s"),
+            __('malformed string literal (missing closing quote): %s'),
             token,
             location=location,
         )
-        type_ = "literal"
+        type_ = 'literal'
     elif token.endswith(("'", '"')):
         logger.warning(
-            __("malformed string literal (missing opening quote): %s"),
+            __('malformed string literal (missing opening quote): %s'),
             token,
             location=location,
        )
-        type_ = "literal"
-    elif token in ("optional", "default"):
+        type_ = 'literal'
+    elif token in ('optional', 'default'):
         # default is not a official keyword (yet) but supported by the
         # reference implementation (numpydoc) and widely used
-        type_ = "control"
+        type_ = 'control'
     elif _xref_regex.match(token):
-        type_ = "reference"
+        type_ = 'reference'
     else:
-        type_ = "obj"
+        type_ = 'obj'
 
     return type_
 
 
 def _convert_numpy_type_spec(
-    _type: str, location: str | None = None, translations: dict[str, str] | None = None,
+    _type: str,
+    location: str | None = None,
+    translations: dict[str, str] | None = None,
 ) -> str:
     if translations is None:
         translations = {}
 
-    def convert_obj(obj: str, translations: dict[str, str], default_translation: str) -> str:
+    def convert_obj(
+        obj: str, translations: dict[str, str], default_translation: str
+    ) -> str:
         translation = translations.get(obj, obj)
 
         # use :class: (the default) only if obj is not a standard singleton
-        if translation in _SINGLETONS and default_translation == ":class:`%s`":
-            default_translation = ":obj:`%s`"
-        elif translation == "..." and default_translation == ":class:`%s`":
+        if translation in _SINGLETONS and default_translation == ':class:`%s`':
+            default_translation = ':obj:`%s`'
+        elif translation == '...' and default_translation == ':class:`%s`':
             # allow referencing the builtin ...
-            default_translation = ":obj:`%s <Ellipsis>`"
+            default_translation = ':obj:`%s <Ellipsis>`'
 
         if _xref_regex.match(translation) is None:
             translation = default_translation % translation
@@ -1045,21 +1077,20 @@ def convert_obj(obj: str, translations: dict[str, str], default_translation: str
     tokens = _tokenize_type_spec(_type)
     combined_tokens = _recombine_set_tokens(tokens)
-    types = [
-        (token, _token_type(token, location))
-        for token in combined_tokens
-    ]
+    types = [(token, _token_type(token, location)) for token in combined_tokens]
 
     converters = {
-        "literal": lambda x: "``%s``" % x,
-        "obj": lambda x: convert_obj(x, translations, ":class:`%s`"),
-        "control": lambda x: "*%s*" % x,
-        "delimiter": lambda x: x,
-        "reference": lambda x: x,
+        'literal': lambda x: '``%s``' % x,
+        'obj': lambda x: convert_obj(x, translations, ':class:`%s`'),
+        'control': lambda x: '*%s*' % x,
+        'delimiter': lambda x: x,
+        'reference': lambda x: x,
    }
 
-    converted = "".join(converters.get(type_)(token)  # type: ignore[misc]
-                        for token, type_ in types)
+    converted = ''.join(
+        converters.get(type_)(token)  # type: ignore[misc]
+        for token, type_ in types
+    )
 
     return converted
@@ -1181,20 +1212,21 @@ def _get_location(self) -> str | None:
         if filepath is None and name is None:
             return None
         elif filepath is None:
-            filepath = ""
-        return f"{filepath}:docstring of {name}"
+            filepath = ''
+        return f'{filepath}:docstring of {name}'
 
     def _escape_args_and_kwargs(self, name: str) -> str:
         func = super()._escape_args_and_kwargs
-        if ", " in name:
-            return ", ".join(map(func, name.split(", ")))
+        if ', ' in name:
+            return ', '.join(map(func, name.split(', ')))
         else:
             return func(name)
 
-    def _consume_field(self, parse_type: bool = True, prefer_type: bool = False,
-                       ) -> tuple[str, str, list[str]]:
+    def _consume_field(
+        self, parse_type: bool = True, prefer_type: bool = False
+    ) -> tuple[str, str, list[str]]:
         line = self._lines.next()
         if parse_type:
             _name, _, _type = self._partition_field_on_colon(line)
@@ -1221,8 +1253,9 @@ def _consume_field(self, parse_type: bool = True, prefer_type: bool = False,
         _desc = self.__class__(_desc, self._config).lines()
         return _name, _type, _desc
 
-    def _consume_returns_section(self, preprocess_types: bool = False,
-                                 ) -> list[tuple[str, str, list[str]]]:
+    def _consume_returns_section(
+        self, preprocess_types: bool = False
+    ) -> list[tuple[str, str, list[str]]]:
         return self._consume_fields(prefer_type=True)
 
     def _consume_section_header(self) -> str:
@@ -1234,12 +1267,16 @@ def _consume_section_header(self) -> str:
 
     def _is_section_break(self) -> bool:
         line1, line2 = self._lines.get(0), self._lines.get(1)
-        return (not self._lines or
-                self._is_section_header() or
-                (line1 == line2 == '') or
-                (self._is_in_section and
-                 line1 and
-                 not self._is_indented(line1, self._section_indent)))
+        return (
+            not self._lines
+            or self._is_section_header()
+            or (line1 == line2 == '')
+            or (
+                self._is_in_section
+                and line1
+                and not self._is_indented(line1, self._section_indent)
+            )
+        )
 
     def _is_section_header(self) -> bool:
         section, underline = self._lines.get(0), self._lines.get(1)
@@ -1312,7 +1349,7 @@ def parse_item_name(text: str) -> tuple[str, str | None]:
                     return g[3], None
                 else:
                     return g[2], g[1]
-            raise ValueError("%s is not a item name" % text)
+            raise ValueError('%s is not a item name' % text)
 
         def push_item(name: str | None, rest: list[str]) -> None:
             if not name:
@@ -1322,7 +1359,9 @@ def push_item(name: str | None, rest: list[str]) -> None:
             rest.clear()
 
         def translate(
-            func: str, description: list[str], role: str | None,
+            func: str,
+            description: list[str],
+            role: str | None,
         ) -> tuple[str, list[str], str | None]:
             translations = self._config.napoleon_type_aliases
             if role is not None or not translations:
@@ -1334,8 +1373,8 @@ def translate(
                 return translated, description, role
 
             groups = match.groupdict()
-            role = groups["role"]
-            new_func = groups["name"] or groups["name2"]
+            role = groups['role']
+            new_func = groups['name'] or groups['name2']
 
             return new_func, description, role
 
@@ -1347,9 +1386,9 @@ def translate(
                 continue
 
             m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
+            if m and line[m.end() :].strip().startswith(':'):
                 push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
+                current_func, line = line[: m.end()], line[m.end() :]
                 rest = [line.split(':', 1)[1].strip()]
                 if not rest[0]:
                     rest = []
@@ -1383,7 +1422,7 @@ def translate(
                 lines += ['']
             lines += [link]
         else:
-            lines[-1] += ", %s" % link
+            lines[-1] += ', %s' % link
         if desc:
             lines += self._indent([' '.join(desc)])
             last_had_desc = True
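
Note: every hunk above is a formatting-only change (Ruff's preferred single-quote
style plus explicit line wrapping); no behaviour of sphinx.ext.napoleon is intended
to change. A quick way to check that locally is to render the same docstring through
napoleon on both sides of the patch and diff the output. The snippet below is a
minimal sketch using napoleon's documented public API (Config, GoogleDocstring); the
sample docstring is illustrative and not taken from this diff:

    # smoke_test_napoleon.py -- run before and after applying the patch, compare output.
    from sphinx.ext.napoleon import Config
    from sphinx.ext.napoleon.docstring import GoogleDocstring

    config = Config(napoleon_use_param=True, napoleon_use_rtype=True)

    docstring = '''One line summary.

    Args:
        arg1 (int): Description of arg1.

    Returns:
        str: Description of return value.
    '''

    # GoogleDocstring converts Google-style sections to reST field lists;
    # str() joins the converted lines, so the printed text should be
    # byte-identical before and after this reformat.
    print(GoogleDocstring(docstring, config))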