Drop Python 3.6 support (#134)
* Drop Python 3.6 support

* Import `Callable` from `typing`

* Changelog
hukkin authored Nov 15, 2021
1 parent 809a8ae commit 6a93a19
Showing 5 changed files with 46 additions and 41 deletions.
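
Most of the diff below is a mechanical typing cleanup that dropping Python 3.6 enables: once `from __future__ import annotations` (available since Python 3.7) is at the top of a module, annotations are no longer evaluated at runtime, so the built-in generics `dict[...]`/`tuple[...]`/`frozenset[...]` and `X | None` unions can replace `typing.Dict`, `typing.Tuple`, `typing.FrozenSet`, `typing.Optional`, and `typing.Union`. A minimal before/after sketch of the pattern (illustrative only; `first_key` is a hypothetical helper, not part of this commit):

from __future__ import annotations  # PEP 563: annotations become lazily evaluated strings

from typing import Any

# Before (Python 3.6 compatible):
#     from typing import Dict, Optional
#     def first_key(d: Dict[str, Any]) -> Optional[str]: ...
#
# After (Python 3.7+): thanks to the future import, the annotations below are
# never evaluated at runtime, so the newer syntax is safe to write.
def first_key(d: dict[str, Any]) -> str | None:
    return next(iter(d), None)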
4 changes: 2 additions & 2 deletions .github/workflows/tests.yaml
@@ -32,7 +32,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
-python-version: ['pypy-3.7', '3.6', '3.7', '3.8', '3.9', '3.10', '3.11-dev']
+python-version: ['pypy-3.7', '3.7', '3.8', '3.9', '3.10', '3.11-dev']
os: [ubuntu-latest, macos-latest, windows-latest]
continue-on-error: ${{ matrix.python-version == '3.11-dev' }}

@@ -54,7 +54,7 @@ jobs:
python -m pytest --cov --cov-fail-under=100
- name: Report coverage
-if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.6'
+if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
uses: codecov/codecov-action@v2

test-built-package:
5 changes: 3 additions & 2 deletions CHANGELOG.md
@@ -1,8 +1,9 @@
# Changelog

-## **unreleased**
+## 2.0.0 (unreleased)

-- no changes yet
+- Removed
+  - Python 3.6 support

## 1.2.2

9 changes: 4 additions & 5 deletions pyproject.toml
@@ -10,15 +10,14 @@ authors = [
{ name = "Taneli Hukkinen", email = "hukkin@users.noreply.github.com" },
]
license = { file = "LICENSE" }
requires-python = ">=3.6"
requires-python = ">=3.7"
readme = "README.md"
classifiers = [
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
@@ -67,18 +66,18 @@ xfail_strict = true
legacy_tox_ini = '''
[tox]
# Only run pytest envs when no args given to tox
-envlist = py{36,37,38,39,310}
+envlist = py{37,38,39,310}
isolated_build = True
-[testenv:py{36,37,38,39,310}]
+[testenv:py{37,38,39,310}]
description = run tests against unpackaged source
skip_install = True
deps = -r tests/requirements.txt
commands =
# Use 'python -m pytest' to add CWD to sys.path
python -m pytest {posargs}
-[testenv:py{36,37,38,39,310}-package]
+[testenv:py{37,38,39,310}-package]
description = run tests against a built package (can fail, in theory, if test dependencies need a tomli version incompatible with local state)
deps = -r tests/requirements.txt
commands =
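
The tox comment above ("Use 'python -m pytest' to add CWD to sys.path") is what makes the unpackaged-source envs import the repo-local `tomli/` directory instead of an installed copy. A rough self-check of that behaviour (a sketch, assuming it is run from the repository root):

# Sketch, assuming execution from the repository root. `python -m pytest`
# puts the current working directory on sys.path, so `import tomli` resolves
# to the checked-out ./tomli/ package rather than an installed distribution.
import os

import tomli

if os.path.dirname(os.path.abspath(tomli.__file__)).startswith(os.getcwd()):
    print("testing the unpackaged repo-local source")
else:
    print("testing an installed tomli package")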
57 changes: 30 additions & 27 deletions tomli/_parser.py
@@ -1,6 +1,9 @@
+from __future__ import annotations

+from collections.abc import Iterable
import string
from types import MappingProxyType
-from typing import Any, BinaryIO, Dict, FrozenSet, Iterable, NamedTuple, Optional, Tuple
+from typing import Any, BinaryIO, NamedTuple
import warnings

from tomli._re import (
@@ -48,7 +51,7 @@ class TOMLDecodeError(ValueError):
"""An error raised if a document is not valid TOML."""


-def load(fp: BinaryIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]:
+def load(fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
"""Parse TOML from a binary file object."""
s_bytes = fp.read()
try:
@@ -64,7 +67,7 @@ def load(fp: BinaryIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]:
return loads(s, parse_float=parse_float)


-def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa: C901
+def loads(s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
"""Parse TOML from a string."""

# The spec allows converting "\r\n" to "\n", even in string
@@ -100,7 +103,7 @@ def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
try:
-second_char: Optional[str] = src[pos + 1]
+second_char: str | None = src[pos + 1]
except IndexError:
second_char = None
if second_char == "[":
@@ -138,7 +141,7 @@ class Flags:
EXPLICIT_NEST = 1

def __init__(self) -> None:
-self._flags: Dict[str, dict] = {}
+self._flags: dict[str, dict] = {}

def unset_all(self, key: Key) -> None:
cont = self._flags
@@ -193,7 +196,7 @@ def is_(self, key: Key, flag: int) -> bool:
class NestedDict:
def __init__(self) -> None:
# The parsed content of the TOML document
-self.dict: Dict[str, Any] = {}
+self.dict: dict[str, Any] = {}

def get_or_create_nest(
self,
@@ -244,7 +247,7 @@ def skip_until(
pos: Pos,
expect: str,
*,
-error_on: FrozenSet[str],
+error_on: frozenset[str],
error_on_eof: bool,
) -> Pos:
try:
@@ -263,7 +266,7 @@ def skip_until(

def skip_comment(src: str, pos: Pos) -> Pos:
try:
-char: Optional[str] = src[pos]
+char: str | None = src[pos]
except IndexError:
char = None
if char == "#":
@@ -282,7 +285,7 @@ def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
return pos


-def create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
+def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 1 # Skip "["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
@@ -300,7 +303,7 @@ def create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
return pos + 1, key


-def create_list_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
+def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 2 # Skip "[["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
@@ -349,10 +352,10 @@ def key_value_rule(

def parse_key_value_pair(
src: str, pos: Pos, parse_float: ParseFloat
-) -> Tuple[Pos, Key, Any]:
+) -> tuple[Pos, Key, Any]:
pos, key = parse_key(src, pos)
try:
-char: Optional[str] = src[pos]
+char: str | None = src[pos]
except IndexError:
char = None
if char != "=":
@@ -363,13 +366,13 @@ def parse_key_value_pair(
return pos, key, value


-def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]:
+def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
pos, key_part = parse_key_part(src, pos)
key: Key = (key_part,)
pos = skip_chars(src, pos, TOML_WS)
while True:
try:
-char: Optional[str] = src[pos]
+char: str | None = src[pos]
except IndexError:
char = None
if char != ".":
@@ -381,9 +384,9 @@ def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]:
pos = skip_chars(src, pos, TOML_WS)


-def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]:
+def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
try:
-char: Optional[str] = src[pos]
+char: str | None = src[pos]
except IndexError:
char = None
if char in BARE_KEY_CHARS:
@@ -397,12 +400,12 @@ def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]:
raise suffixed_err(src, pos, "Invalid initial character for a key part")


-def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]:
+def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1
return parse_basic_str(src, pos, multiline=False)


-def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]:
+def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]:
pos += 1
array: list = []

@@ -426,7 +429,7 @@ def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]
return pos + 1, array


-def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]:
+def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]:
pos += 1
nested_dict = NestedDict()
flags = Flags()
@@ -460,7 +463,7 @@ def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos

def parse_basic_str_escape( # noqa: C901
src: str, pos: Pos, *, multiline: bool = False
-) -> Tuple[Pos, str]:
+) -> tuple[Pos, str]:
escape_id = src[pos : pos + 2]
pos += 2
if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
@@ -489,11 +492,11 @@ def parse_basic_str_escape( # noqa: C901
raise suffixed_err(src, pos, 'Unescaped "\\" in a string') from None


-def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]:
+def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
return parse_basic_str_escape(src, pos, multiline=True)


-def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]:
+def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
hex_str = src[pos : pos + hex_len]
if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
raise suffixed_err(src, pos, "Invalid hex value")
@@ -504,7 +507,7 @@ def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]:
return pos, chr(hex_int)


-def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]:
+def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1 # Skip starting apostrophe
start_pos = pos
pos = skip_until(
@@ -513,7 +516,7 @@ def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]:
return pos + 1, src[start_pos:pos] # Skip ending apostrophe


-def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]:
+def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
pos += 3
if src.startswith("\n", pos):
pos += 1
@@ -544,7 +547,7 @@ def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]
return pos, result + (delim * 2)


-def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]:
+def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
if multiline:
error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape_multiline
@@ -578,9 +581,9 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]:

def parse_value( # noqa: C901
src: str, pos: Pos, parse_float: ParseFloat
-) -> Tuple[Pos, Any]:
+) -> tuple[Pos, Any]:
try:
-char: Optional[str] = src[pos]
+char: str | None = src[pos]
except IndexError:
char = None

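
The signature changes above touch `load` and `loads`, the two parsing entry points; a brief usage sketch (choosing `decimal.Decimal` for `parse_float` is only an example, not something this commit changes):

from decimal import Decimal

import tomli

document = """
[server]
timeout = 2.5
"""

# parse_float receives the raw float string, so Decimal avoids binary rounding.
config = tomli.loads(document, parse_float=Decimal)
assert config["server"]["timeout"] == Decimal("2.5")

# load() is the binary-file counterpart of loads().
with open("pyproject.toml", "rb") as fp:
    pyproject = tomli.load(fp)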
12 changes: 7 additions & 5 deletions tomli/_re.py
@@ -1,7 +1,9 @@
+from __future__ import annotations

from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
-from typing import Any, Optional, Union
+from typing import Any

from tomli._types import ParseFloat

@@ -43,7 +45,7 @@
)


-def match_to_datetime(match: "re.Match") -> Union[datetime, date]:
+def match_to_datetime(match: re.Match) -> datetime | date:
"""Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
Raises ValueError if the match does not correspond to a valid date
@@ -68,7 +70,7 @@ def match_to_datetime(match: "re.Match") -> Union[datetime, date]:
hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
if offset_sign_str:
-tz: Optional[tzinfo] = cached_tz(
+tz: tzinfo | None = cached_tz(
offset_hour_str, offset_minute_str, offset_sign_str
)
elif zulu_time:
@@ -89,13 +91,13 @@ def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
)


-def match_to_localtime(match: "re.Match") -> time:
+def match_to_localtime(match: re.Match) -> time:
hour_str, minute_str, sec_str, micros_str = match.groups()
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
return time(int(hour_str), int(minute_str), int(sec_str), micros)


-def match_to_number(match: "re.Match", parse_float: "ParseFloat") -> Any:
+def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
if match.group("floatpart"):
return parse_float(match.group())
return int(match.group(), 0)
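
The `match_to_datetime` docstring above distinguishes offset date-times from plain dates; a quick illustration of what that means for callers (a sketch, assuming `tomli` is importable):

from datetime import date, timedelta

import tomli

result = tomli.loads(
    "created = 2021-11-15T12:34:56.123+02:00\nday = 2021-11-15\n"
)

# Offset date-times come back as timezone-aware datetimes (cached_tz builds the tzinfo)...
assert result["created"].utcoffset() == timedelta(hours=2)
# ...while date-only values come back as datetime.date, matching the
# `datetime | date` return annotation of match_to_datetime.
assert result["day"] == date(2021, 11, 15)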
