Drop python 3.9 (#652)
andersy005 authored Feb 6, 2024
1 parent 14d069c commit c5b0fe4
Showing 8 changed files with 106 additions and 97 deletions.
30 changes: 18 additions & 12 deletions .github/workflows/ci.yaml
@@ -24,19 +24,22 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for all branches and tags.

-      - name: Create conda environment
-        uses: mamba-org/provision-with-micromamba@main
+      - name: set up conda environment
+        uses: mamba-org/setup-micromamba@v1
with:
-          cache-downloads: true
+          micromamba-version: "latest"
environment-file: ci/environment.yml
-          extra-specs: |
+          init-shell: >-
+            bash
+          cache-environment: true
+          cache-downloads: true
+          post-cleanup: "all"
+          create-args: |
python=${{ matrix.python-version }}
- name: Install intake-esm
@@ -70,14 +73,17 @@ jobs:
with:
fetch-depth: 0 # Fetch all history for all branches and tags.

-      - name: Create conda environment
-        uses: mamba-org/provision-with-micromamba@main
+      - name: set up conda environment
+        uses: mamba-org/setup-micromamba@v1
with:
-          cache-downloads: true
+          micromamba-version: "latest"
environment-file: ci/environment-upstream-dev.yml
-          extra-specs: |
-            python=3.11
+          init-shell: >-
+            bash
+          cache-environment: true
+          cache-downloads: true
+          post-cleanup: "all"
+          create-args: |
+            python=3.12
- name: Install intake-esm
run: |
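With 3.9 removed from the test matrix (and the upstream-dev job bumped from 3.11 to 3.12), the package's effective support floor becomes Python 3.10. A minimal runtime guard of the kind such a floor usually implies (a hypothetical sketch, not part of this commit):

```python
import sys

# Fail fast on interpreters below the assumed new floor of 3.10.
if sys.version_info < (3, 10):
    raise RuntimeError('intake-esm requires Python >= 3.10')
```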
6 changes: 3 additions & 3 deletions intake_esm/_search.py
@@ -40,7 +40,7 @@ def search(
for column, values in query.items():
local_mask = np.zeros(len(df), dtype=bool)
column_is_stringtype = isinstance(
-            df[column].dtype, (object, pd.core.arrays.string_.StringDtype)
+            df[column].dtype, object | pd.core.arrays.string_.StringDtype
)
column_has_iterables = column in columns_with_iterables
for value in values:
@@ -62,8 +62,8 @@ def search_apply_require_all_on(
*,
df: pd.DataFrame,
query: dict[str, typing.Any],
-    require_all_on: typing.Union[str, list[typing.Any]],
-    columns_with_iterables: set = None,
+    require_all_on: str | list[typing.Any],
+    columns_with_iterables: set | None = None,
) -> pd.DataFrame:
_query = query.copy()
# Make sure to remove columns that were already
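Aside: passing `object | pd.core.arrays.string_.StringDtype` to `isinstance` relies on PEP 604 unions being valid at runtime, which is exactly what requires Python 3.10. A standalone illustration (not repository code):

```python
import pandas as pd

dtype = pd.Series(['a', 'b'], dtype='string').dtype

# On Python >= 3.10 a PEP 604 union is a legal second argument to isinstance().
print(isinstance(dtype, object | pd.core.arrays.string_.StringDtype))  # True

# On Python 3.9 the union expression itself raises TypeError, which is why
# this change depends on dropping 3.9.
```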
70 changes: 36 additions & 34 deletions intake_esm/cat.py
@@ -41,7 +41,7 @@ class AggregationType(str, enum.Enum):
join_existing = 'join_existing'
union = 'union'

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


class DataFormat(str, enum.Enum):
@@ -50,22 +50,22 @@ class DataFormat(str, enum.Enum):
reference = 'reference'
opendap = 'opendap'

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


class Attribute(pydantic.BaseModel):
column_name: pydantic.StrictStr
vocabulary: pydantic.StrictStr = ''

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


class Assets(pydantic.BaseModel):
column_name: pydantic.StrictStr
-    format: typing.Optional[DataFormat] = None
-    format_column_name: typing.Optional[pydantic.StrictStr] = None
+    format: DataFormat | None = None
+    format_column_name: pydantic.StrictStr | None = None

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)

@pydantic.model_validator(mode='after')
def _validate_data_format(cls, model):
@@ -82,7 +82,7 @@ class Aggregation(pydantic.BaseModel):
attribute_name: pydantic.StrictStr
options: dict = {}

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


class AggregationControl(pydantic.BaseModel):
@@ -101,18 +101,16 @@ class ESMCatalogModel(pydantic.BaseModel):
esmcat_version: pydantic.StrictStr
attributes: list[Attribute]
assets: Assets
-    aggregation_control: typing.Optional[AggregationControl] = None
+    aggregation_control: AggregationControl | None = None
id: str = ''
-    catalog_dict: typing.Optional[list[dict]] = None
-    catalog_file: typing.Optional[pydantic.StrictStr] = None
-    description: typing.Optional[pydantic.StrictStr] = None
-    title: typing.Optional[pydantic.StrictStr] = None
-    last_updated: typing.Optional[typing.Union[datetime.datetime, datetime.date]] = None
+    catalog_dict: list[dict] | None = None
+    catalog_file: pydantic.StrictStr | None = None
+    description: pydantic.StrictStr | None = None
+    title: pydantic.StrictStr | None = None
+    last_updated: datetime.datetime | datetime.date | None = None
_df: pd.DataFrame = pydantic.PrivateAttr()

-    model_config = ConfigDict(
-        arbitrary_types_allowed=True, validate_default=True, validate_assignment=True
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, validate_assignment=True)

@pydantic.model_validator(mode='after')
def validate_catalog(cls, model):
@@ -136,11 +134,11 @@ def save(
self,
name: str,
*,
-        directory: str = None,
+        directory: str | None = None,
catalog_type: str = 'dict',
-        to_csv_kwargs: dict = None,
-        json_dump_kwargs: dict = None,
-        storage_options: dict[str, typing.Any] = None,
+        to_csv_kwargs: dict | None = None,
+        json_dump_kwargs: dict | None = None,
+        storage_options: dict[str, typing.Any] | None = None,
) -> None:
"""
Save the catalog to a file.
@@ -193,7 +191,7 @@ def save(

if catalog_type == 'file':
csv_kwargs = {'index': False}
-            csv_kwargs.update(to_csv_kwargs or {})
+            csv_kwargs |= to_csv_kwargs or {}
compression = csv_kwargs.get('compression')
extensions = {'gzip': '.gz', 'bz2': '.bz2', 'zip': '.zip', 'xz': '.xz', None: ''}
csv_file_name = f'{csv_file_name}{extensions[compression]}'
@@ -206,15 +204,15 @@ def save(

with fs.open(json_file_name, 'w') as outfile:
json_kwargs = {'indent': 2}
-            json_kwargs.update(json_dump_kwargs or {})
+            json_kwargs |= json_dump_kwargs or {}
json.dump(data, outfile, **json_kwargs)

print(f'Successfully wrote ESM catalog json file to: {json_file_name}')

@classmethod
def load(
cls,
-        json_file: typing.Union[str, pydantic.FilePath, pydantic.AnyUrl],
+        json_file: str | pydantic.FilePath | pydantic.AnyUrl,
storage_options: dict[str, typing.Any] = None,
read_csv_kwargs: dict[str, typing.Any] = None,
) -> 'ESMCatalogModel':
@@ -287,16 +285,20 @@ def _cast_agg_columns_with_iterables(self) -> None:
to avoid hashing issues (e.g. TypeError: unhashable type: 'list')
"""
if self.aggregation_control:
-            columns = list(
+            if columns := list(
self.columns_with_iterables.intersection(
-                    set(map(lambda agg: agg.attribute_name, self.aggregation_control.aggregations))
+                    set(
+                        map(
+                            lambda agg: agg.attribute_name,
+                            self.aggregation_control.aggregations,
+                        )
+                    )
)
)
-            if columns:
+            ):
self._df[columns] = self._df[columns].apply(tuple)

@property
-    def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame]:
+    def grouped(self) -> pd.core.groupby.DataFrameGroupBy | pd.DataFrame:
if self.aggregation_control:
if self.aggregation_control.groupby_attrs:
self.aggregation_control.groupby_attrs = list(
@@ -318,7 +320,7 @@ def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame
)
return self.df.groupby(cols)

-    def _construct_group_keys(self, sep: str = '.') -> dict[str, typing.Union[str, tuple[str]]]:
+    def _construct_group_keys(self, sep: str = '.') -> dict[str, str | tuple[str]]:
internal_keys = self.grouped.groups.keys()
public_keys = map(
lambda key: key if isinstance(key, str) else sep.join(str(value) for value in key),
@@ -352,7 +354,7 @@ def search(
self,
*,
query: typing.Union['QueryModel', dict[str, typing.Any]],
-        require_all_on: typing.Union[str, list[str]] = None,
+        require_all_on: str | list[str] | None = None,
) -> 'ESMCatalogModel':
"""
Search for entries in the catalog.
@@ -398,13 +400,13 @@ def search(
class QueryModel(pydantic.BaseModel):
"""A Pydantic model to represent a query to be executed against a catalog."""

-    query: dict[pydantic.StrictStr, typing.Union[typing.Any, list[typing.Any]]]
+    query: dict[pydantic.StrictStr, typing.Any | list[typing.Any]]
columns: list[str]
-    require_all_on: typing.Optional[typing.Union[str, list[typing.Any]]] = None
+    require_all_on: str | list[typing.Any] | None = None

# TODO: Seem to be unable to modify fields in model_validator with
# validate_assignment=True since it leads to recursion
-    model_config = ConfigDict(validate_default=True, validate_assignment=False)
+    model_config = ConfigDict(validate_assignment=False)

@pydantic.model_validator(mode='after')
def validate_query(cls, model):
@@ -424,7 +426,7 @@ def validate_query(cls, model):
raise ValueError(f'Column {key} not in columns {columns}')
_query = query.copy()
for key, value in _query.items():
-            if isinstance(value, (str, int, float, bool)) or value is None or value is pd.NA:
+            if isinstance(value, str | int | float | bool) or value is None or value is pd.NA:
_query[key] = [value]

model.query = _query
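The annotation changes in this file (`typing.Optional[X]` and `typing.Union[...]` rewritten as `X | None` and `A | B`) are evaluated eagerly by pydantic, so they need runtime PEP 604 support, i.e. Python 3.10+. The `dict.update` to `|=` rewrites are PEP 584 and already worked on 3.9; they are purely stylistic here. A minimal sketch of both patterns (field names are assumed, not this module's models):

```python
import datetime

import pydantic


class Sketch(pydantic.BaseModel):
    # pydantic evaluates these annotations at class-creation time,
    # so the `X | None` form requires Python >= 3.10.
    title: str | None = None
    last_updated: datetime.datetime | datetime.date | None = None


json_kwargs = {'indent': 2}
json_kwargs |= {'sort_keys': True}  # PEP 584 in-place merge (Python >= 3.9)

print(Sketch(title='demo').model_dump(), json_kwargs)
```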
48 changes: 24 additions & 24 deletions intake_esm/core.py
@@ -76,11 +76,11 @@ class esm_datastore(Catalog):

def __init__(
self,
-        obj: typing.Union[pydantic.FilePath, pydantic.AnyUrl, dict[str, typing.Any]],
+        obj: pydantic.FilePath | pydantic.AnyUrl | dict[str, typing.Any],
*,
progressbar: bool = True,
sep: str = '.',
-        registry: typing.Optional[DerivedVariableRegistry] = None,
+        registry: DerivedVariableRegistry | None = None,
read_csv_kwargs: dict[str, typing.Any] = None,
columns_with_iterables: list[str] = None,
storage_options: dict[str, typing.Any] = None,
@@ -209,7 +209,7 @@ def _get_entries(self) -> dict[str, ESMDataSource]:
_ = self[key]
return self._entries

-    @pydantic.validate_arguments
+    @pydantic.validate_call
def __getitem__(self, key: str) -> ESMDataSource:
"""
This method takes a key argument and return a data source
@@ -328,10 +328,10 @@ def __dir__(self) -> list[str]:
def _ipython_key_completions_(self):
return self.__dir__()

-    @pydantic.validate_arguments
+    @pydantic.validate_call
def search(
self,
-        require_all_on: typing.Optional[typing.Union[str, list[str]]] = None,
+        require_all_on: str | list[str] | None = None,
**query: typing.Any,
):
"""Search for entries in the catalog.
@@ -443,15 +443,15 @@ def search(
cat.derivedcat = self.derivedcat
return cat

-    @pydantic.validate_arguments
+    @pydantic.validate_call
def serialize(
self,
name: pydantic.StrictStr,
-        directory: typing.Optional[typing.Union[pydantic.DirectoryPath, pydantic.StrictStr]] = None,
+        directory: pydantic.DirectoryPath | pydantic.StrictStr | None = None,
catalog_type: str = 'dict',
-        to_csv_kwargs: typing.Optional[dict[typing.Any, typing.Any]] = None,
-        json_dump_kwargs: typing.Optional[dict[typing.Any, typing.Any]] = None,
-        storage_options: typing.Optional[dict[str, typing.Any]] = None,
+        to_csv_kwargs: dict[typing.Any, typing.Any] | None = None,
+        json_dump_kwargs: dict[typing.Any, typing.Any] | None = None,
+        storage_options: dict[str, typing.Any] | None = None,
) -> None:
"""Serialize catalog to corresponding json and csv files.
@@ -537,15 +537,15 @@ def unique(self) -> pd.Series:
)
return unique

-    @pydantic.validate_arguments
+    @pydantic.validate_call
def to_dataset_dict(
self,
-        xarray_open_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        xarray_combine_by_coords_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        preprocess: typing.Optional[typing.Callable] = None,
-        storage_options: typing.Optional[dict[pydantic.StrictStr, typing.Any]] = None,
-        progressbar: typing.Optional[pydantic.StrictBool] = None,
-        aggregate: typing.Optional[pydantic.StrictBool] = None,
+        xarray_open_kwargs: dict[str, typing.Any] | None = None,
+        xarray_combine_by_coords_kwargs: dict[str, typing.Any] | None = None,
+        preprocess: typing.Callable | None = None,
+        storage_options: dict[pydantic.StrictStr, typing.Any] | None = None,
+        progressbar: pydantic.StrictBool | None = None,
+        aggregate: pydantic.StrictBool | None = None,
skip_on_error: pydantic.StrictBool = False,
**kwargs,
) -> dict[str, xr.Dataset]:
@@ -687,15 +687,15 @@ def to_dataset_dict(
self.datasets = self._create_derived_variables(datasets, skip_on_error)
return self.datasets

-    @pydantic.validate_arguments
+    @pydantic.validate_call
def to_datatree(
self,
-        xarray_open_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        xarray_combine_by_coords_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        preprocess: typing.Optional[typing.Callable] = None,
-        storage_options: typing.Optional[dict[pydantic.StrictStr, typing.Any]] = None,
-        progressbar: typing.Optional[pydantic.StrictBool] = None,
-        aggregate: typing.Optional[pydantic.StrictBool] = None,
+        xarray_open_kwargs: dict[str, typing.Any] | None = None,
+        xarray_combine_by_coords_kwargs: dict[str, typing.Any] | None = None,
+        preprocess: typing.Callable | None = None,
+        storage_options: dict[pydantic.StrictStr, typing.Any] | None = None,
+        progressbar: pydantic.StrictBool | None = None,
+        aggregate: pydantic.StrictBool | None = None,
skip_on_error: pydantic.StrictBool = False,
levels: list[str] = None,
**kwargs,
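The decorator swap (`pydantic.validate_arguments` to `pydantic.validate_call`) tracks pydantic v2, which renamed the runtime argument-validation decorator. A standalone sketch of its behaviour (hypothetical function, not this module's API):

```python
import pydantic


@pydantic.validate_call
def load(progressbar: pydantic.StrictBool | None = None) -> str:
    # Arguments are validated against the annotations on every call.
    return f'progressbar={progressbar}'


print(load(progressbar=True))  # OK
# load(progressbar='yes')      # raises pydantic.ValidationError (strict bool)
```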