Format with preview flag
etienneschalk committed Feb 18, 2024
1 parent e3383b6 commit 350b0b4
Showing 68 changed files with 1,571 additions and 1,949 deletions.
30 changes: 15 additions & 15 deletions asv_bench/benchmarks/combine.py
@@ -16,9 +16,9 @@ def setup(self) -> None:
data = np.random.randn(t_size)

self.dsA0 = xr.Dataset({"A": xr.DataArray(data, coords={"T": t}, dims=("T"))})
self.dsA1 = xr.Dataset(
{"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T"))}
)
self.dsA1 = xr.Dataset({
"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T"))
})

def time_combine_by_coords(self) -> None:
"""Also has to load and arrange t coordinate"""
@@ -54,18 +54,18 @@ def setup(self):
t = np.arange(t_size)
data = np.random.randn(t_size, x_size, y_size)

self.dsA0 = xr.Dataset(
{"A": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))}
)
self.dsA1 = xr.Dataset(
{"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))}
)
self.dsB0 = xr.Dataset(
{"B": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))}
)
self.dsB1 = xr.Dataset(
{"B": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))}
)
self.dsA0 = xr.Dataset({
"A": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))
})
self.dsA1 = xr.Dataset({
"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))
})
self.dsB0 = xr.Dataset({
"B": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))
})
self.dsB1 = xr.Dataset({
"B": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))
})

def time_combine_nested(self):
datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]]
10 changes: 4 additions & 6 deletions asv_bench/benchmarks/dataset.py
@@ -7,12 +7,10 @@

class DatasetBinaryOp:
def setup(self):
self.ds = Dataset(
{
"a": (("x", "y"), np.ones((300, 400))),
"b": (("x", "y"), np.ones((300, 400))),
}
)
self.ds = Dataset({
"a": (("x", "y"), np.ones((300, 400))),
"b": (("x", "y"), np.ones((300, 400))),
})
self.mean = self.ds.mean()
self.std = self.ds.std()

12 changes: 5 additions & 7 deletions asv_bench/benchmarks/groupby.py
@@ -11,13 +11,11 @@
class GroupBy:
def setup(self, *args, **kwargs):
self.n = 100
self.ds1d = xr.Dataset(
{
"a": xr.DataArray(np.r_[np.repeat(1, self.n), np.repeat(2, self.n)]),
"b": xr.DataArray(np.arange(2 * self.n)),
"c": xr.DataArray(np.arange(2 * self.n)),
}
)
self.ds1d = xr.Dataset({
"a": xr.DataArray(np.r_[np.repeat(1, self.n), np.repeat(2, self.n)]),
"b": xr.DataArray(np.arange(2 * self.n)),
"c": xr.DataArray(np.arange(2 * self.n)),
})
self.ds2d = self.ds1d.expand_dims(z=10).copy()
self.ds1d_mean = self.ds1d.groupby("b").mean()
self.ds2d_mean = self.ds2d.groupby("b").mean()
12 changes: 5 additions & 7 deletions asv_bench/benchmarks/pandas.py
@@ -9,13 +9,11 @@
class MultiIndexSeries:
def setup(self, dtype, subset):
data = np.random.rand(100000).astype(dtype)
index = pd.MultiIndex.from_product(
[
list("abcdefhijk"),
list("abcdefhijk"),
pd.date_range(start="2000-01-01", periods=1000, freq="D"),
]
)
index = pd.MultiIndex.from_product([
list("abcdefhijk"),
list("abcdefhijk"),
pd.date_range(start="2000-01-01", periods=1000, freq="D"),
])
series = pd.Series(data, index)
if subset:
series = series[::3]
23 changes: 11 additions & 12 deletions ci/min_deps_check.py
@@ -3,6 +3,7 @@
publication date. Compare it against requirements/min-all-deps.yml to verify the
policy on obsolete dependencies is being followed. Print a pretty report :)
"""

from __future__ import annotations

import itertools
@@ -104,18 +105,16 @@ def metadata(entry):

# Hardcoded fix to work around incorrect dates in conda
if pkg == "python":
out.update(
{
(2, 7): datetime(2010, 6, 3),
(3, 5): datetime(2015, 9, 13),
(3, 6): datetime(2016, 12, 23),
(3, 7): datetime(2018, 6, 27),
(3, 8): datetime(2019, 10, 14),
(3, 9): datetime(2020, 10, 5),
(3, 10): datetime(2021, 10, 4),
(3, 11): datetime(2022, 10, 24),
}
)
out.update({
(2, 7): datetime(2010, 6, 3),
(3, 5): datetime(2015, 9, 13),
(3, 6): datetime(2016, 12, 23),
(3, 7): datetime(2018, 6, 27),
(3, 8): datetime(2019, 10, 14),
(3, 9): datetime(2020, 10, 5),
(3, 10): datetime(2021, 10, 4),
(3, 11): datetime(2022, 10, 24),
})

return out

8 changes: 3 additions & 5 deletions doc/conf.py
@@ -52,11 +52,9 @@
try:
import cartopy # noqa: F401
except ImportError:
allowed_failures.update(
[
"gallery/plot_cartopy_facetgrid.py",
]
)
allowed_failures.update([
"gallery/plot_cartopy_facetgrid.py",
])

nbsphinx_allow_errors = False

14 changes: 8 additions & 6 deletions doc/examples/apply_ufunc_vectorize_1d.ipynb
@@ -520,9 +520,10 @@
"\n",
"interped = xr.apply_ufunc(\n",
" interp1d_np, # first the function\n",
" air.chunk(\n",
" {\"time\": 2, \"lon\": 2}\n",
" ), # now arguments in the order expected by 'interp1_np'\n",
" air.chunk({\n",
" \"time\": 2,\n",
" \"lon\": 2,\n",
" }), # now arguments in the order expected by 'interp1_np'\n",
" air.lat, # as above\n",
" newlat, # as above\n",
" input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n",
@@ -617,9 +618,10 @@
"source": [
"interped = xr.apply_ufunc(\n",
" interp1d_np_gufunc, # first the function\n",
" air.chunk(\n",
" {\"time\": 2, \"lon\": 2}\n",
" ), # now arguments in the order expected by 'interp1_np'\n",
" air.chunk({\n",
" \"time\": 2,\n",
" \"lon\": 2,\n",
" }), # now arguments in the order expected by 'interp1_np'\n",
" air.lat, # as above\n",
" newlat, # as above\n",
" input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n",
1 change: 1 addition & 0 deletions pyproject.toml
@@ -288,6 +288,7 @@ convention = "numpy"

[tool.ruff.format]
docstring-code-format = true
preview = true

[tool.pytest.ini_options]
addopts = ["--strict-config", "--strict-markers"]
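
For context: preview = true under [tool.ruff.format] opts "ruff format" into its preview-only style rules. The rewrites in the other hunks of this commit are consistent with the preview "hug brackets" behaviour, where a call whose sole argument is a dict, list or set literal keeps the opening bracket on the call line instead of breaking it onto its own line. A minimal before/after sketch (an illustrative snippet modelled on the dataset.py hunk above, not code taken from the commit):

import numpy as np
import xarray as xr

# Stable (Black-compatible) layout: the sole dict argument is broken out
# onto its own indented block.
ds_before = xr.Dataset(
    {
        "a": (("x", "y"), np.ones((300, 400))),
    }
)

# Preview "hug brackets" layout: the braces hug the call's parentheses.
# Both forms construct the same Dataset at runtime.
ds_after = xr.Dataset({
    "a": (("x", "y"), np.ones((300, 400))),
})

With this configuration, running "ruff format" would be expected to rewrite the first form into the second, which matches the Python changes shown throughout this commit.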
63 changes: 26 additions & 37 deletions xarray/backends/api.py
@@ -1132,8 +1132,7 @@ def to_netcdf(
*,
multifile: Literal[True],
invalid_netcdf: bool = False,
) -> tuple[ArrayWriter, AbstractDataStore]:
...
) -> tuple[ArrayWriter, AbstractDataStore]: ...


# path=None writes to bytes
@@ -1150,8 +1149,7 @@ def to_netcdf(
compute: bool = True,
multifile: Literal[False] = False,
invalid_netcdf: bool = False,
) -> bytes:
...
) -> bytes: ...


# compute=False returns dask.Delayed
@@ -1169,8 +1167,7 @@ def to_netcdf(
compute: Literal[False],
multifile: Literal[False] = False,
invalid_netcdf: bool = False,
) -> Delayed:
...
) -> Delayed: ...


# default return None
@@ -1187,8 +1184,7 @@ def to_netcdf(
compute: Literal[True] = True,
multifile: Literal[False] = False,
invalid_netcdf: bool = False,
) -> None:
...
) -> None: ...


# if compute cannot be evaluated at type check time
@@ -1206,8 +1202,7 @@ def to_netcdf(
compute: bool = False,
multifile: Literal[False] = False,
invalid_netcdf: bool = False,
) -> Delayed | None:
...
) -> Delayed | None: ...


# if multifile cannot be evaluated at type check time
@@ -1225,8 +1220,7 @@ def to_netcdf(
compute: bool = False,
multifile: bool = False,
invalid_netcdf: bool = False,
) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None:
...
) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None: ...


# Any
@@ -1243,8 +1237,7 @@ def to_netcdf(
compute: bool = False,
multifile: bool = False,
invalid_netcdf: bool = False,
) -> tuple[ArrayWriter, AbstractDataStore] | bytes | Delayed | None:
...
) -> tuple[ArrayWriter, AbstractDataStore] | bytes | Delayed | None: ...


def to_netcdf(
@@ -1499,22 +1492,20 @@ def save_mfdataset(
"save_mfdataset"
)

writers, stores = zip(
*[
to_netcdf(
ds,
path,
mode,
format,
group,
engine,
compute=compute,
multifile=True,
**kwargs,
)
for ds, path, group in zip(datasets, paths, groups)
]
)
writers, stores = zip(*[
to_netcdf(
ds,
path,
mode,
format,
group,
engine,
compute=compute,
multifile=True,
**kwargs,
)
for ds, path, group in zip(datasets, paths, groups)
])

try:
writes = [w.sync(compute=compute) for w in writers]
@@ -1526,9 +1517,9 @@ def save_mfdataset(
if not compute:
import dask

return dask.delayed(
[dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)]
)
return dask.delayed([
dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)
])


def _auto_detect_region(ds_new, ds_orig, dim):
@@ -1678,8 +1669,7 @@ def to_zarr(
zarr_version: int | None = None,
write_empty_chunks: bool | None = None,
chunkmanager_store_kwargs: dict[str, Any] | None = None,
) -> backends.ZarrStore:
...
) -> backends.ZarrStore: ...


# compute=False returns dask.Delayed
@@ -1702,8 +1692,7 @@ def to_zarr(
zarr_version: int | None = None,
write_empty_chunks: bool | None = None,
chunkmanager_store_kwargs: dict[str, Any] | None = None,
) -> Delayed:
...
) -> Delayed: ...


def to_zarr(
6 changes: 3 additions & 3 deletions xarray/backends/locks.py
@@ -40,9 +40,9 @@ class SerializableLock:
The creation of locks is itself not threadsafe.
"""

_locks: ClassVar[
WeakValueDictionary[Hashable, threading.Lock]
] = WeakValueDictionary()
_locks: ClassVar[WeakValueDictionary[Hashable, threading.Lock]] = (
WeakValueDictionary()
)
token: Hashable
lock: threading.Lock

6 changes: 3 additions & 3 deletions xarray/backends/plugins.py
@@ -97,9 +97,9 @@ def sort_backends(
for be_name in STANDARD_BACKENDS_ORDER:
if backend_entrypoints.get(be_name, None) is not None:
ordered_backends_entrypoints[be_name] = backend_entrypoints.pop(be_name)
ordered_backends_entrypoints.update(
{name: backend_entrypoints[name] for name in sorted(backend_entrypoints)}
)
ordered_backends_entrypoints.update({
name: backend_entrypoints[name] for name in sorted(backend_entrypoints)
})
return ordered_backends_entrypoints


10 changes: 4 additions & 6 deletions xarray/backends/pydap_.py
@@ -72,12 +72,10 @@ def _fix_attributes(attributes):
elif is_dict_like(attributes[k]):
# Make Hierarchical attributes to a single level with a
# dot-separated key
attributes.update(
{
f"{k}.{k_child}": v_child
for k_child, v_child in attributes.pop(k).items()
}
)
attributes.update({
f"{k}.{k_child}": v_child
for k_child, v_child in attributes.pop(k).items()
})
return attributes


6 changes: 3 additions & 3 deletions xarray/backends/zarr.py
@@ -72,9 +72,9 @@ def __init__(self, zarr_array):
self.shape = self._array.shape

# preserve vlen string object dtype (GH 7328)
if self._array.filters is not None and any(
[filt.codec_id == "vlen-utf8" for filt in self._array.filters]
):
if self._array.filters is not None and any([
filt.codec_id == "vlen-utf8" for filt in self._array.filters
]):
dtype = coding.strings.create_vlen_dtype(str)
else:
dtype = self._array.dtype
6 changes: 3 additions & 3 deletions xarray/coding/cftimeindex.py
@@ -807,9 +807,9 @@ def _parse_array_of_cftime_strings(strings, date_type):
-------
np.array
"""
return np.array(
[_parse_iso8601_without_reso(date_type, s) for s in strings.ravel()]
).reshape(strings.shape)
return np.array([
_parse_iso8601_without_reso(date_type, s) for s in strings.ravel()
]).reshape(strings.shape)


def _contains_datetime_timedeltas(array):