diff --git a/doc/why-xarray.rst b/doc/why-xarray.rst
index d0a6c591b29..25d558d99d5 100644
--- a/doc/why-xarray.rst
+++ b/doc/why-xarray.rst
@@ -62,9 +62,8 @@ The power of the dataset over a plain dictionary is that, in addition to
 pulling out arrays by name, it is possible to select or combine data along a
 dimension across all arrays simultaneously. Like a
 :py:class:`~pandas.DataFrame`, datasets facilitate array operations with
-heterogeneous data -- the difference is that the arrays in a dataset can not
-only have different data types, but can also have different numbers of
-dimensions.
+heterogeneous data -- the difference is that the arrays in a dataset can have
+not only different data types, but also different numbers of dimensions.
 
 This data model is borrowed from the netCDF_ file format, which also provides
 xarray with a natural and portable serialization format. NetCDF is very popular
diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py
index a5c42fd368c..f6254b32f4f 100644
--- a/xarray/tests/test_backends.py
+++ b/xarray/tests/test_backends.py
@@ -2163,6 +2163,7 @@ def test_encoding_unlimited_dims(self):
 
 @requires_h5netcdf
 @requires_netCDF4
+@pytest.mark.filterwarnings("ignore:use make_scale(name) instead")
 class TestH5NetCDFData(NetCDF4Base):
     engine = "h5netcdf"
 
@@ -2173,16 +2174,25 @@ def create_store(self):
 
     @pytest.mark.filterwarnings("ignore:complex dtypes are supported by h5py")
     @pytest.mark.parametrize(
-        "invalid_netcdf, warns, num_warns",
+        "invalid_netcdf, warntype, num_warns",
         [(None, FutureWarning, 1), (False, FutureWarning, 1), (True, None, 0)],
     )
-    def test_complex(self, invalid_netcdf, warns, num_warns):
+    def test_complex(self, invalid_netcdf, warntype, num_warns):
         expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))})
         save_kwargs = {"invalid_netcdf": invalid_netcdf}
-        with pytest.warns(warns) as record:
+        with pytest.warns(warntype) as record:
             with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
                 assert_equal(expected, actual)
-        assert len(record) == num_warns
+
+        recorded_num_warns = 0
+        if warntype:
+            for warning in record:
+                if issubclass(warning.category, warntype) and (
+                    "complex dtypes" in str(warning.message)
+                ):
+                    recorded_num_warns += 1
+
+        assert recorded_num_warns == num_warns
 
     def test_cross_engine_read_write_netcdf4(self):
         # Drop dim3, because its labels include strings. These appear to be
@@ -2451,6 +2461,7 @@ def skip_if_not_engine(engine):
 
 
 @requires_dask
+@pytest.mark.filterwarnings("ignore:use make_scale(name) instead")
 def test_open_mfdataset_manyfiles(
     readengine, nfiles, parallel, chunks, file_cache_maxsize
 ):
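
The rewritten assertion in test_complex counts only the warnings it actually cares about, so unrelated warnings raised inside the pytest.warns block (for example h5py's "use make_scale(name) instead" deprecation) no longer change the count. Below is a minimal, self-contained sketch of that filtering pattern; the emit_warnings helper is hypothetical and stands in for the xarray round-trip.

import warnings

import pytest


def emit_warnings():
    # The warning under test plus an unrelated deprecation, mimicking
    # the extra noise a dependency such as h5py can emit.
    warnings.warn("complex dtypes are not allowed in netCDF files", FutureWarning)
    warnings.warn("use make_scale(name) instead", DeprecationWarning)


def test_counts_only_matching_warnings():
    with pytest.warns(FutureWarning) as record:
        emit_warnings()

    # Filter by category and message instead of asserting on len(record),
    # so unrelated warnings do not break the test.
    matching = [
        w
        for w in record
        if issubclass(w.category, FutureWarning)
        and "complex dtypes" in str(w.message)
    ]
    assert len(matching) == 1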