From 8469959505681eca1388dfeadc66e2a1fe3a190c Mon Sep 17 00:00:00 2001 From: Abdulaziz Aloqeely <52792999+Aloqeely@users.noreply.github.com> Date: Tue, 19 Mar 2024 02:54:22 +0300 Subject: [PATCH 01/23] DOC: Remove doc of deprecated week and weekofyear (#57901) Remove doc of deprecated week and weekofyear Co-authored-by: Abdulaziz Aloqeely <52792999+DAzVise@users.noreply.github.com> --- doc/source/user_guide/timeseries.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 0f38d90e18616..ecdfb3c565d33 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -797,8 +797,6 @@ There are several time/date properties that one can access from ``Timestamp`` or timetz,"Returns datetime.time as local time with timezone information" dayofyear,"The ordinal day of year" day_of_year,"The ordinal day of year" - weekofyear,"The week ordinal of the year" - week,"The week ordinal of the year" dayofweek,"The number of the day of the week with Monday=0, Sunday=6" day_of_week,"The number of the day of the week with Monday=0, Sunday=6" weekday,"The number of the day of the week with Monday=0, Sunday=6" @@ -812,6 +810,10 @@ There are several time/date properties that one can access from ``Timestamp`` or is_year_end,"Logical indicating if last day of year (defined by frequency)" is_leap_year,"Logical indicating if the date belongs to a leap year" +.. note:: + + You can use ``DatetimeIndex.isocalendar().week`` to access week of year date information. + Furthermore, if you have a ``Series`` with datetimelike values, then you can access these properties via the ``.dt`` accessor, as detailed in the section on :ref:`.dt accessors`. 
From 13997e62104809f708c6ce4e01d337c87a66e514 Mon Sep 17 00:00:00 2001 From: Yuki Kitayama <47092819+yukikitayama@users.noreply.github.com> Date: Mon, 18 Mar 2024 16:54:46 -0700 Subject: [PATCH 02/23] DOC: update link in benchmarks.md (#57903) --- web/pandas/community/benchmarks.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/pandas/community/benchmarks.md b/web/pandas/community/benchmarks.md index ffce00be96bca..1e63832a5a2ba 100644 --- a/web/pandas/community/benchmarks.md +++ b/web/pandas/community/benchmarks.md @@ -75,5 +75,5 @@ There is a quick summary here: The main benchmarks comparing dataframe tools that include pandas are: -- [H2O.ai benchmarks](https://h2oai.github.io/db-benchmark/) +- [DuckDB (former H2O.ai) benchmarks](https://duckdblabs.github.io/db-benchmark/) - [TPCH benchmarks](https://pola.rs/posts/benchmarks/) From 594466a3601fccf7f815d460b6e0a3c8515f22f7 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Mon, 18 Mar 2024 20:14:21 -0400 Subject: [PATCH 03/23] Revert "Fix issue with Tempita recompilation (#57796)" (#57905) This reverts commit 97c31a60f06a2a13db28b769bd3c4d396ddd3df6. 
--- pandas/_libs/meson.build | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build index 7621915ebcfdb..c27386743c6e9 100644 --- a/pandas/_libs/meson.build +++ b/pandas/_libs/meson.build @@ -54,37 +54,25 @@ _intervaltree_helper = custom_target('intervaltree_helper_pxi', py, tempita, '@INPUT@', '-o', '@OUTDIR@' ] ) - -_algos_pxi_dep = declare_dependency(sources: [_algos_take_helper, _algos_common_helper]) -_khash_pxi_dep = declare_dependency(sources: _khash_primitive_helper) -_hashtable_pxi_dep = declare_dependency( - sources: [_hashtable_class_helper, _hashtable_func_helper] -) -_index_pxi_dep = declare_dependency(sources: _index_class_helper) -_intervaltree_pxi_dep = declare_dependency(sources: _intervaltree_helper) -_sparse_pxi_dep = declare_dependency(sources: _sparse_op_helper) - +_khash_primitive_helper_dep = declare_dependency(sources: _khash_primitive_helper) subdir('tslibs') libs_sources = { # Dict of extension name -> dict of {sources, include_dirs, and deps} # numpy include dir is implicitly included - 'algos': {'sources': ['algos.pyx'], - 'deps': [_khash_pxi_dep, _algos_pxi_dep]}, + 'algos': {'sources': ['algos.pyx', _algos_common_helper, _algos_take_helper], 'deps': _khash_primitive_helper_dep}, 'arrays': {'sources': ['arrays.pyx']}, 'groupby': {'sources': ['groupby.pyx']}, 'hashing': {'sources': ['hashing.pyx']}, - 'hashtable': {'sources': ['hashtable.pyx'], - 'deps': [_khash_pxi_dep, _hashtable_pxi_dep]}, - 'index': {'sources': ['index.pyx'], - 'deps': [_khash_pxi_dep, _index_pxi_dep]}, + 'hashtable': {'sources': ['hashtable.pyx', _hashtable_class_helper, _hashtable_func_helper], 'deps': _khash_primitive_helper_dep}, + 'index': {'sources': ['index.pyx', _index_class_helper], 'deps': _khash_primitive_helper_dep}, 'indexing': {'sources': ['indexing.pyx']}, 'internals': {'sources': ['internals.pyx']}, - 'interval': {'sources': ['interval.pyx'], - 'deps': 
[_khash_pxi_dep, _intervaltree_pxi_dep]}, - 'join': {'sources': ['join.pyx'], - 'deps': [_khash_pxi_dep]}, + 'interval': {'sources': ['interval.pyx', _intervaltree_helper], + 'deps': _khash_primitive_helper_dep}, + 'join': {'sources': ['join.pyx', _khash_primitive_helper], + 'deps': _khash_primitive_helper_dep}, 'lib': {'sources': ['lib.pyx', 'src/parser/tokenizer.c']}, 'missing': {'sources': ['missing.pyx']}, 'pandas_datetime': {'sources': ['src/vendored/numpy/datetime/np_datetime.c', @@ -95,7 +83,7 @@ libs_sources = { 'src/parser/io.c', 'src/parser/pd_parser.c']}, 'parsers': {'sources': ['parsers.pyx', 'src/parser/tokenizer.c', 'src/parser/io.c'], - 'deps': [_khash_pxi_dep]}, + 'deps': _khash_primitive_helper_dep}, 'json': {'sources': ['src/vendored/ujson/python/ujson.c', 'src/vendored/ujson/python/objToJSON.c', 'src/vendored/ujson/python/JSONtoObj.c', @@ -107,8 +95,7 @@ libs_sources = { 'reshape': {'sources': ['reshape.pyx']}, 'sas': {'sources': ['sas.pyx']}, 'byteswap': {'sources': ['byteswap.pyx']}, - 'sparse': {'sources': ['sparse.pyx'], - 'deps': [_sparse_pxi_dep]}, + 'sparse': {'sources': ['sparse.pyx', _sparse_op_helper]}, 'tslib': {'sources': ['tslib.pyx']}, 'testing': {'sources': ['testing.pyx']}, 'writers': {'sources': ['writers.pyx']} From b3b70a936fb9d2cce261afc1555d6570d2e010e5 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Mon, 18 Mar 2024 20:15:20 -0400 Subject: [PATCH 04/23] Allow Dockerfile to use local requirements.txt (#57904) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index c697f0c1c66c7..03f76f39b8cc7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,6 @@ RUN apt-get install -y build-essential RUN apt-get install -y libhdf5-dev libgles2-mesa-dev RUN python -m pip install --upgrade pip -RUN python -m pip install \ - -r https://raw.githubusercontent.com/pandas-dev/pandas/main/requirements-dev.txt +COPY requirements-dev.txt /tmp +RUN python -m pip install -r 
/tmp/requirements-dev.txt CMD ["/bin/bash"] From 37b9303bf5adf79aea0ced8fb74de9670377dfd1 Mon Sep 17 00:00:00 2001 From: Marc Garcia Date: Tue, 19 Mar 2024 02:05:39 +0100 Subject: [PATCH 05/23] CI: Better error control in the validation of docstrings (#57879) * CI: Better error control in the validation of docstrings * Fix CI errors * Fixing tests * Update scripts/validate_docstrings.py Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --------- Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- ci/code_checks.sh | 2441 ++++++++++----------- scripts/tests/test_validate_docstrings.py | 41 +- scripts/validate_docstrings.py | 108 +- 3 files changed, 1284 insertions(+), 1306 deletions(-) diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 4b8e632f3246c..3c46cb39eeb7e 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -65,1236 +65,1217 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - PARAMETERS=(\ - --format=actions\ - --errors=EX01,EX03,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL08,GL09,GL10,PD01,PR01,PR02,PR03,PR04,PR05,PR06,PR07,PR08,PR09,PR10,RT01,RT02,RT03,RT04,RT05,SA01,SA02,SA03,SA04,SA05,SS01,SS02,SS03,SS04,SS05,SS06\ - --ignore_errors pandas.Categorical.__array__ SA01\ - --ignore_errors pandas.Categorical.codes SA01\ - --ignore_errors pandas.Categorical.dtype SA01\ - --ignore_errors pandas.Categorical.from_codes SA01\ - --ignore_errors pandas.Categorical.ordered SA01\ - --ignore_errors pandas.CategoricalDtype.categories SA01\ - --ignore_errors pandas.CategoricalDtype.ordered SA01\ - --ignore_errors pandas.CategoricalIndex.codes SA01\ - --ignore_errors pandas.CategoricalIndex.ordered SA01\ - --ignore_errors pandas.DataFrame.__dataframe__ SA01\ - --ignore_errors pandas.DataFrame.__iter__ SA01\ - --ignore_errors pandas.DataFrame.assign SA01\ - --ignore_errors pandas.DataFrame.at_time PR01\ - --ignore_errors pandas.DataFrame.axes SA01\ - --ignore_errors 
pandas.DataFrame.backfill PR01,SA01\ - --ignore_errors pandas.DataFrame.bfill SA01\ - --ignore_errors pandas.DataFrame.columns SA01\ - --ignore_errors pandas.DataFrame.copy SA01\ - --ignore_errors pandas.DataFrame.droplevel SA01\ - --ignore_errors pandas.DataFrame.dtypes SA01\ - --ignore_errors pandas.DataFrame.ffill SA01\ - --ignore_errors pandas.DataFrame.first_valid_index SA01\ - --ignore_errors pandas.DataFrame.get PR01,SA01\ - --ignore_errors pandas.DataFrame.hist RT03\ - --ignore_errors pandas.DataFrame.infer_objects RT03\ - --ignore_errors pandas.DataFrame.keys SA01\ - --ignore_errors pandas.DataFrame.kurt RT03,SA01\ - --ignore_errors pandas.DataFrame.kurtosis RT03,SA01\ - --ignore_errors pandas.DataFrame.last_valid_index SA01\ - --ignore_errors pandas.DataFrame.mask RT03\ - --ignore_errors pandas.DataFrame.max RT03\ - --ignore_errors pandas.DataFrame.mean RT03,SA01\ - --ignore_errors pandas.DataFrame.median RT03,SA01\ - --ignore_errors pandas.DataFrame.min RT03\ - --ignore_errors pandas.DataFrame.pad PR01,SA01\ - --ignore_errors pandas.DataFrame.plot PR02,SA01\ - --ignore_errors pandas.DataFrame.pop SA01\ - --ignore_errors pandas.DataFrame.prod RT03\ - --ignore_errors pandas.DataFrame.product RT03\ - --ignore_errors pandas.DataFrame.reorder_levels SA01\ - --ignore_errors pandas.DataFrame.sem PR01,RT03,SA01\ - --ignore_errors pandas.DataFrame.skew RT03,SA01\ - --ignore_errors pandas.DataFrame.sparse PR01,SA01\ - --ignore_errors pandas.DataFrame.sparse.density SA01\ - --ignore_errors pandas.DataFrame.sparse.from_spmatrix SA01\ - --ignore_errors pandas.DataFrame.sparse.to_coo SA01\ - --ignore_errors pandas.DataFrame.sparse.to_dense SA01\ - --ignore_errors pandas.DataFrame.std PR01,RT03,SA01\ - --ignore_errors pandas.DataFrame.sum RT03\ - --ignore_errors pandas.DataFrame.swapaxes PR01,SA01\ - --ignore_errors pandas.DataFrame.swaplevel SA01\ - --ignore_errors pandas.DataFrame.to_feather SA01\ - --ignore_errors pandas.DataFrame.to_markdown SA01\ - --ignore_errors 
pandas.DataFrame.to_parquet RT03\ - --ignore_errors pandas.DataFrame.to_period SA01\ - --ignore_errors pandas.DataFrame.to_timestamp SA01\ - --ignore_errors pandas.DataFrame.tz_convert SA01\ - --ignore_errors pandas.DataFrame.tz_localize SA01\ - --ignore_errors pandas.DataFrame.unstack RT03\ - --ignore_errors pandas.DataFrame.value_counts RT03\ - --ignore_errors pandas.DataFrame.var PR01,RT03,SA01\ - --ignore_errors pandas.DataFrame.where RT03\ - --ignore_errors pandas.DatetimeIndex.ceil SA01\ - --ignore_errors pandas.DatetimeIndex.date SA01\ - --ignore_errors pandas.DatetimeIndex.day SA01\ - --ignore_errors pandas.DatetimeIndex.day_name SA01\ - --ignore_errors pandas.DatetimeIndex.day_of_year SA01\ - --ignore_errors pandas.DatetimeIndex.dayofyear SA01\ - --ignore_errors pandas.DatetimeIndex.floor SA01\ - --ignore_errors pandas.DatetimeIndex.freqstr SA01\ - --ignore_errors pandas.DatetimeIndex.hour SA01\ - --ignore_errors pandas.DatetimeIndex.indexer_at_time PR01,RT03\ - --ignore_errors pandas.DatetimeIndex.indexer_between_time RT03\ - --ignore_errors pandas.DatetimeIndex.inferred_freq SA01\ - --ignore_errors pandas.DatetimeIndex.is_leap_year SA01\ - --ignore_errors pandas.DatetimeIndex.microsecond SA01\ - --ignore_errors pandas.DatetimeIndex.minute SA01\ - --ignore_errors pandas.DatetimeIndex.month SA01\ - --ignore_errors pandas.DatetimeIndex.month_name SA01\ - --ignore_errors pandas.DatetimeIndex.nanosecond SA01\ - --ignore_errors pandas.DatetimeIndex.quarter SA01\ - --ignore_errors pandas.DatetimeIndex.round SA01\ - --ignore_errors pandas.DatetimeIndex.second SA01\ - --ignore_errors pandas.DatetimeIndex.snap PR01,RT03,SA01\ - --ignore_errors pandas.DatetimeIndex.std PR01,RT03\ - --ignore_errors pandas.DatetimeIndex.time SA01\ - --ignore_errors pandas.DatetimeIndex.timetz SA01\ - --ignore_errors pandas.DatetimeIndex.to_period RT03\ - --ignore_errors pandas.DatetimeIndex.to_pydatetime RT03,SA01\ - --ignore_errors pandas.DatetimeIndex.tz SA01\ - --ignore_errors 
pandas.DatetimeIndex.tz_convert RT03\ - --ignore_errors pandas.DatetimeIndex.year SA01\ - --ignore_errors pandas.DatetimeTZDtype SA01\ - --ignore_errors pandas.DatetimeTZDtype.tz SA01\ - --ignore_errors pandas.DatetimeTZDtype.unit SA01\ - --ignore_errors pandas.ExcelFile PR01,SA01\ - --ignore_errors pandas.ExcelFile.parse PR01,SA01\ - --ignore_errors pandas.ExcelWriter SA01\ - --ignore_errors pandas.Flags SA01\ - --ignore_errors pandas.Float32Dtype SA01\ - --ignore_errors pandas.Float64Dtype SA01\ - --ignore_errors pandas.Grouper PR02,SA01\ - --ignore_errors pandas.HDFStore.append PR01,SA01\ - --ignore_errors pandas.HDFStore.get SA01\ - --ignore_errors pandas.HDFStore.groups SA01\ - --ignore_errors pandas.HDFStore.info RT03,SA01\ - --ignore_errors pandas.HDFStore.keys SA01\ - --ignore_errors pandas.HDFStore.put PR01,SA01\ - --ignore_errors pandas.HDFStore.select SA01\ - --ignore_errors pandas.HDFStore.walk SA01\ - --ignore_errors pandas.Index PR07\ - --ignore_errors pandas.Index.T SA01\ - --ignore_errors pandas.Index.append PR07,RT03,SA01\ - --ignore_errors pandas.Index.astype SA01\ - --ignore_errors pandas.Index.copy PR07,SA01\ - --ignore_errors pandas.Index.difference PR07,RT03,SA01\ - --ignore_errors pandas.Index.drop PR07,SA01\ - --ignore_errors pandas.Index.drop_duplicates RT03\ - --ignore_errors pandas.Index.droplevel RT03,SA01\ - --ignore_errors pandas.Index.dropna RT03,SA01\ - --ignore_errors pandas.Index.dtype SA01\ - --ignore_errors pandas.Index.duplicated RT03\ - --ignore_errors pandas.Index.empty GL08\ - --ignore_errors pandas.Index.equals SA01\ - --ignore_errors pandas.Index.fillna RT03\ - --ignore_errors pandas.Index.get_indexer PR07,SA01\ - --ignore_errors pandas.Index.get_indexer_for PR01,SA01\ - --ignore_errors pandas.Index.get_indexer_non_unique PR07,SA01\ - --ignore_errors pandas.Index.get_loc PR07,RT03,SA01\ - --ignore_errors pandas.Index.get_slice_bound PR07\ - --ignore_errors pandas.Index.hasnans SA01\ - --ignore_errors pandas.Index.identical 
PR01,SA01\ - --ignore_errors pandas.Index.inferred_type SA01\ - --ignore_errors pandas.Index.insert PR07,RT03,SA01\ - --ignore_errors pandas.Index.intersection PR07,RT03,SA01\ - --ignore_errors pandas.Index.item SA01\ - --ignore_errors pandas.Index.join PR07,RT03,SA01\ - --ignore_errors pandas.Index.map SA01\ - --ignore_errors pandas.Index.memory_usage RT03\ - --ignore_errors pandas.Index.name SA01\ - --ignore_errors pandas.Index.names GL08\ - --ignore_errors pandas.Index.nbytes SA01\ - --ignore_errors pandas.Index.ndim SA01\ - --ignore_errors pandas.Index.nunique RT03\ - --ignore_errors pandas.Index.putmask PR01,RT03\ - --ignore_errors pandas.Index.ravel PR01,RT03\ - --ignore_errors pandas.Index.reindex PR07\ - --ignore_errors pandas.Index.shape SA01\ - --ignore_errors pandas.Index.size SA01\ - --ignore_errors pandas.Index.slice_indexer PR07,RT03,SA01\ - --ignore_errors pandas.Index.slice_locs RT03\ - --ignore_errors pandas.Index.str PR01,SA01\ - --ignore_errors pandas.Index.symmetric_difference PR07,RT03,SA01\ - --ignore_errors pandas.Index.take PR01,PR07\ - --ignore_errors pandas.Index.to_list RT03\ - --ignore_errors pandas.Index.union PR07,RT03,SA01\ - --ignore_errors pandas.Index.unique RT03\ - --ignore_errors pandas.Index.value_counts RT03\ - --ignore_errors pandas.Index.view GL08\ - --ignore_errors pandas.Int16Dtype SA01\ - --ignore_errors pandas.Int32Dtype SA01\ - --ignore_errors pandas.Int64Dtype SA01\ - --ignore_errors pandas.Int8Dtype SA01\ - --ignore_errors pandas.Interval PR02\ - --ignore_errors pandas.Interval.closed SA01\ - --ignore_errors pandas.Interval.left SA01\ - --ignore_errors pandas.Interval.mid SA01\ - --ignore_errors pandas.Interval.right SA01\ - --ignore_errors pandas.IntervalDtype PR01,SA01\ - --ignore_errors pandas.IntervalDtype.subtype SA01\ - --ignore_errors pandas.IntervalIndex.closed SA01\ - --ignore_errors pandas.IntervalIndex.contains RT03\ - --ignore_errors pandas.IntervalIndex.get_indexer PR07,SA01\ - --ignore_errors 
pandas.IntervalIndex.get_loc PR07,RT03,SA01\ - --ignore_errors pandas.IntervalIndex.is_non_overlapping_monotonic SA01\ - --ignore_errors pandas.IntervalIndex.left GL08\ - --ignore_errors pandas.IntervalIndex.length GL08\ - --ignore_errors pandas.IntervalIndex.mid GL08\ - --ignore_errors pandas.IntervalIndex.right GL08\ - --ignore_errors pandas.IntervalIndex.set_closed RT03,SA01\ - --ignore_errors pandas.IntervalIndex.to_tuples RT03,SA01\ - --ignore_errors pandas.MultiIndex PR01\ - --ignore_errors pandas.MultiIndex.append PR07,SA01\ - --ignore_errors pandas.MultiIndex.copy PR07,RT03,SA01\ - --ignore_errors pandas.MultiIndex.drop PR07,RT03,SA01\ - --ignore_errors pandas.MultiIndex.droplevel RT03,SA01\ - --ignore_errors pandas.MultiIndex.dtypes SA01\ - --ignore_errors pandas.MultiIndex.get_indexer PR07,SA01\ - --ignore_errors pandas.MultiIndex.get_level_values SA01\ - --ignore_errors pandas.MultiIndex.get_loc PR07\ - --ignore_errors pandas.MultiIndex.get_loc_level PR07\ - --ignore_errors pandas.MultiIndex.levels SA01\ - --ignore_errors pandas.MultiIndex.levshape SA01\ - --ignore_errors pandas.MultiIndex.names SA01\ - --ignore_errors pandas.MultiIndex.nlevels SA01\ - --ignore_errors pandas.MultiIndex.remove_unused_levels RT03,SA01\ - --ignore_errors pandas.MultiIndex.reorder_levels RT03,SA01\ - --ignore_errors pandas.MultiIndex.set_codes SA01\ - --ignore_errors pandas.MultiIndex.set_levels RT03,SA01\ - --ignore_errors pandas.MultiIndex.sortlevel PR07,SA01\ - --ignore_errors pandas.MultiIndex.to_frame RT03\ - --ignore_errors pandas.MultiIndex.truncate SA01\ - --ignore_errors pandas.NA SA01\ - --ignore_errors pandas.NaT SA01\ - --ignore_errors pandas.NamedAgg SA01\ - --ignore_errors pandas.Period SA01\ - --ignore_errors pandas.Period.asfreq SA01\ - --ignore_errors pandas.Period.freq GL08\ - --ignore_errors pandas.Period.freqstr SA01\ - --ignore_errors pandas.Period.is_leap_year SA01\ - --ignore_errors pandas.Period.month SA01\ - --ignore_errors pandas.Period.now SA01\ - 
--ignore_errors pandas.Period.ordinal GL08\ - --ignore_errors pandas.Period.quarter SA01\ - --ignore_errors pandas.Period.strftime PR01,SA01\ - --ignore_errors pandas.Period.to_timestamp SA01\ - --ignore_errors pandas.Period.year SA01\ - --ignore_errors pandas.PeriodDtype SA01\ - --ignore_errors pandas.PeriodDtype.freq SA01\ - --ignore_errors pandas.PeriodIndex.day SA01\ - --ignore_errors pandas.PeriodIndex.day_of_week SA01\ - --ignore_errors pandas.PeriodIndex.day_of_year SA01\ - --ignore_errors pandas.PeriodIndex.dayofweek SA01\ - --ignore_errors pandas.PeriodIndex.dayofyear SA01\ - --ignore_errors pandas.PeriodIndex.days_in_month SA01\ - --ignore_errors pandas.PeriodIndex.daysinmonth SA01\ - --ignore_errors pandas.PeriodIndex.freq GL08\ - --ignore_errors pandas.PeriodIndex.freqstr SA01\ - --ignore_errors pandas.PeriodIndex.from_fields PR07,SA01\ - --ignore_errors pandas.PeriodIndex.from_ordinals SA01\ - --ignore_errors pandas.PeriodIndex.hour SA01\ - --ignore_errors pandas.PeriodIndex.is_leap_year SA01\ - --ignore_errors pandas.PeriodIndex.minute SA01\ - --ignore_errors pandas.PeriodIndex.month SA01\ - --ignore_errors pandas.PeriodIndex.quarter SA01\ - --ignore_errors pandas.PeriodIndex.qyear GL08\ - --ignore_errors pandas.PeriodIndex.second SA01\ - --ignore_errors pandas.PeriodIndex.to_timestamp RT03,SA01\ - --ignore_errors pandas.PeriodIndex.week SA01\ - --ignore_errors pandas.PeriodIndex.weekday SA01\ - --ignore_errors pandas.PeriodIndex.weekofyear SA01\ - --ignore_errors pandas.PeriodIndex.year SA01\ - --ignore_errors pandas.RangeIndex PR07\ - --ignore_errors pandas.RangeIndex.from_range PR01,SA01\ - --ignore_errors pandas.RangeIndex.start SA01\ - --ignore_errors pandas.RangeIndex.step SA01\ - --ignore_errors pandas.RangeIndex.stop SA01\ - --ignore_errors pandas.Series SA01\ - --ignore_errors pandas.Series.T SA01\ - --ignore_errors pandas.Series.__iter__ RT03,SA01\ - --ignore_errors pandas.Series.add PR07\ - --ignore_errors pandas.Series.align PR07,SA01\ - 
--ignore_errors pandas.Series.astype RT03\ - --ignore_errors pandas.Series.at_time PR01,RT03\ - --ignore_errors pandas.Series.backfill PR01,SA01\ - --ignore_errors pandas.Series.bfill SA01\ - --ignore_errors pandas.Series.case_when RT03\ - --ignore_errors pandas.Series.cat PR07,SA01\ - --ignore_errors pandas.Series.cat.add_categories PR01,PR02\ - --ignore_errors pandas.Series.cat.as_ordered PR01\ - --ignore_errors pandas.Series.cat.as_unordered PR01\ - --ignore_errors pandas.Series.cat.codes SA01\ - --ignore_errors pandas.Series.cat.ordered SA01\ - --ignore_errors pandas.Series.cat.remove_categories PR01,PR02\ - --ignore_errors pandas.Series.cat.remove_unused_categories PR01\ - --ignore_errors pandas.Series.cat.rename_categories PR01,PR02\ - --ignore_errors pandas.Series.cat.reorder_categories PR01,PR02\ - --ignore_errors pandas.Series.cat.set_categories PR01,PR02,RT03\ - --ignore_errors pandas.Series.copy SA01\ - --ignore_errors pandas.Series.div PR07\ - --ignore_errors pandas.Series.droplevel SA01\ - --ignore_errors pandas.Series.dt PR01`# Accessors are implemented as classes, but we do not document the Parameters section` \ - --ignore_errors pandas.Series.dt.as_unit GL08,PR01,PR02\ - --ignore_errors pandas.Series.dt.ceil PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.components SA01\ - --ignore_errors pandas.Series.dt.date SA01\ - --ignore_errors pandas.Series.dt.day SA01\ - --ignore_errors pandas.Series.dt.day_name PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.day_of_year SA01\ - --ignore_errors pandas.Series.dt.dayofyear SA01\ - --ignore_errors pandas.Series.dt.days SA01\ - --ignore_errors pandas.Series.dt.days_in_month SA01\ - --ignore_errors pandas.Series.dt.daysinmonth SA01\ - --ignore_errors pandas.Series.dt.floor PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.freq GL08\ - --ignore_errors pandas.Series.dt.hour SA01\ - --ignore_errors pandas.Series.dt.is_leap_year SA01\ - --ignore_errors pandas.Series.dt.microsecond SA01\ - --ignore_errors 
pandas.Series.dt.microseconds SA01\ - --ignore_errors pandas.Series.dt.minute SA01\ - --ignore_errors pandas.Series.dt.month SA01\ - --ignore_errors pandas.Series.dt.month_name PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.nanosecond SA01\ - --ignore_errors pandas.Series.dt.nanoseconds SA01\ - --ignore_errors pandas.Series.dt.normalize PR01\ - --ignore_errors pandas.Series.dt.quarter SA01\ - --ignore_errors pandas.Series.dt.qyear GL08\ - --ignore_errors pandas.Series.dt.round PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.second SA01\ - --ignore_errors pandas.Series.dt.seconds SA01\ - --ignore_errors pandas.Series.dt.strftime PR01,PR02\ - --ignore_errors pandas.Series.dt.time SA01\ - --ignore_errors pandas.Series.dt.timetz SA01\ - --ignore_errors pandas.Series.dt.to_period PR01,PR02,RT03\ - --ignore_errors pandas.Series.dt.total_seconds PR01\ - --ignore_errors pandas.Series.dt.tz SA01\ - --ignore_errors pandas.Series.dt.tz_convert PR01,PR02,RT03\ - --ignore_errors pandas.Series.dt.tz_localize PR01,PR02\ - --ignore_errors pandas.Series.dt.unit GL08\ - --ignore_errors pandas.Series.dt.year SA01\ - --ignore_errors pandas.Series.dtype SA01\ - --ignore_errors pandas.Series.dtypes SA01\ - --ignore_errors pandas.Series.empty GL08\ - --ignore_errors pandas.Series.eq PR07,SA01\ - --ignore_errors pandas.Series.ewm RT03\ - --ignore_errors pandas.Series.expanding RT03\ - --ignore_errors pandas.Series.ffill SA01\ - --ignore_errors pandas.Series.filter RT03\ - --ignore_errors pandas.Series.first_valid_index RT03,SA01\ - --ignore_errors pandas.Series.floordiv PR07\ - --ignore_errors pandas.Series.ge PR07,SA01\ - --ignore_errors pandas.Series.get PR01,PR07,RT03,SA01\ - --ignore_errors pandas.Series.gt PR07,SA01\ - --ignore_errors pandas.Series.hasnans SA01\ - --ignore_errors pandas.Series.infer_objects RT03\ - --ignore_errors pandas.Series.is_monotonic_decreasing SA01\ - --ignore_errors pandas.Series.is_monotonic_increasing SA01\ - --ignore_errors pandas.Series.is_unique 
SA01\ - --ignore_errors pandas.Series.item SA01\ - --ignore_errors pandas.Series.keys SA01\ - --ignore_errors pandas.Series.kurt RT03,SA01\ - --ignore_errors pandas.Series.kurtosis RT03,SA01\ - --ignore_errors pandas.Series.last_valid_index RT03,SA01\ - --ignore_errors pandas.Series.le PR07,SA01\ - --ignore_errors pandas.Series.list.__getitem__ SA01\ - --ignore_errors pandas.Series.list.flatten SA01\ - --ignore_errors pandas.Series.list.len SA01\ - --ignore_errors pandas.Series.lt PR07,SA01\ - --ignore_errors pandas.Series.mask RT03\ - --ignore_errors pandas.Series.max RT03\ - --ignore_errors pandas.Series.mean RT03,SA01\ - --ignore_errors pandas.Series.median RT03,SA01\ - --ignore_errors pandas.Series.min RT03\ - --ignore_errors pandas.Series.mod PR07\ - --ignore_errors pandas.Series.mode SA01\ - --ignore_errors pandas.Series.mul PR07\ - --ignore_errors pandas.Series.nbytes SA01\ - --ignore_errors pandas.Series.ndim SA01\ - --ignore_errors pandas.Series.ne PR07,SA01\ - --ignore_errors pandas.Series.nunique RT03\ - --ignore_errors pandas.Series.pad PR01,SA01\ - --ignore_errors pandas.Series.pipe RT03\ - --ignore_errors pandas.Series.plot PR02,SA01\ - --ignore_errors pandas.Series.plot.box RT03\ - --ignore_errors pandas.Series.plot.density RT03\ - --ignore_errors pandas.Series.plot.kde RT03\ - --ignore_errors pandas.Series.pop RT03,SA01\ - --ignore_errors pandas.Series.pow PR07\ - --ignore_errors pandas.Series.prod RT03\ - --ignore_errors pandas.Series.product RT03\ - --ignore_errors pandas.Series.radd PR07\ - --ignore_errors pandas.Series.rdiv PR07\ - --ignore_errors pandas.Series.reindex RT03\ - --ignore_errors pandas.Series.reorder_levels RT03,SA01\ - --ignore_errors pandas.Series.rfloordiv PR07\ - --ignore_errors pandas.Series.rmod PR07\ - --ignore_errors pandas.Series.rmul PR07\ - --ignore_errors pandas.Series.rolling PR07\ - --ignore_errors pandas.Series.rpow PR07\ - --ignore_errors pandas.Series.rsub PR07\ - --ignore_errors pandas.Series.rtruediv PR07\ - 
--ignore_errors pandas.Series.sem PR01,RT03,SA01\ - --ignore_errors pandas.Series.shape SA01\ - --ignore_errors pandas.Series.size SA01\ - --ignore_errors pandas.Series.skew RT03,SA01\ - --ignore_errors pandas.Series.sparse PR01,SA01\ - --ignore_errors pandas.Series.sparse.density SA01\ - --ignore_errors pandas.Series.sparse.fill_value SA01\ - --ignore_errors pandas.Series.sparse.from_coo PR07,SA01\ - --ignore_errors pandas.Series.sparse.npoints SA01\ - --ignore_errors pandas.Series.sparse.sp_values SA01\ - --ignore_errors pandas.Series.sparse.to_coo PR07,RT03,SA01\ - --ignore_errors pandas.Series.std PR01,RT03,SA01\ - --ignore_errors pandas.Series.str PR01,SA01\ - --ignore_errors pandas.Series.str.capitalize RT03\ - --ignore_errors pandas.Series.str.casefold RT03\ - --ignore_errors pandas.Series.str.center RT03,SA01\ - --ignore_errors pandas.Series.str.decode PR07,RT03,SA01\ - --ignore_errors pandas.Series.str.encode PR07,RT03,SA01\ - --ignore_errors pandas.Series.str.find RT03\ - --ignore_errors pandas.Series.str.fullmatch RT03\ - --ignore_errors pandas.Series.str.get RT03,SA01\ - --ignore_errors pandas.Series.str.index RT03\ - --ignore_errors pandas.Series.str.ljust RT03,SA01\ - --ignore_errors pandas.Series.str.lower RT03\ - --ignore_errors pandas.Series.str.lstrip RT03\ - --ignore_errors pandas.Series.str.match RT03\ - --ignore_errors pandas.Series.str.normalize RT03,SA01\ - --ignore_errors pandas.Series.str.partition RT03\ - --ignore_errors pandas.Series.str.repeat SA01\ - --ignore_errors pandas.Series.str.replace SA01\ - --ignore_errors pandas.Series.str.rfind RT03\ - --ignore_errors pandas.Series.str.rindex RT03\ - --ignore_errors pandas.Series.str.rjust RT03,SA01\ - --ignore_errors pandas.Series.str.rpartition RT03\ - --ignore_errors pandas.Series.str.rstrip RT03\ - --ignore_errors pandas.Series.str.strip RT03\ - --ignore_errors pandas.Series.str.swapcase RT03\ - --ignore_errors pandas.Series.str.title RT03\ - --ignore_errors pandas.Series.str.translate 
RT03,SA01\ - --ignore_errors pandas.Series.str.upper RT03\ - --ignore_errors pandas.Series.str.wrap PR01,RT03,SA01\ - --ignore_errors pandas.Series.str.zfill RT03\ - --ignore_errors pandas.Series.struct.dtypes SA01\ - --ignore_errors pandas.Series.sub PR07\ - --ignore_errors pandas.Series.sum RT03\ - --ignore_errors pandas.Series.swaplevel SA01\ - --ignore_errors pandas.Series.to_dict SA01\ - --ignore_errors pandas.Series.to_frame SA01\ - --ignore_errors pandas.Series.to_hdf PR07\ - --ignore_errors pandas.Series.to_list RT03\ - --ignore_errors pandas.Series.to_markdown SA01\ - --ignore_errors pandas.Series.to_numpy RT03\ - --ignore_errors pandas.Series.to_period SA01\ - --ignore_errors pandas.Series.to_string SA01\ - --ignore_errors pandas.Series.to_timestamp RT03,SA01\ - --ignore_errors pandas.Series.truediv PR07\ - --ignore_errors pandas.Series.tz_convert SA01\ - --ignore_errors pandas.Series.tz_localize SA01\ - --ignore_errors pandas.Series.unstack SA01\ - --ignore_errors pandas.Series.update PR07,SA01\ - --ignore_errors pandas.Series.value_counts RT03\ - --ignore_errors pandas.Series.var PR01,RT03,SA01\ - --ignore_errors pandas.Series.where RT03\ - --ignore_errors pandas.SparseDtype SA01\ - --ignore_errors pandas.Timedelta PR07,SA01\ - --ignore_errors pandas.Timedelta.as_unit SA01\ - --ignore_errors pandas.Timedelta.asm8 SA01\ - --ignore_errors pandas.Timedelta.ceil SA01\ - --ignore_errors pandas.Timedelta.components SA01\ - --ignore_errors pandas.Timedelta.days SA01\ - --ignore_errors pandas.Timedelta.floor SA01\ - --ignore_errors pandas.Timedelta.max PR02,PR07,SA01\ - --ignore_errors pandas.Timedelta.min PR02,PR07,SA01\ - --ignore_errors pandas.Timedelta.resolution PR02,PR07,SA01\ - --ignore_errors pandas.Timedelta.round SA01\ - --ignore_errors pandas.Timedelta.to_numpy PR01\ - --ignore_errors pandas.Timedelta.to_timedelta64 SA01\ - --ignore_errors pandas.Timedelta.total_seconds SA01\ - --ignore_errors pandas.Timedelta.view SA01\ - --ignore_errors 
pandas.TimedeltaIndex PR01\ - --ignore_errors pandas.TimedeltaIndex.as_unit RT03,SA01\ - --ignore_errors pandas.TimedeltaIndex.ceil SA01\ - --ignore_errors pandas.TimedeltaIndex.components SA01\ - --ignore_errors pandas.TimedeltaIndex.days SA01\ - --ignore_errors pandas.TimedeltaIndex.floor SA01\ - --ignore_errors pandas.TimedeltaIndex.inferred_freq SA01\ - --ignore_errors pandas.TimedeltaIndex.mean PR07\ - --ignore_errors pandas.TimedeltaIndex.microseconds SA01\ - --ignore_errors pandas.TimedeltaIndex.nanoseconds SA01\ - --ignore_errors pandas.TimedeltaIndex.round SA01\ - --ignore_errors pandas.TimedeltaIndex.seconds SA01\ - --ignore_errors pandas.TimedeltaIndex.to_pytimedelta RT03,SA01\ - --ignore_errors pandas.Timestamp PR07,SA01\ - --ignore_errors pandas.Timestamp.as_unit SA01\ - --ignore_errors pandas.Timestamp.asm8 SA01\ - --ignore_errors pandas.Timestamp.astimezone SA01\ - --ignore_errors pandas.Timestamp.ceil SA01\ - --ignore_errors pandas.Timestamp.combine PR01,SA01\ - --ignore_errors pandas.Timestamp.ctime SA01\ - --ignore_errors pandas.Timestamp.date SA01\ - --ignore_errors pandas.Timestamp.day GL08\ - --ignore_errors pandas.Timestamp.day_name SA01\ - --ignore_errors pandas.Timestamp.day_of_week SA01\ - --ignore_errors pandas.Timestamp.day_of_year SA01\ - --ignore_errors pandas.Timestamp.dayofweek SA01\ - --ignore_errors pandas.Timestamp.dayofyear SA01\ - --ignore_errors pandas.Timestamp.days_in_month SA01\ - --ignore_errors pandas.Timestamp.daysinmonth SA01\ - --ignore_errors pandas.Timestamp.dst SA01\ - --ignore_errors pandas.Timestamp.floor SA01\ - --ignore_errors pandas.Timestamp.fold GL08\ - --ignore_errors pandas.Timestamp.fromordinal SA01\ - --ignore_errors pandas.Timestamp.fromtimestamp PR01,SA01\ - --ignore_errors pandas.Timestamp.hour GL08\ - --ignore_errors pandas.Timestamp.is_leap_year SA01\ - --ignore_errors pandas.Timestamp.isocalendar SA01\ - --ignore_errors pandas.Timestamp.isoformat SA01\ - --ignore_errors pandas.Timestamp.isoweekday 
SA01\ - --ignore_errors pandas.Timestamp.max PR02,PR07,SA01\ - --ignore_errors pandas.Timestamp.microsecond GL08\ - --ignore_errors pandas.Timestamp.min PR02,PR07,SA01\ - --ignore_errors pandas.Timestamp.minute GL08\ - --ignore_errors pandas.Timestamp.month GL08\ - --ignore_errors pandas.Timestamp.month_name SA01\ - --ignore_errors pandas.Timestamp.nanosecond GL08\ - --ignore_errors pandas.Timestamp.normalize SA01\ - --ignore_errors pandas.Timestamp.now SA01\ - --ignore_errors pandas.Timestamp.quarter SA01\ - --ignore_errors pandas.Timestamp.replace PR07,SA01\ - --ignore_errors pandas.Timestamp.resolution PR02,PR07,SA01\ - --ignore_errors pandas.Timestamp.round SA01\ - --ignore_errors pandas.Timestamp.second GL08\ - --ignore_errors pandas.Timestamp.strftime SA01\ - --ignore_errors pandas.Timestamp.strptime PR01,SA01\ - --ignore_errors pandas.Timestamp.time SA01\ - --ignore_errors pandas.Timestamp.timestamp SA01\ - --ignore_errors pandas.Timestamp.timetuple SA01\ - --ignore_errors pandas.Timestamp.timetz SA01\ - --ignore_errors pandas.Timestamp.to_datetime64 SA01\ - --ignore_errors pandas.Timestamp.to_julian_date SA01\ - --ignore_errors pandas.Timestamp.to_numpy PR01\ - --ignore_errors pandas.Timestamp.to_period PR01,SA01\ - --ignore_errors pandas.Timestamp.to_pydatetime PR01,SA01\ - --ignore_errors pandas.Timestamp.today SA01\ - --ignore_errors pandas.Timestamp.toordinal SA01\ - --ignore_errors pandas.Timestamp.tz SA01\ - --ignore_errors pandas.Timestamp.tz_convert SA01\ - --ignore_errors pandas.Timestamp.tz_localize SA01\ - --ignore_errors pandas.Timestamp.tzinfo GL08\ - --ignore_errors pandas.Timestamp.tzname SA01\ - --ignore_errors pandas.Timestamp.unit SA01\ - --ignore_errors pandas.Timestamp.utcfromtimestamp PR01,SA01\ - --ignore_errors pandas.Timestamp.utcnow SA01\ - --ignore_errors pandas.Timestamp.utcoffset SA01\ - --ignore_errors pandas.Timestamp.utctimetuple SA01\ - --ignore_errors pandas.Timestamp.value GL08\ - --ignore_errors pandas.Timestamp.week SA01\ 
- --ignore_errors pandas.Timestamp.weekday SA01\ - --ignore_errors pandas.Timestamp.weekofyear SA01\ - --ignore_errors pandas.Timestamp.year GL08\ - --ignore_errors pandas.UInt16Dtype SA01\ - --ignore_errors pandas.UInt32Dtype SA01\ - --ignore_errors pandas.UInt64Dtype SA01\ - --ignore_errors pandas.UInt8Dtype SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._accumulate RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._formatter SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._from_sequence SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._reduce RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._values_for_factorize SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.astype SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.copy RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.dropna RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.dtype SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.duplicated RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.equals SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.fillna SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.interpolate PR01,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.isna SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.nbytes SA01\ - --ignore_errors 
pandas.api.extensions.ExtensionArray.ndim SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.ravel RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.shape SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.shift SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.take RT03\ - --ignore_errors pandas.api.extensions.ExtensionArray.tolist RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.unique RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.view SA01\ - --ignore_errors pandas.api.extensions.register_extension_dtype SA01\ - --ignore_errors pandas.api.indexers.BaseIndexer PR01,SA01\ - --ignore_errors pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01\ - --ignore_errors pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01\ - --ignore_errors pandas.api.interchange.from_dataframe RT03,SA01\ - --ignore_errors pandas.api.types.infer_dtype PR07,SA01\ - --ignore_errors pandas.api.types.is_any_real_numeric_dtype SA01\ - --ignore_errors pandas.api.types.is_bool PR01,SA01\ - --ignore_errors pandas.api.types.is_bool_dtype SA01\ - --ignore_errors pandas.api.types.is_categorical_dtype SA01\ - --ignore_errors pandas.api.types.is_complex PR01,SA01\ - --ignore_errors pandas.api.types.is_complex_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64_any_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64_ns_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64tz_dtype SA01\ - --ignore_errors pandas.api.types.is_dict_like PR07,SA01\ - --ignore_errors pandas.api.types.is_extension_array_dtype SA01\ - --ignore_errors pandas.api.types.is_file_like PR07,SA01\ - --ignore_errors pandas.api.types.is_float PR01,SA01\ - --ignore_errors pandas.api.types.is_float_dtype SA01\ - --ignore_errors pandas.api.types.is_hashable PR01,RT03,SA01\ - --ignore_errors pandas.api.types.is_int64_dtype SA01\ - --ignore_errors 
pandas.api.types.is_integer PR01,SA01\ - --ignore_errors pandas.api.types.is_integer_dtype SA01\ - --ignore_errors pandas.api.types.is_interval_dtype SA01\ - --ignore_errors pandas.api.types.is_iterator PR07,SA01\ - --ignore_errors pandas.api.types.is_list_like SA01\ - --ignore_errors pandas.api.types.is_named_tuple PR07,SA01\ - --ignore_errors pandas.api.types.is_numeric_dtype SA01\ - --ignore_errors pandas.api.types.is_object_dtype SA01\ - --ignore_errors pandas.api.types.is_period_dtype SA01\ - --ignore_errors pandas.api.types.is_re PR07,SA01\ - --ignore_errors pandas.api.types.is_re_compilable PR07,SA01\ - --ignore_errors pandas.api.types.is_scalar SA01\ - --ignore_errors pandas.api.types.is_signed_integer_dtype SA01\ - --ignore_errors pandas.api.types.is_sparse SA01\ - --ignore_errors pandas.api.types.is_string_dtype SA01\ - --ignore_errors pandas.api.types.is_timedelta64_dtype SA01\ - --ignore_errors pandas.api.types.is_timedelta64_ns_dtype SA01\ - --ignore_errors pandas.api.types.is_unsigned_integer_dtype SA01\ - --ignore_errors pandas.api.types.pandas_dtype PR07,RT03,SA01\ - --ignore_errors pandas.api.types.union_categoricals RT03,SA01\ - --ignore_errors pandas.arrays.ArrowExtensionArray PR07,SA01\ - --ignore_errors pandas.arrays.BooleanArray SA01\ - --ignore_errors pandas.arrays.DatetimeArray SA01\ - --ignore_errors pandas.arrays.FloatingArray SA01\ - --ignore_errors pandas.arrays.IntegerArray SA01\ - --ignore_errors pandas.arrays.IntervalArray.closed SA01\ - --ignore_errors pandas.arrays.IntervalArray.contains RT03\ - --ignore_errors pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01\ - --ignore_errors pandas.arrays.IntervalArray.left SA01\ - --ignore_errors pandas.arrays.IntervalArray.length SA01\ - --ignore_errors pandas.arrays.IntervalArray.mid SA01\ - --ignore_errors pandas.arrays.IntervalArray.right SA01\ - --ignore_errors pandas.arrays.IntervalArray.set_closed RT03,SA01\ - --ignore_errors pandas.arrays.IntervalArray.to_tuples RT03,SA01\ - 
--ignore_errors pandas.arrays.NumpyExtensionArray SA01\ - --ignore_errors pandas.arrays.SparseArray PR07,SA01\ - --ignore_errors pandas.arrays.TimedeltaArray PR07,SA01\ - --ignore_errors pandas.bdate_range RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.agg RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.aggregate RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.apply RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.cummax RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.cummin RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.cumprod RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.cumsum RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.groups SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.hist RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.indices SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.max SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.mean RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.median SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.min SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.nth PR02\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.ohlc SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.prod SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.rank RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.resample RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.sem 
SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.skew RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.sum SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.transform RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.agg RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.aggregate RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.apply RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.cummax RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.cummin RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.cumprod RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.cumsum RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.groups SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.indices SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.max SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.mean RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.median SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.min SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.nth PR02\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.nunique SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.ohlc SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.plot PR02,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.prod SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.rank RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.resample RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.sem SA01\ - --ignore_errors 
pandas.core.groupby.SeriesGroupBy.skew RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.sum SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.transform RT03\ - --ignore_errors pandas.core.resample.Resampler.__iter__ RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.ffill RT03\ - --ignore_errors pandas.core.resample.Resampler.get_group RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.groups SA01\ - --ignore_errors pandas.core.resample.Resampler.indices SA01\ - --ignore_errors pandas.core.resample.Resampler.max PR01,RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.mean SA01\ - --ignore_errors pandas.core.resample.Resampler.median SA01\ - --ignore_errors pandas.core.resample.Resampler.min PR01,RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.nunique SA01\ - --ignore_errors pandas.core.resample.Resampler.ohlc SA01\ - --ignore_errors pandas.core.resample.Resampler.prod SA01\ - --ignore_errors pandas.core.resample.Resampler.quantile PR01,PR07\ - --ignore_errors pandas.core.resample.Resampler.sem SA01\ - --ignore_errors pandas.core.resample.Resampler.std SA01\ - --ignore_errors pandas.core.resample.Resampler.sum SA01\ - --ignore_errors pandas.core.resample.Resampler.transform PR01,RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.var SA01\ - --ignore_errors pandas.core.window.expanding.Expanding.corr PR01\ - --ignore_errors pandas.core.window.expanding.Expanding.count PR01\ - --ignore_errors pandas.core.window.rolling.Rolling.max PR01\ - --ignore_errors pandas.core.window.rolling.Window.std PR01\ - --ignore_errors pandas.core.window.rolling.Window.var PR01\ - --ignore_errors pandas.date_range RT03\ - --ignore_errors pandas.describe_option SA01\ - --ignore_errors pandas.errors.AbstractMethodError PR01,SA01\ - --ignore_errors pandas.errors.AttributeConflictWarning SA01\ - --ignore_errors pandas.errors.CSSWarning SA01\ - --ignore_errors pandas.errors.CategoricalConversionWarning SA01\ - --ignore_errors 
pandas.errors.ChainedAssignmentError SA01\ - --ignore_errors pandas.errors.ClosedFileError SA01\ - --ignore_errors pandas.errors.DataError SA01\ - --ignore_errors pandas.errors.DuplicateLabelError SA01\ - --ignore_errors pandas.errors.EmptyDataError SA01\ - --ignore_errors pandas.errors.IntCastingNaNError SA01\ - --ignore_errors pandas.errors.InvalidIndexError SA01\ - --ignore_errors pandas.errors.InvalidVersion SA01\ - --ignore_errors pandas.errors.MergeError SA01\ - --ignore_errors pandas.errors.NullFrequencyError SA01\ - --ignore_errors pandas.errors.NumExprClobberingError SA01\ - --ignore_errors pandas.errors.NumbaUtilError SA01\ - --ignore_errors pandas.errors.OptionError SA01\ - --ignore_errors pandas.errors.OutOfBoundsDatetime SA01\ - --ignore_errors pandas.errors.OutOfBoundsTimedelta SA01\ - --ignore_errors pandas.errors.PerformanceWarning SA01\ - --ignore_errors pandas.errors.PossibleDataLossError SA01\ - --ignore_errors pandas.errors.PossiblePrecisionLoss SA01\ - --ignore_errors pandas.errors.SpecificationError SA01\ - --ignore_errors pandas.errors.UndefinedVariableError PR01,SA01\ - --ignore_errors pandas.errors.UnsortedIndexError SA01\ - --ignore_errors pandas.errors.UnsupportedFunctionCall SA01\ - --ignore_errors pandas.errors.ValueLabelTypeMismatch SA01\ - --ignore_errors pandas.get_option PR01,SA01\ - --ignore_errors pandas.infer_freq SA01\ - --ignore_errors pandas.interval_range RT03\ - --ignore_errors pandas.io.formats.style.Styler.apply RT03\ - --ignore_errors pandas.io.formats.style.Styler.apply_index RT03\ - --ignore_errors pandas.io.formats.style.Styler.background_gradient RT03\ - --ignore_errors pandas.io.formats.style.Styler.bar RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.clear SA01\ - --ignore_errors pandas.io.formats.style.Styler.concat RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.export RT03\ - --ignore_errors pandas.io.formats.style.Styler.format RT03\ - --ignore_errors 
pandas.io.formats.style.Styler.format_index RT03\ - --ignore_errors pandas.io.formats.style.Styler.from_custom_template SA01\ - --ignore_errors pandas.io.formats.style.Styler.hide RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.highlight_between RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_max RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_min RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_null RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_quantile RT03\ - --ignore_errors pandas.io.formats.style.Styler.map RT03\ - --ignore_errors pandas.io.formats.style.Styler.map_index RT03\ - --ignore_errors pandas.io.formats.style.Styler.relabel_index RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_caption RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_properties RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_sticky RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_table_attributes PR07,RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_table_styles RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_td_classes RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_tooltips RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.text_gradient RT03\ - --ignore_errors pandas.io.formats.style.Styler.to_excel PR01\ - --ignore_errors pandas.io.formats.style.Styler.to_string SA01\ - --ignore_errors pandas.io.formats.style.Styler.use RT03\ - --ignore_errors pandas.io.json.build_table_schema PR07,RT03,SA01\ - --ignore_errors pandas.io.stata.StataReader.data_label SA01\ - --ignore_errors pandas.io.stata.StataReader.value_labels RT03,SA01\ - --ignore_errors pandas.io.stata.StataReader.variable_labels RT03,SA01\ - --ignore_errors pandas.io.stata.StataWriter.write_file SA01\ - --ignore_errors pandas.json_normalize RT03,SA01\ - --ignore_errors 
pandas.merge PR07\ - --ignore_errors pandas.merge_asof PR07,RT03\ - --ignore_errors pandas.merge_ordered PR07\ - --ignore_errors pandas.option_context SA01\ - --ignore_errors pandas.period_range RT03,SA01\ - --ignore_errors pandas.pivot PR07\ - --ignore_errors pandas.pivot_table PR07\ - --ignore_errors pandas.plotting.andrews_curves RT03,SA01\ - --ignore_errors pandas.plotting.autocorrelation_plot RT03,SA01\ - --ignore_errors pandas.plotting.lag_plot RT03,SA01\ - --ignore_errors pandas.plotting.parallel_coordinates PR07,RT03,SA01\ - --ignore_errors pandas.plotting.plot_params SA01\ - --ignore_errors pandas.plotting.radviz RT03\ - --ignore_errors pandas.plotting.scatter_matrix PR07,SA01\ - --ignore_errors pandas.plotting.table PR07,RT03,SA01\ - --ignore_errors pandas.qcut PR07,SA01\ - --ignore_errors pandas.read_feather SA01\ - --ignore_errors pandas.read_orc SA01\ - --ignore_errors pandas.read_sas SA01\ - --ignore_errors pandas.read_spss SA01\ - --ignore_errors pandas.reset_option SA01\ - --ignore_errors pandas.set_eng_float_format RT03,SA01\ - --ignore_errors pandas.set_option SA01\ - --ignore_errors pandas.show_versions SA01\ - --ignore_errors pandas.test SA01\ - --ignore_errors pandas.testing.assert_extension_array_equal SA01\ - --ignore_errors pandas.testing.assert_index_equal PR07,SA01\ - --ignore_errors pandas.testing.assert_series_equal PR07,SA01\ - --ignore_errors pandas.timedelta_range SA01\ - --ignore_errors pandas.tseries.api.guess_datetime_format SA01\ - --ignore_errors pandas.tseries.offsets.BDay PR02,SA01\ - --ignore_errors pandas.tseries.offsets.BMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.BMonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin PR02\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.kwds SA01\ - 
--ignore_errors pandas.tseries.offsets.BQuarterBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd PR02\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin PR02\ - --ignore_errors pandas.tseries.offsets.BYearBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.month GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd PR02\ - --ignore_errors pandas.tseries.offsets.BYearEnd.copy SA01\ - --ignore_errors 
pandas.tseries.offsets.BYearEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BYearEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BYearEnd.month GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.BYearEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay PR02,SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.calendar GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.holidays GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour PR02,SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.calendar GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.end GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.holidays GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.kwds SA01\ - --ignore_errors 
pandas.tseries.offsets.BusinessHour.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.start GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CBMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.CBMonthEnd PR02\ - 
--ignore_errors pandas.tseries.offsets.CDay PR02,SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay PR02,SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.kwds SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.n GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.name SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour PR02,SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.end GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.kwds SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.n GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.name SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.rule_code GL08\ - --ignore_errors 
pandas.tseries.offsets.CustomBusinessHour.start GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.name 
SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset PR02\ - --ignore_errors pandas.tseries.offsets.DateOffset.copy SA01\ - --ignore_errors pandas.tseries.offsets.DateOffset.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.DateOffset.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.kwds SA01\ - --ignore_errors pandas.tseries.offsets.DateOffset.n GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.name SA01\ - --ignore_errors pandas.tseries.offsets.DateOffset.nanos GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.normalize GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Day PR02\ - --ignore_errors pandas.tseries.offsets.Day.copy SA01\ - --ignore_errors pandas.tseries.offsets.Day.delta GL08\ - --ignore_errors pandas.tseries.offsets.Day.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Day.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Day.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Day.n GL08\ - --ignore_errors pandas.tseries.offsets.Day.name SA01\ - --ignore_errors pandas.tseries.offsets.Day.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Day.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Day.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Easter PR02\ - --ignore_errors pandas.tseries.offsets.Easter.copy SA01\ - --ignore_errors pandas.tseries.offsets.Easter.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Easter.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Easter.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Easter.n GL08\ - --ignore_errors 
pandas.tseries.offsets.Easter.name SA01\ - --ignore_errors pandas.tseries.offsets.Easter.nanos GL08\ - --ignore_errors pandas.tseries.offsets.Easter.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Easter.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.FY5253 PR02\ - --ignore_errors pandas.tseries.offsets.FY5253.copy SA01\ - --ignore_errors pandas.tseries.offsets.FY5253.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.get_year_end GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.kwds SA01\ - --ignore_errors pandas.tseries.offsets.FY5253.n GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.name SA01\ - --ignore_errors pandas.tseries.offsets.FY5253.nanos GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.normalize GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.variation GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.weekday GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter PR02\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.copy SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.get_weeks GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.kwds SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.n GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.name SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.nanos GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.normalize GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week 
GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.variation GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.weekday GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08\ - --ignore_errors pandas.tseries.offsets.Hour PR02\ - --ignore_errors pandas.tseries.offsets.Hour.copy SA01\ - --ignore_errors pandas.tseries.offsets.Hour.delta GL08\ - --ignore_errors pandas.tseries.offsets.Hour.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Hour.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Hour.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Hour.n GL08\ - --ignore_errors pandas.tseries.offsets.Hour.name SA01\ - --ignore_errors pandas.tseries.offsets.Hour.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Hour.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Hour.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth PR02,SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.copy SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.kwds SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.n GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.name SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.nanos GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.normalize GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.week GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.weekday GL08\ - --ignore_errors pandas.tseries.offsets.Micro PR02\ - --ignore_errors pandas.tseries.offsets.Micro.copy SA01\ - --ignore_errors 
pandas.tseries.offsets.Micro.delta GL08\ - --ignore_errors pandas.tseries.offsets.Micro.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Micro.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Micro.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Micro.n GL08\ - --ignore_errors pandas.tseries.offsets.Micro.name SA01\ - --ignore_errors pandas.tseries.offsets.Micro.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Micro.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Micro.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Milli PR02\ - --ignore_errors pandas.tseries.offsets.Milli.copy SA01\ - --ignore_errors pandas.tseries.offsets.Milli.delta GL08\ - --ignore_errors pandas.tseries.offsets.Milli.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Milli.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Milli.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Milli.n GL08\ - --ignore_errors pandas.tseries.offsets.Milli.name SA01\ - --ignore_errors pandas.tseries.offsets.Milli.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Milli.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Milli.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Minute PR02\ - --ignore_errors pandas.tseries.offsets.Minute.copy SA01\ - --ignore_errors pandas.tseries.offsets.Minute.delta GL08\ - --ignore_errors pandas.tseries.offsets.Minute.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Minute.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Minute.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Minute.n GL08\ - --ignore_errors pandas.tseries.offsets.Minute.name SA01\ - --ignore_errors pandas.tseries.offsets.Minute.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Minute.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Minute.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.MonthBegin.copy SA01\ - --ignore_errors 
pandas.tseries.offsets.MonthBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.MonthBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.MonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.MonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.MonthEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.MonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.MonthEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.MonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.MonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Nano PR02\ - --ignore_errors pandas.tseries.offsets.Nano.copy SA01\ - --ignore_errors pandas.tseries.offsets.Nano.delta GL08\ - --ignore_errors pandas.tseries.offsets.Nano.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Nano.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Nano.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Nano.n GL08\ - --ignore_errors pandas.tseries.offsets.Nano.name SA01\ - --ignore_errors pandas.tseries.offsets.Nano.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Nano.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Nano.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin PR02\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.freqstr SA01\ - --ignore_errors 
pandas.tseries.offsets.QuarterBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd PR02\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.Second PR02\ - --ignore_errors pandas.tseries.offsets.Second.copy SA01\ - --ignore_errors pandas.tseries.offsets.Second.delta GL08\ - --ignore_errors pandas.tseries.offsets.Second.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Second.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Second.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Second.n GL08\ - --ignore_errors pandas.tseries.offsets.Second.name SA01\ - --ignore_errors pandas.tseries.offsets.Second.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Second.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Second.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin PR02,SA01\ - --ignore_errors 
pandas.tseries.offsets.SemiMonthBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd PR02,SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Tick GL08\ - --ignore_errors pandas.tseries.offsets.Tick.copy SA01\ - --ignore_errors pandas.tseries.offsets.Tick.delta GL08\ - --ignore_errors pandas.tseries.offsets.Tick.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Tick.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Tick.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Tick.n GL08\ - --ignore_errors pandas.tseries.offsets.Tick.name SA01\ - --ignore_errors pandas.tseries.offsets.Tick.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Tick.normalize 
GL08\ - --ignore_errors pandas.tseries.offsets.Tick.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Week PR02\ - --ignore_errors pandas.tseries.offsets.Week.copy SA01\ - --ignore_errors pandas.tseries.offsets.Week.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Week.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Week.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Week.n GL08\ - --ignore_errors pandas.tseries.offsets.Week.name SA01\ - --ignore_errors pandas.tseries.offsets.Week.nanos GL08\ - --ignore_errors pandas.tseries.offsets.Week.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Week.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Week.weekday GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth PR02,SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.copy SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.kwds SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.n GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.name SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.nanos GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.normalize GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.week GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.weekday GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin PR02\ - --ignore_errors pandas.tseries.offsets.YearBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.month GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.name 
SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd PR02\ - --ignore_errors pandas.tseries.offsets.YearEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.month GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.rule_code GL08\ - --ignore_errors pandas.unique PR07\ - --ignore_errors pandas.util.hash_array PR07,SA01\ - --ignore_errors pandas.util.hash_pandas_object PR07,SA01 # There should be no backslash in the final line, please keep this comment in the last ignored function - ) - $BASE_DIR/scripts/validate_docstrings.py ${PARAMETERS[@]} - RET=$(($RET + $?)) ; + MSG='Validate Docstrings' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py \ + --format=actions \ + -i '*' ES01 `# For now it is ok if docstrings are missing the extended summary` \ + -i pandas.Series.dt PR01 `# Accessors are implemented as classes, but we do not document the Parameters section` \ + -i pandas.Categorical.__array__ SA01\ + -i pandas.Categorical.codes SA01\ + -i pandas.Categorical.dtype SA01\ + -i pandas.Categorical.from_codes SA01\ + -i pandas.Categorical.ordered SA01\ + -i pandas.CategoricalDtype.categories SA01\ + -i pandas.CategoricalDtype.ordered SA01\ + -i pandas.CategoricalIndex.codes SA01\ + -i pandas.CategoricalIndex.ordered SA01\ + -i pandas.DataFrame.__dataframe__ SA01\ + -i pandas.DataFrame.__iter__ SA01\ + -i pandas.DataFrame.assign 
SA01\ + -i pandas.DataFrame.at_time PR01\ + -i pandas.DataFrame.axes SA01\ + -i pandas.DataFrame.backfill PR01,SA01\ + -i pandas.DataFrame.bfill SA01\ + -i pandas.DataFrame.columns SA01\ + -i pandas.DataFrame.copy SA01\ + -i pandas.DataFrame.droplevel SA01\ + -i pandas.DataFrame.dtypes SA01\ + -i pandas.DataFrame.ffill SA01\ + -i pandas.DataFrame.first_valid_index SA01\ + -i pandas.DataFrame.get SA01\ + -i pandas.DataFrame.hist RT03\ + -i pandas.DataFrame.infer_objects RT03\ + -i pandas.DataFrame.keys SA01\ + -i pandas.DataFrame.kurt RT03,SA01\ + -i pandas.DataFrame.kurtosis RT03,SA01\ + -i pandas.DataFrame.last_valid_index SA01\ + -i pandas.DataFrame.mask RT03\ + -i pandas.DataFrame.max RT03\ + -i pandas.DataFrame.mean RT03,SA01\ + -i pandas.DataFrame.median RT03,SA01\ + -i pandas.DataFrame.min RT03\ + -i pandas.DataFrame.pad PR01,SA01\ + -i pandas.DataFrame.plot PR02,SA01\ + -i pandas.DataFrame.pop SA01\ + -i pandas.DataFrame.prod RT03\ + -i pandas.DataFrame.product RT03\ + -i pandas.DataFrame.reorder_levels SA01\ + -i pandas.DataFrame.sem PR01,RT03,SA01\ + -i pandas.DataFrame.skew RT03,SA01\ + -i pandas.DataFrame.sparse PR01,SA01\ + -i pandas.DataFrame.sparse.density SA01\ + -i pandas.DataFrame.sparse.from_spmatrix SA01\ + -i pandas.DataFrame.sparse.to_coo SA01\ + -i pandas.DataFrame.sparse.to_dense SA01\ + -i pandas.DataFrame.std PR01,RT03,SA01\ + -i pandas.DataFrame.sum RT03\ + -i pandas.DataFrame.swapaxes PR01,SA01\ + -i pandas.DataFrame.swaplevel SA01\ + -i pandas.DataFrame.to_feather SA01\ + -i pandas.DataFrame.to_markdown SA01\ + -i pandas.DataFrame.to_parquet RT03\ + -i pandas.DataFrame.to_period SA01\ + -i pandas.DataFrame.to_timestamp SA01\ + -i pandas.DataFrame.tz_convert SA01\ + -i pandas.DataFrame.tz_localize SA01\ + -i pandas.DataFrame.unstack RT03\ + -i pandas.DataFrame.value_counts RT03\ + -i pandas.DataFrame.var PR01,RT03,SA01\ + -i pandas.DataFrame.where RT03\ + -i pandas.DatetimeIndex.ceil SA01\ + -i pandas.DatetimeIndex.date SA01\ + -i 
pandas.DatetimeIndex.day SA01\ + -i pandas.DatetimeIndex.day_name SA01\ + -i pandas.DatetimeIndex.day_of_year SA01\ + -i pandas.DatetimeIndex.dayofyear SA01\ + -i pandas.DatetimeIndex.floor SA01\ + -i pandas.DatetimeIndex.freqstr SA01\ + -i pandas.DatetimeIndex.hour SA01\ + -i pandas.DatetimeIndex.indexer_at_time PR01,RT03\ + -i pandas.DatetimeIndex.indexer_between_time RT03\ + -i pandas.DatetimeIndex.inferred_freq SA01\ + -i pandas.DatetimeIndex.is_leap_year SA01\ + -i pandas.DatetimeIndex.microsecond SA01\ + -i pandas.DatetimeIndex.minute SA01\ + -i pandas.DatetimeIndex.month SA01\ + -i pandas.DatetimeIndex.month_name SA01\ + -i pandas.DatetimeIndex.nanosecond SA01\ + -i pandas.DatetimeIndex.quarter SA01\ + -i pandas.DatetimeIndex.round SA01\ + -i pandas.DatetimeIndex.second SA01\ + -i pandas.DatetimeIndex.snap PR01,RT03,SA01\ + -i pandas.DatetimeIndex.std PR01,RT03\ + -i pandas.DatetimeIndex.time SA01\ + -i pandas.DatetimeIndex.timetz SA01\ + -i pandas.DatetimeIndex.to_period RT03\ + -i pandas.DatetimeIndex.to_pydatetime RT03,SA01\ + -i pandas.DatetimeIndex.tz SA01\ + -i pandas.DatetimeIndex.tz_convert RT03\ + -i pandas.DatetimeIndex.year SA01\ + -i pandas.DatetimeTZDtype SA01\ + -i pandas.DatetimeTZDtype.tz SA01\ + -i pandas.DatetimeTZDtype.unit SA01\ + -i pandas.ExcelFile PR01,SA01\ + -i pandas.ExcelFile.parse PR01,SA01\ + -i pandas.ExcelWriter SA01\ + -i pandas.Float32Dtype SA01\ + -i pandas.Float64Dtype SA01\ + -i pandas.Grouper PR02,SA01\ + -i pandas.HDFStore.append PR01,SA01\ + -i pandas.HDFStore.get SA01\ + -i pandas.HDFStore.groups SA01\ + -i pandas.HDFStore.info RT03,SA01\ + -i pandas.HDFStore.keys SA01\ + -i pandas.HDFStore.put PR01,SA01\ + -i pandas.HDFStore.select SA01\ + -i pandas.HDFStore.walk SA01\ + -i pandas.Index PR07\ + -i pandas.Index.T SA01\ + -i pandas.Index.append PR07,RT03,SA01\ + -i pandas.Index.astype SA01\ + -i pandas.Index.copy PR07,SA01\ + -i pandas.Index.difference PR07,RT03,SA01\ + -i pandas.Index.drop PR07,SA01\ + -i 
pandas.Index.drop_duplicates RT03\ + -i pandas.Index.droplevel RT03,SA01\ + -i pandas.Index.dropna RT03,SA01\ + -i pandas.Index.dtype SA01\ + -i pandas.Index.duplicated RT03\ + -i pandas.Index.empty GL08\ + -i pandas.Index.equals SA01\ + -i pandas.Index.fillna RT03\ + -i pandas.Index.get_indexer PR07,SA01\ + -i pandas.Index.get_indexer_for PR01,SA01\ + -i pandas.Index.get_indexer_non_unique PR07,SA01\ + -i pandas.Index.get_loc PR07,RT03,SA01\ + -i pandas.Index.get_slice_bound PR07\ + -i pandas.Index.hasnans SA01\ + -i pandas.Index.identical PR01,SA01\ + -i pandas.Index.inferred_type SA01\ + -i pandas.Index.insert PR07,RT03,SA01\ + -i pandas.Index.intersection PR07,RT03,SA01\ + -i pandas.Index.item SA01\ + -i pandas.Index.join PR07,RT03,SA01\ + -i pandas.Index.map SA01\ + -i pandas.Index.memory_usage RT03\ + -i pandas.Index.name SA01\ + -i pandas.Index.names GL08\ + -i pandas.Index.nbytes SA01\ + -i pandas.Index.ndim SA01\ + -i pandas.Index.nunique RT03\ + -i pandas.Index.putmask PR01,RT03\ + -i pandas.Index.ravel PR01,RT03\ + -i pandas.Index.reindex PR07\ + -i pandas.Index.shape SA01\ + -i pandas.Index.size SA01\ + -i pandas.Index.slice_indexer PR07,RT03,SA01\ + -i pandas.Index.slice_locs RT03\ + -i pandas.Index.str PR01,SA01\ + -i pandas.Index.symmetric_difference PR07,RT03,SA01\ + -i pandas.Index.take PR01,PR07\ + -i pandas.Index.to_list RT03\ + -i pandas.Index.union PR07,RT03,SA01\ + -i pandas.Index.unique RT03\ + -i pandas.Index.value_counts RT03\ + -i pandas.Index.view GL08\ + -i pandas.Int16Dtype SA01\ + -i pandas.Int32Dtype SA01\ + -i pandas.Int64Dtype SA01\ + -i pandas.Int8Dtype SA01\ + -i pandas.Interval PR02\ + -i pandas.Interval.closed SA01\ + -i pandas.Interval.left SA01\ + -i pandas.Interval.mid SA01\ + -i pandas.Interval.right SA01\ + -i pandas.IntervalDtype PR01,SA01\ + -i pandas.IntervalDtype.subtype SA01\ + -i pandas.IntervalIndex.closed SA01\ + -i pandas.IntervalIndex.contains RT03\ + -i pandas.IntervalIndex.get_indexer PR07,SA01\ + -i 
pandas.IntervalIndex.get_loc PR07,RT03,SA01\ + -i pandas.IntervalIndex.is_non_overlapping_monotonic SA01\ + -i pandas.IntervalIndex.left GL08\ + -i pandas.IntervalIndex.length GL08\ + -i pandas.IntervalIndex.mid GL08\ + -i pandas.IntervalIndex.right GL08\ + -i pandas.IntervalIndex.set_closed RT03,SA01\ + -i pandas.IntervalIndex.to_tuples RT03,SA01\ + -i pandas.MultiIndex PR01\ + -i pandas.MultiIndex.append PR07,SA01\ + -i pandas.MultiIndex.copy PR07,RT03,SA01\ + -i pandas.MultiIndex.drop PR07,RT03,SA01\ + -i pandas.MultiIndex.droplevel RT03,SA01\ + -i pandas.MultiIndex.dtypes SA01\ + -i pandas.MultiIndex.get_indexer PR07,SA01\ + -i pandas.MultiIndex.get_level_values SA01\ + -i pandas.MultiIndex.get_loc PR07\ + -i pandas.MultiIndex.get_loc_level PR07\ + -i pandas.MultiIndex.levels SA01\ + -i pandas.MultiIndex.levshape SA01\ + -i pandas.MultiIndex.names SA01\ + -i pandas.MultiIndex.nlevels SA01\ + -i pandas.MultiIndex.remove_unused_levels RT03,SA01\ + -i pandas.MultiIndex.reorder_levels RT03,SA01\ + -i pandas.MultiIndex.set_codes SA01\ + -i pandas.MultiIndex.set_levels RT03,SA01\ + -i pandas.MultiIndex.sortlevel PR07,SA01\ + -i pandas.MultiIndex.to_frame RT03\ + -i pandas.MultiIndex.truncate SA01\ + -i pandas.NA SA01\ + -i pandas.NaT SA01\ + -i pandas.NamedAgg SA01\ + -i pandas.Period SA01\ + -i pandas.Period.asfreq SA01\ + -i pandas.Period.freq GL08\ + -i pandas.Period.freqstr SA01\ + -i pandas.Period.is_leap_year SA01\ + -i pandas.Period.month SA01\ + -i pandas.Period.now SA01\ + -i pandas.Period.ordinal GL08\ + -i pandas.Period.quarter SA01\ + -i pandas.Period.strftime PR01,SA01\ + -i pandas.Period.to_timestamp SA01\ + -i pandas.Period.year SA01\ + -i pandas.PeriodDtype SA01\ + -i pandas.PeriodDtype.freq SA01\ + -i pandas.PeriodIndex.day SA01\ + -i pandas.PeriodIndex.day_of_week SA01\ + -i pandas.PeriodIndex.day_of_year SA01\ + -i pandas.PeriodIndex.dayofweek SA01\ + -i pandas.PeriodIndex.dayofyear SA01\ + -i pandas.PeriodIndex.days_in_month SA01\ + -i 
pandas.PeriodIndex.daysinmonth SA01\ + -i pandas.PeriodIndex.freqstr SA01\ + -i pandas.PeriodIndex.from_fields PR07,SA01\ + -i pandas.PeriodIndex.from_ordinals SA01\ + -i pandas.PeriodIndex.hour SA01\ + -i pandas.PeriodIndex.is_leap_year SA01\ + -i pandas.PeriodIndex.minute SA01\ + -i pandas.PeriodIndex.month SA01\ + -i pandas.PeriodIndex.quarter SA01\ + -i pandas.PeriodIndex.qyear GL08\ + -i pandas.PeriodIndex.second SA01\ + -i pandas.PeriodIndex.to_timestamp RT03,SA01\ + -i pandas.PeriodIndex.week SA01\ + -i pandas.PeriodIndex.weekday SA01\ + -i pandas.PeriodIndex.weekofyear SA01\ + -i pandas.PeriodIndex.year SA01\ + -i pandas.RangeIndex PR07\ + -i pandas.RangeIndex.from_range PR01,SA01\ + -i pandas.RangeIndex.start SA01\ + -i pandas.RangeIndex.step SA01\ + -i pandas.RangeIndex.stop SA01\ + -i pandas.Series SA01\ + -i pandas.Series.T SA01\ + -i pandas.Series.__iter__ RT03,SA01\ + -i pandas.Series.add PR07\ + -i pandas.Series.at_time PR01\ + -i pandas.Series.backfill PR01,SA01\ + -i pandas.Series.bfill SA01\ + -i pandas.Series.case_when RT03\ + -i pandas.Series.cat PR07,SA01\ + -i pandas.Series.cat.add_categories PR01,PR02\ + -i pandas.Series.cat.as_ordered PR01\ + -i pandas.Series.cat.as_unordered PR01\ + -i pandas.Series.cat.codes SA01\ + -i pandas.Series.cat.ordered SA01\ + -i pandas.Series.cat.remove_categories PR01,PR02\ + -i pandas.Series.cat.remove_unused_categories PR01\ + -i pandas.Series.cat.rename_categories PR01,PR02\ + -i pandas.Series.cat.reorder_categories PR01,PR02\ + -i pandas.Series.cat.set_categories PR01,PR02\ + -i pandas.Series.copy SA01\ + -i pandas.Series.div PR07\ + -i pandas.Series.droplevel SA01\ + -i pandas.Series.dt.as_unit PR01,PR02\ + -i pandas.Series.dt.ceil PR01,PR02,SA01\ + -i pandas.Series.dt.components SA01\ + -i pandas.Series.dt.date SA01\ + -i pandas.Series.dt.day SA01\ + -i pandas.Series.dt.day_name PR01,PR02,SA01\ + -i pandas.Series.dt.day_of_year SA01\ + -i pandas.Series.dt.dayofyear SA01\ + -i pandas.Series.dt.days SA01\ + 
-i pandas.Series.dt.days_in_month SA01\ + -i pandas.Series.dt.daysinmonth SA01\ + -i pandas.Series.dt.floor PR01,PR02,SA01\ + -i pandas.Series.dt.freq GL08\ + -i pandas.Series.dt.hour SA01\ + -i pandas.Series.dt.is_leap_year SA01\ + -i pandas.Series.dt.microsecond SA01\ + -i pandas.Series.dt.microseconds SA01\ + -i pandas.Series.dt.minute SA01\ + -i pandas.Series.dt.month SA01\ + -i pandas.Series.dt.month_name PR01,PR02,SA01\ + -i pandas.Series.dt.nanosecond SA01\ + -i pandas.Series.dt.nanoseconds SA01\ + -i pandas.Series.dt.normalize PR01\ + -i pandas.Series.dt.quarter SA01\ + -i pandas.Series.dt.qyear GL08\ + -i pandas.Series.dt.round PR01,PR02,SA01\ + -i pandas.Series.dt.second SA01\ + -i pandas.Series.dt.seconds SA01\ + -i pandas.Series.dt.strftime PR01,PR02\ + -i pandas.Series.dt.time SA01\ + -i pandas.Series.dt.timetz SA01\ + -i pandas.Series.dt.to_period PR01,PR02,RT03\ + -i pandas.Series.dt.total_seconds PR01\ + -i pandas.Series.dt.tz SA01\ + -i pandas.Series.dt.tz_convert PR01,PR02,RT03\ + -i pandas.Series.dt.tz_localize PR01,PR02\ + -i pandas.Series.dt.unit GL08\ + -i pandas.Series.dt.year SA01\ + -i pandas.Series.dtype SA01\ + -i pandas.Series.dtypes SA01\ + -i pandas.Series.empty GL08\ + -i pandas.Series.eq PR07,SA01\ + -i pandas.Series.ffill SA01\ + -i pandas.Series.first_valid_index SA01\ + -i pandas.Series.floordiv PR07\ + -i pandas.Series.ge PR07,SA01\ + -i pandas.Series.get SA01\ + -i pandas.Series.gt PR07,SA01\ + -i pandas.Series.hasnans SA01\ + -i pandas.Series.infer_objects RT03\ + -i pandas.Series.is_monotonic_decreasing SA01\ + -i pandas.Series.is_monotonic_increasing SA01\ + -i pandas.Series.is_unique SA01\ + -i pandas.Series.item SA01\ + -i pandas.Series.keys SA01\ + -i pandas.Series.kurt RT03,SA01\ + -i pandas.Series.kurtosis RT03,SA01\ + -i pandas.Series.last_valid_index SA01\ + -i pandas.Series.le PR07,SA01\ + -i pandas.Series.list.__getitem__ SA01\ + -i pandas.Series.list.flatten SA01\ + -i pandas.Series.list.len SA01\ + -i 
pandas.Series.lt PR07,SA01\ + -i pandas.Series.mask RT03\ + -i pandas.Series.max RT03\ + -i pandas.Series.mean RT03,SA01\ + -i pandas.Series.median RT03,SA01\ + -i pandas.Series.min RT03\ + -i pandas.Series.mod PR07\ + -i pandas.Series.mode SA01\ + -i pandas.Series.mul PR07\ + -i pandas.Series.nbytes SA01\ + -i pandas.Series.ndim SA01\ + -i pandas.Series.ne PR07,SA01\ + -i pandas.Series.nunique RT03\ + -i pandas.Series.pad PR01,SA01\ + -i pandas.Series.plot PR02,SA01\ + -i pandas.Series.pop RT03,SA01\ + -i pandas.Series.pow PR07\ + -i pandas.Series.prod RT03\ + -i pandas.Series.product RT03\ + -i pandas.Series.radd PR07\ + -i pandas.Series.rdiv PR07\ + -i pandas.Series.reorder_levels RT03,SA01\ + -i pandas.Series.rfloordiv PR07\ + -i pandas.Series.rmod PR07\ + -i pandas.Series.rmul PR07\ + -i pandas.Series.rpow PR07\ + -i pandas.Series.rsub PR07\ + -i pandas.Series.rtruediv PR07\ + -i pandas.Series.sem PR01,RT03,SA01\ + -i pandas.Series.shape SA01\ + -i pandas.Series.size SA01\ + -i pandas.Series.skew RT03,SA01\ + -i pandas.Series.sparse PR01,SA01\ + -i pandas.Series.sparse.density SA01\ + -i pandas.Series.sparse.fill_value SA01\ + -i pandas.Series.sparse.from_coo PR07,SA01\ + -i pandas.Series.sparse.npoints SA01\ + -i pandas.Series.sparse.sp_values SA01\ + -i pandas.Series.sparse.to_coo PR07,RT03,SA01\ + -i pandas.Series.std PR01,RT03,SA01\ + -i pandas.Series.str PR01,SA01\ + -i pandas.Series.str.capitalize RT03\ + -i pandas.Series.str.casefold RT03\ + -i pandas.Series.str.center RT03,SA01\ + -i pandas.Series.str.decode PR07,RT03,SA01\ + -i pandas.Series.str.encode PR07,RT03,SA01\ + -i pandas.Series.str.find RT03\ + -i pandas.Series.str.fullmatch RT03\ + -i pandas.Series.str.get RT03,SA01\ + -i pandas.Series.str.index RT03\ + -i pandas.Series.str.ljust RT03,SA01\ + -i pandas.Series.str.lower RT03\ + -i pandas.Series.str.lstrip RT03\ + -i pandas.Series.str.match RT03\ + -i pandas.Series.str.normalize RT03,SA01\ + -i pandas.Series.str.partition RT03\ + -i 
pandas.Series.str.repeat SA01\ + -i pandas.Series.str.replace SA01\ + -i pandas.Series.str.rfind RT03\ + -i pandas.Series.str.rindex RT03\ + -i pandas.Series.str.rjust RT03,SA01\ + -i pandas.Series.str.rpartition RT03\ + -i pandas.Series.str.rstrip RT03\ + -i pandas.Series.str.strip RT03\ + -i pandas.Series.str.swapcase RT03\ + -i pandas.Series.str.title RT03\ + -i pandas.Series.str.translate RT03,SA01\ + -i pandas.Series.str.upper RT03\ + -i pandas.Series.str.wrap RT03,SA01\ + -i pandas.Series.str.zfill RT03\ + -i pandas.Series.struct.dtypes SA01\ + -i pandas.Series.sub PR07\ + -i pandas.Series.sum RT03\ + -i pandas.Series.swaplevel SA01\ + -i pandas.Series.to_dict SA01\ + -i pandas.Series.to_frame SA01\ + -i pandas.Series.to_list RT03\ + -i pandas.Series.to_markdown SA01\ + -i pandas.Series.to_period SA01\ + -i pandas.Series.to_string SA01\ + -i pandas.Series.to_timestamp RT03,SA01\ + -i pandas.Series.truediv PR07\ + -i pandas.Series.tz_convert SA01\ + -i pandas.Series.tz_localize SA01\ + -i pandas.Series.unstack SA01\ + -i pandas.Series.update PR07,SA01\ + -i pandas.Series.value_counts RT03\ + -i pandas.Series.var PR01,RT03,SA01\ + -i pandas.Series.where RT03\ + -i pandas.SparseDtype SA01\ + -i pandas.Timedelta PR07,SA01\ + -i pandas.Timedelta.as_unit SA01\ + -i pandas.Timedelta.asm8 SA01\ + -i pandas.Timedelta.ceil SA01\ + -i pandas.Timedelta.components SA01\ + -i pandas.Timedelta.days SA01\ + -i pandas.Timedelta.floor SA01\ + -i pandas.Timedelta.max PR02,PR07,SA01\ + -i pandas.Timedelta.min PR02,PR07,SA01\ + -i pandas.Timedelta.resolution PR02,PR07,SA01\ + -i pandas.Timedelta.round SA01\ + -i pandas.Timedelta.to_numpy PR01\ + -i pandas.Timedelta.to_timedelta64 SA01\ + -i pandas.Timedelta.total_seconds SA01\ + -i pandas.Timedelta.view SA01\ + -i pandas.TimedeltaIndex PR01\ + -i pandas.TimedeltaIndex.as_unit RT03,SA01\ + -i pandas.TimedeltaIndex.ceil SA01\ + -i pandas.TimedeltaIndex.components SA01\ + -i pandas.TimedeltaIndex.days SA01\ + -i 
pandas.TimedeltaIndex.floor SA01\ + -i pandas.TimedeltaIndex.inferred_freq SA01\ + -i pandas.TimedeltaIndex.microseconds SA01\ + -i pandas.TimedeltaIndex.nanoseconds SA01\ + -i pandas.TimedeltaIndex.round SA01\ + -i pandas.TimedeltaIndex.seconds SA01\ + -i pandas.TimedeltaIndex.to_pytimedelta RT03,SA01\ + -i pandas.Timestamp PR07,SA01\ + -i pandas.Timestamp.as_unit SA01\ + -i pandas.Timestamp.asm8 SA01\ + -i pandas.Timestamp.astimezone SA01\ + -i pandas.Timestamp.ceil SA01\ + -i pandas.Timestamp.combine PR01,SA01\ + -i pandas.Timestamp.ctime SA01\ + -i pandas.Timestamp.date SA01\ + -i pandas.Timestamp.day GL08\ + -i pandas.Timestamp.day_name SA01\ + -i pandas.Timestamp.day_of_week SA01\ + -i pandas.Timestamp.day_of_year SA01\ + -i pandas.Timestamp.dayofweek SA01\ + -i pandas.Timestamp.dayofyear SA01\ + -i pandas.Timestamp.days_in_month SA01\ + -i pandas.Timestamp.daysinmonth SA01\ + -i pandas.Timestamp.dst SA01\ + -i pandas.Timestamp.floor SA01\ + -i pandas.Timestamp.fold GL08\ + -i pandas.Timestamp.fromordinal SA01\ + -i pandas.Timestamp.fromtimestamp PR01,SA01\ + -i pandas.Timestamp.hour GL08\ + -i pandas.Timestamp.is_leap_year SA01\ + -i pandas.Timestamp.isocalendar SA01\ + -i pandas.Timestamp.isoformat SA01\ + -i pandas.Timestamp.isoweekday SA01\ + -i pandas.Timestamp.max PR02,PR07,SA01\ + -i pandas.Timestamp.microsecond GL08\ + -i pandas.Timestamp.min PR02,PR07,SA01\ + -i pandas.Timestamp.minute GL08\ + -i pandas.Timestamp.month GL08\ + -i pandas.Timestamp.month_name SA01\ + -i pandas.Timestamp.nanosecond GL08\ + -i pandas.Timestamp.normalize SA01\ + -i pandas.Timestamp.now SA01\ + -i pandas.Timestamp.quarter SA01\ + -i pandas.Timestamp.replace PR07,SA01\ + -i pandas.Timestamp.resolution PR02,PR07,SA01\ + -i pandas.Timestamp.round SA01\ + -i pandas.Timestamp.second GL08\ + -i pandas.Timestamp.strftime SA01\ + -i pandas.Timestamp.strptime PR01,SA01\ + -i pandas.Timestamp.time SA01\ + -i pandas.Timestamp.timestamp SA01\ + -i pandas.Timestamp.timetuple SA01\ + -i 
pandas.Timestamp.timetz SA01\ + -i pandas.Timestamp.to_datetime64 SA01\ + -i pandas.Timestamp.to_julian_date SA01\ + -i pandas.Timestamp.to_numpy PR01\ + -i pandas.Timestamp.to_period PR01,SA01\ + -i pandas.Timestamp.to_pydatetime PR01,SA01\ + -i pandas.Timestamp.today SA01\ + -i pandas.Timestamp.toordinal SA01\ + -i pandas.Timestamp.tz SA01\ + -i pandas.Timestamp.tz_convert SA01\ + -i pandas.Timestamp.tz_localize SA01\ + -i pandas.Timestamp.tzinfo GL08\ + -i pandas.Timestamp.tzname SA01\ + -i pandas.Timestamp.unit SA01\ + -i pandas.Timestamp.utcfromtimestamp PR01,SA01\ + -i pandas.Timestamp.utcnow SA01\ + -i pandas.Timestamp.utcoffset SA01\ + -i pandas.Timestamp.utctimetuple SA01\ + -i pandas.Timestamp.value GL08\ + -i pandas.Timestamp.week SA01\ + -i pandas.Timestamp.weekday SA01\ + -i pandas.Timestamp.weekofyear SA01\ + -i pandas.Timestamp.year GL08\ + -i pandas.UInt16Dtype SA01\ + -i pandas.UInt32Dtype SA01\ + -i pandas.UInt64Dtype SA01\ + -i pandas.UInt8Dtype SA01\ + -i pandas.api.extensions.ExtensionArray SA01\ + -i pandas.api.extensions.ExtensionArray._accumulate RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01\ + -i pandas.api.extensions.ExtensionArray._formatter SA01\ + -i pandas.api.extensions.ExtensionArray._from_sequence SA01\ + -i pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01\ + -i pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._reduce RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._values_for_factorize SA01\ + -i pandas.api.extensions.ExtensionArray.astype SA01\ + -i pandas.api.extensions.ExtensionArray.copy RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.dropna RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.dtype SA01\ + -i pandas.api.extensions.ExtensionArray.duplicated RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.equals SA01\ + -i 
pandas.api.extensions.ExtensionArray.fillna SA01\ + -i pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.interpolate PR01,SA01\ + -i pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.isna SA01\ + -i pandas.api.extensions.ExtensionArray.nbytes SA01\ + -i pandas.api.extensions.ExtensionArray.ndim SA01\ + -i pandas.api.extensions.ExtensionArray.ravel RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.shape SA01\ + -i pandas.api.extensions.ExtensionArray.shift SA01\ + -i pandas.api.extensions.ExtensionArray.take RT03\ + -i pandas.api.extensions.ExtensionArray.tolist RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.unique RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.view SA01\ + -i pandas.api.extensions.register_extension_dtype SA01\ + -i pandas.api.indexers.BaseIndexer PR01,SA01\ + -i pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01\ + -i pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01\ + -i pandas.api.interchange.from_dataframe RT03,SA01\ + -i pandas.api.types.infer_dtype PR07,SA01\ + -i pandas.api.types.is_any_real_numeric_dtype SA01\ + -i pandas.api.types.is_bool PR01,SA01\ + -i pandas.api.types.is_bool_dtype SA01\ + -i pandas.api.types.is_categorical_dtype SA01\ + -i pandas.api.types.is_complex PR01,SA01\ + -i pandas.api.types.is_complex_dtype SA01\ + -i pandas.api.types.is_datetime64_any_dtype SA01\ + -i pandas.api.types.is_datetime64_dtype SA01\ + -i pandas.api.types.is_datetime64_ns_dtype SA01\ + -i pandas.api.types.is_datetime64tz_dtype SA01\ + -i pandas.api.types.is_dict_like PR07,SA01\ + -i pandas.api.types.is_extension_array_dtype SA01\ + -i pandas.api.types.is_file_like PR07,SA01\ + -i pandas.api.types.is_float PR01,SA01\ + -i pandas.api.types.is_float_dtype SA01\ + -i pandas.api.types.is_hashable PR01,RT03,SA01\ + -i pandas.api.types.is_int64_dtype SA01\ + -i pandas.api.types.is_integer PR01,SA01\ + -i 
pandas.api.types.is_integer_dtype SA01\ + -i pandas.api.types.is_interval_dtype SA01\ + -i pandas.api.types.is_iterator PR07,SA01\ + -i pandas.api.types.is_list_like SA01\ + -i pandas.api.types.is_named_tuple PR07,SA01\ + -i pandas.api.types.is_numeric_dtype SA01\ + -i pandas.api.types.is_object_dtype SA01\ + -i pandas.api.types.is_period_dtype SA01\ + -i pandas.api.types.is_re PR07,SA01\ + -i pandas.api.types.is_re_compilable PR07,SA01\ + -i pandas.api.types.is_scalar SA01\ + -i pandas.api.types.is_signed_integer_dtype SA01\ + -i pandas.api.types.is_sparse SA01\ + -i pandas.api.types.is_string_dtype SA01\ + -i pandas.api.types.is_timedelta64_dtype SA01\ + -i pandas.api.types.is_timedelta64_ns_dtype SA01\ + -i pandas.api.types.is_unsigned_integer_dtype SA01\ + -i pandas.api.types.pandas_dtype PR07,RT03,SA01\ + -i pandas.api.types.union_categoricals RT03,SA01\ + -i pandas.arrays.ArrowExtensionArray PR07,SA01\ + -i pandas.arrays.BooleanArray SA01\ + -i pandas.arrays.DatetimeArray SA01\ + -i pandas.arrays.FloatingArray SA01\ + -i pandas.arrays.IntegerArray SA01\ + -i pandas.arrays.IntervalArray.closed SA01\ + -i pandas.arrays.IntervalArray.contains RT03\ + -i pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01\ + -i pandas.arrays.IntervalArray.left SA01\ + -i pandas.arrays.IntervalArray.length SA01\ + -i pandas.arrays.IntervalArray.mid SA01\ + -i pandas.arrays.IntervalArray.right SA01\ + -i pandas.arrays.IntervalArray.set_closed RT03,SA01\ + -i pandas.arrays.IntervalArray.to_tuples RT03,SA01\ + -i pandas.arrays.NumpyExtensionArray SA01\ + -i pandas.arrays.SparseArray PR07,SA01\ + -i pandas.arrays.TimedeltaArray PR07,SA01\ + -i pandas.bdate_range RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.agg RT03\ + -i pandas.core.groupby.DataFrameGroupBy.aggregate RT03\ + -i pandas.core.groupby.DataFrameGroupBy.apply RT03\ + -i pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01\ + -i 
pandas.core.groupby.DataFrameGroupBy.cummax RT03\ + -i pandas.core.groupby.DataFrameGroupBy.cummin RT03\ + -i pandas.core.groupby.DataFrameGroupBy.cumprod RT03\ + -i pandas.core.groupby.DataFrameGroupBy.cumsum RT03\ + -i pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.groups SA01\ + -i pandas.core.groupby.DataFrameGroupBy.hist RT03\ + -i pandas.core.groupby.DataFrameGroupBy.indices SA01\ + -i pandas.core.groupby.DataFrameGroupBy.max SA01\ + -i pandas.core.groupby.DataFrameGroupBy.mean RT03\ + -i pandas.core.groupby.DataFrameGroupBy.median SA01\ + -i pandas.core.groupby.DataFrameGroupBy.min SA01\ + -i pandas.core.groupby.DataFrameGroupBy.nth PR02\ + -i pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.ohlc SA01\ + -i pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.prod SA01\ + -i pandas.core.groupby.DataFrameGroupBy.rank RT03\ + -i pandas.core.groupby.DataFrameGroupBy.resample RT03\ + -i pandas.core.groupby.DataFrameGroupBy.sem SA01\ + -i pandas.core.groupby.DataFrameGroupBy.skew RT03\ + -i pandas.core.groupby.DataFrameGroupBy.sum SA01\ + -i pandas.core.groupby.DataFrameGroupBy.transform RT03\ + -i pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01\ + -i pandas.core.groupby.SeriesGroupBy.agg RT03\ + -i pandas.core.groupby.SeriesGroupBy.aggregate RT03\ + -i pandas.core.groupby.SeriesGroupBy.apply RT03\ + -i pandas.core.groupby.SeriesGroupBy.cummax RT03\ + -i pandas.core.groupby.SeriesGroupBy.cummin RT03\ + -i pandas.core.groupby.SeriesGroupBy.cumprod RT03\ + -i pandas.core.groupby.SeriesGroupBy.cumsum RT03\ + -i pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01\ + -i pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01\ + -i pandas.core.groupby.SeriesGroupBy.groups SA01\ + -i pandas.core.groupby.SeriesGroupBy.indices SA01\ + -i 
pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01\ + -i pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01\ + -i pandas.core.groupby.SeriesGroupBy.max SA01\ + -i pandas.core.groupby.SeriesGroupBy.mean RT03\ + -i pandas.core.groupby.SeriesGroupBy.median SA01\ + -i pandas.core.groupby.SeriesGroupBy.min SA01\ + -i pandas.core.groupby.SeriesGroupBy.nth PR02\ + -i pandas.core.groupby.SeriesGroupBy.ohlc SA01\ + -i pandas.core.groupby.SeriesGroupBy.plot PR02,SA01\ + -i pandas.core.groupby.SeriesGroupBy.prod SA01\ + -i pandas.core.groupby.SeriesGroupBy.rank RT03\ + -i pandas.core.groupby.SeriesGroupBy.resample RT03\ + -i pandas.core.groupby.SeriesGroupBy.sem SA01\ + -i pandas.core.groupby.SeriesGroupBy.skew RT03\ + -i pandas.core.groupby.SeriesGroupBy.sum SA01\ + -i pandas.core.groupby.SeriesGroupBy.transform RT03\ + -i pandas.core.resample.Resampler.__iter__ RT03,SA01\ + -i pandas.core.resample.Resampler.ffill RT03\ + -i pandas.core.resample.Resampler.get_group RT03,SA01\ + -i pandas.core.resample.Resampler.groups SA01\ + -i pandas.core.resample.Resampler.indices SA01\ + -i pandas.core.resample.Resampler.max PR01,RT03,SA01\ + -i pandas.core.resample.Resampler.mean SA01\ + -i pandas.core.resample.Resampler.median SA01\ + -i pandas.core.resample.Resampler.min PR01,RT03,SA01\ + -i pandas.core.resample.Resampler.ohlc SA01\ + -i pandas.core.resample.Resampler.prod SA01\ + -i pandas.core.resample.Resampler.quantile PR01,PR07\ + -i pandas.core.resample.Resampler.sem SA01\ + -i pandas.core.resample.Resampler.std SA01\ + -i pandas.core.resample.Resampler.sum SA01\ + -i pandas.core.resample.Resampler.transform PR01,RT03,SA01\ + -i pandas.core.resample.Resampler.var SA01\ + -i pandas.core.window.expanding.Expanding.corr PR01\ + -i pandas.core.window.expanding.Expanding.count PR01\ + -i pandas.core.window.rolling.Rolling.max PR01\ + -i pandas.core.window.rolling.Window.std PR01\ + -i pandas.core.window.rolling.Window.var PR01\ + -i pandas.date_range RT03\ + 
-i pandas.describe_option SA01\ + -i pandas.errors.AbstractMethodError PR01,SA01\ + -i pandas.errors.AttributeConflictWarning SA01\ + -i pandas.errors.CSSWarning SA01\ + -i pandas.errors.CategoricalConversionWarning SA01\ + -i pandas.errors.ChainedAssignmentError SA01\ + -i pandas.errors.ClosedFileError SA01\ + -i pandas.errors.DataError SA01\ + -i pandas.errors.DuplicateLabelError SA01\ + -i pandas.errors.EmptyDataError SA01\ + -i pandas.errors.IntCastingNaNError SA01\ + -i pandas.errors.InvalidIndexError SA01\ + -i pandas.errors.InvalidVersion SA01\ + -i pandas.errors.MergeError SA01\ + -i pandas.errors.NullFrequencyError SA01\ + -i pandas.errors.NumExprClobberingError SA01\ + -i pandas.errors.NumbaUtilError SA01\ + -i pandas.errors.OptionError SA01\ + -i pandas.errors.OutOfBoundsDatetime SA01\ + -i pandas.errors.OutOfBoundsTimedelta SA01\ + -i pandas.errors.PerformanceWarning SA01\ + -i pandas.errors.PossibleDataLossError SA01\ + -i pandas.errors.PossiblePrecisionLoss SA01\ + -i pandas.errors.SpecificationError SA01\ + -i pandas.errors.UndefinedVariableError PR01,SA01\ + -i pandas.errors.UnsortedIndexError SA01\ + -i pandas.errors.UnsupportedFunctionCall SA01\ + -i pandas.errors.ValueLabelTypeMismatch SA01\ + -i pandas.get_option SA01\ + -i pandas.infer_freq SA01\ + -i pandas.interval_range RT03\ + -i pandas.io.formats.style.Styler.apply RT03\ + -i pandas.io.formats.style.Styler.apply_index RT03\ + -i pandas.io.formats.style.Styler.background_gradient RT03\ + -i pandas.io.formats.style.Styler.bar RT03,SA01\ + -i pandas.io.formats.style.Styler.clear SA01\ + -i pandas.io.formats.style.Styler.concat RT03,SA01\ + -i pandas.io.formats.style.Styler.export RT03\ + -i pandas.io.formats.style.Styler.format RT03\ + -i pandas.io.formats.style.Styler.format_index RT03\ + -i pandas.io.formats.style.Styler.from_custom_template SA01\ + -i pandas.io.formats.style.Styler.hide RT03,SA01\ + -i pandas.io.formats.style.Styler.highlight_between RT03\ + -i 
pandas.io.formats.style.Styler.highlight_max RT03\ + -i pandas.io.formats.style.Styler.highlight_min RT03\ + -i pandas.io.formats.style.Styler.highlight_null RT03\ + -i pandas.io.formats.style.Styler.highlight_quantile RT03\ + -i pandas.io.formats.style.Styler.map RT03\ + -i pandas.io.formats.style.Styler.map_index RT03\ + -i pandas.io.formats.style.Styler.relabel_index RT03\ + -i pandas.io.formats.style.Styler.set_caption RT03,SA01\ + -i pandas.io.formats.style.Styler.set_properties RT03,SA01\ + -i pandas.io.formats.style.Styler.set_sticky RT03,SA01\ + -i pandas.io.formats.style.Styler.set_table_attributes PR07,RT03\ + -i pandas.io.formats.style.Styler.set_table_styles RT03\ + -i pandas.io.formats.style.Styler.set_td_classes RT03\ + -i pandas.io.formats.style.Styler.set_tooltips RT03,SA01\ + -i pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01\ + -i pandas.io.formats.style.Styler.text_gradient RT03\ + -i pandas.io.formats.style.Styler.to_excel PR01\ + -i pandas.io.formats.style.Styler.to_string SA01\ + -i pandas.io.formats.style.Styler.use RT03\ + -i pandas.io.json.build_table_schema PR07,RT03,SA01\ + -i pandas.io.stata.StataReader.data_label SA01\ + -i pandas.io.stata.StataReader.value_labels RT03,SA01\ + -i pandas.io.stata.StataReader.variable_labels RT03,SA01\ + -i pandas.io.stata.StataWriter.write_file SA01\ + -i pandas.json_normalize RT03,SA01\ + -i pandas.merge PR07\ + -i pandas.merge_asof PR07,RT03\ + -i pandas.merge_ordered PR07\ + -i pandas.option_context SA01\ + -i pandas.period_range RT03,SA01\ + -i pandas.pivot PR07\ + -i pandas.pivot_table PR07\ + -i pandas.plotting.andrews_curves RT03,SA01\ + -i pandas.plotting.autocorrelation_plot RT03,SA01\ + -i pandas.plotting.lag_plot RT03,SA01\ + -i pandas.plotting.parallel_coordinates PR07,RT03,SA01\ + -i pandas.plotting.plot_params SA01\ + -i pandas.plotting.scatter_matrix PR07,SA01\ + -i pandas.plotting.table PR07,RT03,SA01\ + -i pandas.qcut PR07,SA01\ + -i pandas.read_feather SA01\ + -i pandas.read_orc 
SA01\ + -i pandas.read_sas SA01\ + -i pandas.read_spss SA01\ + -i pandas.reset_option SA01\ + -i pandas.set_eng_float_format RT03,SA01\ + -i pandas.set_option SA01\ + -i pandas.show_versions SA01\ + -i pandas.test SA01\ + -i pandas.testing.assert_extension_array_equal SA01\ + -i pandas.testing.assert_index_equal PR07,SA01\ + -i pandas.testing.assert_series_equal PR07,SA01\ + -i pandas.timedelta_range SA01\ + -i pandas.tseries.api.guess_datetime_format SA01\ + -i pandas.tseries.offsets.BDay PR02,SA01\ + -i pandas.tseries.offsets.BMonthBegin PR02\ + -i pandas.tseries.offsets.BMonthEnd PR02\ + -i pandas.tseries.offsets.BQuarterBegin PR02\ + -i pandas.tseries.offsets.BQuarterBegin.copy SA01\ + -i pandas.tseries.offsets.BQuarterBegin.freqstr SA01\ + -i pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.BQuarterBegin.kwds SA01\ + -i pandas.tseries.offsets.BQuarterBegin.n GL08\ + -i pandas.tseries.offsets.BQuarterBegin.name SA01\ + -i pandas.tseries.offsets.BQuarterBegin.nanos GL08\ + -i pandas.tseries.offsets.BQuarterBegin.normalize GL08\ + -i pandas.tseries.offsets.BQuarterBegin.rule_code GL08\ + -i pandas.tseries.offsets.BQuarterBegin.startingMonth GL08\ + -i pandas.tseries.offsets.BQuarterEnd PR02\ + -i pandas.tseries.offsets.BQuarterEnd.copy SA01\ + -i pandas.tseries.offsets.BQuarterEnd.freqstr SA01\ + -i pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.BQuarterEnd.kwds SA01\ + -i pandas.tseries.offsets.BQuarterEnd.n GL08\ + -i pandas.tseries.offsets.BQuarterEnd.name SA01\ + -i pandas.tseries.offsets.BQuarterEnd.nanos GL08\ + -i pandas.tseries.offsets.BQuarterEnd.normalize GL08\ + -i pandas.tseries.offsets.BQuarterEnd.rule_code GL08\ + -i pandas.tseries.offsets.BQuarterEnd.startingMonth GL08\ + -i pandas.tseries.offsets.BYearBegin PR02\ + -i pandas.tseries.offsets.BYearBegin.copy SA01\ + -i pandas.tseries.offsets.BYearBegin.freqstr SA01\ + -i pandas.tseries.offsets.BYearBegin.is_on_offset GL08\ + -i 
pandas.tseries.offsets.BYearBegin.kwds SA01\ + -i pandas.tseries.offsets.BYearBegin.month GL08\ + -i pandas.tseries.offsets.BYearBegin.n GL08\ + -i pandas.tseries.offsets.BYearBegin.name SA01\ + -i pandas.tseries.offsets.BYearBegin.nanos GL08\ + -i pandas.tseries.offsets.BYearBegin.normalize GL08\ + -i pandas.tseries.offsets.BYearBegin.rule_code GL08\ + -i pandas.tseries.offsets.BYearEnd PR02\ + -i pandas.tseries.offsets.BYearEnd.copy SA01\ + -i pandas.tseries.offsets.BYearEnd.freqstr SA01\ + -i pandas.tseries.offsets.BYearEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.BYearEnd.kwds SA01\ + -i pandas.tseries.offsets.BYearEnd.month GL08\ + -i pandas.tseries.offsets.BYearEnd.n GL08\ + -i pandas.tseries.offsets.BYearEnd.name SA01\ + -i pandas.tseries.offsets.BYearEnd.nanos GL08\ + -i pandas.tseries.offsets.BYearEnd.normalize GL08\ + -i pandas.tseries.offsets.BYearEnd.rule_code GL08\ + -i pandas.tseries.offsets.BusinessDay PR02,SA01\ + -i pandas.tseries.offsets.BusinessDay.calendar GL08\ + -i pandas.tseries.offsets.BusinessDay.copy SA01\ + -i pandas.tseries.offsets.BusinessDay.freqstr SA01\ + -i pandas.tseries.offsets.BusinessDay.holidays GL08\ + -i pandas.tseries.offsets.BusinessDay.is_on_offset GL08\ + -i pandas.tseries.offsets.BusinessDay.kwds SA01\ + -i pandas.tseries.offsets.BusinessDay.n GL08\ + -i pandas.tseries.offsets.BusinessDay.name SA01\ + -i pandas.tseries.offsets.BusinessDay.nanos GL08\ + -i pandas.tseries.offsets.BusinessDay.normalize GL08\ + -i pandas.tseries.offsets.BusinessDay.rule_code GL08\ + -i pandas.tseries.offsets.BusinessDay.weekmask GL08\ + -i pandas.tseries.offsets.BusinessHour PR02,SA01\ + -i pandas.tseries.offsets.BusinessHour.calendar GL08\ + -i pandas.tseries.offsets.BusinessHour.copy SA01\ + -i pandas.tseries.offsets.BusinessHour.end GL08\ + -i pandas.tseries.offsets.BusinessHour.freqstr SA01\ + -i pandas.tseries.offsets.BusinessHour.holidays GL08\ + -i pandas.tseries.offsets.BusinessHour.is_on_offset GL08\ + -i 
pandas.tseries.offsets.BusinessHour.kwds SA01\ + -i pandas.tseries.offsets.BusinessHour.n GL08\ + -i pandas.tseries.offsets.BusinessHour.name SA01\ + -i pandas.tseries.offsets.BusinessHour.nanos GL08\ + -i pandas.tseries.offsets.BusinessHour.normalize GL08\ + -i pandas.tseries.offsets.BusinessHour.rule_code GL08\ + -i pandas.tseries.offsets.BusinessHour.start GL08\ + -i pandas.tseries.offsets.BusinessHour.weekmask GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin PR02\ + -i pandas.tseries.offsets.BusinessMonthBegin.copy SA01\ + -i pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01\ + -i pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.kwds SA01\ + -i pandas.tseries.offsets.BusinessMonthBegin.n GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.name SA01\ + -i pandas.tseries.offsets.BusinessMonthBegin.nanos GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.normalize GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd PR02\ + -i pandas.tseries.offsets.BusinessMonthEnd.copy SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.kwds SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.n GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.name SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.nanos GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.normalize GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.CBMonthBegin PR02\ + -i pandas.tseries.offsets.CBMonthEnd PR02\ + -i pandas.tseries.offsets.CDay PR02,SA01\ + -i pandas.tseries.offsets.CustomBusinessDay PR02,SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.copy SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.freqstr SA01\ + -i 
pandas.tseries.offsets.CustomBusinessDay.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.n GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.name SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.weekmask GL08\ + -i pandas.tseries.offsets.CustomBusinessHour PR02,SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.copy SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.end GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.freqstr SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.n GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.name SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.start GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.weekmask GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin PR02\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08\ + -i 
pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd PR02\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.name SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask GL08\ + -i pandas.tseries.offsets.DateOffset PR02\ + -i pandas.tseries.offsets.DateOffset.copy SA01\ + -i pandas.tseries.offsets.DateOffset.freqstr SA01\ + -i pandas.tseries.offsets.DateOffset.is_on_offset GL08\ + -i pandas.tseries.offsets.DateOffset.kwds SA01\ + -i pandas.tseries.offsets.DateOffset.n GL08\ + -i pandas.tseries.offsets.DateOffset.name SA01\ + -i pandas.tseries.offsets.DateOffset.nanos GL08\ + -i pandas.tseries.offsets.DateOffset.normalize GL08\ + -i pandas.tseries.offsets.DateOffset.rule_code GL08\ + -i pandas.tseries.offsets.Day PR02\ + -i pandas.tseries.offsets.Day.copy SA01\ + -i pandas.tseries.offsets.Day.delta GL08\ + -i pandas.tseries.offsets.Day.freqstr SA01\ + -i 
pandas.tseries.offsets.Day.is_on_offset GL08\ + -i pandas.tseries.offsets.Day.kwds SA01\ + -i pandas.tseries.offsets.Day.n GL08\ + -i pandas.tseries.offsets.Day.name SA01\ + -i pandas.tseries.offsets.Day.nanos SA01\ + -i pandas.tseries.offsets.Day.normalize GL08\ + -i pandas.tseries.offsets.Day.rule_code GL08\ + -i pandas.tseries.offsets.Easter PR02\ + -i pandas.tseries.offsets.Easter.copy SA01\ + -i pandas.tseries.offsets.Easter.freqstr SA01\ + -i pandas.tseries.offsets.Easter.is_on_offset GL08\ + -i pandas.tseries.offsets.Easter.kwds SA01\ + -i pandas.tseries.offsets.Easter.n GL08\ + -i pandas.tseries.offsets.Easter.name SA01\ + -i pandas.tseries.offsets.Easter.nanos GL08\ + -i pandas.tseries.offsets.Easter.normalize GL08\ + -i pandas.tseries.offsets.Easter.rule_code GL08\ + -i pandas.tseries.offsets.FY5253 PR02\ + -i pandas.tseries.offsets.FY5253.copy SA01\ + -i pandas.tseries.offsets.FY5253.freqstr SA01\ + -i pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08\ + -i pandas.tseries.offsets.FY5253.get_year_end GL08\ + -i pandas.tseries.offsets.FY5253.is_on_offset GL08\ + -i pandas.tseries.offsets.FY5253.kwds SA01\ + -i pandas.tseries.offsets.FY5253.n GL08\ + -i pandas.tseries.offsets.FY5253.name SA01\ + -i pandas.tseries.offsets.FY5253.nanos GL08\ + -i pandas.tseries.offsets.FY5253.normalize GL08\ + -i pandas.tseries.offsets.FY5253.rule_code GL08\ + -i pandas.tseries.offsets.FY5253.startingMonth GL08\ + -i pandas.tseries.offsets.FY5253.variation GL08\ + -i pandas.tseries.offsets.FY5253.weekday GL08\ + -i pandas.tseries.offsets.FY5253Quarter PR02\ + -i pandas.tseries.offsets.FY5253Quarter.copy SA01\ + -i pandas.tseries.offsets.FY5253Quarter.freqstr SA01\ + -i pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08\ + -i pandas.tseries.offsets.FY5253Quarter.get_weeks GL08\ + -i pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08\ + -i pandas.tseries.offsets.FY5253Quarter.kwds SA01\ + -i pandas.tseries.offsets.FY5253Quarter.n GL08\ + -i 
pandas.tseries.offsets.FY5253Quarter.name SA01\ + -i pandas.tseries.offsets.FY5253Quarter.nanos GL08\ + -i pandas.tseries.offsets.FY5253Quarter.normalize GL08\ + -i pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week GL08\ + -i pandas.tseries.offsets.FY5253Quarter.rule_code GL08\ + -i pandas.tseries.offsets.FY5253Quarter.startingMonth GL08\ + -i pandas.tseries.offsets.FY5253Quarter.variation GL08\ + -i pandas.tseries.offsets.FY5253Quarter.weekday GL08\ + -i pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08\ + -i pandas.tseries.offsets.Hour PR02\ + -i pandas.tseries.offsets.Hour.copy SA01\ + -i pandas.tseries.offsets.Hour.delta GL08\ + -i pandas.tseries.offsets.Hour.freqstr SA01\ + -i pandas.tseries.offsets.Hour.is_on_offset GL08\ + -i pandas.tseries.offsets.Hour.kwds SA01\ + -i pandas.tseries.offsets.Hour.n GL08\ + -i pandas.tseries.offsets.Hour.name SA01\ + -i pandas.tseries.offsets.Hour.nanos SA01\ + -i pandas.tseries.offsets.Hour.normalize GL08\ + -i pandas.tseries.offsets.Hour.rule_code GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth PR02,SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.copy SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.kwds SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.n GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.name SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.nanos GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.normalize GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.week GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.weekday GL08\ + -i pandas.tseries.offsets.Micro PR02\ + -i pandas.tseries.offsets.Micro.copy SA01\ + -i pandas.tseries.offsets.Micro.delta GL08\ + -i pandas.tseries.offsets.Micro.freqstr SA01\ + -i pandas.tseries.offsets.Micro.is_on_offset GL08\ + -i pandas.tseries.offsets.Micro.kwds SA01\ + -i 
pandas.tseries.offsets.Micro.n GL08\ + -i pandas.tseries.offsets.Micro.name SA01\ + -i pandas.tseries.offsets.Micro.nanos SA01\ + -i pandas.tseries.offsets.Micro.normalize GL08\ + -i pandas.tseries.offsets.Micro.rule_code GL08\ + -i pandas.tseries.offsets.Milli PR02\ + -i pandas.tseries.offsets.Milli.copy SA01\ + -i pandas.tseries.offsets.Milli.delta GL08\ + -i pandas.tseries.offsets.Milli.freqstr SA01\ + -i pandas.tseries.offsets.Milli.is_on_offset GL08\ + -i pandas.tseries.offsets.Milli.kwds SA01\ + -i pandas.tseries.offsets.Milli.n GL08\ + -i pandas.tseries.offsets.Milli.name SA01\ + -i pandas.tseries.offsets.Milli.nanos SA01\ + -i pandas.tseries.offsets.Milli.normalize GL08\ + -i pandas.tseries.offsets.Milli.rule_code GL08\ + -i pandas.tseries.offsets.Minute PR02\ + -i pandas.tseries.offsets.Minute.copy SA01\ + -i pandas.tseries.offsets.Minute.delta GL08\ + -i pandas.tseries.offsets.Minute.freqstr SA01\ + -i pandas.tseries.offsets.Minute.is_on_offset GL08\ + -i pandas.tseries.offsets.Minute.kwds SA01\ + -i pandas.tseries.offsets.Minute.n GL08\ + -i pandas.tseries.offsets.Minute.name SA01\ + -i pandas.tseries.offsets.Minute.nanos SA01\ + -i pandas.tseries.offsets.Minute.normalize GL08\ + -i pandas.tseries.offsets.Minute.rule_code GL08\ + -i pandas.tseries.offsets.MonthBegin PR02\ + -i pandas.tseries.offsets.MonthBegin.copy SA01\ + -i pandas.tseries.offsets.MonthBegin.freqstr SA01\ + -i pandas.tseries.offsets.MonthBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.MonthBegin.kwds SA01\ + -i pandas.tseries.offsets.MonthBegin.n GL08\ + -i pandas.tseries.offsets.MonthBegin.name SA01\ + -i pandas.tseries.offsets.MonthBegin.nanos GL08\ + -i pandas.tseries.offsets.MonthBegin.normalize GL08\ + -i pandas.tseries.offsets.MonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.MonthEnd PR02\ + -i pandas.tseries.offsets.MonthEnd.copy SA01\ + -i pandas.tseries.offsets.MonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.MonthEnd.is_on_offset GL08\ + -i 
pandas.tseries.offsets.MonthEnd.kwds SA01\ + -i pandas.tseries.offsets.MonthEnd.n GL08\ + -i pandas.tseries.offsets.MonthEnd.name SA01\ + -i pandas.tseries.offsets.MonthEnd.nanos GL08\ + -i pandas.tseries.offsets.MonthEnd.normalize GL08\ + -i pandas.tseries.offsets.MonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.Nano PR02\ + -i pandas.tseries.offsets.Nano.copy SA01\ + -i pandas.tseries.offsets.Nano.delta GL08\ + -i pandas.tseries.offsets.Nano.freqstr SA01\ + -i pandas.tseries.offsets.Nano.is_on_offset GL08\ + -i pandas.tseries.offsets.Nano.kwds SA01\ + -i pandas.tseries.offsets.Nano.n GL08\ + -i pandas.tseries.offsets.Nano.name SA01\ + -i pandas.tseries.offsets.Nano.nanos SA01\ + -i pandas.tseries.offsets.Nano.normalize GL08\ + -i pandas.tseries.offsets.Nano.rule_code GL08\ + -i pandas.tseries.offsets.QuarterBegin PR02\ + -i pandas.tseries.offsets.QuarterBegin.copy SA01\ + -i pandas.tseries.offsets.QuarterBegin.freqstr SA01\ + -i pandas.tseries.offsets.QuarterBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.QuarterBegin.kwds SA01\ + -i pandas.tseries.offsets.QuarterBegin.n GL08\ + -i pandas.tseries.offsets.QuarterBegin.name SA01\ + -i pandas.tseries.offsets.QuarterBegin.nanos GL08\ + -i pandas.tseries.offsets.QuarterBegin.normalize GL08\ + -i pandas.tseries.offsets.QuarterBegin.rule_code GL08\ + -i pandas.tseries.offsets.QuarterBegin.startingMonth GL08\ + -i pandas.tseries.offsets.QuarterEnd PR02\ + -i pandas.tseries.offsets.QuarterEnd.copy SA01\ + -i pandas.tseries.offsets.QuarterEnd.freqstr SA01\ + -i pandas.tseries.offsets.QuarterEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.QuarterEnd.kwds SA01\ + -i pandas.tseries.offsets.QuarterEnd.n GL08\ + -i pandas.tseries.offsets.QuarterEnd.name SA01\ + -i pandas.tseries.offsets.QuarterEnd.nanos GL08\ + -i pandas.tseries.offsets.QuarterEnd.normalize GL08\ + -i pandas.tseries.offsets.QuarterEnd.rule_code GL08\ + -i pandas.tseries.offsets.QuarterEnd.startingMonth GL08\ + -i pandas.tseries.offsets.Second PR02\ 
+ -i pandas.tseries.offsets.Second.copy SA01\ + -i pandas.tseries.offsets.Second.delta GL08\ + -i pandas.tseries.offsets.Second.freqstr SA01\ + -i pandas.tseries.offsets.Second.is_on_offset GL08\ + -i pandas.tseries.offsets.Second.kwds SA01\ + -i pandas.tseries.offsets.Second.n GL08\ + -i pandas.tseries.offsets.Second.name SA01\ + -i pandas.tseries.offsets.Second.nanos SA01\ + -i pandas.tseries.offsets.Second.normalize GL08\ + -i pandas.tseries.offsets.Second.rule_code GL08\ + -i pandas.tseries.offsets.SemiMonthBegin PR02,SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.copy SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.freqstr SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.kwds SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.n GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.name SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.nanos GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.normalize GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.SemiMonthEnd PR02,SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.copy SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.kwds SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.n GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.name SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.nanos GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.normalize GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.Tick GL08\ + -i pandas.tseries.offsets.Tick.copy SA01\ + -i pandas.tseries.offsets.Tick.delta GL08\ + -i pandas.tseries.offsets.Tick.freqstr SA01\ + -i pandas.tseries.offsets.Tick.is_on_offset GL08\ + -i pandas.tseries.offsets.Tick.kwds SA01\ + -i 
pandas.tseries.offsets.Tick.n GL08\ + -i pandas.tseries.offsets.Tick.name SA01\ + -i pandas.tseries.offsets.Tick.nanos SA01\ + -i pandas.tseries.offsets.Tick.normalize GL08\ + -i pandas.tseries.offsets.Tick.rule_code GL08\ + -i pandas.tseries.offsets.Week PR02\ + -i pandas.tseries.offsets.Week.copy SA01\ + -i pandas.tseries.offsets.Week.freqstr SA01\ + -i pandas.tseries.offsets.Week.is_on_offset GL08\ + -i pandas.tseries.offsets.Week.kwds SA01\ + -i pandas.tseries.offsets.Week.n GL08\ + -i pandas.tseries.offsets.Week.name SA01\ + -i pandas.tseries.offsets.Week.nanos GL08\ + -i pandas.tseries.offsets.Week.normalize GL08\ + -i pandas.tseries.offsets.Week.rule_code GL08\ + -i pandas.tseries.offsets.Week.weekday GL08\ + -i pandas.tseries.offsets.WeekOfMonth PR02,SA01\ + -i pandas.tseries.offsets.WeekOfMonth.copy SA01\ + -i pandas.tseries.offsets.WeekOfMonth.freqstr SA01\ + -i pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08\ + -i pandas.tseries.offsets.WeekOfMonth.kwds SA01\ + -i pandas.tseries.offsets.WeekOfMonth.n GL08\ + -i pandas.tseries.offsets.WeekOfMonth.name SA01\ + -i pandas.tseries.offsets.WeekOfMonth.nanos GL08\ + -i pandas.tseries.offsets.WeekOfMonth.normalize GL08\ + -i pandas.tseries.offsets.WeekOfMonth.rule_code GL08\ + -i pandas.tseries.offsets.WeekOfMonth.week GL08\ + -i pandas.tseries.offsets.WeekOfMonth.weekday GL08\ + -i pandas.tseries.offsets.YearBegin PR02\ + -i pandas.tseries.offsets.YearBegin.copy SA01\ + -i pandas.tseries.offsets.YearBegin.freqstr SA01\ + -i pandas.tseries.offsets.YearBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.YearBegin.kwds SA01\ + -i pandas.tseries.offsets.YearBegin.month GL08\ + -i pandas.tseries.offsets.YearBegin.n GL08\ + -i pandas.tseries.offsets.YearBegin.name SA01\ + -i pandas.tseries.offsets.YearBegin.nanos GL08\ + -i pandas.tseries.offsets.YearBegin.normalize GL08\ + -i pandas.tseries.offsets.YearBegin.rule_code GL08\ + -i pandas.tseries.offsets.YearEnd PR02\ + -i pandas.tseries.offsets.YearEnd.copy 
SA01\ + -i pandas.tseries.offsets.YearEnd.freqstr SA01\ + -i pandas.tseries.offsets.YearEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.YearEnd.kwds SA01\ + -i pandas.tseries.offsets.YearEnd.month GL08\ + -i pandas.tseries.offsets.YearEnd.n GL08\ + -i pandas.tseries.offsets.YearEnd.name SA01\ + -i pandas.tseries.offsets.YearEnd.nanos GL08\ + -i pandas.tseries.offsets.YearEnd.normalize GL08\ + -i pandas.tseries.offsets.YearEnd.rule_code GL08\ + -i pandas.unique PR07\ + -i pandas.util.hash_array PR07,SA01\ + -i pandas.util.hash_pandas_object PR07,SA01 # There should be no backslash in the final line, please keep this comment in the last ignored function + + RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 73bfb12316dc5..72d5c03ab724f 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -255,29 +255,28 @@ def test_validate_all_ignore_errors(self, monkeypatch): ], ) - exit_status_ignore_func = validate_docstrings.print_validate_all_results( + exit_status = validate_docstrings.print_validate_all_results( output_format="default", prefix=None, - errors=["ER01", "ER02"], ignore_deprecated=False, - ignore_errors={ - "pandas.DataFrame.align": ["ER01"], - # ignoring an error that is not requested should be of no effect - "pandas.Index.all": ["ER03"] - } + ignore_errors={"*": {"ER03"}}, ) + # two functions * two not ignored errors + assert exit_status == 2 * 2 + exit_status = validate_docstrings.print_validate_all_results( output_format="default", prefix=None, - errors=["ER01", "ER02"], ignore_deprecated=False, - ignore_errors=None + ignore_errors={ + "*": {"ER03"}, + "pandas.DataFrame.align": {"ER01"}, + # ignoring an error that is not requested should be of no effect + "pandas.Index.all": {"ER03"} + } ) - - # we have 2 error codes activated out of the 3 available in the validate results - # one run has a function to ignore, 
the other does not - assert exit_status == 2*2 - assert exit_status_ignore_func == exit_status - 1 + # two functions * two not global ignored errors - one function ignored error + assert exit_status == 2 * 2 - 1 @@ -399,11 +398,10 @@ def test_exit_status_for_main(self, monkeypatch) -> None: func_name="docstring1", prefix=None, output_format="default", - errors=[], ignore_deprecated=False, ignore_errors=None, ) - assert exit_status == 0 + assert exit_status == 3 def test_exit_status_errors_for_validate_all(self, monkeypatch) -> None: monkeypatch.setattr( @@ -430,7 +428,6 @@ def test_exit_status_errors_for_validate_all(self, monkeypatch) -> None: func_name=None, prefix=None, output_format="default", - errors=[], ignore_deprecated=False, ignore_errors=None, ) @@ -449,7 +446,6 @@ def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch) -> None: func_name=None, output_format="default", prefix=None, - errors=[], ignore_deprecated=False, ignore_errors=None, ) @@ -474,7 +470,6 @@ def test_exit_status_for_validate_all_json(self, monkeypatch) -> None: func_name=None, output_format="json", prefix=None, - errors=[], ignore_deprecated=False, ignore_errors=None, ) @@ -519,18 +514,16 @@ def test_errors_param_filters_errors(self, monkeypatch) -> None: func_name=None, output_format="default", prefix=None, - errors=["ER01"], ignore_deprecated=False, - ignore_errors=None, + ignore_errors={"*": {"ER02", "ER03"}}, ) assert exit_status == 3 exit_status = validate_docstrings.main( func_name=None, - prefix=None, output_format="default", - errors=["ER03"], + prefix=None, ignore_deprecated=False, - ignore_errors=None, + ignore_errors={"*": {"ER01", "ER02"}}, ) assert exit_status == 1 diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index b42deff66f546..0057f97ffa211 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -16,6 +16,7 @@ from __future__ import annotations import argparse +import collections import doctest 
import importlib import json @@ -65,6 +66,10 @@ "EX04": "Do not import {imported_library}, as it is imported " "automatically for the examples (numpy as np, pandas as pd)", } +ALL_ERRORS = set(NUMPYDOC_ERROR_MSGS).union(set(ERROR_MSGS)) +duplicated_errors = set(NUMPYDOC_ERROR_MSGS).intersection(set(ERROR_MSGS)) +assert not duplicated_errors, (f"Errors {duplicated_errors} exist in both pandas " + "and numpydoc, should they be removed from pandas?") def pandas_error(code, **kwargs): @@ -340,9 +345,8 @@ def get_all_api_items(): def print_validate_all_results( output_format: str, prefix: str | None, - errors: list[str] | None, ignore_deprecated: bool, - ignore_errors: dict[str, list[str]] | None, + ignore_errors: dict[str, set[str]], ): if output_format not in ("default", "json", "actions"): raise ValueError(f'Unknown output_format "{output_format}"') @@ -358,22 +362,28 @@ def print_validate_all_results( prefix = "##[error]" if output_format == "actions" else "" exit_status = 0 for func_name, res in result.items(): - for err_code, err_desc in res["errors"]: - is_not_requested_error = errors and err_code not in errors - is_ignored_error = err_code in ignore_errors.get(func_name, []) - if is_not_requested_error or is_ignored_error: - continue - + error_messages = dict(res["errors"]) + actual_failures = set(error_messages) + expected_failures = (ignore_errors.get(func_name, set()) + | ignore_errors.get("*", set())) + for err_code in actual_failures - expected_failures: + sys.stdout.write( + f'{prefix}{res["file"]}:{res["file_line"]}:' + f'{err_code}:{func_name}:{error_messages[err_code]}\n' + ) + exit_status += 1 + for err_code in ignore_errors.get(func_name, set()) - actual_failures: sys.stdout.write( f'{prefix}{res["file"]}:{res["file_line"]}:' - f"{err_code}:{func_name}:{err_desc}\n" + f"{err_code}:{func_name}:" + "EXPECTED TO FAIL, BUT NOT FAILING\n" ) exit_status += 1 return exit_status -def print_validate_one_results(func_name: str) -> None: +def 
print_validate_one_results(func_name: str) -> int: def header(title, width=80, char="#") -> str: full_line = char * width side_len = (width - len(title) - 2) // 2 @@ -399,20 +409,45 @@ def header(title, width=80, char="#") -> str: sys.stderr.write(header("Doctests")) sys.stderr.write(result["examples_errs"]) + return len(result["errors"]) + len(result["examples_errs"]) + + +def _format_ignore_errors(raw_ignore_errors): + ignore_errors = collections.defaultdict(set) + if raw_ignore_errors: + for obj_name, error_codes in raw_ignore_errors: + # function errors "pandas.Series PR01,SA01" + if obj_name != "*": + if obj_name in ignore_errors: + raise ValueError( + f"Object `{obj_name}` is present in more than one " + "--ignore_errors argument. Please use it once and specify " + "the errors separated by commas.") + ignore_errors[obj_name] = set(error_codes.split(",")) + + unknown_errors = ignore_errors[obj_name] - ALL_ERRORS + if unknown_errors: + raise ValueError( + f"Object `{obj_name}` is ignoring errors {unknown_errors} " + f"which are not known. Known errors are: {ALL_ERRORS}") + + # global errors "PR02,ES01" + else: + ignore_errors["*"].update(set(error_codes.split(","))) + + unknown_errors = ignore_errors["*"] - ALL_ERRORS + if unknown_errors: + raise ValueError( + f"Unknown errors {unknown_errors} specified using --ignore_errors " + "Known errors are: {ALL_ERRORS}") -def validate_error_codes(errors): - overlapped_errors = set(NUMPYDOC_ERROR_MSGS).intersection(set(ERROR_MSGS)) - assert not overlapped_errors, f"{overlapped_errors} is overlapped." - all_errors = set(NUMPYDOC_ERROR_MSGS).union(set(ERROR_MSGS)) - nonexistent_errors = set(errors) - all_errors - assert not nonexistent_errors, f"{nonexistent_errors} don't exist." + return ignore_errors def main( func_name, output_format, prefix, - errors, ignore_deprecated, ignore_errors ): @@ -420,31 +455,14 @@ def main( Main entry point. Call the validation for one or for all docstrings. 
""" if func_name is None: - error_str = ", ".join(errors) - msg = f"Validate docstrings ({error_str})\n" - else: - msg = f"Validate docstring in function {func_name}\n" - sys.stdout.write(msg) - - validate_error_codes(errors) - if ignore_errors is not None: - for error_codes in ignore_errors.values(): - validate_error_codes(error_codes) - - if func_name is None: - exit_status = print_validate_all_results( + return print_validate_all_results( output_format, prefix, - errors, ignore_deprecated, ignore_errors ) else: - print_validate_one_results(func_name) - exit_status = 0 - sys.stdout.write(msg + "DONE" + os.linesep) - - return exit_status + return print_validate_one_results(func_name) if __name__ == "__main__": @@ -474,14 +492,6 @@ def main( "of methods starting by this pattern. It is " "ignored if parameter function is provided", ) - argparser.add_argument( - "--errors", - default=None, - help="comma separated " - "list of error codes to validate. By default it " - "validates all errors (ignored when validating " - "a single docstring)", - ) argparser.add_argument( "--ignore_deprecated", default=False, @@ -492,6 +502,7 @@ def main( ) argparser.add_argument( "--ignore_errors", + "-i", default=None, action="append", nargs=2, @@ -504,18 +515,11 @@ def main( ) args = argparser.parse_args(sys.argv[1:]) - args.errors = args.errors.split(",") if args.errors else None - if args.ignore_errors: - args.ignore_errors = {function: error_codes.split(",") - for function, error_codes - in args.ignore_errors} - sys.exit( main(args.function, args.format, args.prefix, - args.errors, args.ignore_deprecated, - args.ignore_errors + _format_ignore_errors(args.ignore_errors), ) ) From 914a9ee69f40caf1f798c86f57f10d1561ca34e3 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Tue, 19 Mar 2024 01:31:58 -0400 Subject: [PATCH 06/23] CI: Remove ASAN job (#57886) * Try removing subprocess call in conftest * empty bytes * try non-editable * Revert "try non-editable" This reverts commit 
2f9316db93518fbaf8776f049f128c647f5f5106. * Revert "empty bytes" This reverts commit 7f500435f6aa3c5710dd4317ba21652290a04dfa. * Revert "Try removing subprocess call in conftest" This reverts commit 31ad407638254e3a34ae76dcf381f1fac06b2205. * Remove ASAN job * revert more --- .github/actions/run-tests/action.yml | 9 +------- .github/workflows/unit-tests.yml | 14 ------------ ci/deps/actions-311-sanitizers.yaml | 32 ---------------------------- 3 files changed, 1 insertion(+), 54 deletions(-) delete mode 100644 ci/deps/actions-311-sanitizers.yaml diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml index 4a9fe04a8f5f9..66e4142dc0cbb 100644 --- a/.github/actions/run-tests/action.yml +++ b/.github/actions/run-tests/action.yml @@ -1,16 +1,9 @@ name: Run tests and report results -inputs: - preload: - description: Preload arguments for sanitizer - required: false - asan_options: - description: Arguments for Address Sanitizer (ASAN) - required: false runs: using: composite steps: - name: Test - run: ${{ inputs.asan_options }} ${{ inputs.preload }} ci/run_tests.sh + run: ci/run_tests.sh shell: bash -el {0} - name: Publish test results diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 855973a22886a..f93950224eaae 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -68,14 +68,6 @@ jobs: - name: "Pyarrow Nightly" env_file: actions-311-pyarrownightly.yaml pattern: "not slow and not network and not single_cpu" - - name: "ASAN / UBSAN" - env_file: actions-311-sanitizers.yaml - pattern: "not slow and not network and not single_cpu and not skip_ubsan" - asan_options: "ASAN_OPTIONS=detect_leaks=0" - preload: LD_PRELOAD=$(gcc -print-file-name=libasan.so) - meson_args: --config-settings=setup-args="-Db_sanitize=address,undefined" - cflags_adds: -fno-sanitize-recover=all - pytest_workers: -1 # disable pytest-xdist as it swallows stderr from ASAN fail-fast: false name: ${{ 
matrix.name || format('ubuntu-latest {0}', matrix.env_file) }} env: @@ -161,18 +153,12 @@ jobs: - name: Test (not single_cpu) uses: ./.github/actions/run-tests if: ${{ matrix.name != 'Pypy' }} - with: - preload: ${{ matrix.preload }} - asan_options: ${{ matrix.asan_options }} env: # Set pattern to not single_cpu if not already set PATTERN: ${{ env.PATTERN == '' && 'not single_cpu' || matrix.pattern }} - name: Test (single_cpu) uses: ./.github/actions/run-tests - with: - preload: ${{ matrix.preload }} - asan_options: ${{ matrix.asan_options }} env: PATTERN: 'single_cpu' PYTEST_WORKERS: 0 diff --git a/ci/deps/actions-311-sanitizers.yaml b/ci/deps/actions-311-sanitizers.yaml deleted file mode 100644 index f5f04c90bffad..0000000000000 --- a/ci/deps/actions-311-sanitizers.yaml +++ /dev/null @@ -1,32 +0,0 @@ -name: pandas-dev -channels: - - conda-forge -dependencies: - - python=3.11 - - # build dependencies - - versioneer[toml] - - cython>=0.29.33 - - meson[ninja]=1.2.1 - - meson-python=0.13.1 - - # test dependencies - - pytest>=7.3.2 - - pytest-cov - - pytest-xdist>=2.2.0 - - pytest-localserver>=0.7.1 - - pytest-qt>=4.2.0 - - boto3 - - hypothesis>=6.46.1 - - pyqt>=5.15.9 - - # required dependencies - - python-dateutil - - numpy - - pytz - - # pandas dependencies - - pip - - - pip: - - "tzdata>=2022.7" From aa3e949e2a2b72588186cb1936edb535713aefa0 Mon Sep 17 00:00:00 2001 From: Marc Garcia Date: Tue, 19 Mar 2024 17:57:33 +0100 Subject: [PATCH 07/23] CI: Improve API of --ignore_errors in validate_docstrings.py (#57908) * CI: Improve API of --ignore_errors in validate_docstrings.py * Updating tests --- ci/code_checks.sh | 2412 ++++++++++----------- scripts/tests/test_validate_docstrings.py | 16 +- scripts/validate_docstrings.py | 29 +- 3 files changed, 1232 insertions(+), 1225 deletions(-) diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 3c46cb39eeb7e..a9967dcb8efe6 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -68,1212 +68,1212 @@ if [[ -z "$CHECK" || 
"$CHECK" == "docstrings" ]]; then MSG='Validate Docstrings' ; echo $MSG $BASE_DIR/scripts/validate_docstrings.py \ --format=actions \ - -i '*' ES01 `# For now it is ok if docstrings are missing the extended summary` \ - -i pandas.Series.dt PR01 `# Accessors are implemented as classes, but we do not document the Parameters section` \ - -i pandas.Categorical.__array__ SA01\ - -i pandas.Categorical.codes SA01\ - -i pandas.Categorical.dtype SA01\ - -i pandas.Categorical.from_codes SA01\ - -i pandas.Categorical.ordered SA01\ - -i pandas.CategoricalDtype.categories SA01\ - -i pandas.CategoricalDtype.ordered SA01\ - -i pandas.CategoricalIndex.codes SA01\ - -i pandas.CategoricalIndex.ordered SA01\ - -i pandas.DataFrame.__dataframe__ SA01\ - -i pandas.DataFrame.__iter__ SA01\ - -i pandas.DataFrame.assign SA01\ - -i pandas.DataFrame.at_time PR01\ - -i pandas.DataFrame.axes SA01\ - -i pandas.DataFrame.backfill PR01,SA01\ - -i pandas.DataFrame.bfill SA01\ - -i pandas.DataFrame.columns SA01\ - -i pandas.DataFrame.copy SA01\ - -i pandas.DataFrame.droplevel SA01\ - -i pandas.DataFrame.dtypes SA01\ - -i pandas.DataFrame.ffill SA01\ - -i pandas.DataFrame.first_valid_index SA01\ - -i pandas.DataFrame.get SA01\ - -i pandas.DataFrame.hist RT03\ - -i pandas.DataFrame.infer_objects RT03\ - -i pandas.DataFrame.keys SA01\ - -i pandas.DataFrame.kurt RT03,SA01\ - -i pandas.DataFrame.kurtosis RT03,SA01\ - -i pandas.DataFrame.last_valid_index SA01\ - -i pandas.DataFrame.mask RT03\ - -i pandas.DataFrame.max RT03\ - -i pandas.DataFrame.mean RT03,SA01\ - -i pandas.DataFrame.median RT03,SA01\ - -i pandas.DataFrame.min RT03\ - -i pandas.DataFrame.pad PR01,SA01\ - -i pandas.DataFrame.plot PR02,SA01\ - -i pandas.DataFrame.pop SA01\ - -i pandas.DataFrame.prod RT03\ - -i pandas.DataFrame.product RT03\ - -i pandas.DataFrame.reorder_levels SA01\ - -i pandas.DataFrame.sem PR01,RT03,SA01\ - -i pandas.DataFrame.skew RT03,SA01\ - -i pandas.DataFrame.sparse PR01,SA01\ - -i pandas.DataFrame.sparse.density 
SA01\ - -i pandas.DataFrame.sparse.from_spmatrix SA01\ - -i pandas.DataFrame.sparse.to_coo SA01\ - -i pandas.DataFrame.sparse.to_dense SA01\ - -i pandas.DataFrame.std PR01,RT03,SA01\ - -i pandas.DataFrame.sum RT03\ - -i pandas.DataFrame.swapaxes PR01,SA01\ - -i pandas.DataFrame.swaplevel SA01\ - -i pandas.DataFrame.to_feather SA01\ - -i pandas.DataFrame.to_markdown SA01\ - -i pandas.DataFrame.to_parquet RT03\ - -i pandas.DataFrame.to_period SA01\ - -i pandas.DataFrame.to_timestamp SA01\ - -i pandas.DataFrame.tz_convert SA01\ - -i pandas.DataFrame.tz_localize SA01\ - -i pandas.DataFrame.unstack RT03\ - -i pandas.DataFrame.value_counts RT03\ - -i pandas.DataFrame.var PR01,RT03,SA01\ - -i pandas.DataFrame.where RT03\ - -i pandas.DatetimeIndex.ceil SA01\ - -i pandas.DatetimeIndex.date SA01\ - -i pandas.DatetimeIndex.day SA01\ - -i pandas.DatetimeIndex.day_name SA01\ - -i pandas.DatetimeIndex.day_of_year SA01\ - -i pandas.DatetimeIndex.dayofyear SA01\ - -i pandas.DatetimeIndex.floor SA01\ - -i pandas.DatetimeIndex.freqstr SA01\ - -i pandas.DatetimeIndex.hour SA01\ - -i pandas.DatetimeIndex.indexer_at_time PR01,RT03\ - -i pandas.DatetimeIndex.indexer_between_time RT03\ - -i pandas.DatetimeIndex.inferred_freq SA01\ - -i pandas.DatetimeIndex.is_leap_year SA01\ - -i pandas.DatetimeIndex.microsecond SA01\ - -i pandas.DatetimeIndex.minute SA01\ - -i pandas.DatetimeIndex.month SA01\ - -i pandas.DatetimeIndex.month_name SA01\ - -i pandas.DatetimeIndex.nanosecond SA01\ - -i pandas.DatetimeIndex.quarter SA01\ - -i pandas.DatetimeIndex.round SA01\ - -i pandas.DatetimeIndex.second SA01\ - -i pandas.DatetimeIndex.snap PR01,RT03,SA01\ - -i pandas.DatetimeIndex.std PR01,RT03\ - -i pandas.DatetimeIndex.time SA01\ - -i pandas.DatetimeIndex.timetz SA01\ - -i pandas.DatetimeIndex.to_period RT03\ - -i pandas.DatetimeIndex.to_pydatetime RT03,SA01\ - -i pandas.DatetimeIndex.tz SA01\ - -i pandas.DatetimeIndex.tz_convert RT03\ - -i pandas.DatetimeIndex.year SA01\ - -i pandas.DatetimeTZDtype 
SA01\ - -i pandas.DatetimeTZDtype.tz SA01\ - -i pandas.DatetimeTZDtype.unit SA01\ - -i pandas.ExcelFile PR01,SA01\ - -i pandas.ExcelFile.parse PR01,SA01\ - -i pandas.ExcelWriter SA01\ - -i pandas.Float32Dtype SA01\ - -i pandas.Float64Dtype SA01\ - -i pandas.Grouper PR02,SA01\ - -i pandas.HDFStore.append PR01,SA01\ - -i pandas.HDFStore.get SA01\ - -i pandas.HDFStore.groups SA01\ - -i pandas.HDFStore.info RT03,SA01\ - -i pandas.HDFStore.keys SA01\ - -i pandas.HDFStore.put PR01,SA01\ - -i pandas.HDFStore.select SA01\ - -i pandas.HDFStore.walk SA01\ - -i pandas.Index PR07\ - -i pandas.Index.T SA01\ - -i pandas.Index.append PR07,RT03,SA01\ - -i pandas.Index.astype SA01\ - -i pandas.Index.copy PR07,SA01\ - -i pandas.Index.difference PR07,RT03,SA01\ - -i pandas.Index.drop PR07,SA01\ - -i pandas.Index.drop_duplicates RT03\ - -i pandas.Index.droplevel RT03,SA01\ - -i pandas.Index.dropna RT03,SA01\ - -i pandas.Index.dtype SA01\ - -i pandas.Index.duplicated RT03\ - -i pandas.Index.empty GL08\ - -i pandas.Index.equals SA01\ - -i pandas.Index.fillna RT03\ - -i pandas.Index.get_indexer PR07,SA01\ - -i pandas.Index.get_indexer_for PR01,SA01\ - -i pandas.Index.get_indexer_non_unique PR07,SA01\ - -i pandas.Index.get_loc PR07,RT03,SA01\ - -i pandas.Index.get_slice_bound PR07\ - -i pandas.Index.hasnans SA01\ - -i pandas.Index.identical PR01,SA01\ - -i pandas.Index.inferred_type SA01\ - -i pandas.Index.insert PR07,RT03,SA01\ - -i pandas.Index.intersection PR07,RT03,SA01\ - -i pandas.Index.item SA01\ - -i pandas.Index.join PR07,RT03,SA01\ - -i pandas.Index.map SA01\ - -i pandas.Index.memory_usage RT03\ - -i pandas.Index.name SA01\ - -i pandas.Index.names GL08\ - -i pandas.Index.nbytes SA01\ - -i pandas.Index.ndim SA01\ - -i pandas.Index.nunique RT03\ - -i pandas.Index.putmask PR01,RT03\ - -i pandas.Index.ravel PR01,RT03\ - -i pandas.Index.reindex PR07\ - -i pandas.Index.shape SA01\ - -i pandas.Index.size SA01\ - -i pandas.Index.slice_indexer PR07,RT03,SA01\ - -i pandas.Index.slice_locs 
RT03\ - -i pandas.Index.str PR01,SA01\ - -i pandas.Index.symmetric_difference PR07,RT03,SA01\ - -i pandas.Index.take PR01,PR07\ - -i pandas.Index.to_list RT03\ - -i pandas.Index.union PR07,RT03,SA01\ - -i pandas.Index.unique RT03\ - -i pandas.Index.value_counts RT03\ - -i pandas.Index.view GL08\ - -i pandas.Int16Dtype SA01\ - -i pandas.Int32Dtype SA01\ - -i pandas.Int64Dtype SA01\ - -i pandas.Int8Dtype SA01\ - -i pandas.Interval PR02\ - -i pandas.Interval.closed SA01\ - -i pandas.Interval.left SA01\ - -i pandas.Interval.mid SA01\ - -i pandas.Interval.right SA01\ - -i pandas.IntervalDtype PR01,SA01\ - -i pandas.IntervalDtype.subtype SA01\ - -i pandas.IntervalIndex.closed SA01\ - -i pandas.IntervalIndex.contains RT03\ - -i pandas.IntervalIndex.get_indexer PR07,SA01\ - -i pandas.IntervalIndex.get_loc PR07,RT03,SA01\ - -i pandas.IntervalIndex.is_non_overlapping_monotonic SA01\ - -i pandas.IntervalIndex.left GL08\ - -i pandas.IntervalIndex.length GL08\ - -i pandas.IntervalIndex.mid GL08\ - -i pandas.IntervalIndex.right GL08\ - -i pandas.IntervalIndex.set_closed RT03,SA01\ - -i pandas.IntervalIndex.to_tuples RT03,SA01\ - -i pandas.MultiIndex PR01\ - -i pandas.MultiIndex.append PR07,SA01\ - -i pandas.MultiIndex.copy PR07,RT03,SA01\ - -i pandas.MultiIndex.drop PR07,RT03,SA01\ - -i pandas.MultiIndex.droplevel RT03,SA01\ - -i pandas.MultiIndex.dtypes SA01\ - -i pandas.MultiIndex.get_indexer PR07,SA01\ - -i pandas.MultiIndex.get_level_values SA01\ - -i pandas.MultiIndex.get_loc PR07\ - -i pandas.MultiIndex.get_loc_level PR07\ - -i pandas.MultiIndex.levels SA01\ - -i pandas.MultiIndex.levshape SA01\ - -i pandas.MultiIndex.names SA01\ - -i pandas.MultiIndex.nlevels SA01\ - -i pandas.MultiIndex.remove_unused_levels RT03,SA01\ - -i pandas.MultiIndex.reorder_levels RT03,SA01\ - -i pandas.MultiIndex.set_codes SA01\ - -i pandas.MultiIndex.set_levels RT03,SA01\ - -i pandas.MultiIndex.sortlevel PR07,SA01\ - -i pandas.MultiIndex.to_frame RT03\ - -i pandas.MultiIndex.truncate SA01\ - -i 
pandas.NA SA01\ - -i pandas.NaT SA01\ - -i pandas.NamedAgg SA01\ - -i pandas.Period SA01\ - -i pandas.Period.asfreq SA01\ - -i pandas.Period.freq GL08\ - -i pandas.Period.freqstr SA01\ - -i pandas.Period.is_leap_year SA01\ - -i pandas.Period.month SA01\ - -i pandas.Period.now SA01\ - -i pandas.Period.ordinal GL08\ - -i pandas.Period.quarter SA01\ - -i pandas.Period.strftime PR01,SA01\ - -i pandas.Period.to_timestamp SA01\ - -i pandas.Period.year SA01\ - -i pandas.PeriodDtype SA01\ - -i pandas.PeriodDtype.freq SA01\ - -i pandas.PeriodIndex.day SA01\ - -i pandas.PeriodIndex.day_of_week SA01\ - -i pandas.PeriodIndex.day_of_year SA01\ - -i pandas.PeriodIndex.dayofweek SA01\ - -i pandas.PeriodIndex.dayofyear SA01\ - -i pandas.PeriodIndex.days_in_month SA01\ - -i pandas.PeriodIndex.daysinmonth SA01\ - -i pandas.PeriodIndex.freqstr SA01\ - -i pandas.PeriodIndex.from_fields PR07,SA01\ - -i pandas.PeriodIndex.from_ordinals SA01\ - -i pandas.PeriodIndex.hour SA01\ - -i pandas.PeriodIndex.is_leap_year SA01\ - -i pandas.PeriodIndex.minute SA01\ - -i pandas.PeriodIndex.month SA01\ - -i pandas.PeriodIndex.quarter SA01\ - -i pandas.PeriodIndex.qyear GL08\ - -i pandas.PeriodIndex.second SA01\ - -i pandas.PeriodIndex.to_timestamp RT03,SA01\ - -i pandas.PeriodIndex.week SA01\ - -i pandas.PeriodIndex.weekday SA01\ - -i pandas.PeriodIndex.weekofyear SA01\ - -i pandas.PeriodIndex.year SA01\ - -i pandas.RangeIndex PR07\ - -i pandas.RangeIndex.from_range PR01,SA01\ - -i pandas.RangeIndex.start SA01\ - -i pandas.RangeIndex.step SA01\ - -i pandas.RangeIndex.stop SA01\ - -i pandas.Series SA01\ - -i pandas.Series.T SA01\ - -i pandas.Series.__iter__ RT03,SA01\ - -i pandas.Series.add PR07\ - -i pandas.Series.at_time PR01\ - -i pandas.Series.backfill PR01,SA01\ - -i pandas.Series.bfill SA01\ - -i pandas.Series.case_when RT03\ - -i pandas.Series.cat PR07,SA01\ - -i pandas.Series.cat.add_categories PR01,PR02\ - -i pandas.Series.cat.as_ordered PR01\ - -i pandas.Series.cat.as_unordered PR01\ - -i 
pandas.Series.cat.codes SA01\ - -i pandas.Series.cat.ordered SA01\ - -i pandas.Series.cat.remove_categories PR01,PR02\ - -i pandas.Series.cat.remove_unused_categories PR01\ - -i pandas.Series.cat.rename_categories PR01,PR02\ - -i pandas.Series.cat.reorder_categories PR01,PR02\ - -i pandas.Series.cat.set_categories PR01,PR02\ - -i pandas.Series.copy SA01\ - -i pandas.Series.div PR07\ - -i pandas.Series.droplevel SA01\ - -i pandas.Series.dt.as_unit PR01,PR02\ - -i pandas.Series.dt.ceil PR01,PR02,SA01\ - -i pandas.Series.dt.components SA01\ - -i pandas.Series.dt.date SA01\ - -i pandas.Series.dt.day SA01\ - -i pandas.Series.dt.day_name PR01,PR02,SA01\ - -i pandas.Series.dt.day_of_year SA01\ - -i pandas.Series.dt.dayofyear SA01\ - -i pandas.Series.dt.days SA01\ - -i pandas.Series.dt.days_in_month SA01\ - -i pandas.Series.dt.daysinmonth SA01\ - -i pandas.Series.dt.floor PR01,PR02,SA01\ - -i pandas.Series.dt.freq GL08\ - -i pandas.Series.dt.hour SA01\ - -i pandas.Series.dt.is_leap_year SA01\ - -i pandas.Series.dt.microsecond SA01\ - -i pandas.Series.dt.microseconds SA01\ - -i pandas.Series.dt.minute SA01\ - -i pandas.Series.dt.month SA01\ - -i pandas.Series.dt.month_name PR01,PR02,SA01\ - -i pandas.Series.dt.nanosecond SA01\ - -i pandas.Series.dt.nanoseconds SA01\ - -i pandas.Series.dt.normalize PR01\ - -i pandas.Series.dt.quarter SA01\ - -i pandas.Series.dt.qyear GL08\ - -i pandas.Series.dt.round PR01,PR02,SA01\ - -i pandas.Series.dt.second SA01\ - -i pandas.Series.dt.seconds SA01\ - -i pandas.Series.dt.strftime PR01,PR02\ - -i pandas.Series.dt.time SA01\ - -i pandas.Series.dt.timetz SA01\ - -i pandas.Series.dt.to_period PR01,PR02,RT03\ - -i pandas.Series.dt.total_seconds PR01\ - -i pandas.Series.dt.tz SA01\ - -i pandas.Series.dt.tz_convert PR01,PR02,RT03\ - -i pandas.Series.dt.tz_localize PR01,PR02\ - -i pandas.Series.dt.unit GL08\ - -i pandas.Series.dt.year SA01\ - -i pandas.Series.dtype SA01\ - -i pandas.Series.dtypes SA01\ - -i pandas.Series.empty GL08\ - -i 
pandas.Series.eq PR07,SA01\ - -i pandas.Series.ffill SA01\ - -i pandas.Series.first_valid_index SA01\ - -i pandas.Series.floordiv PR07\ - -i pandas.Series.ge PR07,SA01\ - -i pandas.Series.get SA01\ - -i pandas.Series.gt PR07,SA01\ - -i pandas.Series.hasnans SA01\ - -i pandas.Series.infer_objects RT03\ - -i pandas.Series.is_monotonic_decreasing SA01\ - -i pandas.Series.is_monotonic_increasing SA01\ - -i pandas.Series.is_unique SA01\ - -i pandas.Series.item SA01\ - -i pandas.Series.keys SA01\ - -i pandas.Series.kurt RT03,SA01\ - -i pandas.Series.kurtosis RT03,SA01\ - -i pandas.Series.last_valid_index SA01\ - -i pandas.Series.le PR07,SA01\ - -i pandas.Series.list.__getitem__ SA01\ - -i pandas.Series.list.flatten SA01\ - -i pandas.Series.list.len SA01\ - -i pandas.Series.lt PR07,SA01\ - -i pandas.Series.mask RT03\ - -i pandas.Series.max RT03\ - -i pandas.Series.mean RT03,SA01\ - -i pandas.Series.median RT03,SA01\ - -i pandas.Series.min RT03\ - -i pandas.Series.mod PR07\ - -i pandas.Series.mode SA01\ - -i pandas.Series.mul PR07\ - -i pandas.Series.nbytes SA01\ - -i pandas.Series.ndim SA01\ - -i pandas.Series.ne PR07,SA01\ - -i pandas.Series.nunique RT03\ - -i pandas.Series.pad PR01,SA01\ - -i pandas.Series.plot PR02,SA01\ - -i pandas.Series.pop RT03,SA01\ - -i pandas.Series.pow PR07\ - -i pandas.Series.prod RT03\ - -i pandas.Series.product RT03\ - -i pandas.Series.radd PR07\ - -i pandas.Series.rdiv PR07\ - -i pandas.Series.reorder_levels RT03,SA01\ - -i pandas.Series.rfloordiv PR07\ - -i pandas.Series.rmod PR07\ - -i pandas.Series.rmul PR07\ - -i pandas.Series.rpow PR07\ - -i pandas.Series.rsub PR07\ - -i pandas.Series.rtruediv PR07\ - -i pandas.Series.sem PR01,RT03,SA01\ - -i pandas.Series.shape SA01\ - -i pandas.Series.size SA01\ - -i pandas.Series.skew RT03,SA01\ - -i pandas.Series.sparse PR01,SA01\ - -i pandas.Series.sparse.density SA01\ - -i pandas.Series.sparse.fill_value SA01\ - -i pandas.Series.sparse.from_coo PR07,SA01\ - -i pandas.Series.sparse.npoints SA01\ - 
-i pandas.Series.sparse.sp_values SA01\ - -i pandas.Series.sparse.to_coo PR07,RT03,SA01\ - -i pandas.Series.std PR01,RT03,SA01\ - -i pandas.Series.str PR01,SA01\ - -i pandas.Series.str.capitalize RT03\ - -i pandas.Series.str.casefold RT03\ - -i pandas.Series.str.center RT03,SA01\ - -i pandas.Series.str.decode PR07,RT03,SA01\ - -i pandas.Series.str.encode PR07,RT03,SA01\ - -i pandas.Series.str.find RT03\ - -i pandas.Series.str.fullmatch RT03\ - -i pandas.Series.str.get RT03,SA01\ - -i pandas.Series.str.index RT03\ - -i pandas.Series.str.ljust RT03,SA01\ - -i pandas.Series.str.lower RT03\ - -i pandas.Series.str.lstrip RT03\ - -i pandas.Series.str.match RT03\ - -i pandas.Series.str.normalize RT03,SA01\ - -i pandas.Series.str.partition RT03\ - -i pandas.Series.str.repeat SA01\ - -i pandas.Series.str.replace SA01\ - -i pandas.Series.str.rfind RT03\ - -i pandas.Series.str.rindex RT03\ - -i pandas.Series.str.rjust RT03,SA01\ - -i pandas.Series.str.rpartition RT03\ - -i pandas.Series.str.rstrip RT03\ - -i pandas.Series.str.strip RT03\ - -i pandas.Series.str.swapcase RT03\ - -i pandas.Series.str.title RT03\ - -i pandas.Series.str.translate RT03,SA01\ - -i pandas.Series.str.upper RT03\ - -i pandas.Series.str.wrap RT03,SA01\ - -i pandas.Series.str.zfill RT03\ - -i pandas.Series.struct.dtypes SA01\ - -i pandas.Series.sub PR07\ - -i pandas.Series.sum RT03\ - -i pandas.Series.swaplevel SA01\ - -i pandas.Series.to_dict SA01\ - -i pandas.Series.to_frame SA01\ - -i pandas.Series.to_list RT03\ - -i pandas.Series.to_markdown SA01\ - -i pandas.Series.to_period SA01\ - -i pandas.Series.to_string SA01\ - -i pandas.Series.to_timestamp RT03,SA01\ - -i pandas.Series.truediv PR07\ - -i pandas.Series.tz_convert SA01\ - -i pandas.Series.tz_localize SA01\ - -i pandas.Series.unstack SA01\ - -i pandas.Series.update PR07,SA01\ - -i pandas.Series.value_counts RT03\ - -i pandas.Series.var PR01,RT03,SA01\ - -i pandas.Series.where RT03\ - -i pandas.SparseDtype SA01\ - -i pandas.Timedelta PR07,SA01\ - 
-i pandas.Timedelta.as_unit SA01\ - -i pandas.Timedelta.asm8 SA01\ - -i pandas.Timedelta.ceil SA01\ - -i pandas.Timedelta.components SA01\ - -i pandas.Timedelta.days SA01\ - -i pandas.Timedelta.floor SA01\ - -i pandas.Timedelta.max PR02,PR07,SA01\ - -i pandas.Timedelta.min PR02,PR07,SA01\ - -i pandas.Timedelta.resolution PR02,PR07,SA01\ - -i pandas.Timedelta.round SA01\ - -i pandas.Timedelta.to_numpy PR01\ - -i pandas.Timedelta.to_timedelta64 SA01\ - -i pandas.Timedelta.total_seconds SA01\ - -i pandas.Timedelta.view SA01\ - -i pandas.TimedeltaIndex PR01\ - -i pandas.TimedeltaIndex.as_unit RT03,SA01\ - -i pandas.TimedeltaIndex.ceil SA01\ - -i pandas.TimedeltaIndex.components SA01\ - -i pandas.TimedeltaIndex.days SA01\ - -i pandas.TimedeltaIndex.floor SA01\ - -i pandas.TimedeltaIndex.inferred_freq SA01\ - -i pandas.TimedeltaIndex.microseconds SA01\ - -i pandas.TimedeltaIndex.nanoseconds SA01\ - -i pandas.TimedeltaIndex.round SA01\ - -i pandas.TimedeltaIndex.seconds SA01\ - -i pandas.TimedeltaIndex.to_pytimedelta RT03,SA01\ - -i pandas.Timestamp PR07,SA01\ - -i pandas.Timestamp.as_unit SA01\ - -i pandas.Timestamp.asm8 SA01\ - -i pandas.Timestamp.astimezone SA01\ - -i pandas.Timestamp.ceil SA01\ - -i pandas.Timestamp.combine PR01,SA01\ - -i pandas.Timestamp.ctime SA01\ - -i pandas.Timestamp.date SA01\ - -i pandas.Timestamp.day GL08\ - -i pandas.Timestamp.day_name SA01\ - -i pandas.Timestamp.day_of_week SA01\ - -i pandas.Timestamp.day_of_year SA01\ - -i pandas.Timestamp.dayofweek SA01\ - -i pandas.Timestamp.dayofyear SA01\ - -i pandas.Timestamp.days_in_month SA01\ - -i pandas.Timestamp.daysinmonth SA01\ - -i pandas.Timestamp.dst SA01\ - -i pandas.Timestamp.floor SA01\ - -i pandas.Timestamp.fold GL08\ - -i pandas.Timestamp.fromordinal SA01\ - -i pandas.Timestamp.fromtimestamp PR01,SA01\ - -i pandas.Timestamp.hour GL08\ - -i pandas.Timestamp.is_leap_year SA01\ - -i pandas.Timestamp.isocalendar SA01\ - -i pandas.Timestamp.isoformat SA01\ - -i pandas.Timestamp.isoweekday 
SA01\ - -i pandas.Timestamp.max PR02,PR07,SA01\ - -i pandas.Timestamp.microsecond GL08\ - -i pandas.Timestamp.min PR02,PR07,SA01\ - -i pandas.Timestamp.minute GL08\ - -i pandas.Timestamp.month GL08\ - -i pandas.Timestamp.month_name SA01\ - -i pandas.Timestamp.nanosecond GL08\ - -i pandas.Timestamp.normalize SA01\ - -i pandas.Timestamp.now SA01\ - -i pandas.Timestamp.quarter SA01\ - -i pandas.Timestamp.replace PR07,SA01\ - -i pandas.Timestamp.resolution PR02,PR07,SA01\ - -i pandas.Timestamp.round SA01\ - -i pandas.Timestamp.second GL08\ - -i pandas.Timestamp.strftime SA01\ - -i pandas.Timestamp.strptime PR01,SA01\ - -i pandas.Timestamp.time SA01\ - -i pandas.Timestamp.timestamp SA01\ - -i pandas.Timestamp.timetuple SA01\ - -i pandas.Timestamp.timetz SA01\ - -i pandas.Timestamp.to_datetime64 SA01\ - -i pandas.Timestamp.to_julian_date SA01\ - -i pandas.Timestamp.to_numpy PR01\ - -i pandas.Timestamp.to_period PR01,SA01\ - -i pandas.Timestamp.to_pydatetime PR01,SA01\ - -i pandas.Timestamp.today SA01\ - -i pandas.Timestamp.toordinal SA01\ - -i pandas.Timestamp.tz SA01\ - -i pandas.Timestamp.tz_convert SA01\ - -i pandas.Timestamp.tz_localize SA01\ - -i pandas.Timestamp.tzinfo GL08\ - -i pandas.Timestamp.tzname SA01\ - -i pandas.Timestamp.unit SA01\ - -i pandas.Timestamp.utcfromtimestamp PR01,SA01\ - -i pandas.Timestamp.utcnow SA01\ - -i pandas.Timestamp.utcoffset SA01\ - -i pandas.Timestamp.utctimetuple SA01\ - -i pandas.Timestamp.value GL08\ - -i pandas.Timestamp.week SA01\ - -i pandas.Timestamp.weekday SA01\ - -i pandas.Timestamp.weekofyear SA01\ - -i pandas.Timestamp.year GL08\ - -i pandas.UInt16Dtype SA01\ - -i pandas.UInt32Dtype SA01\ - -i pandas.UInt64Dtype SA01\ - -i pandas.UInt8Dtype SA01\ - -i pandas.api.extensions.ExtensionArray SA01\ - -i pandas.api.extensions.ExtensionArray._accumulate RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01\ - -i pandas.api.extensions.ExtensionArray._formatter SA01\ - -i 
pandas.api.extensions.ExtensionArray._from_sequence SA01\ - -i pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01\ - -i pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._reduce RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._values_for_factorize SA01\ - -i pandas.api.extensions.ExtensionArray.astype SA01\ - -i pandas.api.extensions.ExtensionArray.copy RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.dropna RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.dtype SA01\ - -i pandas.api.extensions.ExtensionArray.duplicated RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.equals SA01\ - -i pandas.api.extensions.ExtensionArray.fillna SA01\ - -i pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.interpolate PR01,SA01\ - -i pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.isna SA01\ - -i pandas.api.extensions.ExtensionArray.nbytes SA01\ - -i pandas.api.extensions.ExtensionArray.ndim SA01\ - -i pandas.api.extensions.ExtensionArray.ravel RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.shape SA01\ - -i pandas.api.extensions.ExtensionArray.shift SA01\ - -i pandas.api.extensions.ExtensionArray.take RT03\ - -i pandas.api.extensions.ExtensionArray.tolist RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.unique RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.view SA01\ - -i pandas.api.extensions.register_extension_dtype SA01\ - -i pandas.api.indexers.BaseIndexer PR01,SA01\ - -i pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01\ - -i pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01\ - -i pandas.api.interchange.from_dataframe RT03,SA01\ - -i pandas.api.types.infer_dtype PR07,SA01\ - -i pandas.api.types.is_any_real_numeric_dtype SA01\ - -i pandas.api.types.is_bool PR01,SA01\ - -i 
pandas.api.types.is_bool_dtype SA01\ - -i pandas.api.types.is_categorical_dtype SA01\ - -i pandas.api.types.is_complex PR01,SA01\ - -i pandas.api.types.is_complex_dtype SA01\ - -i pandas.api.types.is_datetime64_any_dtype SA01\ - -i pandas.api.types.is_datetime64_dtype SA01\ - -i pandas.api.types.is_datetime64_ns_dtype SA01\ - -i pandas.api.types.is_datetime64tz_dtype SA01\ - -i pandas.api.types.is_dict_like PR07,SA01\ - -i pandas.api.types.is_extension_array_dtype SA01\ - -i pandas.api.types.is_file_like PR07,SA01\ - -i pandas.api.types.is_float PR01,SA01\ - -i pandas.api.types.is_float_dtype SA01\ - -i pandas.api.types.is_hashable PR01,RT03,SA01\ - -i pandas.api.types.is_int64_dtype SA01\ - -i pandas.api.types.is_integer PR01,SA01\ - -i pandas.api.types.is_integer_dtype SA01\ - -i pandas.api.types.is_interval_dtype SA01\ - -i pandas.api.types.is_iterator PR07,SA01\ - -i pandas.api.types.is_list_like SA01\ - -i pandas.api.types.is_named_tuple PR07,SA01\ - -i pandas.api.types.is_numeric_dtype SA01\ - -i pandas.api.types.is_object_dtype SA01\ - -i pandas.api.types.is_period_dtype SA01\ - -i pandas.api.types.is_re PR07,SA01\ - -i pandas.api.types.is_re_compilable PR07,SA01\ - -i pandas.api.types.is_scalar SA01\ - -i pandas.api.types.is_signed_integer_dtype SA01\ - -i pandas.api.types.is_sparse SA01\ - -i pandas.api.types.is_string_dtype SA01\ - -i pandas.api.types.is_timedelta64_dtype SA01\ - -i pandas.api.types.is_timedelta64_ns_dtype SA01\ - -i pandas.api.types.is_unsigned_integer_dtype SA01\ - -i pandas.api.types.pandas_dtype PR07,RT03,SA01\ - -i pandas.api.types.union_categoricals RT03,SA01\ - -i pandas.arrays.ArrowExtensionArray PR07,SA01\ - -i pandas.arrays.BooleanArray SA01\ - -i pandas.arrays.DatetimeArray SA01\ - -i pandas.arrays.FloatingArray SA01\ - -i pandas.arrays.IntegerArray SA01\ - -i pandas.arrays.IntervalArray.closed SA01\ - -i pandas.arrays.IntervalArray.contains RT03\ - -i pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01\ - -i 
pandas.arrays.IntervalArray.left SA01\ - -i pandas.arrays.IntervalArray.length SA01\ - -i pandas.arrays.IntervalArray.mid SA01\ - -i pandas.arrays.IntervalArray.right SA01\ - -i pandas.arrays.IntervalArray.set_closed RT03,SA01\ - -i pandas.arrays.IntervalArray.to_tuples RT03,SA01\ - -i pandas.arrays.NumpyExtensionArray SA01\ - -i pandas.arrays.SparseArray PR07,SA01\ - -i pandas.arrays.TimedeltaArray PR07,SA01\ - -i pandas.bdate_range RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.agg RT03\ - -i pandas.core.groupby.DataFrameGroupBy.aggregate RT03\ - -i pandas.core.groupby.DataFrameGroupBy.apply RT03\ - -i pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.cummax RT03\ - -i pandas.core.groupby.DataFrameGroupBy.cummin RT03\ - -i pandas.core.groupby.DataFrameGroupBy.cumprod RT03\ - -i pandas.core.groupby.DataFrameGroupBy.cumsum RT03\ - -i pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.groups SA01\ - -i pandas.core.groupby.DataFrameGroupBy.hist RT03\ - -i pandas.core.groupby.DataFrameGroupBy.indices SA01\ - -i pandas.core.groupby.DataFrameGroupBy.max SA01\ - -i pandas.core.groupby.DataFrameGroupBy.mean RT03\ - -i pandas.core.groupby.DataFrameGroupBy.median SA01\ - -i pandas.core.groupby.DataFrameGroupBy.min SA01\ - -i pandas.core.groupby.DataFrameGroupBy.nth PR02\ - -i pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.ohlc SA01\ - -i pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.prod SA01\ - -i pandas.core.groupby.DataFrameGroupBy.rank RT03\ - -i pandas.core.groupby.DataFrameGroupBy.resample RT03\ - -i pandas.core.groupby.DataFrameGroupBy.sem SA01\ - -i pandas.core.groupby.DataFrameGroupBy.skew RT03\ - -i pandas.core.groupby.DataFrameGroupBy.sum 
SA01\ - -i pandas.core.groupby.DataFrameGroupBy.transform RT03\ - -i pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01\ - -i pandas.core.groupby.SeriesGroupBy.agg RT03\ - -i pandas.core.groupby.SeriesGroupBy.aggregate RT03\ - -i pandas.core.groupby.SeriesGroupBy.apply RT03\ - -i pandas.core.groupby.SeriesGroupBy.cummax RT03\ - -i pandas.core.groupby.SeriesGroupBy.cummin RT03\ - -i pandas.core.groupby.SeriesGroupBy.cumprod RT03\ - -i pandas.core.groupby.SeriesGroupBy.cumsum RT03\ - -i pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01\ - -i pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01\ - -i pandas.core.groupby.SeriesGroupBy.groups SA01\ - -i pandas.core.groupby.SeriesGroupBy.indices SA01\ - -i pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01\ - -i pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01\ - -i pandas.core.groupby.SeriesGroupBy.max SA01\ - -i pandas.core.groupby.SeriesGroupBy.mean RT03\ - -i pandas.core.groupby.SeriesGroupBy.median SA01\ - -i pandas.core.groupby.SeriesGroupBy.min SA01\ - -i pandas.core.groupby.SeriesGroupBy.nth PR02\ - -i pandas.core.groupby.SeriesGroupBy.ohlc SA01\ - -i pandas.core.groupby.SeriesGroupBy.plot PR02,SA01\ - -i pandas.core.groupby.SeriesGroupBy.prod SA01\ - -i pandas.core.groupby.SeriesGroupBy.rank RT03\ - -i pandas.core.groupby.SeriesGroupBy.resample RT03\ - -i pandas.core.groupby.SeriesGroupBy.sem SA01\ - -i pandas.core.groupby.SeriesGroupBy.skew RT03\ - -i pandas.core.groupby.SeriesGroupBy.sum SA01\ - -i pandas.core.groupby.SeriesGroupBy.transform RT03\ - -i pandas.core.resample.Resampler.__iter__ RT03,SA01\ - -i pandas.core.resample.Resampler.ffill RT03\ - -i pandas.core.resample.Resampler.get_group RT03,SA01\ - -i pandas.core.resample.Resampler.groups SA01\ - -i pandas.core.resample.Resampler.indices SA01\ - -i pandas.core.resample.Resampler.max PR01,RT03,SA01\ - -i pandas.core.resample.Resampler.mean SA01\ - -i pandas.core.resample.Resampler.median SA01\ - -i 
pandas.core.resample.Resampler.min PR01,RT03,SA01\ - -i pandas.core.resample.Resampler.ohlc SA01\ - -i pandas.core.resample.Resampler.prod SA01\ - -i pandas.core.resample.Resampler.quantile PR01,PR07\ - -i pandas.core.resample.Resampler.sem SA01\ - -i pandas.core.resample.Resampler.std SA01\ - -i pandas.core.resample.Resampler.sum SA01\ - -i pandas.core.resample.Resampler.transform PR01,RT03,SA01\ - -i pandas.core.resample.Resampler.var SA01\ - -i pandas.core.window.expanding.Expanding.corr PR01\ - -i pandas.core.window.expanding.Expanding.count PR01\ - -i pandas.core.window.rolling.Rolling.max PR01\ - -i pandas.core.window.rolling.Window.std PR01\ - -i pandas.core.window.rolling.Window.var PR01\ - -i pandas.date_range RT03\ - -i pandas.describe_option SA01\ - -i pandas.errors.AbstractMethodError PR01,SA01\ - -i pandas.errors.AttributeConflictWarning SA01\ - -i pandas.errors.CSSWarning SA01\ - -i pandas.errors.CategoricalConversionWarning SA01\ - -i pandas.errors.ChainedAssignmentError SA01\ - -i pandas.errors.ClosedFileError SA01\ - -i pandas.errors.DataError SA01\ - -i pandas.errors.DuplicateLabelError SA01\ - -i pandas.errors.EmptyDataError SA01\ - -i pandas.errors.IntCastingNaNError SA01\ - -i pandas.errors.InvalidIndexError SA01\ - -i pandas.errors.InvalidVersion SA01\ - -i pandas.errors.MergeError SA01\ - -i pandas.errors.NullFrequencyError SA01\ - -i pandas.errors.NumExprClobberingError SA01\ - -i pandas.errors.NumbaUtilError SA01\ - -i pandas.errors.OptionError SA01\ - -i pandas.errors.OutOfBoundsDatetime SA01\ - -i pandas.errors.OutOfBoundsTimedelta SA01\ - -i pandas.errors.PerformanceWarning SA01\ - -i pandas.errors.PossibleDataLossError SA01\ - -i pandas.errors.PossiblePrecisionLoss SA01\ - -i pandas.errors.SpecificationError SA01\ - -i pandas.errors.UndefinedVariableError PR01,SA01\ - -i pandas.errors.UnsortedIndexError SA01\ - -i pandas.errors.UnsupportedFunctionCall SA01\ - -i pandas.errors.ValueLabelTypeMismatch SA01\ - -i pandas.get_option SA01\ - 
-i pandas.infer_freq SA01\ - -i pandas.interval_range RT03\ - -i pandas.io.formats.style.Styler.apply RT03\ - -i pandas.io.formats.style.Styler.apply_index RT03\ - -i pandas.io.formats.style.Styler.background_gradient RT03\ - -i pandas.io.formats.style.Styler.bar RT03,SA01\ - -i pandas.io.formats.style.Styler.clear SA01\ - -i pandas.io.formats.style.Styler.concat RT03,SA01\ - -i pandas.io.formats.style.Styler.export RT03\ - -i pandas.io.formats.style.Styler.format RT03\ - -i pandas.io.formats.style.Styler.format_index RT03\ - -i pandas.io.formats.style.Styler.from_custom_template SA01\ - -i pandas.io.formats.style.Styler.hide RT03,SA01\ - -i pandas.io.formats.style.Styler.highlight_between RT03\ - -i pandas.io.formats.style.Styler.highlight_max RT03\ - -i pandas.io.formats.style.Styler.highlight_min RT03\ - -i pandas.io.formats.style.Styler.highlight_null RT03\ - -i pandas.io.formats.style.Styler.highlight_quantile RT03\ - -i pandas.io.formats.style.Styler.map RT03\ - -i pandas.io.formats.style.Styler.map_index RT03\ - -i pandas.io.formats.style.Styler.relabel_index RT03\ - -i pandas.io.formats.style.Styler.set_caption RT03,SA01\ - -i pandas.io.formats.style.Styler.set_properties RT03,SA01\ - -i pandas.io.formats.style.Styler.set_sticky RT03,SA01\ - -i pandas.io.formats.style.Styler.set_table_attributes PR07,RT03\ - -i pandas.io.formats.style.Styler.set_table_styles RT03\ - -i pandas.io.formats.style.Styler.set_td_classes RT03\ - -i pandas.io.formats.style.Styler.set_tooltips RT03,SA01\ - -i pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01\ - -i pandas.io.formats.style.Styler.text_gradient RT03\ - -i pandas.io.formats.style.Styler.to_excel PR01\ - -i pandas.io.formats.style.Styler.to_string SA01\ - -i pandas.io.formats.style.Styler.use RT03\ - -i pandas.io.json.build_table_schema PR07,RT03,SA01\ - -i pandas.io.stata.StataReader.data_label SA01\ - -i pandas.io.stata.StataReader.value_labels RT03,SA01\ - -i pandas.io.stata.StataReader.variable_labels RT03,SA01\ 
- -i pandas.io.stata.StataWriter.write_file SA01\ - -i pandas.json_normalize RT03,SA01\ - -i pandas.merge PR07\ - -i pandas.merge_asof PR07,RT03\ - -i pandas.merge_ordered PR07\ - -i pandas.option_context SA01\ - -i pandas.period_range RT03,SA01\ - -i pandas.pivot PR07\ - -i pandas.pivot_table PR07\ - -i pandas.plotting.andrews_curves RT03,SA01\ - -i pandas.plotting.autocorrelation_plot RT03,SA01\ - -i pandas.plotting.lag_plot RT03,SA01\ - -i pandas.plotting.parallel_coordinates PR07,RT03,SA01\ - -i pandas.plotting.plot_params SA01\ - -i pandas.plotting.scatter_matrix PR07,SA01\ - -i pandas.plotting.table PR07,RT03,SA01\ - -i pandas.qcut PR07,SA01\ - -i pandas.read_feather SA01\ - -i pandas.read_orc SA01\ - -i pandas.read_sas SA01\ - -i pandas.read_spss SA01\ - -i pandas.reset_option SA01\ - -i pandas.set_eng_float_format RT03,SA01\ - -i pandas.set_option SA01\ - -i pandas.show_versions SA01\ - -i pandas.test SA01\ - -i pandas.testing.assert_extension_array_equal SA01\ - -i pandas.testing.assert_index_equal PR07,SA01\ - -i pandas.testing.assert_series_equal PR07,SA01\ - -i pandas.timedelta_range SA01\ - -i pandas.tseries.api.guess_datetime_format SA01\ - -i pandas.tseries.offsets.BDay PR02,SA01\ - -i pandas.tseries.offsets.BMonthBegin PR02\ - -i pandas.tseries.offsets.BMonthEnd PR02\ - -i pandas.tseries.offsets.BQuarterBegin PR02\ - -i pandas.tseries.offsets.BQuarterBegin.copy SA01\ - -i pandas.tseries.offsets.BQuarterBegin.freqstr SA01\ - -i pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.BQuarterBegin.kwds SA01\ - -i pandas.tseries.offsets.BQuarterBegin.n GL08\ - -i pandas.tseries.offsets.BQuarterBegin.name SA01\ - -i pandas.tseries.offsets.BQuarterBegin.nanos GL08\ - -i pandas.tseries.offsets.BQuarterBegin.normalize GL08\ - -i pandas.tseries.offsets.BQuarterBegin.rule_code GL08\ - -i pandas.tseries.offsets.BQuarterBegin.startingMonth GL08\ - -i pandas.tseries.offsets.BQuarterEnd PR02\ - -i 
pandas.tseries.offsets.BQuarterEnd.copy SA01\ - -i pandas.tseries.offsets.BQuarterEnd.freqstr SA01\ - -i pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.BQuarterEnd.kwds SA01\ - -i pandas.tseries.offsets.BQuarterEnd.n GL08\ - -i pandas.tseries.offsets.BQuarterEnd.name SA01\ - -i pandas.tseries.offsets.BQuarterEnd.nanos GL08\ - -i pandas.tseries.offsets.BQuarterEnd.normalize GL08\ - -i pandas.tseries.offsets.BQuarterEnd.rule_code GL08\ - -i pandas.tseries.offsets.BQuarterEnd.startingMonth GL08\ - -i pandas.tseries.offsets.BYearBegin PR02\ - -i pandas.tseries.offsets.BYearBegin.copy SA01\ - -i pandas.tseries.offsets.BYearBegin.freqstr SA01\ - -i pandas.tseries.offsets.BYearBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.BYearBegin.kwds SA01\ - -i pandas.tseries.offsets.BYearBegin.month GL08\ - -i pandas.tseries.offsets.BYearBegin.n GL08\ - -i pandas.tseries.offsets.BYearBegin.name SA01\ - -i pandas.tseries.offsets.BYearBegin.nanos GL08\ - -i pandas.tseries.offsets.BYearBegin.normalize GL08\ - -i pandas.tseries.offsets.BYearBegin.rule_code GL08\ - -i pandas.tseries.offsets.BYearEnd PR02\ - -i pandas.tseries.offsets.BYearEnd.copy SA01\ - -i pandas.tseries.offsets.BYearEnd.freqstr SA01\ - -i pandas.tseries.offsets.BYearEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.BYearEnd.kwds SA01\ - -i pandas.tseries.offsets.BYearEnd.month GL08\ - -i pandas.tseries.offsets.BYearEnd.n GL08\ - -i pandas.tseries.offsets.BYearEnd.name SA01\ - -i pandas.tseries.offsets.BYearEnd.nanos GL08\ - -i pandas.tseries.offsets.BYearEnd.normalize GL08\ - -i pandas.tseries.offsets.BYearEnd.rule_code GL08\ - -i pandas.tseries.offsets.BusinessDay PR02,SA01\ - -i pandas.tseries.offsets.BusinessDay.calendar GL08\ - -i pandas.tseries.offsets.BusinessDay.copy SA01\ - -i pandas.tseries.offsets.BusinessDay.freqstr SA01\ - -i pandas.tseries.offsets.BusinessDay.holidays GL08\ - -i pandas.tseries.offsets.BusinessDay.is_on_offset GL08\ - -i 
pandas.tseries.offsets.BusinessDay.kwds SA01\ - -i pandas.tseries.offsets.BusinessDay.n GL08\ - -i pandas.tseries.offsets.BusinessDay.name SA01\ - -i pandas.tseries.offsets.BusinessDay.nanos GL08\ - -i pandas.tseries.offsets.BusinessDay.normalize GL08\ - -i pandas.tseries.offsets.BusinessDay.rule_code GL08\ - -i pandas.tseries.offsets.BusinessDay.weekmask GL08\ - -i pandas.tseries.offsets.BusinessHour PR02,SA01\ - -i pandas.tseries.offsets.BusinessHour.calendar GL08\ - -i pandas.tseries.offsets.BusinessHour.copy SA01\ - -i pandas.tseries.offsets.BusinessHour.end GL08\ - -i pandas.tseries.offsets.BusinessHour.freqstr SA01\ - -i pandas.tseries.offsets.BusinessHour.holidays GL08\ - -i pandas.tseries.offsets.BusinessHour.is_on_offset GL08\ - -i pandas.tseries.offsets.BusinessHour.kwds SA01\ - -i pandas.tseries.offsets.BusinessHour.n GL08\ - -i pandas.tseries.offsets.BusinessHour.name SA01\ - -i pandas.tseries.offsets.BusinessHour.nanos GL08\ - -i pandas.tseries.offsets.BusinessHour.normalize GL08\ - -i pandas.tseries.offsets.BusinessHour.rule_code GL08\ - -i pandas.tseries.offsets.BusinessHour.start GL08\ - -i pandas.tseries.offsets.BusinessHour.weekmask GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin PR02\ - -i pandas.tseries.offsets.BusinessMonthBegin.copy SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.kwds SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.n GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.name SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.nanos GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.normalize GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd PR02\ - -i pandas.tseries.offsets.BusinessMonthEnd.copy SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08\ 
- -i pandas.tseries.offsets.BusinessMonthEnd.kwds SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.n GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd.name SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.nanos GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd.normalize GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.CBMonthBegin PR02\ - -i pandas.tseries.offsets.CBMonthEnd PR02\ - -i pandas.tseries.offsets.CDay PR02,SA01\ - -i pandas.tseries.offsets.CustomBusinessDay PR02,SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.kwds SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.n GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.name SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.normalize GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.weekmask GL08\ - -i pandas.tseries.offsets.CustomBusinessHour PR02,SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.end GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.kwds SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.n GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.name SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.normalize GL08\ - -i 
pandas.tseries.offsets.CustomBusinessHour.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.start GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.weekmask GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin PR02\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd PR02\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.name SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask 
GL08\ - -i pandas.tseries.offsets.DateOffset PR02\ - -i pandas.tseries.offsets.DateOffset.copy SA01\ - -i pandas.tseries.offsets.DateOffset.freqstr SA01\ - -i pandas.tseries.offsets.DateOffset.is_on_offset GL08\ - -i pandas.tseries.offsets.DateOffset.kwds SA01\ - -i pandas.tseries.offsets.DateOffset.n GL08\ - -i pandas.tseries.offsets.DateOffset.name SA01\ - -i pandas.tseries.offsets.DateOffset.nanos GL08\ - -i pandas.tseries.offsets.DateOffset.normalize GL08\ - -i pandas.tseries.offsets.DateOffset.rule_code GL08\ - -i pandas.tseries.offsets.Day PR02\ - -i pandas.tseries.offsets.Day.copy SA01\ - -i pandas.tseries.offsets.Day.delta GL08\ - -i pandas.tseries.offsets.Day.freqstr SA01\ - -i pandas.tseries.offsets.Day.is_on_offset GL08\ - -i pandas.tseries.offsets.Day.kwds SA01\ - -i pandas.tseries.offsets.Day.n GL08\ - -i pandas.tseries.offsets.Day.name SA01\ - -i pandas.tseries.offsets.Day.nanos SA01\ - -i pandas.tseries.offsets.Day.normalize GL08\ - -i pandas.tseries.offsets.Day.rule_code GL08\ - -i pandas.tseries.offsets.Easter PR02\ - -i pandas.tseries.offsets.Easter.copy SA01\ - -i pandas.tseries.offsets.Easter.freqstr SA01\ - -i pandas.tseries.offsets.Easter.is_on_offset GL08\ - -i pandas.tseries.offsets.Easter.kwds SA01\ - -i pandas.tseries.offsets.Easter.n GL08\ - -i pandas.tseries.offsets.Easter.name SA01\ - -i pandas.tseries.offsets.Easter.nanos GL08\ - -i pandas.tseries.offsets.Easter.normalize GL08\ - -i pandas.tseries.offsets.Easter.rule_code GL08\ - -i pandas.tseries.offsets.FY5253 PR02\ - -i pandas.tseries.offsets.FY5253.copy SA01\ - -i pandas.tseries.offsets.FY5253.freqstr SA01\ - -i pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08\ - -i pandas.tseries.offsets.FY5253.get_year_end GL08\ - -i pandas.tseries.offsets.FY5253.is_on_offset GL08\ - -i pandas.tseries.offsets.FY5253.kwds SA01\ - -i pandas.tseries.offsets.FY5253.n GL08\ - -i pandas.tseries.offsets.FY5253.name SA01\ - -i pandas.tseries.offsets.FY5253.nanos GL08\ - -i 
pandas.tseries.offsets.FY5253.normalize GL08\ - -i pandas.tseries.offsets.FY5253.rule_code GL08\ - -i pandas.tseries.offsets.FY5253.startingMonth GL08\ - -i pandas.tseries.offsets.FY5253.variation GL08\ - -i pandas.tseries.offsets.FY5253.weekday GL08\ - -i pandas.tseries.offsets.FY5253Quarter PR02\ - -i pandas.tseries.offsets.FY5253Quarter.copy SA01\ - -i pandas.tseries.offsets.FY5253Quarter.freqstr SA01\ - -i pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08\ - -i pandas.tseries.offsets.FY5253Quarter.get_weeks GL08\ - -i pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08\ - -i pandas.tseries.offsets.FY5253Quarter.kwds SA01\ - -i pandas.tseries.offsets.FY5253Quarter.n GL08\ - -i pandas.tseries.offsets.FY5253Quarter.name SA01\ - -i pandas.tseries.offsets.FY5253Quarter.nanos GL08\ - -i pandas.tseries.offsets.FY5253Quarter.normalize GL08\ - -i pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week GL08\ - -i pandas.tseries.offsets.FY5253Quarter.rule_code GL08\ - -i pandas.tseries.offsets.FY5253Quarter.startingMonth GL08\ - -i pandas.tseries.offsets.FY5253Quarter.variation GL08\ - -i pandas.tseries.offsets.FY5253Quarter.weekday GL08\ - -i pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08\ - -i pandas.tseries.offsets.Hour PR02\ - -i pandas.tseries.offsets.Hour.copy SA01\ - -i pandas.tseries.offsets.Hour.delta GL08\ - -i pandas.tseries.offsets.Hour.freqstr SA01\ - -i pandas.tseries.offsets.Hour.is_on_offset GL08\ - -i pandas.tseries.offsets.Hour.kwds SA01\ - -i pandas.tseries.offsets.Hour.n GL08\ - -i pandas.tseries.offsets.Hour.name SA01\ - -i pandas.tseries.offsets.Hour.nanos SA01\ - -i pandas.tseries.offsets.Hour.normalize GL08\ - -i pandas.tseries.offsets.Hour.rule_code GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth PR02,SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.copy SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08\ - -i 
pandas.tseries.offsets.LastWeekOfMonth.kwds SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.n GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.name SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.nanos GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.normalize GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.week GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.weekday GL08\ - -i pandas.tseries.offsets.Micro PR02\ - -i pandas.tseries.offsets.Micro.copy SA01\ - -i pandas.tseries.offsets.Micro.delta GL08\ - -i pandas.tseries.offsets.Micro.freqstr SA01\ - -i pandas.tseries.offsets.Micro.is_on_offset GL08\ - -i pandas.tseries.offsets.Micro.kwds SA01\ - -i pandas.tseries.offsets.Micro.n GL08\ - -i pandas.tseries.offsets.Micro.name SA01\ - -i pandas.tseries.offsets.Micro.nanos SA01\ - -i pandas.tseries.offsets.Micro.normalize GL08\ - -i pandas.tseries.offsets.Micro.rule_code GL08\ - -i pandas.tseries.offsets.Milli PR02\ - -i pandas.tseries.offsets.Milli.copy SA01\ - -i pandas.tseries.offsets.Milli.delta GL08\ - -i pandas.tseries.offsets.Milli.freqstr SA01\ - -i pandas.tseries.offsets.Milli.is_on_offset GL08\ - -i pandas.tseries.offsets.Milli.kwds SA01\ - -i pandas.tseries.offsets.Milli.n GL08\ - -i pandas.tseries.offsets.Milli.name SA01\ - -i pandas.tseries.offsets.Milli.nanos SA01\ - -i pandas.tseries.offsets.Milli.normalize GL08\ - -i pandas.tseries.offsets.Milli.rule_code GL08\ - -i pandas.tseries.offsets.Minute PR02\ - -i pandas.tseries.offsets.Minute.copy SA01\ - -i pandas.tseries.offsets.Minute.delta GL08\ - -i pandas.tseries.offsets.Minute.freqstr SA01\ - -i pandas.tseries.offsets.Minute.is_on_offset GL08\ - -i pandas.tseries.offsets.Minute.kwds SA01\ - -i pandas.tseries.offsets.Minute.n GL08\ - -i pandas.tseries.offsets.Minute.name SA01\ - -i pandas.tseries.offsets.Minute.nanos SA01\ - -i pandas.tseries.offsets.Minute.normalize GL08\ - -i pandas.tseries.offsets.Minute.rule_code GL08\ - -i 
pandas.tseries.offsets.MonthBegin PR02\ - -i pandas.tseries.offsets.MonthBegin.copy SA01\ - -i pandas.tseries.offsets.MonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.MonthBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.MonthBegin.kwds SA01\ - -i pandas.tseries.offsets.MonthBegin.n GL08\ - -i pandas.tseries.offsets.MonthBegin.name SA01\ - -i pandas.tseries.offsets.MonthBegin.nanos GL08\ - -i pandas.tseries.offsets.MonthBegin.normalize GL08\ - -i pandas.tseries.offsets.MonthBegin.rule_code GL08\ - -i pandas.tseries.offsets.MonthEnd PR02\ - -i pandas.tseries.offsets.MonthEnd.copy SA01\ - -i pandas.tseries.offsets.MonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.MonthEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.MonthEnd.kwds SA01\ - -i pandas.tseries.offsets.MonthEnd.n GL08\ - -i pandas.tseries.offsets.MonthEnd.name SA01\ - -i pandas.tseries.offsets.MonthEnd.nanos GL08\ - -i pandas.tseries.offsets.MonthEnd.normalize GL08\ - -i pandas.tseries.offsets.MonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.Nano PR02\ - -i pandas.tseries.offsets.Nano.copy SA01\ - -i pandas.tseries.offsets.Nano.delta GL08\ - -i pandas.tseries.offsets.Nano.freqstr SA01\ - -i pandas.tseries.offsets.Nano.is_on_offset GL08\ - -i pandas.tseries.offsets.Nano.kwds SA01\ - -i pandas.tseries.offsets.Nano.n GL08\ - -i pandas.tseries.offsets.Nano.name SA01\ - -i pandas.tseries.offsets.Nano.nanos SA01\ - -i pandas.tseries.offsets.Nano.normalize GL08\ - -i pandas.tseries.offsets.Nano.rule_code GL08\ - -i pandas.tseries.offsets.QuarterBegin PR02\ - -i pandas.tseries.offsets.QuarterBegin.copy SA01\ - -i pandas.tseries.offsets.QuarterBegin.freqstr SA01\ - -i pandas.tseries.offsets.QuarterBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.QuarterBegin.kwds SA01\ - -i pandas.tseries.offsets.QuarterBegin.n GL08\ - -i pandas.tseries.offsets.QuarterBegin.name SA01\ - -i pandas.tseries.offsets.QuarterBegin.nanos GL08\ - -i pandas.tseries.offsets.QuarterBegin.normalize GL08\ - -i 
pandas.tseries.offsets.QuarterBegin.rule_code GL08\ - -i pandas.tseries.offsets.QuarterBegin.startingMonth GL08\ - -i pandas.tseries.offsets.QuarterEnd PR02\ - -i pandas.tseries.offsets.QuarterEnd.copy SA01\ - -i pandas.tseries.offsets.QuarterEnd.freqstr SA01\ - -i pandas.tseries.offsets.QuarterEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.QuarterEnd.kwds SA01\ - -i pandas.tseries.offsets.QuarterEnd.n GL08\ - -i pandas.tseries.offsets.QuarterEnd.name SA01\ - -i pandas.tseries.offsets.QuarterEnd.nanos GL08\ - -i pandas.tseries.offsets.QuarterEnd.normalize GL08\ - -i pandas.tseries.offsets.QuarterEnd.rule_code GL08\ - -i pandas.tseries.offsets.QuarterEnd.startingMonth GL08\ - -i pandas.tseries.offsets.Second PR02\ - -i pandas.tseries.offsets.Second.copy SA01\ - -i pandas.tseries.offsets.Second.delta GL08\ - -i pandas.tseries.offsets.Second.freqstr SA01\ - -i pandas.tseries.offsets.Second.is_on_offset GL08\ - -i pandas.tseries.offsets.Second.kwds SA01\ - -i pandas.tseries.offsets.Second.n GL08\ - -i pandas.tseries.offsets.Second.name SA01\ - -i pandas.tseries.offsets.Second.nanos SA01\ - -i pandas.tseries.offsets.Second.normalize GL08\ - -i pandas.tseries.offsets.Second.rule_code GL08\ - -i pandas.tseries.offsets.SemiMonthBegin PR02,SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.copy SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.kwds SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.n GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.name SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.nanos GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.normalize GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.rule_code GL08\ - -i pandas.tseries.offsets.SemiMonthEnd PR02,SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.copy SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08\ - -i 
pandas.tseries.offsets.SemiMonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.kwds SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.n GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.name SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.nanos GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.normalize GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.Tick GL08\ - -i pandas.tseries.offsets.Tick.copy SA01\ - -i pandas.tseries.offsets.Tick.delta GL08\ - -i pandas.tseries.offsets.Tick.freqstr SA01\ - -i pandas.tseries.offsets.Tick.is_on_offset GL08\ - -i pandas.tseries.offsets.Tick.kwds SA01\ - -i pandas.tseries.offsets.Tick.n GL08\ - -i pandas.tseries.offsets.Tick.name SA01\ - -i pandas.tseries.offsets.Tick.nanos SA01\ - -i pandas.tseries.offsets.Tick.normalize GL08\ - -i pandas.tseries.offsets.Tick.rule_code GL08\ - -i pandas.tseries.offsets.Week PR02\ - -i pandas.tseries.offsets.Week.copy SA01\ - -i pandas.tseries.offsets.Week.freqstr SA01\ - -i pandas.tseries.offsets.Week.is_on_offset GL08\ - -i pandas.tseries.offsets.Week.kwds SA01\ - -i pandas.tseries.offsets.Week.n GL08\ - -i pandas.tseries.offsets.Week.name SA01\ - -i pandas.tseries.offsets.Week.nanos GL08\ - -i pandas.tseries.offsets.Week.normalize GL08\ - -i pandas.tseries.offsets.Week.rule_code GL08\ - -i pandas.tseries.offsets.Week.weekday GL08\ - -i pandas.tseries.offsets.WeekOfMonth PR02,SA01\ - -i pandas.tseries.offsets.WeekOfMonth.copy SA01\ - -i pandas.tseries.offsets.WeekOfMonth.freqstr SA01\ - -i pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08\ - -i pandas.tseries.offsets.WeekOfMonth.kwds SA01\ - -i pandas.tseries.offsets.WeekOfMonth.n GL08\ - -i pandas.tseries.offsets.WeekOfMonth.name SA01\ - -i pandas.tseries.offsets.WeekOfMonth.nanos GL08\ - -i pandas.tseries.offsets.WeekOfMonth.normalize GL08\ - -i pandas.tseries.offsets.WeekOfMonth.rule_code GL08\ - -i 
pandas.tseries.offsets.WeekOfMonth.week GL08\ - -i pandas.tseries.offsets.WeekOfMonth.weekday GL08\ - -i pandas.tseries.offsets.YearBegin PR02\ - -i pandas.tseries.offsets.YearBegin.copy SA01\ - -i pandas.tseries.offsets.YearBegin.freqstr SA01\ - -i pandas.tseries.offsets.YearBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.YearBegin.kwds SA01\ - -i pandas.tseries.offsets.YearBegin.month GL08\ - -i pandas.tseries.offsets.YearBegin.n GL08\ - -i pandas.tseries.offsets.YearBegin.name SA01\ - -i pandas.tseries.offsets.YearBegin.nanos GL08\ - -i pandas.tseries.offsets.YearBegin.normalize GL08\ - -i pandas.tseries.offsets.YearBegin.rule_code GL08\ - -i pandas.tseries.offsets.YearEnd PR02\ - -i pandas.tseries.offsets.YearEnd.copy SA01\ - -i pandas.tseries.offsets.YearEnd.freqstr SA01\ - -i pandas.tseries.offsets.YearEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.YearEnd.kwds SA01\ - -i pandas.tseries.offsets.YearEnd.month GL08\ - -i pandas.tseries.offsets.YearEnd.n GL08\ - -i pandas.tseries.offsets.YearEnd.name SA01\ - -i pandas.tseries.offsets.YearEnd.nanos GL08\ - -i pandas.tseries.offsets.YearEnd.normalize GL08\ - -i pandas.tseries.offsets.YearEnd.rule_code GL08\ - -i pandas.unique PR07\ - -i pandas.util.hash_array PR07,SA01\ - -i pandas.util.hash_pandas_object PR07,SA01 # There should be no backslash in the final line, please keep this comment in the last ignored function + -i ES01 `# For now it is ok if docstrings are missing the extended summary` \ + -i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \ + -i "pandas.Categorical.__array__ SA01" \ + -i "pandas.Categorical.codes SA01" \ + -i "pandas.Categorical.dtype SA01" \ + -i "pandas.Categorical.from_codes SA01" \ + -i "pandas.Categorical.ordered SA01" \ + -i "pandas.CategoricalDtype.categories SA01" \ + -i "pandas.CategoricalDtype.ordered SA01" \ + -i "pandas.CategoricalIndex.codes SA01" \ + -i "pandas.CategoricalIndex.ordered SA01" \ + -i 
"pandas.DataFrame.__dataframe__ SA01" \ + -i "pandas.DataFrame.__iter__ SA01" \ + -i "pandas.DataFrame.assign SA01" \ + -i "pandas.DataFrame.at_time PR01" \ + -i "pandas.DataFrame.axes SA01" \ + -i "pandas.DataFrame.backfill PR01,SA01" \ + -i "pandas.DataFrame.bfill SA01" \ + -i "pandas.DataFrame.columns SA01" \ + -i "pandas.DataFrame.copy SA01" \ + -i "pandas.DataFrame.droplevel SA01" \ + -i "pandas.DataFrame.dtypes SA01" \ + -i "pandas.DataFrame.ffill SA01" \ + -i "pandas.DataFrame.first_valid_index SA01" \ + -i "pandas.DataFrame.get SA01" \ + -i "pandas.DataFrame.hist RT03" \ + -i "pandas.DataFrame.infer_objects RT03" \ + -i "pandas.DataFrame.keys SA01" \ + -i "pandas.DataFrame.kurt RT03,SA01" \ + -i "pandas.DataFrame.kurtosis RT03,SA01" \ + -i "pandas.DataFrame.last_valid_index SA01" \ + -i "pandas.DataFrame.mask RT03" \ + -i "pandas.DataFrame.max RT03" \ + -i "pandas.DataFrame.mean RT03,SA01" \ + -i "pandas.DataFrame.median RT03,SA01" \ + -i "pandas.DataFrame.min RT03" \ + -i "pandas.DataFrame.pad PR01,SA01" \ + -i "pandas.DataFrame.plot PR02,SA01" \ + -i "pandas.DataFrame.pop SA01" \ + -i "pandas.DataFrame.prod RT03" \ + -i "pandas.DataFrame.product RT03" \ + -i "pandas.DataFrame.reorder_levels SA01" \ + -i "pandas.DataFrame.sem PR01,RT03,SA01" \ + -i "pandas.DataFrame.skew RT03,SA01" \ + -i "pandas.DataFrame.sparse PR01,SA01" \ + -i "pandas.DataFrame.sparse.density SA01" \ + -i "pandas.DataFrame.sparse.from_spmatrix SA01" \ + -i "pandas.DataFrame.sparse.to_coo SA01" \ + -i "pandas.DataFrame.sparse.to_dense SA01" \ + -i "pandas.DataFrame.std PR01,RT03,SA01" \ + -i "pandas.DataFrame.sum RT03" \ + -i "pandas.DataFrame.swapaxes PR01,SA01" \ + -i "pandas.DataFrame.swaplevel SA01" \ + -i "pandas.DataFrame.to_feather SA01" \ + -i "pandas.DataFrame.to_markdown SA01" \ + -i "pandas.DataFrame.to_parquet RT03" \ + -i "pandas.DataFrame.to_period SA01" \ + -i "pandas.DataFrame.to_timestamp SA01" \ + -i "pandas.DataFrame.tz_convert SA01" \ + -i 
"pandas.DataFrame.tz_localize SA01" \ + -i "pandas.DataFrame.unstack RT03" \ + -i "pandas.DataFrame.value_counts RT03" \ + -i "pandas.DataFrame.var PR01,RT03,SA01" \ + -i "pandas.DataFrame.where RT03" \ + -i "pandas.DatetimeIndex.ceil SA01" \ + -i "pandas.DatetimeIndex.date SA01" \ + -i "pandas.DatetimeIndex.day SA01" \ + -i "pandas.DatetimeIndex.day_name SA01" \ + -i "pandas.DatetimeIndex.day_of_year SA01" \ + -i "pandas.DatetimeIndex.dayofyear SA01" \ + -i "pandas.DatetimeIndex.floor SA01" \ + -i "pandas.DatetimeIndex.freqstr SA01" \ + -i "pandas.DatetimeIndex.hour SA01" \ + -i "pandas.DatetimeIndex.indexer_at_time PR01,RT03" \ + -i "pandas.DatetimeIndex.indexer_between_time RT03" \ + -i "pandas.DatetimeIndex.inferred_freq SA01" \ + -i "pandas.DatetimeIndex.is_leap_year SA01" \ + -i "pandas.DatetimeIndex.microsecond SA01" \ + -i "pandas.DatetimeIndex.minute SA01" \ + -i "pandas.DatetimeIndex.month SA01" \ + -i "pandas.DatetimeIndex.month_name SA01" \ + -i "pandas.DatetimeIndex.nanosecond SA01" \ + -i "pandas.DatetimeIndex.quarter SA01" \ + -i "pandas.DatetimeIndex.round SA01" \ + -i "pandas.DatetimeIndex.second SA01" \ + -i "pandas.DatetimeIndex.snap PR01,RT03,SA01" \ + -i "pandas.DatetimeIndex.std PR01,RT03" \ + -i "pandas.DatetimeIndex.time SA01" \ + -i "pandas.DatetimeIndex.timetz SA01" \ + -i "pandas.DatetimeIndex.to_period RT03" \ + -i "pandas.DatetimeIndex.to_pydatetime RT03,SA01" \ + -i "pandas.DatetimeIndex.tz SA01" \ + -i "pandas.DatetimeIndex.tz_convert RT03" \ + -i "pandas.DatetimeIndex.year SA01" \ + -i "pandas.DatetimeTZDtype SA01" \ + -i "pandas.DatetimeTZDtype.tz SA01" \ + -i "pandas.DatetimeTZDtype.unit SA01" \ + -i "pandas.ExcelFile PR01,SA01" \ + -i "pandas.ExcelFile.parse PR01,SA01" \ + -i "pandas.ExcelWriter SA01" \ + -i "pandas.Float32Dtype SA01" \ + -i "pandas.Float64Dtype SA01" \ + -i "pandas.Grouper PR02,SA01" \ + -i "pandas.HDFStore.append PR01,SA01" \ + -i "pandas.HDFStore.get SA01" \ + -i "pandas.HDFStore.groups SA01" \ + -i 
"pandas.HDFStore.info RT03,SA01" \ + -i "pandas.HDFStore.keys SA01" \ + -i "pandas.HDFStore.put PR01,SA01" \ + -i "pandas.HDFStore.select SA01" \ + -i "pandas.HDFStore.walk SA01" \ + -i "pandas.Index PR07" \ + -i "pandas.Index.T SA01" \ + -i "pandas.Index.append PR07,RT03,SA01" \ + -i "pandas.Index.astype SA01" \ + -i "pandas.Index.copy PR07,SA01" \ + -i "pandas.Index.difference PR07,RT03,SA01" \ + -i "pandas.Index.drop PR07,SA01" \ + -i "pandas.Index.drop_duplicates RT03" \ + -i "pandas.Index.droplevel RT03,SA01" \ + -i "pandas.Index.dropna RT03,SA01" \ + -i "pandas.Index.dtype SA01" \ + -i "pandas.Index.duplicated RT03" \ + -i "pandas.Index.empty GL08" \ + -i "pandas.Index.equals SA01" \ + -i "pandas.Index.fillna RT03" \ + -i "pandas.Index.get_indexer PR07,SA01" \ + -i "pandas.Index.get_indexer_for PR01,SA01" \ + -i "pandas.Index.get_indexer_non_unique PR07,SA01" \ + -i "pandas.Index.get_loc PR07,RT03,SA01" \ + -i "pandas.Index.get_slice_bound PR07" \ + -i "pandas.Index.hasnans SA01" \ + -i "pandas.Index.identical PR01,SA01" \ + -i "pandas.Index.inferred_type SA01" \ + -i "pandas.Index.insert PR07,RT03,SA01" \ + -i "pandas.Index.intersection PR07,RT03,SA01" \ + -i "pandas.Index.item SA01" \ + -i "pandas.Index.join PR07,RT03,SA01" \ + -i "pandas.Index.map SA01" \ + -i "pandas.Index.memory_usage RT03" \ + -i "pandas.Index.name SA01" \ + -i "pandas.Index.names GL08" \ + -i "pandas.Index.nbytes SA01" \ + -i "pandas.Index.ndim SA01" \ + -i "pandas.Index.nunique RT03" \ + -i "pandas.Index.putmask PR01,RT03" \ + -i "pandas.Index.ravel PR01,RT03" \ + -i "pandas.Index.reindex PR07" \ + -i "pandas.Index.shape SA01" \ + -i "pandas.Index.size SA01" \ + -i "pandas.Index.slice_indexer PR07,RT03,SA01" \ + -i "pandas.Index.slice_locs RT03" \ + -i "pandas.Index.str PR01,SA01" \ + -i "pandas.Index.symmetric_difference PR07,RT03,SA01" \ + -i "pandas.Index.take PR01,PR07" \ + -i "pandas.Index.to_list RT03" \ + -i "pandas.Index.union PR07,RT03,SA01" \ + -i "pandas.Index.unique RT03" 
\ + -i "pandas.Index.value_counts RT03" \ + -i "pandas.Index.view GL08" \ + -i "pandas.Int16Dtype SA01" \ + -i "pandas.Int32Dtype SA01" \ + -i "pandas.Int64Dtype SA01" \ + -i "pandas.Int8Dtype SA01" \ + -i "pandas.Interval PR02" \ + -i "pandas.Interval.closed SA01" \ + -i "pandas.Interval.left SA01" \ + -i "pandas.Interval.mid SA01" \ + -i "pandas.Interval.right SA01" \ + -i "pandas.IntervalDtype PR01,SA01" \ + -i "pandas.IntervalDtype.subtype SA01" \ + -i "pandas.IntervalIndex.closed SA01" \ + -i "pandas.IntervalIndex.contains RT03" \ + -i "pandas.IntervalIndex.get_indexer PR07,SA01" \ + -i "pandas.IntervalIndex.get_loc PR07,RT03,SA01" \ + -i "pandas.IntervalIndex.is_non_overlapping_monotonic SA01" \ + -i "pandas.IntervalIndex.left GL08" \ + -i "pandas.IntervalIndex.length GL08" \ + -i "pandas.IntervalIndex.mid GL08" \ + -i "pandas.IntervalIndex.right GL08" \ + -i "pandas.IntervalIndex.set_closed RT03,SA01" \ + -i "pandas.IntervalIndex.to_tuples RT03,SA01" \ + -i "pandas.MultiIndex PR01" \ + -i "pandas.MultiIndex.append PR07,SA01" \ + -i "pandas.MultiIndex.copy PR07,RT03,SA01" \ + -i "pandas.MultiIndex.drop PR07,RT03,SA01" \ + -i "pandas.MultiIndex.droplevel RT03,SA01" \ + -i "pandas.MultiIndex.dtypes SA01" \ + -i "pandas.MultiIndex.get_indexer PR07,SA01" \ + -i "pandas.MultiIndex.get_level_values SA01" \ + -i "pandas.MultiIndex.get_loc PR07" \ + -i "pandas.MultiIndex.get_loc_level PR07" \ + -i "pandas.MultiIndex.levels SA01" \ + -i "pandas.MultiIndex.levshape SA01" \ + -i "pandas.MultiIndex.names SA01" \ + -i "pandas.MultiIndex.nlevels SA01" \ + -i "pandas.MultiIndex.remove_unused_levels RT03,SA01" \ + -i "pandas.MultiIndex.reorder_levels RT03,SA01" \ + -i "pandas.MultiIndex.set_codes SA01" \ + -i "pandas.MultiIndex.set_levels RT03,SA01" \ + -i "pandas.MultiIndex.sortlevel PR07,SA01" \ + -i "pandas.MultiIndex.to_frame RT03" \ + -i "pandas.MultiIndex.truncate SA01" \ + -i "pandas.NA SA01" \ + -i "pandas.NaT SA01" \ + -i "pandas.NamedAgg SA01" \ + -i "pandas.Period 
SA01" \ + -i "pandas.Period.asfreq SA01" \ + -i "pandas.Period.freq GL08" \ + -i "pandas.Period.freqstr SA01" \ + -i "pandas.Period.is_leap_year SA01" \ + -i "pandas.Period.month SA01" \ + -i "pandas.Period.now SA01" \ + -i "pandas.Period.ordinal GL08" \ + -i "pandas.Period.quarter SA01" \ + -i "pandas.Period.strftime PR01,SA01" \ + -i "pandas.Period.to_timestamp SA01" \ + -i "pandas.Period.year SA01" \ + -i "pandas.PeriodDtype SA01" \ + -i "pandas.PeriodDtype.freq SA01" \ + -i "pandas.PeriodIndex.day SA01" \ + -i "pandas.PeriodIndex.day_of_week SA01" \ + -i "pandas.PeriodIndex.day_of_year SA01" \ + -i "pandas.PeriodIndex.dayofweek SA01" \ + -i "pandas.PeriodIndex.dayofyear SA01" \ + -i "pandas.PeriodIndex.days_in_month SA01" \ + -i "pandas.PeriodIndex.daysinmonth SA01" \ + -i "pandas.PeriodIndex.freqstr SA01" \ + -i "pandas.PeriodIndex.from_fields PR07,SA01" \ + -i "pandas.PeriodIndex.from_ordinals SA01" \ + -i "pandas.PeriodIndex.hour SA01" \ + -i "pandas.PeriodIndex.is_leap_year SA01" \ + -i "pandas.PeriodIndex.minute SA01" \ + -i "pandas.PeriodIndex.month SA01" \ + -i "pandas.PeriodIndex.quarter SA01" \ + -i "pandas.PeriodIndex.qyear GL08" \ + -i "pandas.PeriodIndex.second SA01" \ + -i "pandas.PeriodIndex.to_timestamp RT03,SA01" \ + -i "pandas.PeriodIndex.week SA01" \ + -i "pandas.PeriodIndex.weekday SA01" \ + -i "pandas.PeriodIndex.weekofyear SA01" \ + -i "pandas.PeriodIndex.year SA01" \ + -i "pandas.RangeIndex PR07" \ + -i "pandas.RangeIndex.from_range PR01,SA01" \ + -i "pandas.RangeIndex.start SA01" \ + -i "pandas.RangeIndex.step SA01" \ + -i "pandas.RangeIndex.stop SA01" \ + -i "pandas.Series SA01" \ + -i "pandas.Series.T SA01" \ + -i "pandas.Series.__iter__ RT03,SA01" \ + -i "pandas.Series.add PR07" \ + -i "pandas.Series.at_time PR01" \ + -i "pandas.Series.backfill PR01,SA01" \ + -i "pandas.Series.bfill SA01" \ + -i "pandas.Series.case_when RT03" \ + -i "pandas.Series.cat PR07,SA01" \ + -i "pandas.Series.cat.add_categories PR01,PR02" \ + -i 
"pandas.Series.cat.as_ordered PR01" \ + -i "pandas.Series.cat.as_unordered PR01" \ + -i "pandas.Series.cat.codes SA01" \ + -i "pandas.Series.cat.ordered SA01" \ + -i "pandas.Series.cat.remove_categories PR01,PR02" \ + -i "pandas.Series.cat.remove_unused_categories PR01" \ + -i "pandas.Series.cat.rename_categories PR01,PR02" \ + -i "pandas.Series.cat.reorder_categories PR01,PR02" \ + -i "pandas.Series.cat.set_categories PR01,PR02" \ + -i "pandas.Series.copy SA01" \ + -i "pandas.Series.div PR07" \ + -i "pandas.Series.droplevel SA01" \ + -i "pandas.Series.dt.as_unit PR01,PR02" \ + -i "pandas.Series.dt.ceil PR01,PR02,SA01" \ + -i "pandas.Series.dt.components SA01" \ + -i "pandas.Series.dt.date SA01" \ + -i "pandas.Series.dt.day SA01" \ + -i "pandas.Series.dt.day_name PR01,PR02,SA01" \ + -i "pandas.Series.dt.day_of_year SA01" \ + -i "pandas.Series.dt.dayofyear SA01" \ + -i "pandas.Series.dt.days SA01" \ + -i "pandas.Series.dt.days_in_month SA01" \ + -i "pandas.Series.dt.daysinmonth SA01" \ + -i "pandas.Series.dt.floor PR01,PR02,SA01" \ + -i "pandas.Series.dt.freq GL08" \ + -i "pandas.Series.dt.hour SA01" \ + -i "pandas.Series.dt.is_leap_year SA01" \ + -i "pandas.Series.dt.microsecond SA01" \ + -i "pandas.Series.dt.microseconds SA01" \ + -i "pandas.Series.dt.minute SA01" \ + -i "pandas.Series.dt.month SA01" \ + -i "pandas.Series.dt.month_name PR01,PR02,SA01" \ + -i "pandas.Series.dt.nanosecond SA01" \ + -i "pandas.Series.dt.nanoseconds SA01" \ + -i "pandas.Series.dt.normalize PR01" \ + -i "pandas.Series.dt.quarter SA01" \ + -i "pandas.Series.dt.qyear GL08" \ + -i "pandas.Series.dt.round PR01,PR02,SA01" \ + -i "pandas.Series.dt.second SA01" \ + -i "pandas.Series.dt.seconds SA01" \ + -i "pandas.Series.dt.strftime PR01,PR02" \ + -i "pandas.Series.dt.time SA01" \ + -i "pandas.Series.dt.timetz SA01" \ + -i "pandas.Series.dt.to_period PR01,PR02,RT03" \ + -i "pandas.Series.dt.total_seconds PR01" \ + -i "pandas.Series.dt.tz SA01" \ + -i "pandas.Series.dt.tz_convert 
PR01,PR02,RT03" \ + -i "pandas.Series.dt.tz_localize PR01,PR02" \ + -i "pandas.Series.dt.unit GL08" \ + -i "pandas.Series.dt.year SA01" \ + -i "pandas.Series.dtype SA01" \ + -i "pandas.Series.dtypes SA01" \ + -i "pandas.Series.empty GL08" \ + -i "pandas.Series.eq PR07,SA01" \ + -i "pandas.Series.ffill SA01" \ + -i "pandas.Series.first_valid_index SA01" \ + -i "pandas.Series.floordiv PR07" \ + -i "pandas.Series.ge PR07,SA01" \ + -i "pandas.Series.get SA01" \ + -i "pandas.Series.gt PR07,SA01" \ + -i "pandas.Series.hasnans SA01" \ + -i "pandas.Series.infer_objects RT03" \ + -i "pandas.Series.is_monotonic_decreasing SA01" \ + -i "pandas.Series.is_monotonic_increasing SA01" \ + -i "pandas.Series.is_unique SA01" \ + -i "pandas.Series.item SA01" \ + -i "pandas.Series.keys SA01" \ + -i "pandas.Series.kurt RT03,SA01" \ + -i "pandas.Series.kurtosis RT03,SA01" \ + -i "pandas.Series.last_valid_index SA01" \ + -i "pandas.Series.le PR07,SA01" \ + -i "pandas.Series.list.__getitem__ SA01" \ + -i "pandas.Series.list.flatten SA01" \ + -i "pandas.Series.list.len SA01" \ + -i "pandas.Series.lt PR07,SA01" \ + -i "pandas.Series.mask RT03" \ + -i "pandas.Series.max RT03" \ + -i "pandas.Series.mean RT03,SA01" \ + -i "pandas.Series.median RT03,SA01" \ + -i "pandas.Series.min RT03" \ + -i "pandas.Series.mod PR07" \ + -i "pandas.Series.mode SA01" \ + -i "pandas.Series.mul PR07" \ + -i "pandas.Series.nbytes SA01" \ + -i "pandas.Series.ndim SA01" \ + -i "pandas.Series.ne PR07,SA01" \ + -i "pandas.Series.nunique RT03" \ + -i "pandas.Series.pad PR01,SA01" \ + -i "pandas.Series.plot PR02,SA01" \ + -i "pandas.Series.pop RT03,SA01" \ + -i "pandas.Series.pow PR07" \ + -i "pandas.Series.prod RT03" \ + -i "pandas.Series.product RT03" \ + -i "pandas.Series.radd PR07" \ + -i "pandas.Series.rdiv PR07" \ + -i "pandas.Series.reorder_levels RT03,SA01" \ + -i "pandas.Series.rfloordiv PR07" \ + -i "pandas.Series.rmod PR07" \ + -i "pandas.Series.rmul PR07" \ + -i "pandas.Series.rpow PR07" \ + -i 
"pandas.Series.rsub PR07" \ + -i "pandas.Series.rtruediv PR07" \ + -i "pandas.Series.sem PR01,RT03,SA01" \ + -i "pandas.Series.shape SA01" \ + -i "pandas.Series.size SA01" \ + -i "pandas.Series.skew RT03,SA01" \ + -i "pandas.Series.sparse PR01,SA01" \ + -i "pandas.Series.sparse.density SA01" \ + -i "pandas.Series.sparse.fill_value SA01" \ + -i "pandas.Series.sparse.from_coo PR07,SA01" \ + -i "pandas.Series.sparse.npoints SA01" \ + -i "pandas.Series.sparse.sp_values SA01" \ + -i "pandas.Series.sparse.to_coo PR07,RT03,SA01" \ + -i "pandas.Series.std PR01,RT03,SA01" \ + -i "pandas.Series.str PR01,SA01" \ + -i "pandas.Series.str.capitalize RT03" \ + -i "pandas.Series.str.casefold RT03" \ + -i "pandas.Series.str.center RT03,SA01" \ + -i "pandas.Series.str.decode PR07,RT03,SA01" \ + -i "pandas.Series.str.encode PR07,RT03,SA01" \ + -i "pandas.Series.str.find RT03" \ + -i "pandas.Series.str.fullmatch RT03" \ + -i "pandas.Series.str.get RT03,SA01" \ + -i "pandas.Series.str.index RT03" \ + -i "pandas.Series.str.ljust RT03,SA01" \ + -i "pandas.Series.str.lower RT03" \ + -i "pandas.Series.str.lstrip RT03" \ + -i "pandas.Series.str.match RT03" \ + -i "pandas.Series.str.normalize RT03,SA01" \ + -i "pandas.Series.str.partition RT03" \ + -i "pandas.Series.str.repeat SA01" \ + -i "pandas.Series.str.replace SA01" \ + -i "pandas.Series.str.rfind RT03" \ + -i "pandas.Series.str.rindex RT03" \ + -i "pandas.Series.str.rjust RT03,SA01" \ + -i "pandas.Series.str.rpartition RT03" \ + -i "pandas.Series.str.rstrip RT03" \ + -i "pandas.Series.str.strip RT03" \ + -i "pandas.Series.str.swapcase RT03" \ + -i "pandas.Series.str.title RT03" \ + -i "pandas.Series.str.translate RT03,SA01" \ + -i "pandas.Series.str.upper RT03" \ + -i "pandas.Series.str.wrap RT03,SA01" \ + -i "pandas.Series.str.zfill RT03" \ + -i "pandas.Series.struct.dtypes SA01" \ + -i "pandas.Series.sub PR07" \ + -i "pandas.Series.sum RT03" \ + -i "pandas.Series.swaplevel SA01" \ + -i "pandas.Series.to_dict SA01" \ + -i 
"pandas.Series.to_frame SA01" \ + -i "pandas.Series.to_list RT03" \ + -i "pandas.Series.to_markdown SA01" \ + -i "pandas.Series.to_period SA01" \ + -i "pandas.Series.to_string SA01" \ + -i "pandas.Series.to_timestamp RT03,SA01" \ + -i "pandas.Series.truediv PR07" \ + -i "pandas.Series.tz_convert SA01" \ + -i "pandas.Series.tz_localize SA01" \ + -i "pandas.Series.unstack SA01" \ + -i "pandas.Series.update PR07,SA01" \ + -i "pandas.Series.value_counts RT03" \ + -i "pandas.Series.var PR01,RT03,SA01" \ + -i "pandas.Series.where RT03" \ + -i "pandas.SparseDtype SA01" \ + -i "pandas.Timedelta PR07,SA01" \ + -i "pandas.Timedelta.as_unit SA01" \ + -i "pandas.Timedelta.asm8 SA01" \ + -i "pandas.Timedelta.ceil SA01" \ + -i "pandas.Timedelta.components SA01" \ + -i "pandas.Timedelta.days SA01" \ + -i "pandas.Timedelta.floor SA01" \ + -i "pandas.Timedelta.max PR02,PR07,SA01" \ + -i "pandas.Timedelta.min PR02,PR07,SA01" \ + -i "pandas.Timedelta.resolution PR02,PR07,SA01" \ + -i "pandas.Timedelta.round SA01" \ + -i "pandas.Timedelta.to_numpy PR01" \ + -i "pandas.Timedelta.to_timedelta64 SA01" \ + -i "pandas.Timedelta.total_seconds SA01" \ + -i "pandas.Timedelta.view SA01" \ + -i "pandas.TimedeltaIndex PR01" \ + -i "pandas.TimedeltaIndex.as_unit RT03,SA01" \ + -i "pandas.TimedeltaIndex.ceil SA01" \ + -i "pandas.TimedeltaIndex.components SA01" \ + -i "pandas.TimedeltaIndex.days SA01" \ + -i "pandas.TimedeltaIndex.floor SA01" \ + -i "pandas.TimedeltaIndex.inferred_freq SA01" \ + -i "pandas.TimedeltaIndex.microseconds SA01" \ + -i "pandas.TimedeltaIndex.nanoseconds SA01" \ + -i "pandas.TimedeltaIndex.round SA01" \ + -i "pandas.TimedeltaIndex.seconds SA01" \ + -i "pandas.TimedeltaIndex.to_pytimedelta RT03,SA01" \ + -i "pandas.Timestamp PR07,SA01" \ + -i "pandas.Timestamp.as_unit SA01" \ + -i "pandas.Timestamp.asm8 SA01" \ + -i "pandas.Timestamp.astimezone SA01" \ + -i "pandas.Timestamp.ceil SA01" \ + -i "pandas.Timestamp.combine PR01,SA01" \ + -i "pandas.Timestamp.ctime SA01" \ + -i 
"pandas.Timestamp.date SA01" \ + -i "pandas.Timestamp.day GL08" \ + -i "pandas.Timestamp.day_name SA01" \ + -i "pandas.Timestamp.day_of_week SA01" \ + -i "pandas.Timestamp.day_of_year SA01" \ + -i "pandas.Timestamp.dayofweek SA01" \ + -i "pandas.Timestamp.dayofyear SA01" \ + -i "pandas.Timestamp.days_in_month SA01" \ + -i "pandas.Timestamp.daysinmonth SA01" \ + -i "pandas.Timestamp.dst SA01" \ + -i "pandas.Timestamp.floor SA01" \ + -i "pandas.Timestamp.fold GL08" \ + -i "pandas.Timestamp.fromordinal SA01" \ + -i "pandas.Timestamp.fromtimestamp PR01,SA01" \ + -i "pandas.Timestamp.hour GL08" \ + -i "pandas.Timestamp.is_leap_year SA01" \ + -i "pandas.Timestamp.isocalendar SA01" \ + -i "pandas.Timestamp.isoformat SA01" \ + -i "pandas.Timestamp.isoweekday SA01" \ + -i "pandas.Timestamp.max PR02,PR07,SA01" \ + -i "pandas.Timestamp.microsecond GL08" \ + -i "pandas.Timestamp.min PR02,PR07,SA01" \ + -i "pandas.Timestamp.minute GL08" \ + -i "pandas.Timestamp.month GL08" \ + -i "pandas.Timestamp.month_name SA01" \ + -i "pandas.Timestamp.nanosecond GL08" \ + -i "pandas.Timestamp.normalize SA01" \ + -i "pandas.Timestamp.now SA01" \ + -i "pandas.Timestamp.quarter SA01" \ + -i "pandas.Timestamp.replace PR07,SA01" \ + -i "pandas.Timestamp.resolution PR02,PR07,SA01" \ + -i "pandas.Timestamp.round SA01" \ + -i "pandas.Timestamp.second GL08" \ + -i "pandas.Timestamp.strftime SA01" \ + -i "pandas.Timestamp.strptime PR01,SA01" \ + -i "pandas.Timestamp.time SA01" \ + -i "pandas.Timestamp.timestamp SA01" \ + -i "pandas.Timestamp.timetuple SA01" \ + -i "pandas.Timestamp.timetz SA01" \ + -i "pandas.Timestamp.to_datetime64 SA01" \ + -i "pandas.Timestamp.to_julian_date SA01" \ + -i "pandas.Timestamp.to_numpy PR01" \ + -i "pandas.Timestamp.to_period PR01,SA01" \ + -i "pandas.Timestamp.to_pydatetime PR01,SA01" \ + -i "pandas.Timestamp.today SA01" \ + -i "pandas.Timestamp.toordinal SA01" \ + -i "pandas.Timestamp.tz SA01" \ + -i "pandas.Timestamp.tz_convert SA01" \ + -i 
"pandas.Timestamp.tz_localize SA01" \ + -i "pandas.Timestamp.tzinfo GL08" \ + -i "pandas.Timestamp.tzname SA01" \ + -i "pandas.Timestamp.unit SA01" \ + -i "pandas.Timestamp.utcfromtimestamp PR01,SA01" \ + -i "pandas.Timestamp.utcnow SA01" \ + -i "pandas.Timestamp.utcoffset SA01" \ + -i "pandas.Timestamp.utctimetuple SA01" \ + -i "pandas.Timestamp.value GL08" \ + -i "pandas.Timestamp.week SA01" \ + -i "pandas.Timestamp.weekday SA01" \ + -i "pandas.Timestamp.weekofyear SA01" \ + -i "pandas.Timestamp.year GL08" \ + -i "pandas.UInt16Dtype SA01" \ + -i "pandas.UInt32Dtype SA01" \ + -i "pandas.UInt64Dtype SA01" \ + -i "pandas.UInt8Dtype SA01" \ + -i "pandas.api.extensions.ExtensionArray SA01" \ + -i "pandas.api.extensions.ExtensionArray._accumulate RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01" \ + -i "pandas.api.extensions.ExtensionArray._formatter SA01" \ + -i "pandas.api.extensions.ExtensionArray._from_sequence SA01" \ + -i "pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01" \ + -i "pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._reduce RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._values_for_factorize SA01" \ + -i "pandas.api.extensions.ExtensionArray.astype SA01" \ + -i "pandas.api.extensions.ExtensionArray.copy RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.dropna RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.dtype SA01" \ + -i "pandas.api.extensions.ExtensionArray.duplicated RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.equals SA01" \ + -i "pandas.api.extensions.ExtensionArray.fillna SA01" \ + -i "pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.interpolate PR01,SA01" \ + -i "pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01" \ + -i 
"pandas.api.extensions.ExtensionArray.isna SA01" \ + -i "pandas.api.extensions.ExtensionArray.nbytes SA01" \ + -i "pandas.api.extensions.ExtensionArray.ndim SA01" \ + -i "pandas.api.extensions.ExtensionArray.ravel RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.shape SA01" \ + -i "pandas.api.extensions.ExtensionArray.shift SA01" \ + -i "pandas.api.extensions.ExtensionArray.take RT03" \ + -i "pandas.api.extensions.ExtensionArray.tolist RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.unique RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.view SA01" \ + -i "pandas.api.extensions.register_extension_dtype SA01" \ + -i "pandas.api.indexers.BaseIndexer PR01,SA01" \ + -i "pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01" \ + -i "pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01" \ + -i "pandas.api.interchange.from_dataframe RT03,SA01" \ + -i "pandas.api.types.infer_dtype PR07,SA01" \ + -i "pandas.api.types.is_any_real_numeric_dtype SA01" \ + -i "pandas.api.types.is_bool PR01,SA01" \ + -i "pandas.api.types.is_bool_dtype SA01" \ + -i "pandas.api.types.is_categorical_dtype SA01" \ + -i "pandas.api.types.is_complex PR01,SA01" \ + -i "pandas.api.types.is_complex_dtype SA01" \ + -i "pandas.api.types.is_datetime64_any_dtype SA01" \ + -i "pandas.api.types.is_datetime64_dtype SA01" \ + -i "pandas.api.types.is_datetime64_ns_dtype SA01" \ + -i "pandas.api.types.is_datetime64tz_dtype SA01" \ + -i "pandas.api.types.is_dict_like PR07,SA01" \ + -i "pandas.api.types.is_extension_array_dtype SA01" \ + -i "pandas.api.types.is_file_like PR07,SA01" \ + -i "pandas.api.types.is_float PR01,SA01" \ + -i "pandas.api.types.is_float_dtype SA01" \ + -i "pandas.api.types.is_hashable PR01,RT03,SA01" \ + -i "pandas.api.types.is_int64_dtype SA01" \ + -i "pandas.api.types.is_integer PR01,SA01" \ + -i "pandas.api.types.is_integer_dtype SA01" \ + -i "pandas.api.types.is_interval_dtype SA01" \ + -i "pandas.api.types.is_iterator PR07,SA01" \ + -i 
"pandas.api.types.is_list_like SA01" \ + -i "pandas.api.types.is_named_tuple PR07,SA01" \ + -i "pandas.api.types.is_numeric_dtype SA01" \ + -i "pandas.api.types.is_object_dtype SA01" \ + -i "pandas.api.types.is_period_dtype SA01" \ + -i "pandas.api.types.is_re PR07,SA01" \ + -i "pandas.api.types.is_re_compilable PR07,SA01" \ + -i "pandas.api.types.is_scalar SA01" \ + -i "pandas.api.types.is_signed_integer_dtype SA01" \ + -i "pandas.api.types.is_sparse SA01" \ + -i "pandas.api.types.is_string_dtype SA01" \ + -i "pandas.api.types.is_timedelta64_dtype SA01" \ + -i "pandas.api.types.is_timedelta64_ns_dtype SA01" \ + -i "pandas.api.types.is_unsigned_integer_dtype SA01" \ + -i "pandas.api.types.pandas_dtype PR07,RT03,SA01" \ + -i "pandas.api.types.union_categoricals RT03,SA01" \ + -i "pandas.arrays.ArrowExtensionArray PR07,SA01" \ + -i "pandas.arrays.BooleanArray SA01" \ + -i "pandas.arrays.DatetimeArray SA01" \ + -i "pandas.arrays.FloatingArray SA01" \ + -i "pandas.arrays.IntegerArray SA01" \ + -i "pandas.arrays.IntervalArray.closed SA01" \ + -i "pandas.arrays.IntervalArray.contains RT03" \ + -i "pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01" \ + -i "pandas.arrays.IntervalArray.left SA01" \ + -i "pandas.arrays.IntervalArray.length SA01" \ + -i "pandas.arrays.IntervalArray.mid SA01" \ + -i "pandas.arrays.IntervalArray.right SA01" \ + -i "pandas.arrays.IntervalArray.set_closed RT03,SA01" \ + -i "pandas.arrays.IntervalArray.to_tuples RT03,SA01" \ + -i "pandas.arrays.NumpyExtensionArray SA01" \ + -i "pandas.arrays.SparseArray PR07,SA01" \ + -i "pandas.arrays.TimedeltaArray PR07,SA01" \ + -i "pandas.bdate_range RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.agg RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.aggregate RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.apply RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01" \ + -i 
"pandas.core.groupby.DataFrameGroupBy.cummax RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.cummin RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.cumprod RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.cumsum RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.groups SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.hist RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.indices SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.max SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.mean RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.median SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.min SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.nth PR02" \ + -i "pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.ohlc SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.prod SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.rank RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.resample RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.sem SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.skew RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.sum SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.transform RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.agg RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.aggregate RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.apply RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cummax RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cummin RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cumprod RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cumsum RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01" \ + -i 
"pandas.core.groupby.SeriesGroupBy.groups SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.indices SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.max SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.mean RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.median SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.min SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.nth PR02" \ + -i "pandas.core.groupby.SeriesGroupBy.ohlc SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.plot PR02,SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.prod SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.rank RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.resample RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.sem SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.skew RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.sum SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.transform RT03" \ + -i "pandas.core.resample.Resampler.__iter__ RT03,SA01" \ + -i "pandas.core.resample.Resampler.ffill RT03" \ + -i "pandas.core.resample.Resampler.get_group RT03,SA01" \ + -i "pandas.core.resample.Resampler.groups SA01" \ + -i "pandas.core.resample.Resampler.indices SA01" \ + -i "pandas.core.resample.Resampler.max PR01,RT03,SA01" \ + -i "pandas.core.resample.Resampler.mean SA01" \ + -i "pandas.core.resample.Resampler.median SA01" \ + -i "pandas.core.resample.Resampler.min PR01,RT03,SA01" \ + -i "pandas.core.resample.Resampler.ohlc SA01" \ + -i "pandas.core.resample.Resampler.prod SA01" \ + -i "pandas.core.resample.Resampler.quantile PR01,PR07" \ + -i "pandas.core.resample.Resampler.sem SA01" \ + -i "pandas.core.resample.Resampler.std SA01" \ + -i "pandas.core.resample.Resampler.sum SA01" \ + -i "pandas.core.resample.Resampler.transform PR01,RT03,SA01" \ + -i "pandas.core.resample.Resampler.var SA01" \ + -i "pandas.core.window.expanding.Expanding.corr PR01" \ + -i 
"pandas.core.window.expanding.Expanding.count PR01" \ + -i "pandas.core.window.rolling.Rolling.max PR01" \ + -i "pandas.core.window.rolling.Window.std PR01" \ + -i "pandas.core.window.rolling.Window.var PR01" \ + -i "pandas.date_range RT03" \ + -i "pandas.describe_option SA01" \ + -i "pandas.errors.AbstractMethodError PR01,SA01" \ + -i "pandas.errors.AttributeConflictWarning SA01" \ + -i "pandas.errors.CSSWarning SA01" \ + -i "pandas.errors.CategoricalConversionWarning SA01" \ + -i "pandas.errors.ChainedAssignmentError SA01" \ + -i "pandas.errors.ClosedFileError SA01" \ + -i "pandas.errors.DataError SA01" \ + -i "pandas.errors.DuplicateLabelError SA01" \ + -i "pandas.errors.EmptyDataError SA01" \ + -i "pandas.errors.IntCastingNaNError SA01" \ + -i "pandas.errors.InvalidIndexError SA01" \ + -i "pandas.errors.InvalidVersion SA01" \ + -i "pandas.errors.MergeError SA01" \ + -i "pandas.errors.NullFrequencyError SA01" \ + -i "pandas.errors.NumExprClobberingError SA01" \ + -i "pandas.errors.NumbaUtilError SA01" \ + -i "pandas.errors.OptionError SA01" \ + -i "pandas.errors.OutOfBoundsDatetime SA01" \ + -i "pandas.errors.OutOfBoundsTimedelta SA01" \ + -i "pandas.errors.PerformanceWarning SA01" \ + -i "pandas.errors.PossibleDataLossError SA01" \ + -i "pandas.errors.PossiblePrecisionLoss SA01" \ + -i "pandas.errors.SpecificationError SA01" \ + -i "pandas.errors.UndefinedVariableError PR01,SA01" \ + -i "pandas.errors.UnsortedIndexError SA01" \ + -i "pandas.errors.UnsupportedFunctionCall SA01" \ + -i "pandas.errors.ValueLabelTypeMismatch SA01" \ + -i "pandas.get_option SA01" \ + -i "pandas.infer_freq SA01" \ + -i "pandas.interval_range RT03" \ + -i "pandas.io.formats.style.Styler.apply RT03" \ + -i "pandas.io.formats.style.Styler.apply_index RT03" \ + -i "pandas.io.formats.style.Styler.background_gradient RT03" \ + -i "pandas.io.formats.style.Styler.bar RT03,SA01" \ + -i "pandas.io.formats.style.Styler.clear SA01" \ + -i "pandas.io.formats.style.Styler.concat RT03,SA01" \ + -i 
"pandas.io.formats.style.Styler.export RT03" \ + -i "pandas.io.formats.style.Styler.format RT03" \ + -i "pandas.io.formats.style.Styler.format_index RT03" \ + -i "pandas.io.formats.style.Styler.from_custom_template SA01" \ + -i "pandas.io.formats.style.Styler.hide RT03,SA01" \ + -i "pandas.io.formats.style.Styler.highlight_between RT03" \ + -i "pandas.io.formats.style.Styler.highlight_max RT03" \ + -i "pandas.io.formats.style.Styler.highlight_min RT03" \ + -i "pandas.io.formats.style.Styler.highlight_null RT03" \ + -i "pandas.io.formats.style.Styler.highlight_quantile RT03" \ + -i "pandas.io.formats.style.Styler.map RT03" \ + -i "pandas.io.formats.style.Styler.map_index RT03" \ + -i "pandas.io.formats.style.Styler.relabel_index RT03" \ + -i "pandas.io.formats.style.Styler.set_caption RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_properties RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_sticky RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_table_attributes PR07,RT03" \ + -i "pandas.io.formats.style.Styler.set_table_styles RT03" \ + -i "pandas.io.formats.style.Styler.set_td_classes RT03" \ + -i "pandas.io.formats.style.Styler.set_tooltips RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01" \ + -i "pandas.io.formats.style.Styler.text_gradient RT03" \ + -i "pandas.io.formats.style.Styler.to_excel PR01" \ + -i "pandas.io.formats.style.Styler.to_string SA01" \ + -i "pandas.io.formats.style.Styler.use RT03" \ + -i "pandas.io.json.build_table_schema PR07,RT03,SA01" \ + -i "pandas.io.stata.StataReader.data_label SA01" \ + -i "pandas.io.stata.StataReader.value_labels RT03,SA01" \ + -i "pandas.io.stata.StataReader.variable_labels RT03,SA01" \ + -i "pandas.io.stata.StataWriter.write_file SA01" \ + -i "pandas.json_normalize RT03,SA01" \ + -i "pandas.merge PR07" \ + -i "pandas.merge_asof PR07,RT03" \ + -i "pandas.merge_ordered PR07" \ + -i "pandas.option_context SA01" \ + -i "pandas.period_range RT03,SA01" \ + -i "pandas.pivot PR07" \ 
+ -i "pandas.pivot_table PR07" \ + -i "pandas.plotting.andrews_curves RT03,SA01" \ + -i "pandas.plotting.autocorrelation_plot RT03,SA01" \ + -i "pandas.plotting.lag_plot RT03,SA01" \ + -i "pandas.plotting.parallel_coordinates PR07,RT03,SA01" \ + -i "pandas.plotting.plot_params SA01" \ + -i "pandas.plotting.scatter_matrix PR07,SA01" \ + -i "pandas.plotting.table PR07,RT03,SA01" \ + -i "pandas.qcut PR07,SA01" \ + -i "pandas.read_feather SA01" \ + -i "pandas.read_orc SA01" \ + -i "pandas.read_sas SA01" \ + -i "pandas.read_spss SA01" \ + -i "pandas.reset_option SA01" \ + -i "pandas.set_eng_float_format RT03,SA01" \ + -i "pandas.set_option SA01" \ + -i "pandas.show_versions SA01" \ + -i "pandas.test SA01" \ + -i "pandas.testing.assert_extension_array_equal SA01" \ + -i "pandas.testing.assert_index_equal PR07,SA01" \ + -i "pandas.testing.assert_series_equal PR07,SA01" \ + -i "pandas.timedelta_range SA01" \ + -i "pandas.tseries.api.guess_datetime_format SA01" \ + -i "pandas.tseries.offsets.BDay PR02,SA01" \ + -i "pandas.tseries.offsets.BMonthBegin PR02" \ + -i "pandas.tseries.offsets.BMonthEnd PR02" \ + -i "pandas.tseries.offsets.BQuarterBegin PR02" \ + -i "pandas.tseries.offsets.BQuarterBegin.copy SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.kwds SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.n GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.name SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.nanos GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.normalize GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.startingMonth GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd PR02" \ + -i "pandas.tseries.offsets.BQuarterEnd.copy SA01" \ + -i "pandas.tseries.offsets.BQuarterEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08" \ + -i 
"pandas.tseries.offsets.BQuarterEnd.kwds SA01" \ + -i "pandas.tseries.offsets.BQuarterEnd.n GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.name SA01" \ + -i "pandas.tseries.offsets.BQuarterEnd.nanos GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.normalize GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.startingMonth GL08" \ + -i "pandas.tseries.offsets.BYearBegin PR02" \ + -i "pandas.tseries.offsets.BYearBegin.copy SA01" \ + -i "pandas.tseries.offsets.BYearBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.BYearBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BYearBegin.kwds SA01" \ + -i "pandas.tseries.offsets.BYearBegin.month GL08" \ + -i "pandas.tseries.offsets.BYearBegin.n GL08" \ + -i "pandas.tseries.offsets.BYearBegin.name SA01" \ + -i "pandas.tseries.offsets.BYearBegin.nanos GL08" \ + -i "pandas.tseries.offsets.BYearBegin.normalize GL08" \ + -i "pandas.tseries.offsets.BYearBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.BYearEnd PR02" \ + -i "pandas.tseries.offsets.BYearEnd.copy SA01" \ + -i "pandas.tseries.offsets.BYearEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.BYearEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BYearEnd.kwds SA01" \ + -i "pandas.tseries.offsets.BYearEnd.month GL08" \ + -i "pandas.tseries.offsets.BYearEnd.n GL08" \ + -i "pandas.tseries.offsets.BYearEnd.name SA01" \ + -i "pandas.tseries.offsets.BYearEnd.nanos GL08" \ + -i "pandas.tseries.offsets.BYearEnd.normalize GL08" \ + -i "pandas.tseries.offsets.BYearEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessDay PR02,SA01" \ + -i "pandas.tseries.offsets.BusinessDay.calendar GL08" \ + -i "pandas.tseries.offsets.BusinessDay.copy SA01" \ + -i "pandas.tseries.offsets.BusinessDay.freqstr SA01" \ + -i "pandas.tseries.offsets.BusinessDay.holidays GL08" \ + -i "pandas.tseries.offsets.BusinessDay.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessDay.kwds SA01" \ + -i 
"pandas.tseries.offsets.BusinessDay.n GL08" \ + -i "pandas.tseries.offsets.BusinessDay.name SA01" \ + -i "pandas.tseries.offsets.BusinessDay.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessDay.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessDay.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessDay.weekmask GL08" \ + -i "pandas.tseries.offsets.BusinessHour PR02,SA01" \ + -i "pandas.tseries.offsets.BusinessHour.calendar GL08" \ + -i "pandas.tseries.offsets.BusinessHour.copy SA01" \ + -i "pandas.tseries.offsets.BusinessHour.end GL08" \ + -i "pandas.tseries.offsets.BusinessHour.freqstr SA01" \ + -i "pandas.tseries.offsets.BusinessHour.holidays GL08" \ + -i "pandas.tseries.offsets.BusinessHour.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessHour.kwds SA01" \ + -i "pandas.tseries.offsets.BusinessHour.n GL08" \ + -i "pandas.tseries.offsets.BusinessHour.name SA01" \ + -i "pandas.tseries.offsets.BusinessHour.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessHour.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessHour.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessHour.start GL08" \ + -i "pandas.tseries.offsets.BusinessHour.weekmask GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin PR02" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.n GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.name SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd PR02" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01" \ + -i 
"pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.n GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.name SA01" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.CBMonthBegin PR02" \ + -i "pandas.tseries.offsets.CBMonthEnd PR02" \ + -i "pandas.tseries.offsets.CDay PR02,SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay PR02,SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.kwds SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.name SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.weekmask GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour PR02,SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.end GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.kwds SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.name 
SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.start GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.weekmask GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin PR02" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd PR02" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.name SA01" \ + -i 
"pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask GL08" \ + -i "pandas.tseries.offsets.DateOffset PR02" \ + -i "pandas.tseries.offsets.DateOffset.copy SA01" \ + -i "pandas.tseries.offsets.DateOffset.freqstr SA01" \ + -i "pandas.tseries.offsets.DateOffset.is_on_offset GL08" \ + -i "pandas.tseries.offsets.DateOffset.kwds SA01" \ + -i "pandas.tseries.offsets.DateOffset.n GL08" \ + -i "pandas.tseries.offsets.DateOffset.name SA01" \ + -i "pandas.tseries.offsets.DateOffset.nanos GL08" \ + -i "pandas.tseries.offsets.DateOffset.normalize GL08" \ + -i "pandas.tseries.offsets.DateOffset.rule_code GL08" \ + -i "pandas.tseries.offsets.Day PR02" \ + -i "pandas.tseries.offsets.Day.copy SA01" \ + -i "pandas.tseries.offsets.Day.delta GL08" \ + -i "pandas.tseries.offsets.Day.freqstr SA01" \ + -i "pandas.tseries.offsets.Day.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Day.kwds SA01" \ + -i "pandas.tseries.offsets.Day.n GL08" \ + -i "pandas.tseries.offsets.Day.name SA01" \ + -i "pandas.tseries.offsets.Day.nanos SA01" \ + -i "pandas.tseries.offsets.Day.normalize GL08" \ + -i "pandas.tseries.offsets.Day.rule_code GL08" \ + -i "pandas.tseries.offsets.Easter PR02" \ + -i "pandas.tseries.offsets.Easter.copy SA01" \ + -i "pandas.tseries.offsets.Easter.freqstr SA01" \ + -i "pandas.tseries.offsets.Easter.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Easter.kwds SA01" \ + -i "pandas.tseries.offsets.Easter.n GL08" \ + -i "pandas.tseries.offsets.Easter.name SA01" \ + -i "pandas.tseries.offsets.Easter.nanos GL08" \ + -i "pandas.tseries.offsets.Easter.normalize GL08" \ + -i "pandas.tseries.offsets.Easter.rule_code GL08" \ + -i "pandas.tseries.offsets.FY5253 PR02" \ + -i "pandas.tseries.offsets.FY5253.copy SA01" \ + -i "pandas.tseries.offsets.FY5253.freqstr SA01" \ + -i 
"pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08" \ + -i "pandas.tseries.offsets.FY5253.get_year_end GL08" \ + -i "pandas.tseries.offsets.FY5253.is_on_offset GL08" \ + -i "pandas.tseries.offsets.FY5253.kwds SA01" \ + -i "pandas.tseries.offsets.FY5253.n GL08" \ + -i "pandas.tseries.offsets.FY5253.name SA01" \ + -i "pandas.tseries.offsets.FY5253.nanos GL08" \ + -i "pandas.tseries.offsets.FY5253.normalize GL08" \ + -i "pandas.tseries.offsets.FY5253.rule_code GL08" \ + -i "pandas.tseries.offsets.FY5253.startingMonth GL08" \ + -i "pandas.tseries.offsets.FY5253.variation GL08" \ + -i "pandas.tseries.offsets.FY5253.weekday GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter PR02" \ + -i "pandas.tseries.offsets.FY5253Quarter.copy SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.freqstr SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.get_weeks GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.kwds SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.n GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.name SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.nanos GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.normalize GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.rule_code GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.startingMonth GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.variation GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.weekday GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08" \ + -i "pandas.tseries.offsets.Hour PR02" \ + -i "pandas.tseries.offsets.Hour.copy SA01" \ + -i "pandas.tseries.offsets.Hour.delta GL08" \ + -i "pandas.tseries.offsets.Hour.freqstr SA01" \ + -i "pandas.tseries.offsets.Hour.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Hour.kwds SA01" \ + -i "pandas.tseries.offsets.Hour.n 
GL08" \ + -i "pandas.tseries.offsets.Hour.name SA01" \ + -i "pandas.tseries.offsets.Hour.nanos SA01" \ + -i "pandas.tseries.offsets.Hour.normalize GL08" \ + -i "pandas.tseries.offsets.Hour.rule_code GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth PR02,SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.copy SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.kwds SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.n GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.name SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.nanos GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.normalize GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.week GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.weekday GL08" \ + -i "pandas.tseries.offsets.Micro PR02" \ + -i "pandas.tseries.offsets.Micro.copy SA01" \ + -i "pandas.tseries.offsets.Micro.delta GL08" \ + -i "pandas.tseries.offsets.Micro.freqstr SA01" \ + -i "pandas.tseries.offsets.Micro.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Micro.kwds SA01" \ + -i "pandas.tseries.offsets.Micro.n GL08" \ + -i "pandas.tseries.offsets.Micro.name SA01" \ + -i "pandas.tseries.offsets.Micro.nanos SA01" \ + -i "pandas.tseries.offsets.Micro.normalize GL08" \ + -i "pandas.tseries.offsets.Micro.rule_code GL08" \ + -i "pandas.tseries.offsets.Milli PR02" \ + -i "pandas.tseries.offsets.Milli.copy SA01" \ + -i "pandas.tseries.offsets.Milli.delta GL08" \ + -i "pandas.tseries.offsets.Milli.freqstr SA01" \ + -i "pandas.tseries.offsets.Milli.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Milli.kwds SA01" \ + -i "pandas.tseries.offsets.Milli.n GL08" \ + -i "pandas.tseries.offsets.Milli.name SA01" \ + -i "pandas.tseries.offsets.Milli.nanos SA01" \ + -i "pandas.tseries.offsets.Milli.normalize GL08" \ + -i 
"pandas.tseries.offsets.Milli.rule_code GL08" \ + -i "pandas.tseries.offsets.Minute PR02" \ + -i "pandas.tseries.offsets.Minute.copy SA01" \ + -i "pandas.tseries.offsets.Minute.delta GL08" \ + -i "pandas.tseries.offsets.Minute.freqstr SA01" \ + -i "pandas.tseries.offsets.Minute.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Minute.kwds SA01" \ + -i "pandas.tseries.offsets.Minute.n GL08" \ + -i "pandas.tseries.offsets.Minute.name SA01" \ + -i "pandas.tseries.offsets.Minute.nanos SA01" \ + -i "pandas.tseries.offsets.Minute.normalize GL08" \ + -i "pandas.tseries.offsets.Minute.rule_code GL08" \ + -i "pandas.tseries.offsets.MonthBegin PR02" \ + -i "pandas.tseries.offsets.MonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.MonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.MonthBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.MonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.MonthBegin.n GL08" \ + -i "pandas.tseries.offsets.MonthBegin.name SA01" \ + -i "pandas.tseries.offsets.MonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.MonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.MonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.MonthEnd PR02" \ + -i "pandas.tseries.offsets.MonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.MonthEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.MonthEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.MonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.MonthEnd.n GL08" \ + -i "pandas.tseries.offsets.MonthEnd.name SA01" \ + -i "pandas.tseries.offsets.MonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.MonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.MonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.Nano PR02" \ + -i "pandas.tseries.offsets.Nano.copy SA01" \ + -i "pandas.tseries.offsets.Nano.delta GL08" \ + -i "pandas.tseries.offsets.Nano.freqstr SA01" \ + -i "pandas.tseries.offsets.Nano.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Nano.kwds SA01" \ + -i "pandas.tseries.offsets.Nano.n 
GL08" \ + -i "pandas.tseries.offsets.Nano.name SA01" \ + -i "pandas.tseries.offsets.Nano.nanos SA01" \ + -i "pandas.tseries.offsets.Nano.normalize GL08" \ + -i "pandas.tseries.offsets.Nano.rule_code GL08" \ + -i "pandas.tseries.offsets.QuarterBegin PR02" \ + -i "pandas.tseries.offsets.QuarterBegin.copy SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.kwds SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.n GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.name SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.nanos GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.normalize GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.startingMonth GL08" \ + -i "pandas.tseries.offsets.QuarterEnd PR02" \ + -i "pandas.tseries.offsets.QuarterEnd.copy SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.kwds SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.n GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.name SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.nanos GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.normalize GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.startingMonth GL08" \ + -i "pandas.tseries.offsets.Second PR02" \ + -i "pandas.tseries.offsets.Second.copy SA01" \ + -i "pandas.tseries.offsets.Second.delta GL08" \ + -i "pandas.tseries.offsets.Second.freqstr SA01" \ + -i "pandas.tseries.offsets.Second.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Second.kwds SA01" \ + -i "pandas.tseries.offsets.Second.n GL08" \ + -i "pandas.tseries.offsets.Second.name SA01" \ + -i "pandas.tseries.offsets.Second.nanos SA01" \ + -i "pandas.tseries.offsets.Second.normalize GL08" \ + -i "pandas.tseries.offsets.Second.rule_code GL08" \ + -i 
"pandas.tseries.offsets.SemiMonthBegin PR02,SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.n GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.name SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd PR02,SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.n GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.name SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.Tick GL08" \ + -i "pandas.tseries.offsets.Tick.copy SA01" \ + -i "pandas.tseries.offsets.Tick.delta GL08" \ + -i "pandas.tseries.offsets.Tick.freqstr SA01" \ + -i "pandas.tseries.offsets.Tick.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Tick.kwds SA01" \ + -i "pandas.tseries.offsets.Tick.n GL08" \ + -i "pandas.tseries.offsets.Tick.name SA01" \ + -i "pandas.tseries.offsets.Tick.nanos SA01" \ + -i "pandas.tseries.offsets.Tick.normalize GL08" \ + -i "pandas.tseries.offsets.Tick.rule_code GL08" \ + -i "pandas.tseries.offsets.Week PR02" \ + -i "pandas.tseries.offsets.Week.copy SA01" \ + -i "pandas.tseries.offsets.Week.freqstr SA01" \ + -i "pandas.tseries.offsets.Week.is_on_offset 
GL08" \ + -i "pandas.tseries.offsets.Week.kwds SA01" \ + -i "pandas.tseries.offsets.Week.n GL08" \ + -i "pandas.tseries.offsets.Week.name SA01" \ + -i "pandas.tseries.offsets.Week.nanos GL08" \ + -i "pandas.tseries.offsets.Week.normalize GL08" \ + -i "pandas.tseries.offsets.Week.rule_code GL08" \ + -i "pandas.tseries.offsets.Week.weekday GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth PR02,SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.copy SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.freqstr SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.kwds SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.n GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.name SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.nanos GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.normalize GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.rule_code GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.week GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.weekday GL08" \ + -i "pandas.tseries.offsets.YearBegin PR02" \ + -i "pandas.tseries.offsets.YearBegin.copy SA01" \ + -i "pandas.tseries.offsets.YearBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.YearBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.YearBegin.kwds SA01" \ + -i "pandas.tseries.offsets.YearBegin.month GL08" \ + -i "pandas.tseries.offsets.YearBegin.n GL08" \ + -i "pandas.tseries.offsets.YearBegin.name SA01" \ + -i "pandas.tseries.offsets.YearBegin.nanos GL08" \ + -i "pandas.tseries.offsets.YearBegin.normalize GL08" \ + -i "pandas.tseries.offsets.YearBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.YearEnd PR02" \ + -i "pandas.tseries.offsets.YearEnd.copy SA01" \ + -i "pandas.tseries.offsets.YearEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.YearEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.YearEnd.kwds SA01" \ + -i "pandas.tseries.offsets.YearEnd.month GL08" \ + -i "pandas.tseries.offsets.YearEnd.n GL08" \ + -i 
"pandas.tseries.offsets.YearEnd.name SA01" \ + -i "pandas.tseries.offsets.YearEnd.nanos GL08" \ + -i "pandas.tseries.offsets.YearEnd.normalize GL08" \ + -i "pandas.tseries.offsets.YearEnd.rule_code GL08" \ + -i "pandas.unique PR07" \ + -i "pandas.util.hash_array PR07,SA01" \ + -i "pandas.util.hash_pandas_object PR07,SA01" # There should be no backslash in the final line, please keep this comment in the last ignored function RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 72d5c03ab724f..d2e92bb971888 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -259,7 +259,7 @@ def test_validate_all_ignore_errors(self, monkeypatch): output_format="default", prefix=None, ignore_deprecated=False, - ignore_errors={"*": {"ER03"}}, + ignore_errors={None: {"ER03"}}, ) # two functions * two not ignored errors assert exit_status == 2 * 2 @@ -269,7 +269,7 @@ def test_validate_all_ignore_errors(self, monkeypatch): prefix=None, ignore_deprecated=False, ignore_errors={ - "*": {"ER03"}, + None: {"ER03"}, "pandas.DataFrame.align": {"ER01"}, # ignoring an error that is not requested should be of no effect "pandas.Index.all": {"ER03"} @@ -399,7 +399,7 @@ def test_exit_status_for_main(self, monkeypatch) -> None: prefix=None, output_format="default", ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 3 @@ -429,7 +429,7 @@ def test_exit_status_errors_for_validate_all(self, monkeypatch) -> None: prefix=None, output_format="default", ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 5 @@ -447,7 +447,7 @@ def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch) -> None: output_format="default", prefix=None, ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 0 @@ -471,7 +471,7 @@ def 
test_exit_status_for_validate_all_json(self, monkeypatch) -> None: output_format="json", prefix=None, ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 0 @@ -515,7 +515,7 @@ def test_errors_param_filters_errors(self, monkeypatch) -> None: output_format="default", prefix=None, ignore_deprecated=False, - ignore_errors={"*": {"ER02", "ER03"}}, + ignore_errors={None: {"ER02", "ER03"}}, ) assert exit_status == 3 @@ -524,6 +524,6 @@ def test_errors_param_filters_errors(self, monkeypatch) -> None: output_format="default", prefix=None, ignore_deprecated=False, - ignore_errors={"*": {"ER01", "ER02"}}, + ignore_errors={None: {"ER01", "ER02"}}, ) assert exit_status == 1 diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 0057f97ffa211..55acfaac4d843 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -365,7 +365,7 @@ def print_validate_all_results( error_messages = dict(res["errors"]) actual_failures = set(error_messages) expected_failures = (ignore_errors.get(func_name, set()) - | ignore_errors.get("*", set())) + | ignore_errors.get(None, set())) for err_code in actual_failures - expected_failures: sys.stdout.write( f'{prefix}{res["file"]}:{res["file_line"]}:' @@ -383,7 +383,8 @@ def print_validate_all_results( return exit_status -def print_validate_one_results(func_name: str) -> int: +def print_validate_one_results(func_name: str, + ignore_errors: dict[str, set[str]]) -> int: def header(title, width=80, char="#") -> str: full_line = char * width side_len = (width - len(title) - 2) // 2 @@ -394,6 +395,9 @@ def header(title, width=80, char="#") -> str: result = pandas_validate(func_name) + result["errors"] = [(code, message) for code, message in result["errors"] + if code not in ignore_errors.get(None, set())] + sys.stderr.write(header(f"Docstring ({func_name})")) sys.stderr.write(f"{result['docstring']}\n") @@ -415,9 +419,13 @@ def header(title, width=80, char="#") -> str: 
def _format_ignore_errors(raw_ignore_errors): ignore_errors = collections.defaultdict(set) if raw_ignore_errors: - for obj_name, error_codes in raw_ignore_errors: + for error_codes in raw_ignore_errors: + obj_name = None + if " " in error_codes: + obj_name, error_codes = error_codes.split(" ") + # function errors "pandas.Series PR01,SA01" - if obj_name != "*": + if obj_name: if obj_name in ignore_errors: raise ValueError( f"Object `{obj_name}` is present in more than one " @@ -433,7 +441,7 @@ def _format_ignore_errors(raw_ignore_errors): # global errors "PR02,ES01" else: - ignore_errors["*"].update(set(error_codes.split(","))) + ignore_errors[None].update(set(error_codes.split(","))) unknown_errors = ignore_errors["*"] - ALL_ERRORS if unknown_errors: @@ -462,7 +470,7 @@ def main( ignore_errors ) else: - return print_validate_one_results(func_name) + return print_validate_one_results(func_name, ignore_errors) if __name__ == "__main__": @@ -505,11 +513,10 @@ def main( "-i", default=None, action="append", - nargs=2, - metavar=("function", "error_codes"), - help="function for which comma separated list " - "of error codes should not be validated" - "(e.g. pandas.DataFrame.head PR01,SA01). " + help="comma-separated list of error codes " + "(e.g. 'PR02,SA01'), with optional object path " + "to ignore errors for a single object " + "(e.g. pandas.DataFrame.head PR02,SA01). 
" "Partial validation for more than one function" "can be achieved by repeating this parameter.", ) From b9be19b233cdc84f811545a43745c36b40c2c890 Mon Sep 17 00:00:00 2001 From: Trinh Quoc Anh Date: Tue, 19 Mar 2024 20:51:22 +0100 Subject: [PATCH 08/23] DOC: Fix F821 error in docstring (#57863) Fix F821 error in docstring Co-authored-by: Marc Garcia --- pandas/tests/io/xml/conftest.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pandas/tests/io/xml/conftest.py b/pandas/tests/io/xml/conftest.py index 40a94f27e98a9..8900b0dc46754 100644 --- a/pandas/tests/io/xml/conftest.py +++ b/pandas/tests/io/xml/conftest.py @@ -11,7 +11,7 @@ def xml_data_path(): Examples -------- >>> def test_read_xml(xml_data_path): - ... read_xml(xml_data_path / "file.xsl") + ... pd.read_xml(xml_data_path / "file.xsl") """ return Path(__file__).parent.parent / "data" / "xml" @@ -24,7 +24,7 @@ def xml_books(xml_data_path, datapath): Examples -------- >>> def test_read_xml(xml_books): - ... read_xml(xml_books) + ... pd.read_xml(xml_books) """ return datapath(xml_data_path / "books.xml") @@ -37,7 +37,7 @@ def xml_doc_ch_utf(xml_data_path, datapath): Examples -------- >>> def test_read_xml(xml_doc_ch_utf): - ... read_xml(xml_doc_ch_utf) + ... pd.read_xml(xml_doc_ch_utf) """ return datapath(xml_data_path / "doc_ch_utf.xml") @@ -50,7 +50,7 @@ def xml_baby_names(xml_data_path, datapath): Examples -------- >>> def test_read_xml(xml_baby_names): - ... read_xml(xml_baby_names) + ... pd.read_xml(xml_baby_names) """ return datapath(xml_data_path / "baby_names.xml") @@ -63,7 +63,7 @@ def kml_cta_rail_lines(xml_data_path, datapath): Examples -------- >>> def test_read_xml(kml_cta_rail_lines): - ... read_xml( + ... pd.read_xml( ... kml_cta_rail_lines, ... xpath=".//k:Placemark", ... 
namespaces={"k": "http://www.opengis.net/kml/2.2"}, @@ -80,7 +80,7 @@ def xsl_flatten_doc(xml_data_path, datapath): Examples -------- - >>> def test_read_xsl(xsl_flatten_doc): + >>> def test_read_xsl(xsl_flatten_doc, mode): ... with open( ... xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None ... ) as f: @@ -96,7 +96,7 @@ def xsl_row_field_output(xml_data_path, datapath): Examples -------- - >>> def test_read_xsl(xsl_row_field_output): + >>> def test_read_xsl(xsl_row_field_output, mode): ... with open( ... xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None ... ) as f: From 38086f11c2244eef2f135bf17fb0a00afb88b177 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:01:31 -1000 Subject: [PATCH 09/23] PERF: Allow ensure_index_from_sequence to return RangeIndex (#57786) --- pandas/core/indexes/base.py | 44 ++++++++++++++++++++++++++++--- pandas/core/indexes/range.py | 24 +++-------------- pandas/tests/indexes/test_base.py | 8 +++--- 3 files changed, 50 insertions(+), 26 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c2df773326dc9..3c01778e05f3d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -7154,6 +7154,43 @@ def shape(self) -> Shape: return (len(self),) +def maybe_sequence_to_range(sequence) -> Any | range: + """ + Convert a 1D, non-pandas sequence to a range if possible. + + Returns the input if not possible. 
+ + Parameters + ---------- + sequence : 1D sequence + names : sequence of str + + Returns + ------- + Any : input or range + """ + if isinstance(sequence, (ABCSeries, Index)): + return sequence + np_sequence = np.asarray(sequence) + if np_sequence.dtype.kind != "i" or len(np_sequence) == 1: + return sequence + elif len(np_sequence) == 0: + return range(0) + diff = np_sequence[1] - np_sequence[0] + if diff == 0: + return sequence + elif len(np_sequence) == 2: + return range(np_sequence[0], np_sequence[1] + diff, diff) + maybe_range_indexer, remainder = np.divmod(np_sequence - np_sequence[0], diff) + if ( + lib.is_range_indexer(maybe_range_indexer, len(maybe_range_indexer)) + and not remainder.any() + ): + return range(np_sequence[0], np_sequence[-1] + diff, diff) + else: + return sequence + + def ensure_index_from_sequences(sequences, names=None) -> Index: """ Construct an index from sequences of data. @@ -7172,8 +7209,8 @@ def ensure_index_from_sequences(sequences, names=None) -> Index: Examples -------- - >>> ensure_index_from_sequences([[1, 2, 3]], names=["name"]) - Index([1, 2, 3], dtype='int64', name='name') + >>> ensure_index_from_sequences([[1, 2, 4]], names=["name"]) + Index([1, 2, 4], dtype='int64', name='name') >>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"]) MultiIndex([('a', 'a'), @@ -7189,8 +7226,9 @@ def ensure_index_from_sequences(sequences, names=None) -> Index: if len(sequences) == 1: if names is not None: names = names[0] - return Index(sequences[0], name=names) + return Index(maybe_sequence_to_range(sequences[0]), name=names) else: + # TODO: Apply maybe_sequence_to_range to sequences? 
return MultiIndex.from_arrays(sequences, names=names) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 82bf8d7c70c7e..c573828a22032 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -29,7 +29,6 @@ doc, ) -from pandas.core.dtypes import missing from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.common import ( ensure_platform_int, @@ -475,28 +474,13 @@ def _shallow_copy(self, values, name: Hashable = no_default): if values.dtype.kind == "i" and values.ndim == 1: # GH 46675 & 43885: If values is equally spaced, return a # more memory-compact RangeIndex instead of Index with 64-bit dtype - if len(values) == 0: - return type(self)._simple_new(_empty_range, name=name) - elif len(values) == 1: + if len(values) == 1: start = values[0] new_range = range(start, start + self.step, self.step) return type(self)._simple_new(new_range, name=name) - diff = values[1] - values[0] - if not missing.isna(diff) and diff != 0: - if len(values) == 2: - # Can skip is_range_indexer check - new_range = range(values[0], values[-1] + diff, diff) - return type(self)._simple_new(new_range, name=name) - else: - maybe_range_indexer, remainder = np.divmod(values - values[0], diff) - if ( - lib.is_range_indexer( - maybe_range_indexer, len(maybe_range_indexer) - ) - and not remainder.any() - ): - new_range = range(values[0], values[-1] + diff, diff) - return type(self)._simple_new(new_range, name=name) + maybe_range = ibase.maybe_sequence_to_range(values) + if isinstance(maybe_range, range): + return type(self)._simple_new(maybe_range, name=name) return self._constructor._simple_new(values, name=name) def _view(self) -> Self: diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 4c703c3af944b..beee14197bfb8 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1514,8 +1514,10 @@ class TestIndexUtils: @pytest.mark.parametrize( "data, names, 
expected", [ - ([[1, 2, 3]], None, Index([1, 2, 3])), - ([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")), + ([[1, 2, 4]], None, Index([1, 2, 4])), + ([[1, 2, 4]], ["name"], Index([1, 2, 4], name="name")), + ([[1, 2, 3]], None, RangeIndex(1, 4)), + ([[1, 2, 3]], ["name"], RangeIndex(1, 4, name="name")), ( [["a", "a"], ["c", "d"]], None, @@ -1530,7 +1532,7 @@ class TestIndexUtils: ) def test_ensure_index_from_sequences(self, data, names, expected): result = ensure_index_from_sequences(data, names) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact=True) def test_ensure_index_mixed_closed_intervals(self): # GH27172 From 495f80896852e450badc8866ee7ebe8c434fa228 Mon Sep 17 00:00:00 2001 From: Trinh Quoc Anh Date: Tue, 19 Mar 2024 23:20:15 +0100 Subject: [PATCH 10/23] STYLE: Detect unnecessary pylint ignore (#57918) --- .pre-commit-config.yaml | 2 +- pandas/core/groupby/indexing.py | 1 - pandas/core/tools/datetimes.py | 1 - pandas/tests/io/formats/test_to_latex.py | 1 - pandas/tests/series/test_iteration.py | 2 -- 5 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 190ea32203807..41f1c4c6892a3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -78,7 +78,7 @@ repos: hooks: - id: pylint stages: [manual] - args: [--load-plugins=pylint.extensions.redefined_loop_name] + args: [--load-plugins=pylint.extensions.redefined_loop_name, --fail-on=I0021] - id: pylint alias: redefined-outer-name name: Redefining name from outer scope diff --git a/pandas/core/groupby/indexing.py b/pandas/core/groupby/indexing.py index 75c0a062b57d0..c658f625d5ea9 100644 --- a/pandas/core/groupby/indexing.py +++ b/pandas/core/groupby/indexing.py @@ -114,7 +114,6 @@ def _positional_selector(self) -> GroupByPositionalSelector: 4 b 5 """ if TYPE_CHECKING: - # pylint: disable-next=used-before-assignment groupby_self = cast(groupby.GroupBy, self) else: groupby_self = self diff 
--git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index b8b1d39d4eb20..2aeb1aff07a54 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -993,7 +993,6 @@ def to_datetime( errors=errors, exact=exact, ) - # pylint: disable-next=used-before-assignment result: Timestamp | NaTType | Series | Index if isinstance(arg, Timestamp): diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index b9d5f04cb203b..1de53993fe646 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -1311,7 +1311,6 @@ def test_to_latex_multiindex_names(self, name0, name1, axes): ) col_names = [n if (bool(n) and 1 in axes) else "" for n in names] observed = df.to_latex(multirow=False) - # pylint: disable-next=consider-using-f-string expected = r"""\begin{tabular}{llrrrr} \toprule & %s & \multicolumn{2}{r}{1} & \multicolumn{2}{r}{2} \\ diff --git a/pandas/tests/series/test_iteration.py b/pandas/tests/series/test_iteration.py index edc82455234bb..1e0fa7fae107e 100644 --- a/pandas/tests/series/test_iteration.py +++ b/pandas/tests/series/test_iteration.py @@ -4,12 +4,10 @@ def test_keys(self, datetime_series): def test_iter_datetimes(self, datetime_series): for i, val in enumerate(datetime_series): - # pylint: disable-next=unnecessary-list-index-lookup assert val == datetime_series.iloc[i] def test_iter_strings(self, string_series): for i, val in enumerate(string_series): - # pylint: disable-next=unnecessary-list-index-lookup assert val == string_series.iloc[i] def test_iteritems_datetimes(self, datetime_series): From c12f978339edada1aab07adbac0a07896afd7e3e Mon Sep 17 00:00:00 2001 From: William Ayd Date: Tue, 19 Mar 2024 20:22:28 -0400 Subject: [PATCH 11/23] Remove Cython warnings (#57919) Remove Cython warning --- pandas/_libs/tslibs/util.pxd | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/_libs/tslibs/util.pxd 
b/pandas/_libs/tslibs/util.pxd index e4ac3a9e167a3..a5822e57d3fa6 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -185,11 +185,11 @@ cdef inline const char* get_c_string(str py_string) except NULL: return get_c_string_buf_and_size(py_string, NULL) -cdef inline bytes string_encode_locale(str py_string) noexcept: +cdef inline bytes string_encode_locale(str py_string): """As opposed to PyUnicode_Encode, use current system locale to encode.""" return PyUnicode_EncodeLocale(py_string, NULL) -cdef inline object char_to_string_locale(const char* data) noexcept: +cdef inline object char_to_string_locale(const char* data): """As opposed to PyUnicode_FromString, use current system locale to decode.""" return PyUnicode_DecodeLocale(data, NULL) From 716b047a4b750f818c758a56dde0fcffdc6994e9 Mon Sep 17 00:00:00 2001 From: Trinh Quoc Anh Date: Wed, 20 Mar 2024 01:23:13 +0100 Subject: [PATCH 12/23] CLN: Remove unused code (#57920) --- pandas/conftest.py | 4 ---- pandas/core/base.py | 6 ------ pandas/core/dtypes/astype.py | 2 -- 3 files changed, 12 deletions(-) diff --git a/pandas/conftest.py b/pandas/conftest.py index 9302c581fd497..50a94b35c2edc 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1231,10 +1231,6 @@ def tz_aware_fixture(request): return request.param -# Generate cartesian product of tz_aware_fixture: -tz_aware_fixture2 = tz_aware_fixture - - _UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc] if zoneinfo is not None: _UTCS.append(zoneinfo.ZoneInfo("UTC")) diff --git a/pandas/core/base.py b/pandas/core/base.py index 33b37319675ae..987136ffdff7d 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -87,12 +87,6 @@ _shared_docs: dict[str, str] = {} -_indexops_doc_kwargs = { - "klass": "IndexOpsMixin", - "inplace": "", - "unique": "IndexOpsMixin", - "duplicated": "IndexOpsMixin", -} class PandasObject(DirNamesMixin): diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py index 
d351d13fdfeb6..086f7d2da6640 100644 --- a/pandas/core/dtypes/astype.py +++ b/pandas/core/dtypes/astype.py @@ -37,8 +37,6 @@ from pandas.core.arrays import ExtensionArray -_dtype_obj = np.dtype(object) - @overload def _astype_nansafe( From 924f246753773b25bfc628eb469573e008cda8ec Mon Sep 17 00:00:00 2001 From: Nick Crews Date: Tue, 19 Mar 2024 16:23:31 -0800 Subject: [PATCH 13/23] BUG: pretty print all Mappings, not just dicts (#57915) This was discovered in https://github.com/ibis-project/ibis/pull/8693 --- doc/source/whatsnew/v3.0.0.rst | 1 + pandas/io/formats/printing.py | 10 +++++----- pandas/tests/io/formats/test_printing.py | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index cb211b0b72dce..f3fcdcdb79ed6 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -357,6 +357,7 @@ MultiIndex I/O ^^^ - Bug in :meth:`DataFrame.to_excel` when writing empty :class:`DataFrame` with :class:`MultiIndex` on both axes (:issue:`57696`) +- Now all ``Mapping`` s are pretty printed correctly. Before only literal ``dict`` s were. (:issue:`57915`) - - diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 214d1d7079fdb..0bd4f2935f4d0 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -187,8 +187,8 @@ def pprint_thing( _nest_lvl : internal use only. pprint_thing() is mutually-recursive with pprint_sequence, this argument is used to keep track of the current nesting level, and limit it. - escape_chars : list or dict, optional - Characters to escape. If a dict is passed the values are the + escape_chars : list[str] or Mapping[str, str], optional + Characters to escape. 
If a Mapping is passed the values are the replacements default_escapes : bool, default False Whether the input escape characters replaces or adds to the defaults @@ -204,11 +204,11 @@ def as_escaped_string( thing: Any, escape_chars: EscapeChars | None = escape_chars ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} - if isinstance(escape_chars, dict): + if isinstance(escape_chars, Mapping): if default_escapes: translate.update(escape_chars) else: - translate = escape_chars + translate = escape_chars # type: ignore[assignment] escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or () @@ -220,7 +220,7 @@ def as_escaped_string( if hasattr(thing, "__next__"): return str(thing) - elif isinstance(thing, dict) and _nest_lvl < get_option( + elif isinstance(thing, Mapping) and _nest_lvl < get_option( "display.pprint_nest_depth" ): result = _pprint_dict( diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index acf2bc72c687d..1009dfec53218 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -1,5 +1,6 @@ # Note! This file is aimed specifically at pandas.io.formats.printing utility # functions, not the general printing of pandas objects. 
+from collections.abc import Mapping import string import pandas._config.config as cf @@ -16,6 +17,17 @@ def test_adjoin(): assert adjoined == expected +class MyMapping(Mapping): + def __getitem__(self, key): + return 4 + + def __iter__(self): + return iter(["a", "b"]) + + def __len__(self): + return 2 + + class TestPPrintThing: def test_repr_binary_type(self): letters = string.ascii_letters @@ -42,6 +54,12 @@ def test_repr_obeys_max_seq_limit(self): def test_repr_set(self): assert printing.pprint_thing({1}) == "{1}" + def test_repr_dict(self): + assert printing.pprint_thing({"a": 4, "b": 4}) == "{'a': 4, 'b': 4}" + + def test_repr_mapping(self): + assert printing.pprint_thing(MyMapping()) == "{'a': 4, 'b': 4}" + class TestFormatBase: def test_adjoin(self): From bdc1485ddce07d6e01a6f146388a76c6f8e81b06 Mon Sep 17 00:00:00 2001 From: Paul <53956863+hutch3232@users.noreply.github.com> Date: Wed, 20 Mar 2024 12:32:14 -0400 Subject: [PATCH 14/23] DOC: fix minor typos and grammar missing_data.rst (#57929) fix minor typos and grammar missing_data.rst --- doc/source/user_guide/missing_data.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index aea7688c062b8..2e104ac06f9f4 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -88,7 +88,7 @@ To detect these missing value, use the :func:`isna` or :func:`notna` methods. .. warning:: - Experimental: the behaviour of :class:`NA`` can still change without warning. + Experimental: the behaviour of :class:`NA` can still change without warning. Starting from pandas 1.0, an experimental :class:`NA` value (singleton) is available to represent scalar missing values. 
The goal of :class:`NA` is provide a @@ -105,7 +105,7 @@ dtype, it will use :class:`NA`: s[2] s[2] is pd.NA -Currently, pandas does not yet use those data types using :class:`NA` by default +Currently, pandas does not use those data types using :class:`NA` by default in a :class:`DataFrame` or :class:`Series`, so you need to specify the dtype explicitly. An easy way to convert to those dtypes is explained in the :ref:`conversion section `. @@ -253,8 +253,8 @@ Conversion ^^^^^^^^^^ If you have a :class:`DataFrame` or :class:`Series` using ``np.nan``, -:meth:`Series.convert_dtypes` and :meth:`DataFrame.convert_dtypes` -in :class:`DataFrame` that can convert data to use the data types that use :class:`NA` +:meth:`DataFrame.convert_dtypes` and :meth:`Series.convert_dtypes`, respectively, +will convert your data to use the nullable data types supporting :class:`NA`, such as :class:`Int64Dtype` or :class:`ArrowDtype`. This is especially helpful after reading in data sets from IO methods where data types were inferred. 
From faa4c0400d5181ffb2fb8351743ca5eb36e436cf Mon Sep 17 00:00:00 2001 From: Stefano Silvestri Date: Wed, 20 Mar 2024 17:39:36 +0100 Subject: [PATCH 15/23] DOC: Getting started tutorials css adjustments (#57916) fixes #57912 --- doc/source/_static/css/getting_started.css | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/_static/css/getting_started.css b/doc/source/_static/css/getting_started.css index 0d53bbde94ae3..b02311eb66080 100644 --- a/doc/source/_static/css/getting_started.css +++ b/doc/source/_static/css/getting_started.css @@ -248,6 +248,7 @@ ul.task-bullet > li > p:first-child { } .tutorial-card .card-header { + --bs-card-cap-color: var(--pst-color-text-base); cursor: pointer; background-color: var(--pst-color-surface); border: 1px solid var(--pst-color-border) @@ -269,7 +270,7 @@ ul.task-bullet > li > p:first-child { .tutorial-card .gs-badge-link a { - color: var(--pst-color-text-base); + color: var(--pst-color-primary-text); text-decoration: none; } From d23f95fda6c3067e61d844aca09aefbde2bdb51e Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 20 Mar 2024 06:41:53 -1000 Subject: [PATCH 16/23] REF: Avoid new object creation when reverse slicing when possible (#57902) * REF: Avoid new objects when reverse slicing when possible * Adjust test * Remove astypes * Fix typing --- pandas/core/arrays/datetimelike.py | 9 ++-- pandas/core/indexes/base.py | 2 +- pandas/core/indexes/multi.py | 2 +- pandas/core/indexes/range.py | 54 +++++++++++----------- pandas/core/indexing.py | 2 +- pandas/core/internals/managers.py | 6 +-- pandas/core/reshape/reshape.py | 5 +- pandas/core/series.py | 4 +- pandas/core/sorting.py | 11 +++-- pandas/tests/indexes/ranges/test_range.py | 11 +++-- pandas/tests/indexes/ranges/test_setops.py | 6 +-- 11 files changed, 59 insertions(+), 53 deletions(-) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 
ba2c936b75d9e..745774b34a3ad 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -2371,11 +2371,12 @@ def factorize( ): if self.freq is not None: # We must be unique, so can short-circuit (and retain freq) - codes = np.arange(len(self), dtype=np.intp) - uniques = self.copy() # TODO: copy or view? if sort and self.freq.n < 0: - codes = codes[::-1] - uniques = uniques[::-1] + codes = np.arange(len(self) - 1, -1, -1, dtype=np.intp) + uniques = self[::-1] + else: + codes = np.arange(len(self), dtype=np.intp) + uniques = self.copy() # TODO: copy or view? return codes, uniques if sort: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 3c01778e05f3d..62facb89a2f16 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2116,7 +2116,7 @@ def droplevel(self, level: IndexLabel = 0): if not isinstance(level, (tuple, list)): level = [level] - levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] + levnums = sorted((self._get_level_number(lev) for lev in level), reverse=True) return self._drop_level_numbers(levnums) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 2cb05dadd5981..2e554bc848ffe 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3589,7 +3589,7 @@ def _reorder_indexer( new_order = key_order_map[self.codes[i][indexer]] elif isinstance(k, slice) and k.step is not None and k.step < 0: # flip order for negative step - new_order = np.arange(n)[::-1][indexer] + new_order = np.arange(n - 1, -1, -1)[indexer] elif isinstance(k, slice) and k.start is None and k.stop is None: # slice(None) should not determine order GH#31330 new_order = np.ones((n,), dtype=np.intp)[indexer] diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index c573828a22032..0ba3c22093c69 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -64,6 +64,12 @@ _dtype_int64 = np.dtype(np.int64) +def 
min_fitting_element(start: int, step: int, lower_limit: int) -> int: + """Returns the smallest element greater than or equal to the limit""" + no_steps = -(-(lower_limit - start) // abs(step)) + return start + abs(step) * no_steps + + class RangeIndex(Index): """ Immutable Index implementing a monotonic integer range. @@ -570,25 +576,30 @@ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: kwargs.pop("kind", None) # e.g. "mergesort" is irrelevant nv.validate_argsort(args, kwargs) + start, stop, step = None, None, None if self._range.step > 0: - result = np.arange(len(self), dtype=np.intp) + if ascending: + start = len(self) + else: + start, stop, step = len(self) - 1, -1, -1 + elif ascending: + start, stop, step = len(self) - 1, -1, -1 else: - result = np.arange(len(self) - 1, -1, -1, dtype=np.intp) + start = len(self) - if not ascending: - result = result[::-1] - return result + return np.arange(start, stop, step, dtype=np.intp) def factorize( self, sort: bool = False, use_na_sentinel: bool = True, ) -> tuple[npt.NDArray[np.intp], RangeIndex]: - codes = np.arange(len(self), dtype=np.intp) - uniques = self if sort and self.step < 0: - codes = codes[::-1] - uniques = uniques[::-1] + codes = np.arange(len(self) - 1, -1, -1, dtype=np.intp) + uniques = self[::-1] + else: + codes = np.arange(len(self), dtype=np.intp) + uniques = self return codes, uniques def equals(self, other: object) -> bool: @@ -699,26 +710,15 @@ def _intersection(self, other: Index, sort: bool = False): # intersection disregarding the lower bounds tmp_start = first.start + (second.start - first.start) * first.step // gcd * s new_step = first.step * second.step // gcd - new_range = range(tmp_start, int_high, new_step) - new_index = self._simple_new(new_range) # adjust index to limiting interval - new_start = new_index._min_fitting_element(int_low) - new_range = range(new_start, new_index.stop, new_index.step) - new_index = self._simple_new(new_range) + new_start = 
min_fitting_element(tmp_start, new_step, int_low) + new_range = range(new_start, int_high, new_step) - if (self.step < 0 and other.step < 0) is not (new_index.step < 0): - new_index = new_index[::-1] + if (self.step < 0 and other.step < 0) is not (new_range.step < 0): + new_range = new_range[::-1] - if sort is None: - new_index = new_index.sort_values() - - return new_index - - def _min_fitting_element(self, lower_limit: int) -> int: - """Returns the smallest element greater than or equal to the limit""" - no_steps = -(-(lower_limit - self.start) // abs(self.step)) - return self.start + abs(self.step) * no_steps + return self._simple_new(new_range) def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]: """ @@ -904,9 +904,9 @@ def _difference(self, other, sort=None): # e.g. range(10) and range(0, 10, 3) return super()._difference(other, sort=sort) - new_index = type(self)._simple_new(new_rng, name=res_name) if first is not self._range: - new_index = new_index[::-1] + new_rng = new_rng[::-1] + new_index = type(self)._simple_new(new_rng, name=res_name) return new_index diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c7a938dbc4449..c8a2e11dce3d7 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1145,7 +1145,7 @@ def _contains_slice(x: object) -> bool: # GH#41369 Loop in reverse order ensures indexing along columns before rows # which selects only necessary blocks which avoids dtype conversion if possible axis = len(tup) - 1 - for key in tup[::-1]: + for key in reversed(tup): if com.is_null_slice(key): axis -= 1 continue diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index d920ebc60de8c..af851e1fc8224 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1549,9 +1549,9 @@ def _insert_update_blklocs_and_blknos(self, loc) -> None: self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: - # 
np.append is a lot faster, let's use it if we can. - self._blklocs = np.append(self._blklocs[::-1], 0)[::-1] - self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1] + # As of numpy 1.26.4, np.concatenate faster than np.append + self._blklocs = np.concatenate([[0], self._blklocs]) + self._blknos = np.concatenate([[len(self.blocks)], self._blknos]) else: new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( self.blklocs, self.blknos, loc, len(self.blocks) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index b28010c13d6dd..ff358e8ba346c 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -910,9 +910,10 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame: raise ValueError("Columns with duplicate values are not supported in stack") # If we need to drop `level` from columns, it needs to be in descending order + set_levels = set(level) drop_levnums = sorted(level, reverse=True) stack_cols = frame.columns._drop_level_numbers( - [k for k in range(frame.columns.nlevels) if k not in level][::-1] + [k for k in range(frame.columns.nlevels - 1, -1, -1) if k not in set_levels] ) if len(level) > 1: # Arrange columns in the order we want to take them, e.g. 
level=[2, 0, 1] @@ -936,7 +937,7 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame: idx = (idx,) gen = iter(idx) column_indexer = tuple( - next(gen) if k in level else slice(None) + next(gen) if k in set_levels else slice(None) for k in range(frame.columns.nlevels) ) data = frame.loc[:, column_indexer] diff --git a/pandas/core/series.py b/pandas/core/series.py index 8a7c1531205e0..08e56cb4925b3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5510,9 +5510,9 @@ def case_when( replacements = updated_replacements default = default.astype(common_dtype) - counter = reversed(range(len(conditions))) + counter = range(len(conditions) - 1, -1, -1) for position, condition, replacement in zip( - counter, conditions[::-1], replacements[::-1] + counter, reversed(conditions), reversed(replacements) ): try: default = default.mask( diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 4774b013fc428..493e856c6dcc6 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -2,6 +2,7 @@ from __future__ import annotations +import itertools from typing import ( TYPE_CHECKING, Callable, @@ -334,13 +335,15 @@ def lexsort_indexer( raise ValueError(f"invalid na_position: {na_position}") if isinstance(orders, bool): - orders = [orders] * len(keys) + orders = itertools.repeat(orders, len(keys)) elif orders is None: - orders = [True] * len(keys) + orders = itertools.repeat(True, len(keys)) + else: + orders = reversed(orders) labels = [] - for k, order in zip(keys, orders): + for k, order in zip(reversed(keys), orders): k = ensure_key_mapped(k, key) if codes_given: codes = cast(np.ndarray, k) @@ -361,7 +364,7 @@ def lexsort_indexer( labels.append(codes) - return np.lexsort(labels[::-1]) + return np.lexsort(labels) def nargsort( diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 72762db21b0c5..c9ddbf4464b29 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ 
b/pandas/tests/indexes/ranges/test_range.py @@ -9,6 +9,7 @@ RangeIndex, ) import pandas._testing as tm +from pandas.core.indexes.range import min_fitting_element class TestRangeIndex: @@ -419,21 +420,21 @@ def test_extended_gcd(self, simple_index): assert 2 == result[0] def test_min_fitting_element(self): - result = RangeIndex(0, 20, 2)._min_fitting_element(1) + result = min_fitting_element(0, 2, 1) assert 2 == result - result = RangeIndex(1, 6)._min_fitting_element(1) + result = min_fitting_element(1, 1, 1) assert 1 == result - result = RangeIndex(18, -2, -2)._min_fitting_element(1) + result = min_fitting_element(18, -2, 1) assert 2 == result - result = RangeIndex(5, 0, -1)._min_fitting_element(1) + result = min_fitting_element(5, -1, 1) assert 1 == result big_num = 500000000000000000000000 - result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num) + result = min_fitting_element(5, 1, big_num) assert big_num == result def test_slice_specialised(self, simple_index): diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index d417b8b743dc5..ac24ff828cb8f 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -93,12 +93,12 @@ def test_intersection(self, sort): # GH 17296: intersect two decreasing RangeIndexes first = RangeIndex(10, -2, -2) other = RangeIndex(5, -4, -1) - expected = first.astype(int).intersection(other.astype(int), sort=sort) - result = first.intersection(other, sort=sort).astype(int) + expected = RangeIndex(start=4, stop=-2, step=-2) + result = first.intersection(other, sort=sort) tm.assert_index_equal(result, expected) # reversed - result = other.intersection(first, sort=sort).astype(int) + result = other.intersection(first, sort=sort) tm.assert_index_equal(result, expected) index = RangeIndex(5, name="foo") From eb55bca5bf91541a5c4f6213b18824589415127b Mon Sep 17 00:00:00 2001 From: William Ayd Date: Wed, 20 Mar 2024 12:58:27 -0400 
Subject: [PATCH 17/23] Refactored pandas_timedelta_to_timedeltastruct (#55999) * Refactored pandas_timedelta_to_timedeltastruct * use generic macros * Revert "use generic macros" This reverts commit 29d115d16543c14d47cb4773a7c2bc8c62614d76. * fix sign issue * wextra fixes * more wextra * remove extraneous parantheses --- .../src/vendored/numpy/datetime/np_datetime.c | 374 ++++-------------- 1 file changed, 70 insertions(+), 304 deletions(-) diff --git a/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c b/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c index 934c54fafb634..f854f7b9210d8 100644 --- a/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c +++ b/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c @@ -710,355 +710,121 @@ void pandas_datetime_to_datetimestruct(npy_datetime dt, NPY_DATETIMEUNIT base, void pandas_timedelta_to_timedeltastruct(npy_timedelta td, NPY_DATETIMEUNIT base, pandas_timedeltastruct *out) { - npy_int64 frac; - npy_int64 sfrac; - npy_int64 ifrac; - int sign; - npy_int64 per_day; - npy_int64 per_sec; - /* Initialize the output to all zeros */ memset(out, 0, sizeof(pandas_timedeltastruct)); - switch (base) { - case NPY_FR_ns: - - per_day = 86400000000000LL; - per_sec = 1000LL * 1000LL * 1000LL; - - // put frac in seconds - if (td < 0 && td % per_sec != 0) - frac = td / per_sec - 1; - else - frac = td / per_sec; - - if (frac < 0) { - sign = -1; - - // even fraction - if ((-frac % 86400LL) != 0) { - out->days = -frac / 86400LL + 1; - frac += 86400LL * out->days; - } else { - frac = -frac; - } - } else { - sign = 1; - out->days = 0; - } - - if (frac >= 86400) { - out->days += frac / 86400LL; - frac -= out->days * 86400LL; - } - - if (frac >= 3600) { - out->hrs = (npy_int32)(frac / 3600LL); - frac -= out->hrs * 3600LL; - } else { - out->hrs = 0; - } - - if (frac >= 60) { - out->min = (npy_int32)(frac / 60LL); - frac -= out->min * 60LL; - } else { - out->min = 0; - } - - if (frac >= 0) { - out->sec = (npy_int32)frac; - frac -= 
out->sec; - } else { - out->sec = 0; - } + const npy_int64 sec_per_hour = 3600; + const npy_int64 sec_per_min = 60; - sfrac = (out->hrs * 3600LL + out->min * 60LL + out->sec) * per_sec; - - if (sign < 0) - out->days = -out->days; - - ifrac = td - (out->days * per_day + sfrac); - - if (ifrac != 0) { - out->ms = (npy_int32)(ifrac / (1000LL * 1000LL)); - ifrac -= out->ms * 1000LL * 1000LL; - out->us = (npy_int32)(ifrac / 1000LL); - ifrac -= out->us * 1000LL; - out->ns = (npy_int32)ifrac; - } else { - out->ms = 0; - out->us = 0; - out->ns = 0; - } + switch (base) { + case NPY_FR_W: + out->days = 7 * td; break; - - case NPY_FR_us: - - per_day = 86400000000LL; - per_sec = 1000LL * 1000LL; - - // put frac in seconds - if (td < 0 && td % per_sec != 0) - frac = td / per_sec - 1; - else - frac = td / per_sec; - - if (frac < 0) { - sign = -1; - - // even fraction - if ((-frac % 86400LL) != 0) { - out->days = -frac / 86400LL + 1; - frac += 86400LL * out->days; - } else { - frac = -frac; - } - } else { - sign = 1; - out->days = 0; - } - - if (frac >= 86400) { - out->days += frac / 86400LL; - frac -= out->days * 86400LL; - } - - if (frac >= 3600) { - out->hrs = (npy_int32)(frac / 3600LL); - frac -= out->hrs * 3600LL; - } else { - out->hrs = 0; - } - - if (frac >= 60) { - out->min = (npy_int32)(frac / 60LL); - frac -= out->min * 60LL; - } else { - out->min = 0; - } - - if (frac >= 0) { - out->sec = (npy_int32)frac; - frac -= out->sec; - } else { - out->sec = 0; - } - - sfrac = (out->hrs * 3600LL + out->min * 60LL + out->sec) * per_sec; - - if (sign < 0) - out->days = -out->days; - - ifrac = td - (out->days * per_day + sfrac); - - if (ifrac != 0) { - out->ms = (npy_int32)(ifrac / 1000LL); - ifrac -= out->ms * 1000LL; - out->us = (npy_int32)(ifrac / 1L); - ifrac -= out->us * 1L; - out->ns = (npy_int32)ifrac; - } else { - out->ms = 0; - out->us = 0; - out->ns = 0; - } + case NPY_FR_D: + out->days = td; break; - + case NPY_FR_h: + out->days = td / 24LL; + td -= out->days * 24LL; + 
out->hrs = (npy_int32)td; + break; + case NPY_FR_m: + out->days = td / 1440LL; + td -= out->days * 1440LL; + out->hrs = (npy_int32)(td / 60LL); + td -= out->hrs * 60LL; + out->min = (npy_int32)td; + break; + case NPY_FR_s: case NPY_FR_ms: - - per_day = 86400000LL; - per_sec = 1000LL; - - // put frac in seconds - if (td < 0 && td % per_sec != 0) - frac = td / per_sec - 1; - else - frac = td / per_sec; - - if (frac < 0) { - sign = -1; - - // even fraction - if ((-frac % 86400LL) != 0) { - out->days = -frac / 86400LL + 1; - frac += 86400LL * out->days; - } else { - frac = -frac; - } - } else { - sign = 1; - out->days = 0; - } - - if (frac >= 86400) { - out->days += frac / 86400LL; - frac -= out->days * 86400LL; - } - - if (frac >= 3600) { - out->hrs = (npy_int32)(frac / 3600LL); - frac -= out->hrs * 3600LL; - } else { - out->hrs = 0; - } - - if (frac >= 60) { - out->min = (npy_int32)(frac / 60LL); - frac -= out->min * 60LL; - } else { - out->min = 0; - } - - if (frac >= 0) { - out->sec = (npy_int32)frac; - frac -= out->sec; - } else { - out->sec = 0; - } - - sfrac = (out->hrs * 3600LL + out->min * 60LL + out->sec) * per_sec; - - if (sign < 0) - out->days = -out->days; - - ifrac = td - (out->days * per_day + sfrac); - - if (ifrac != 0) { - out->ms = (npy_int32)ifrac; - out->us = 0; - out->ns = 0; + case NPY_FR_us: + case NPY_FR_ns: { + const npy_int64 sec_per_day = 86400; + npy_int64 per_sec; + if (base == NPY_FR_s) { + per_sec = 1; + } else if (base == NPY_FR_ms) { + per_sec = 1000; + } else if (base == NPY_FR_us) { + per_sec = 1000000; } else { - out->ms = 0; - out->us = 0; - out->ns = 0; + per_sec = 1000000000; } - break; - - case NPY_FR_s: - // special case where we can simplify many expressions bc per_sec=1 - - per_day = 86400LL; - per_sec = 1L; + const npy_int64 per_day = sec_per_day * per_sec; + npy_int64 frac; // put frac in seconds if (td < 0 && td % per_sec != 0) frac = td / per_sec - 1; else frac = td / per_sec; + const int sign = frac < 0 ? 
-1 : 1; if (frac < 0) { - sign = -1; - // even fraction - if ((-frac % 86400LL) != 0) { - out->days = -frac / 86400LL + 1; - frac += 86400LL * out->days; + if ((-frac % sec_per_day) != 0) { + out->days = -frac / sec_per_day + 1; + frac += sec_per_day * out->days; } else { frac = -frac; } - } else { - sign = 1; - out->days = 0; } - if (frac >= 86400) { - out->days += frac / 86400LL; - frac -= out->days * 86400LL; + if (frac >= sec_per_day) { + out->days += frac / sec_per_day; + frac -= out->days * sec_per_day; } - if (frac >= 3600) { - out->hrs = (npy_int32)(frac / 3600LL); - frac -= out->hrs * 3600LL; - } else { - out->hrs = 0; + if (frac >= sec_per_hour) { + out->hrs = (npy_int32)(frac / sec_per_hour); + frac -= out->hrs * sec_per_hour; } - if (frac >= 60) { - out->min = (npy_int32)(frac / 60LL); - frac -= out->min * 60LL; - } else { - out->min = 0; + if (frac >= sec_per_min) { + out->min = (npy_int32)(frac / sec_per_min); + frac -= out->min * sec_per_min; } if (frac >= 0) { out->sec = (npy_int32)frac; frac -= out->sec; - } else { - out->sec = 0; } - sfrac = (out->hrs * 3600LL + out->min * 60LL + out->sec) * per_sec; - if (sign < 0) out->days = -out->days; - ifrac = td - (out->days * per_day + sfrac); - - if (ifrac != 0) { - out->ms = 0; - out->us = 0; - out->ns = 0; - } else { - out->ms = 0; - out->us = 0; - out->ns = 0; + if (base > NPY_FR_s) { + const npy_int64 sfrac = + (out->hrs * sec_per_hour + out->min * sec_per_min + out->sec) * + per_sec; + + npy_int64 ifrac = td - (out->days * per_day + sfrac); + + if (base == NPY_FR_ms) { + out->ms = (npy_int32)ifrac; + } else if (base == NPY_FR_us) { + out->ms = (npy_int32)(ifrac / 1000LL); + ifrac = ifrac % 1000LL; + out->us = (npy_int32)ifrac; + } else if (base == NPY_FR_ns) { + out->ms = (npy_int32)(ifrac / (1000LL * 1000LL)); + ifrac = ifrac % (1000LL * 1000LL); + out->us = (npy_int32)(ifrac / 1000LL); + ifrac = ifrac % 1000LL; + out->ns = (npy_int32)ifrac; + } } - break; - - case NPY_FR_m: - - out->days = td / 
1440LL; - td -= out->days * 1440LL; - out->hrs = (npy_int32)(td / 60LL); - td -= out->hrs * 60LL; - out->min = (npy_int32)td; - - out->sec = 0; - out->ms = 0; - out->us = 0; - out->ns = 0; - break; - - case NPY_FR_h: - out->days = td / 24LL; - td -= out->days * 24LL; - out->hrs = (npy_int32)td; - - out->min = 0; - out->sec = 0; - out->ms = 0; - out->us = 0; - out->ns = 0; - break; - - case NPY_FR_D: - out->days = td; - out->hrs = 0; - out->min = 0; - out->sec = 0; - out->ms = 0; - out->us = 0; - out->ns = 0; - break; - - case NPY_FR_W: - out->days = 7 * td; - out->hrs = 0; - out->min = 0; - out->sec = 0; - out->ms = 0; - out->us = 0; - out->ns = 0; - break; + } break; default: PyErr_SetString(PyExc_RuntimeError, "NumPy timedelta metadata is corrupted with " "invalid base unit"); + break; } - out->seconds = out->hrs * 3600 + out->min * 60 + out->sec; + out->seconds = + (npy_int32)(out->hrs * sec_per_hour + out->min * sec_per_min + out->sec); out->microseconds = out->ms * 1000 + out->us; out->nanoseconds = out->ns; } From 114a84d8a0eee8fb93a4d2d701a2a6e62ebcf6d2 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Wed, 20 Mar 2024 13:00:00 -0400 Subject: [PATCH 18/23] Cython guard against [c|m|re]alloc failures (#57705) --- pandas/_libs/algos.pyx | 2 ++ pandas/_libs/groupby.pyx | 4 ++++ pandas/_libs/hashing.pyx | 4 ++++ pandas/_libs/hashtable_class_helper.pxi.in | 12 ++++++++++-- pandas/_libs/sas.pyx | 2 +- pandas/_libs/tslibs/period.pyx | 2 ++ 6 files changed, 23 insertions(+), 3 deletions(-) diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index e70ac26a2c28e..e2e93c5242b24 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -180,6 +180,8 @@ def is_lexsorted(list_of_arrays: list) -> bool: n = len(list_of_arrays[0]) cdef int64_t **vecs = malloc(nlevels * sizeof(int64_t*)) + if vecs is NULL: + raise MemoryError() for i in range(nlevels): arr = list_of_arrays[i] assert arr.dtype.name == "int64" diff --git a/pandas/_libs/groupby.pyx 
b/pandas/_libs/groupby.pyx index 391bb4a3a3fd3..2ff45038d6a3e 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -81,6 +81,8 @@ cdef float64_t median_linear_mask(float64_t* a, int n, uint8_t* mask) noexcept n return NaN tmp = malloc((n - na_count) * sizeof(float64_t)) + if tmp is NULL: + raise MemoryError() j = 0 for i in range(n): @@ -118,6 +120,8 @@ cdef float64_t median_linear(float64_t* a, int n) noexcept nogil: return NaN tmp = malloc((n - na_count) * sizeof(float64_t)) + if tmp is NULL: + raise MemoryError() j = 0 for i in range(n): diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index be6958e3315e9..8b424e96973d3 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -68,7 +68,11 @@ def hash_object_array( # create an array of bytes vecs = malloc(n * sizeof(char *)) + if vecs is NULL: + raise MemoryError() lens = malloc(n * sizeof(uint64_t)) + if lens is NULL: + raise MemoryError() for i in range(n): val = arr[i] diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 629b6b42db852..e8827c58b5924 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -257,7 +257,7 @@ cdef class StringVector(Vector): self.data.n = 0 self.data.m = _INIT_VEC_CAP self.data.data = malloc(self.data.m * sizeof(char *)) - if not self.data.data: + if self.data.data is NULL: raise MemoryError() cdef resize(self): @@ -270,7 +270,7 @@ cdef class StringVector(Vector): orig_data = self.data.data self.data.data = malloc(self.data.m * sizeof(char *)) - if not self.data.data: + if self.data.data is NULL: raise MemoryError() for i in range(m): self.data.data[i] = orig_data[i] @@ -975,6 +975,8 @@ cdef class StringHashTable(HashTable): const char **vecs vecs = malloc(n * sizeof(char *)) + if vecs is NULL: + raise MemoryError() for i in range(n): val = values[i] v = get_c_string(val) @@ -1005,6 +1007,8 @@ cdef class 
StringHashTable(HashTable): # these by-definition *must* be strings vecs = malloc(n * sizeof(char *)) + if vecs is NULL: + raise MemoryError() for i in range(n): val = values[i] @@ -1041,6 +1045,8 @@ cdef class StringHashTable(HashTable): # these by-definition *must* be strings vecs = malloc(n * sizeof(char *)) + if vecs is NULL: + raise MemoryError() for i in range(n): val = values[i] @@ -1116,6 +1122,8 @@ cdef class StringHashTable(HashTable): # assign pointers and pre-filter out missing (if ignore_na) vecs = malloc(n * sizeof(char *)) + if vecs is NULL: + raise MemoryError() for i in range(n): val = values[i] diff --git a/pandas/_libs/sas.pyx b/pandas/_libs/sas.pyx index 9e1af2cb9c3e7..209e82c6284f5 100644 --- a/pandas/_libs/sas.pyx +++ b/pandas/_libs/sas.pyx @@ -49,7 +49,7 @@ cdef bytes buf_as_bytes(Buffer buf, size_t offset, size_t length): cdef Buffer buf_new(size_t length) except *: cdef uint8_t *data = calloc(length, sizeof(uint8_t)) - if data == NULL: + if data is NULL: raise MemoryError(f"Failed to allocate {length} bytes") return Buffer(data, length) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 3da0fa182faf3..838b5b9f4595f 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -679,6 +679,8 @@ cdef char* c_strftime(npy_datetimestruct *dts, char *fmt): c_date.tm_isdst = -1 result = malloc(result_len * sizeof(char)) + if result is NULL: + raise MemoryError() strftime(result, result_len, fmt, &c_date) From 0f7ded2a3a637b312f6ad454fd6c0b89d3d3e7aa Mon Sep 17 00:00:00 2001 From: Asish Mahapatra Date: Wed, 20 Mar 2024 13:06:55 -0400 Subject: [PATCH 19/23] BUG: Replace on Series/DataFrame stops replacing after first NA (#57865) * update test for GH#56599 * bug: ser/df.replace only replaces first occurence with NAs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add whatsnew * fmt fix --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- doc/source/whatsnew/v3.0.0.rst | 1 + pandas/core/array_algos/replace.py | 21 +++++++++++---------- pandas/tests/series/methods/test_replace.py | 7 +++++-- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index f3fcdcdb79ed6..8e9c72faf3231 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -298,6 +298,7 @@ Bug fixes - Fixed bug in :meth:`DataFrameGroupBy.apply` that was returning a completely empty DataFrame when all return values of ``func`` were ``None`` instead of returning an empty DataFrame with the original columns and dtypes. (:issue:`57775`) - Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`) - Fixed bug in :meth:`Series.rank` that doesn't preserve missing values for nullable integers when ``na_option='keep'``. (:issue:`56976`) +- Fixed bug in :meth:`Series.replace` and :meth:`DataFrame.replace` inconsistently replacing matching instances when ``regex=True`` and missing values are present. 
(:issue:`56599`) Categorical ^^^^^^^^^^^ diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py index 6cc867c60fd82..f946c5adcbb0b 100644 --- a/pandas/core/array_algos/replace.py +++ b/pandas/core/array_algos/replace.py @@ -93,17 +93,18 @@ def _check_comparison_types( ) # GH#32621 use mask to avoid comparing to NAs - if isinstance(a, np.ndarray): + if isinstance(a, np.ndarray) and mask is not None: a = a[mask] - - result = op(a) - - if isinstance(result, np.ndarray) and mask is not None: - # The shape of the mask can differ to that of the result - # since we may compare only a subset of a's or b's elements - tmp = np.zeros(mask.shape, dtype=np.bool_) - np.place(tmp, mask, result) - result = tmp + result = op(a) + + if isinstance(result, np.ndarray): + # The shape of the mask can differ to that of the result + # since we may compare only a subset of a's or b's elements + tmp = np.zeros(mask.shape, dtype=np.bool_) + np.place(tmp, mask, result) + result = tmp + else: + result = op(a) _check_comparison_types(result, a, b) return result diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index d4ec09332ad97..c7b894e73d0dd 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -616,7 +616,8 @@ def test_replace_with_compiled_regex(self): def test_pandas_replace_na(self): # GH#43344 - ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string") + # GH#56599 + ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA, "AA"], dtype="string") regex_mapping = { "AA": "CC", "BB": "CC", @@ -624,7 +625,9 @@ def test_pandas_replace_na(self): "CC": "CC-REPL", } result = ser.replace(regex_mapping, regex=True) - exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string") + exp = pd.Series( + ["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA, "CC"], dtype="string" + ) tm.assert_series_equal(result, exp) 
@pytest.mark.parametrize( From 303d78bfddef7126388cdd7fb2dd60d2265cf1c8 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Wed, 20 Mar 2024 13:48:17 -0400 Subject: [PATCH 20/23] CLN: Hashtable rename struct members (#57704) Hashtable rename struct members --- pandas/_libs/hashtable.pxd | 2 +- pandas/_libs/hashtable_class_helper.pxi.in | 74 +++++++++++----------- pandas/_libs/intervaltree.pxi.in | 10 +-- 3 files changed, 43 insertions(+), 43 deletions(-) diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd index 22b923580c491..29ace4a339ced 100644 --- a/pandas/_libs/hashtable.pxd +++ b/pandas/_libs/hashtable.pxd @@ -174,7 +174,7 @@ cdef class StringHashTable(HashTable): cdef struct Int64VectorData: int64_t *data - Py_ssize_t n, m + Py_ssize_t size, capacity cdef class Vector: cdef bint external_view_exists diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index e8827c58b5924..f37a32ed61555 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -133,7 +133,7 @@ dtypes = [('Complex128', 'complex128', 'khcomplex128_t'), ctypedef struct {{name}}VectorData: {{c_type}} *data - Py_ssize_t n, m + Py_ssize_t size, capacity {{endif}} @@ -143,8 +143,8 @@ ctypedef struct {{name}}VectorData: cdef void append_data_{{dtype}}({{name}}VectorData *data, {{c_type}} x) noexcept nogil: - data.data[data.n] = x - data.n += 1 + data.data[data.size] = x + data.size += 1 {{endfor}} @@ -164,7 +164,7 @@ ctypedef fused vector_data: StringVectorData cdef bint needs_resize(vector_data *data) noexcept nogil: - return data.n == data.m + return data.size == data.capacity # ---------------------------------------------------------------------- # Vector @@ -209,26 +209,26 @@ cdef class {{name}}Vector(Vector): {{endif}} def __cinit__(self): - self.data.n = 0 - self.data.m = _INIT_VEC_CAP - self.ao = np.empty(self.data.m, dtype=np.{{dtype}}) + self.data.size = 0 + self.data.capacity = 
_INIT_VEC_CAP + self.ao = np.empty(self.data.capacity, dtype=np.{{dtype}}) self.data.data = <{{c_type}}*>self.ao.data cdef resize(self): - self.data.m = max(self.data.m * 4, _INIT_VEC_CAP) - self.ao.resize(self.data.m, refcheck=False) + self.data.capacity = max(self.data.capacity * 4, _INIT_VEC_CAP) + self.ao.resize(self.data.capacity, refcheck=False) self.data.data = <{{c_type}}*>self.ao.data def __len__(self) -> int: - return self.data.n + return self.data.size cpdef ndarray to_array(self): - if self.data.m != self.data.n: + if self.data.capacity != self.data.size: if self.external_view_exists: # should never happen raise ValueError("should have raised on append()") - self.ao.resize(self.data.n, refcheck=False) - self.data.m = self.data.n + self.ao.resize(self.data.size, refcheck=False) + self.data.capacity = self.data.size self.external_view_exists = True return self.ao @@ -254,32 +254,32 @@ cdef class StringVector(Vector): StringVectorData data def __cinit__(self): - self.data.n = 0 - self.data.m = _INIT_VEC_CAP - self.data.data = malloc(self.data.m * sizeof(char *)) + self.data.size = 0 + self.data.capacity = _INIT_VEC_CAP + self.data.data = malloc(self.data.capacity * sizeof(char *)) if self.data.data is NULL: raise MemoryError() cdef resize(self): cdef: char **orig_data - Py_ssize_t i, m + Py_ssize_t i, orig_capacity - m = self.data.m - self.data.m = max(self.data.m * 4, _INIT_VEC_CAP) + orig_capacity = self.data.capacity + self.data.capacity = max(self.data.capacity * 4, _INIT_VEC_CAP) orig_data = self.data.data - self.data.data = malloc(self.data.m * sizeof(char *)) + self.data.data = malloc(self.data.capacity * sizeof(char *)) if self.data.data is NULL: raise MemoryError() - for i in range(m): + for i in range(orig_capacity): self.data.data[i] = orig_data[i] def __dealloc__(self): free(self.data.data) def __len__(self) -> int: - return self.data.n + return self.data.size cpdef ndarray[object, ndim=1] to_array(self): cdef: @@ -287,12 +287,12 @@ cdef class 
StringVector(Vector): Py_ssize_t n object val - ao = np.empty(self.data.n, dtype=object) - for i in range(self.data.n): + ao = np.empty(self.data.size, dtype=object) + for i in range(self.data.size): val = self.data.data[i] ao[i] = val self.external_view_exists = True - self.data.m = self.data.n + self.data.capacity = self.data.size return ao cdef void append(self, char *x) noexcept: @@ -311,37 +311,37 @@ cdef class ObjectVector(Vector): cdef: PyObject **data - Py_ssize_t n, m + Py_ssize_t size, capacity ndarray ao def __cinit__(self): - self.n = 0 - self.m = _INIT_VEC_CAP + self.size = 0 + self.capacity = _INIT_VEC_CAP self.ao = np.empty(_INIT_VEC_CAP, dtype=object) self.data = self.ao.data def __len__(self) -> int: - return self.n + return self.size cdef append(self, object obj): - if self.n == self.m: + if self.size == self.capacity: if self.external_view_exists: raise ValueError("external reference but " "Vector.resize() needed") - self.m = max(self.m * 2, _INIT_VEC_CAP) - self.ao.resize(self.m, refcheck=False) + self.capacity = max(self.capacity * 2, _INIT_VEC_CAP) + self.ao.resize(self.capacity, refcheck=False) self.data = self.ao.data Py_INCREF(obj) - self.data[self.n] = obj - self.n += 1 + self.data[self.size] = obj + self.size += 1 cpdef ndarray[object, ndim=1] to_array(self): - if self.m != self.n: + if self.capacity != self.size: if self.external_view_exists: raise ValueError("should have raised on append()") - self.ao.resize(self.n, refcheck=False) - self.m = self.n + self.ao.resize(self.size, refcheck=False) + self.capacity = self.size self.external_view_exists = True return self.ao diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index a6cec0fb30ecc..b94f60c272e5d 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -145,12 +145,12 @@ cdef class IntervalTree(IntervalMixin): # overflow -> no match, which is already handled below pass - if result.data.n == old_len: + if result.data.size 
== old_len: result.append(-1) - elif result.data.n > old_len + 1: + elif result.data.size > old_len + 1: raise KeyError( 'indexer does not intersect a unique set of intervals') - old_len = result.data.n + old_len = result.data.size return result.to_array().astype('intp') def get_indexer_non_unique(self, ndarray[scalar_t, ndim=1] target): @@ -172,10 +172,10 @@ cdef class IntervalTree(IntervalMixin): # overflow -> no match, which is already handled below pass - if result.data.n == old_len: + if result.data.size == old_len: result.append(-1) missing.append(i) - old_len = result.data.n + old_len = result.data.size return (result.to_array().astype('intp'), missing.to_array().astype('intp')) From 1b849305a3664ab04f510aae45260662061cbbe0 Mon Sep 17 00:00:00 2001 From: jrmylow <33999325+jrmylow@users.noreply.github.com> Date: Thu, 21 Mar 2024 05:59:30 +1100 Subject: [PATCH 21/23] BUG: Negative freq in date_range produces values out of start and endpoints (#56832) * pandas-dev#56147 negative offset and year end interaction * pandas-dev#56147 tests * documentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixing typing/pylint to mirror other branch * moved note to Datetimelike * documentation re-merge * whatsnew update * updated date_range docstring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reformatted docstring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update doc/source/whatsnew/v3.0.0.rst Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- doc/source/whatsnew/v3.0.0.rst | 1 + pandas/core/arrays/datetimes.py | 7 ++++++- pandas/core/indexes/datetimes.py | 16 +++++++++------- 
.../tests/indexes/datetimes/test_date_range.py | 15 +++++++++++++++ 4 files changed, 31 insertions(+), 8 deletions(-) diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 8e9c72faf3231..10d5a518f686d 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -308,6 +308,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - Bug in :func:`date_range` where the last valid timestamp would sometimes not be produced (:issue:`56134`) +- Bug in :func:`date_range` where using a negative frequency value would not include all points between the start and end values (:issue:`56382`) - Timedelta diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index e4862ac1030b6..ad4611aac9e35 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2777,7 +2777,12 @@ def _generate_range( if start and not offset.is_on_offset(start): # Incompatible types in assignment (expression has type "datetime", # variable has type "Optional[Timestamp]") - start = offset.rollforward(start) # type: ignore[assignment] + + # GH #56147 account for negative direction and range bounds + if offset.n >= 0: + start = offset.rollforward(start) # type: ignore[assignment] + else: + start = offset.rollback(start) # type: ignore[assignment] # Unsupported operand types for < ("Timestamp" and "None") if periods is None and end < start and offset.n >= 0: # type: ignore[operator] diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4f9c810cc7e1d..2d773c04b8ea9 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -841,13 +841,15 @@ def date_range( Return a fixed frequency DatetimeIndex. 
Returns the range of equally spaced time points (where the difference between any - two adjacent points is specified by the given frequency) such that they all - satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp., - the first and last time points in that range that fall on the boundary of ``freq`` - (if given as a frequency string) or that are valid for ``freq`` (if given as a - :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``, - ``end``, or ``freq`` is *not* specified, this missing parameter can be computed - given ``periods``, the number of timesteps in the range. See the note below.) + two adjacent points is specified by the given frequency) such that they fall in the + range `[start, end]` , where the first one and the last one are, resp., the first + and last time points in that range that fall on the boundary of ``freq`` (if given + as a frequency string) or that are valid for ``freq`` (if given as a + :class:`pandas.tseries.offsets.DateOffset`). If ``freq`` is positive, the points + satisfy `start <[=] x <[=] end`, and if ``freq`` is negative, the points satisfy + `end <[=] x <[=] start`. (If exactly one of ``start``, ``end``, or ``freq`` is *not* + specified, this missing parameter can be computed given ``periods``, the number of + timesteps in the range. See the note below.) 
Parameters ---------- diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index fecd7f4e7f2b0..ddbeecf150a5e 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -1735,3 +1735,18 @@ def test_date_range_partial_day_year_end(self, unit): freq="YE", ) tm.assert_index_equal(rng, exp) + + def test_date_range_negative_freq_year_end_inbounds(self, unit): + # GH#56147 + rng = date_range( + start="2023-10-31 00:00:00", + end="2021-10-31 00:00:00", + freq="-1YE", + unit=unit, + ) + exp = DatetimeIndex( + ["2022-12-31 00:00:00", "2021-12-31 00:00:00"], + dtype=f"M8[{unit}]", + freq="-1YE", + ) + tm.assert_index_equal(rng, exp) From 825dfe43b0d627b077e7368ceea0d3992033da86 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 20 Mar 2024 11:00:43 -1000 Subject: [PATCH 22/23] REF: Clean up concat statefullness and validation (#57933) * REF: Clean up concat statefullness and validation * Use DataFrame again * Ignore false positive mypy? 
--- pandas/core/reshape/concat.py | 229 ++++++++++++++++------------------ 1 file changed, 110 insertions(+), 119 deletions(-) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 1f0fe0542a0c0..35a08e0167924 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -17,10 +17,7 @@ from pandas.util._decorators import cache_readonly -from pandas.core.dtypes.common import ( - is_bool, - is_iterator, -) +from pandas.core.dtypes.common import is_bool from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, @@ -423,11 +420,12 @@ def __init__( self.ignore_index = ignore_index self.verify_integrity = verify_integrity - objs, keys = self._clean_keys_and_objs(objs, keys) + objs, keys, ndims = _clean_keys_and_objs(objs, keys) - # figure out what our result ndim is going to be - ndims = self._get_ndims(objs) - sample, objs = self._get_sample_object(objs, ndims, keys, names, levels) + # select an object to be our result reference + sample, objs = _get_sample_object( + objs, ndims, keys, names, levels, self.intersect + ) # Standardize axis parameter to int if sample.ndim == 1: @@ -458,100 +456,6 @@ def __init__( self.names = names or getattr(keys, "names", None) self.levels = levels - def _get_ndims(self, objs: list[Series | DataFrame]) -> set[int]: - # figure out what our result ndim is going to be - ndims = set() - for obj in objs: - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - msg = ( - f"cannot concatenate object of type '{type(obj)}'; " - "only Series and DataFrame objs are valid" - ) - raise TypeError(msg) - - ndims.add(obj.ndim) - return ndims - - def _clean_keys_and_objs( - self, - objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], - keys, - ) -> tuple[list[Series | DataFrame], Index | None]: - if isinstance(objs, abc.Mapping): - if keys is None: - keys = list(objs.keys()) - objs_list = [objs[k] for k in keys] - else: - objs_list = 
list(objs) - - if len(objs_list) == 0: - raise ValueError("No objects to concatenate") - - if keys is None: - objs_list = list(com.not_none(*objs_list)) - else: - # GH#1649 - key_indices = [] - clean_objs = [] - if is_iterator(keys): - keys = list(keys) - if len(keys) != len(objs_list): - # GH#43485 - raise ValueError( - f"The length of the keys ({len(keys)}) must match " - f"the length of the objects to concatenate ({len(objs_list)})" - ) - for i, obj in enumerate(objs_list): - if obj is not None: - key_indices.append(i) - clean_objs.append(obj) - objs_list = clean_objs - - if not isinstance(keys, Index): - keys = Index(keys) - - if len(key_indices) < len(keys): - keys = keys.take(key_indices) - - if len(objs_list) == 0: - raise ValueError("All objects passed were None") - - return objs_list, keys - - def _get_sample_object( - self, - objs: list[Series | DataFrame], - ndims: set[int], - keys, - names, - levels, - ) -> tuple[Series | DataFrame, list[Series | DataFrame]]: - # get the sample - # want the highest ndim that we have, and must be non-empty - # unless all objs are empty - sample: Series | DataFrame | None = None - if len(ndims) > 1: - max_ndim = max(ndims) - for obj in objs: - if obj.ndim == max_ndim and np.sum(obj.shape): - sample = obj - break - - else: - # filter out the empties if we have not multi-index possibilities - # note to keep empty Series as it affect to result columns / name - non_empties = [obj for obj in objs if sum(obj.shape) > 0 or obj.ndim == 1] - - if len(non_empties) and ( - keys is None and names is None and levels is None and not self.intersect - ): - objs = non_empties - sample = objs[0] - - if sample is None: - sample = objs[0] - return sample, objs - def _sanitize_mixed_ndim( self, objs: list[Series | DataFrame], @@ -664,29 +568,24 @@ def get_result(self): out = sample._constructor_from_mgr(new_data, axes=new_data.axes) return out.__finalize__(self, method="concat") - def _get_result_dim(self) -> int: - if self._is_series and 
self.bm_axis == 1:
-            return 2
-        else:
-            return self.objs[0].ndim
-
     @cache_readonly
     def new_axes(self) -> list[Index]:
-        ndim = self._get_result_dim()
+        if self._is_series and self.bm_axis == 1:
+            ndim = 2
+        else:
+            ndim = self.objs[0].ndim
         return [
-            self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i)
+            self._get_concat_axis
+            if i == self.bm_axis
+            else get_objs_combined_axis(
+                self.objs,
+                axis=self.objs[0]._get_block_manager_axis(i),
+                intersect=self.intersect,
+                sort=self.sort,
+            )
             for i in range(ndim)
         ]
 
-    def _get_comb_axis(self, i: AxisInt) -> Index:
-        data_axis = self.objs[0]._get_block_manager_axis(i)
-        return get_objs_combined_axis(
-            self.objs,
-            axis=data_axis,
-            intersect=self.intersect,
-            sort=self.sort,
-        )
-
     @cache_readonly
     def _get_concat_axis(self) -> Index:
         """
@@ -747,6 +646,98 @@ def _maybe_check_integrity(self, concat_index: Index) -> None:
             raise ValueError(f"Indexes have overlapping values: {overlap}")
 
 
+def _clean_keys_and_objs(
+    objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame],
+    keys,
+) -> tuple[list[Series | DataFrame], Index | None, set[int]]:
+    """
+    Returns
+    -------
+    clean_objs : list[Series | DataFrame]
+        List of DataFrame and Series with Nones removed.
+    keys : Index | None
+        None if keys was None
+        Index if objs was a Mapping or keys was not None. Filtered where objs was None.
+    ndim : set[int]
+        Unique .ndim attribute of obj encountered.
+    """
+    if isinstance(objs, abc.Mapping):
+        if keys is None:
+            keys = objs.keys()
+        objs_list = [objs[k] for k in keys]
+    else:
+        objs_list = list(objs)
+
+    if len(objs_list) == 0:
+        raise ValueError("No objects to concatenate")
+
+    if keys is not None:
+        if not isinstance(keys, Index):
+            keys = Index(keys)
+        if len(keys) != len(objs_list):
+            # GH#43485
+            raise ValueError(
+                f"The length of the keys ({len(keys)}) must match "
+                f"the length of the objects to concatenate ({len(objs_list)})"
+            )
+
+    # GH#1649
+    key_indices = []
+    clean_objs = []
+    ndims = set()
+    for i, obj in enumerate(objs_list):
+        if obj is None:
+            continue
+        elif isinstance(obj, (ABCSeries, ABCDataFrame)):
+            key_indices.append(i)
+            clean_objs.append(obj)
+            ndims.add(obj.ndim)
+        else:
+            msg = (
+                f"cannot concatenate object of type '{type(obj)}'; "
+                "only Series and DataFrame objs are valid"
+            )
+            raise TypeError(msg)
+
+    if keys is not None and len(key_indices) < len(keys):
+        keys = keys.take(key_indices)
+
+    if len(clean_objs) == 0:
+        raise ValueError("All objects passed were None")
+
+    return clean_objs, keys, ndims
+
+
+def _get_sample_object(
+    objs: list[Series | DataFrame],
+    ndims: set[int],
+    keys,
+    names,
+    levels,
+    intersect: bool,
+) -> tuple[Series | DataFrame, list[Series | DataFrame]]:
+    # get the sample
+    # want the highest ndim that we have, and must be non-empty
+    # unless all objs are empty
+    if len(ndims) > 1:
+        max_ndim = max(ndims)
+        for obj in objs:
+            if obj.ndim == max_ndim and sum(obj.shape):  # type: ignore[arg-type]
+                return obj, objs
+    elif keys is None and names is None and levels is None and not intersect:
+        # filter out the empties if we do not have multi-index possibilities
+        # note to keep empty Series as it affects the result columns / name
+        if ndims.pop() == 2:
+            non_empties = [obj for obj in objs if sum(obj.shape)]
+        else:
+            non_empties = objs
+
+        if len(non_empties):
+            return non_empties[0], non_empties
+
+    return objs[0], objs
+
+
 def _concat_indexes(indexes) -> 
Index: return indexes[0].append(indexes[1:]) From 710720e6555c779a6539354ebae59b1a649cebb3 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Wed, 20 Mar 2024 21:10:55 +0000 Subject: [PATCH 23/23] BUG: PyArrow dtypes were not supported in the interchange protocol (#57764) * fix pyarrow interchange * reduce diff * reduce diff * start simplifying * simplify, remove is_validity arg * remove unnecessary branch * doc maybe_rechunk * mypy * extra test * mark _col unused, assert rechunking did not modify original df * declare buffer: Buffer outside of if/else branch --- doc/source/whatsnew/v2.2.2.rst | 4 +- pandas/core/interchange/buffer.py | 58 ++++++++ pandas/core/interchange/column.py | 68 +++++++-- pandas/core/interchange/dataframe.py | 5 + pandas/core/interchange/from_dataframe.py | 17 ++- pandas/core/interchange/utils.py | 28 ++++ pandas/tests/interchange/test_impl.py | 162 +++++++++++++++++++--- 7 files changed, 303 insertions(+), 39 deletions(-) diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst index 96f210ce6b7b9..54084abab7817 100644 --- a/doc/source/whatsnew/v2.2.2.rst +++ b/doc/source/whatsnew/v2.2.2.rst @@ -14,6 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pandas nullable on with missing values (:issue:`56702`) +- :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pyarrow nullable on with missing values (:issue:`57664`) - .. 
---------------------------------------------------------------------------
@@ -21,7 +22,8 @@ Fixed regressions
 Bug fixes
 ~~~~~~~~~
--
+- :meth:`DataFrame.__dataframe__` was showing bytemask instead of bitmask for ``'string[pyarrow]'`` validity buffer (:issue:`57762`)
+- :meth:`DataFrame.__dataframe__` was showing non-null validity buffer (instead of ``None``) for ``'string[pyarrow]'`` without missing values (:issue:`57761`)
 
 .. ---------------------------------------------------------------------------
 .. _whatsnew_222.other:
diff --git a/pandas/core/interchange/buffer.py b/pandas/core/interchange/buffer.py
index 5c97fc17d7070..5d24325e67f62 100644
--- a/pandas/core/interchange/buffer.py
+++ b/pandas/core/interchange/buffer.py
@@ -12,6 +12,7 @@
 
 if TYPE_CHECKING:
     import numpy as np
+    import pyarrow as pa
 
 
 class PandasBuffer(Buffer):
@@ -76,3 +77,60 @@ def __repr__(self) -> str:
             )
             + ")"
         )
+
+
+class PandasBufferPyarrow(Buffer):
+    """
+    Data in the buffer is guaranteed to be contiguous in memory.
+    """
+
+    def __init__(
+        self,
+        buffer: pa.Buffer,
+        *,
+        length: int,
+    ) -> None:
+        """
+        Handle pyarrow chunked arrays.
+        """
+        self._buffer = buffer
+        self._length = length
+
+    @property
+    def bufsize(self) -> int:
+        """
+        Buffer size in bytes.
+        """
+        return self._buffer.size
+
+    @property
+    def ptr(self) -> int:
+        """
+        Pointer to start of the buffer as an integer.
+        """
+        return self._buffer.address
+
+    def __dlpack__(self) -> Any:
+        """
+        Represent this structure as DLPack interface.
+        """
+        raise NotImplementedError()
+
+    def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
+        """
+        Device type and device ID for where the data in the buffer resides.
+ """ + return (DlpackDeviceType.CPU, None) + + def __repr__(self) -> str: + return ( + "PandasBuffer[pyarrow](" + + str( + { + "bufsize": self.bufsize, + "ptr": self.ptr, + "device": "CPU", + } + ) + + ")" + ) diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py index bf20f0b5433cd..c27a9d8141712 100644 --- a/pandas/core/interchange/column.py +++ b/pandas/core/interchange/column.py @@ -1,6 +1,9 @@ from __future__ import annotations -from typing import Any +from typing import ( + TYPE_CHECKING, + Any, +) import numpy as np @@ -9,15 +12,18 @@ from pandas.errors import NoBufferPresent from pandas.util._decorators import cache_readonly -from pandas.core.dtypes.dtypes import ( +from pandas.core.dtypes.dtypes import BaseMaskedDtype + +import pandas as pd +from pandas import ( ArrowDtype, - BaseMaskedDtype, DatetimeTZDtype, ) - -import pandas as pd from pandas.api.types import is_string_dtype -from pandas.core.interchange.buffer import PandasBuffer +from pandas.core.interchange.buffer import ( + PandasBuffer, + PandasBufferPyarrow, +) from pandas.core.interchange.dataframe_protocol import ( Column, ColumnBuffers, @@ -30,6 +36,9 @@ dtype_to_arrow_c_fmt, ) +if TYPE_CHECKING: + from pandas.core.interchange.dataframe_protocol import Buffer + _NP_KINDS = { "i": DtypeKind.INT, "u": DtypeKind.UINT, @@ -157,6 +166,16 @@ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]: else: byteorder = dtype.byteorder + if dtype == "bool[pyarrow]": + # return early to avoid the `* 8` below, as this is a bitmask + # rather than a bytemask + return ( + kind, + dtype.itemsize, # pyright: ignore[reportAttributeAccessIssue] + ArrowCTypes.BOOL, + byteorder, + ) + return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder @property @@ -194,6 +213,12 @@ def describe_null(self): column_null_dtype = ColumnNullType.USE_BYTEMASK null_value = 1 return column_null_dtype, null_value + if isinstance(self._col.dtype, ArrowDtype): + # We 
already rechunk (if necessary / allowed) upon initialization, so this + # is already single-chunk by the time we get here. + if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined] + return ColumnNullType.NON_NULLABLE, None + return ColumnNullType.USE_BITMASK, 0 kind = self.dtype[0] try: null, value = _NULL_DESCRIPTION[kind] @@ -278,10 +303,11 @@ def get_buffers(self) -> ColumnBuffers: def _get_data_buffer( self, - ) -> tuple[PandasBuffer, Any]: # Any is for self.dtype tuple + ) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]: """ Return the buffer containing the data and the buffer's associated dtype. """ + buffer: Buffer if self.dtype[0] == DtypeKind.DATETIME: # self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make # it longer than 4 characters @@ -302,15 +328,22 @@ def _get_data_buffer( DtypeKind.FLOAT, DtypeKind.BOOL, ): + dtype = self.dtype arr = self._col.array + if isinstance(self._col.dtype, ArrowDtype): + # We already rechunk (if necessary / allowed) upon initialization, so + # this is already single-chunk by the time we get here. 
+ arr = arr._pa_array.chunks[0] # type: ignore[attr-defined] + buffer = PandasBufferPyarrow( + arr.buffers()[1], # type: ignore[attr-defined] + length=len(arr), + ) + return buffer, dtype if isinstance(self._col.dtype, BaseMaskedDtype): np_arr = arr._data # type: ignore[attr-defined] - elif isinstance(self._col.dtype, ArrowDtype): - raise NotImplementedError("ArrowDtype not handled yet") else: np_arr = arr._ndarray # type: ignore[attr-defined] buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy) - dtype = self.dtype elif self.dtype[0] == DtypeKind.CATEGORICAL: codes = self._col.values._codes buffer = PandasBuffer(codes, allow_copy=self._allow_copy) @@ -343,13 +376,26 @@ def _get_data_buffer( return buffer, dtype - def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]: + def _get_validity_buffer(self) -> tuple[Buffer, Any] | None: """ Return the buffer containing the mask values indicating missing data and the buffer's associated dtype. Raises NoBufferPresent if null representation is not a bit or byte mask. """ null, invalid = self.describe_null + buffer: Buffer + if isinstance(self._col.dtype, ArrowDtype): + # We already rechunk (if necessary / allowed) upon initialization, so this + # is already single-chunk by the time we get here. 
+ arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined] + dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE) + if arr.buffers()[0] is None: + return None + buffer = PandasBufferPyarrow( + arr.buffers()[0], + length=len(arr), + ) + return buffer, dtype if isinstance(self._col.dtype, BaseMaskedDtype): mask = self._col.array._mask # type: ignore[attr-defined] diff --git a/pandas/core/interchange/dataframe.py b/pandas/core/interchange/dataframe.py index 1ffe0e8e8dbb0..1abacddfc7e3b 100644 --- a/pandas/core/interchange/dataframe.py +++ b/pandas/core/interchange/dataframe.py @@ -5,6 +5,7 @@ from pandas.core.interchange.column import PandasColumn from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg +from pandas.core.interchange.utils import maybe_rechunk if TYPE_CHECKING: from collections.abc import ( @@ -34,6 +35,10 @@ def __init__(self, df: DataFrame, allow_copy: bool = True) -> None: """ self._df = df.rename(columns=str, copy=False) self._allow_copy = allow_copy + for i, _col in enumerate(self._df.columns): + rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy) + if rechunked is not None: + self._df.isetitem(i, rechunked) def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py index a952887d7eed2..4575837fb12fc 100644 --- a/pandas/core/interchange/from_dataframe.py +++ b/pandas/core/interchange/from_dataframe.py @@ -298,13 +298,14 @@ def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: null_pos = None if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): - assert buffers["validity"], "Validity buffers cannot be empty for masks" - valid_buff, valid_dtype = buffers["validity"] - null_pos = buffer_to_ndarray( - valid_buff, valid_dtype, offset=col.offset, length=col.size() - ) - if sentinel_val == 0: - null_pos = ~null_pos + validity = 
buffers["validity"]
+        if validity is not None:
+            valid_buff, valid_dtype = validity
+            null_pos = buffer_to_ndarray(
+                valid_buff, valid_dtype, offset=col.offset, length=col.size()
+            )
+            if sentinel_val == 0:
+                null_pos = ~null_pos
 
     # Assemble the strings from the code units
     str_list: list[None | float | str] = [None] * col.size()
@@ -516,6 +517,8 @@ def set_nulls(
     np.ndarray or pd.Series
         Data with the nulls being set.
     """
+    if validity is None:
+        return data
     null_kind, sentinel_val = col.describe_null
     null_pos = None
 
diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py
index 2e73e560e5740..2a19dd5046aa3 100644
--- a/pandas/core/interchange/utils.py
+++ b/pandas/core/interchange/utils.py
@@ -16,6 +16,8 @@
     DatetimeTZDtype,
 )
 
+import pandas as pd
+
 if typing.TYPE_CHECKING:
     from pandas._typing import DtypeObj
 
@@ -145,3 +147,29 @@ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
     raise NotImplementedError(
         f"Conversion of {dtype} to Arrow C format string is not implemented."
     )
+
+
+def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None:
+    """
+    Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary.
+
+    - Returns `None` if the input series is not backed by a multi-chunk pyarrow array
+      (and so doesn't need rechunking)
+    - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk
+      pyarrow array and `allow_copy` is `True`.
+    - Raises a `RuntimeError` if `allow_copy` is `False` and input is
+      backed by a multi-chunk pyarrow array.
+    """
+    if not isinstance(series.dtype, pd.ArrowDtype):
+        return None
+    chunked_array = series.array._pa_array  # type: ignore[attr-defined]
+    if len(chunked_array.chunks) == 1:
+        return None
+    if not allow_copy:
+        raise RuntimeError(
+            "Found multi-chunk pyarrow array, but `allow_copy` is False. "
+            "Please rechunk the array before calling this function, or set "
+            "`allow_copy=True`."
+ ) + arr = chunked_array.combine_chunks() + return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index) diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py index 94b2da894ad0f..83574e8630d6f 100644 --- a/pandas/tests/interchange/test_impl.py +++ b/pandas/tests/interchange/test_impl.py @@ -1,4 +1,7 @@ -from datetime import datetime +from datetime import ( + datetime, + timezone, +) import numpy as np import pytest @@ -291,6 +294,27 @@ def test_multi_chunk_pyarrow() -> None: pd.api.interchange.from_dataframe(table, allow_copy=False) +def test_multi_chunk_column() -> None: + pytest.importorskip("pyarrow", "11.0.0") + ser = pd.Series([1, 2, None], dtype="Int64[pyarrow]") + df = pd.concat([ser, ser], ignore_index=True).to_frame("a") + df_orig = df.copy() + with pytest.raises( + RuntimeError, match="Found multi-chunk pyarrow array, but `allow_copy` is False" + ): + pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=False)) + result = pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=True)) + # Interchange protocol defaults to creating numpy-backed columns, so currently this + # is 'float64'. + expected = pd.DataFrame({"a": [1.0, 2.0, None, 1.0, 2.0, None]}, dtype="float64") + tm.assert_frame_equal(result, expected) + + # Check that the rechunking we did didn't modify the original DataFrame. 
+ tm.assert_frame_equal(df, df_orig) + assert len(df["a"].array._pa_array.chunks) == 2 + assert len(df_orig["a"].array._pa_array.chunks) == 2 + + def test_timestamp_ns_pyarrow(): # GH 56712 pytest.importorskip("pyarrow", "11.0.0") @@ -416,42 +440,60 @@ def test_non_str_names_w_duplicates(): pd.api.interchange.from_dataframe(dfi, allow_copy=False) -def test_nullable_integers() -> None: - # https://github.com/pandas-dev/pandas/issues/55069 - df = pd.DataFrame({"a": [1]}, dtype="Int8") - expected = pd.DataFrame({"a": [1]}, dtype="int8") - result = pd.api.interchange.from_dataframe(df.__dataframe__()) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/57664") -def test_nullable_integers_pyarrow() -> None: - # https://github.com/pandas-dev/pandas/issues/55069 - df = pd.DataFrame({"a": [1]}, dtype="Int8[pyarrow]") - expected = pd.DataFrame({"a": [1]}, dtype="int8") - result = pd.api.interchange.from_dataframe(df.__dataframe__()) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( ("data", "dtype", "expected_dtype"), [ ([1, 2, None], "Int64", "int64"), + ([1, 2, None], "Int64[pyarrow]", "int64"), + ([1, 2, None], "Int8", "int8"), + ([1, 2, None], "Int8[pyarrow]", "int8"), ( [1, 2, None], "UInt64", "uint64", ), + ( + [1, 2, None], + "UInt64[pyarrow]", + "uint64", + ), ([1.0, 2.25, None], "Float32", "float32"), + ([1.0, 2.25, None], "Float32[pyarrow]", "float32"), + ([True, False, None], "boolean[pyarrow]", "bool"), + (["much ado", "about", None], "string[pyarrow_numpy]", "large_string"), + (["much ado", "about", None], "string[pyarrow]", "large_string"), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), None], + "timestamp[ns][pyarrow]", + "timestamp[ns]", + ), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), None], + "timestamp[us][pyarrow]", + "timestamp[us]", + ), + ( + [ + datetime(2020, 1, 1, tzinfo=timezone.utc), + datetime(2020, 1, 2, tzinfo=timezone.utc), + None, + ], + 
"timestamp[us, Asia/Kathmandu][pyarrow]", + "timestamp[us, tz=Asia/Kathmandu]", + ), ], ) -def test_pandas_nullable_w_missing_values( +def test_pandas_nullable_with_missing_values( data: list, dtype: str, expected_dtype: str ) -> None: # https://github.com/pandas-dev/pandas/issues/57643 - pytest.importorskip("pyarrow", "11.0.0") + # https://github.com/pandas-dev/pandas/issues/57664 + pa = pytest.importorskip("pyarrow", "11.0.0") import pyarrow.interchange as pai + if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]": + expected_dtype = pa.timestamp("us", "Asia/Kathmandu") + df = pd.DataFrame({"a": data}, dtype=dtype) result = pai.from_dataframe(df.__dataframe__())["a"] assert result.type == expected_dtype @@ -460,6 +502,86 @@ def test_pandas_nullable_w_missing_values( assert result[2].as_py() is None +@pytest.mark.parametrize( + ("data", "dtype", "expected_dtype"), + [ + ([1, 2, 3], "Int64", "int64"), + ([1, 2, 3], "Int64[pyarrow]", "int64"), + ([1, 2, 3], "Int8", "int8"), + ([1, 2, 3], "Int8[pyarrow]", "int8"), + ( + [1, 2, 3], + "UInt64", + "uint64", + ), + ( + [1, 2, 3], + "UInt64[pyarrow]", + "uint64", + ), + ([1.0, 2.25, 5.0], "Float32", "float32"), + ([1.0, 2.25, 5.0], "Float32[pyarrow]", "float32"), + ([True, False, False], "boolean[pyarrow]", "bool"), + (["much ado", "about", "nothing"], "string[pyarrow_numpy]", "large_string"), + (["much ado", "about", "nothing"], "string[pyarrow]", "large_string"), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)], + "timestamp[ns][pyarrow]", + "timestamp[ns]", + ), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)], + "timestamp[us][pyarrow]", + "timestamp[us]", + ), + ( + [ + datetime(2020, 1, 1, tzinfo=timezone.utc), + datetime(2020, 1, 2, tzinfo=timezone.utc), + datetime(2020, 1, 3, tzinfo=timezone.utc), + ], + "timestamp[us, Asia/Kathmandu][pyarrow]", + "timestamp[us, tz=Asia/Kathmandu]", + ), + ], +) +def test_pandas_nullable_without_missing_values( + data: list, 
dtype: str, expected_dtype: str +) -> None: + # https://github.com/pandas-dev/pandas/issues/57643 + pa = pytest.importorskip("pyarrow", "11.0.0") + import pyarrow.interchange as pai + + if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]": + expected_dtype = pa.timestamp("us", "Asia/Kathmandu") + + df = pd.DataFrame({"a": data}, dtype=dtype) + result = pai.from_dataframe(df.__dataframe__())["a"] + assert result.type == expected_dtype + assert result[0].as_py() == data[0] + assert result[1].as_py() == data[1] + assert result[2].as_py() == data[2] + + +def test_string_validity_buffer() -> None: + # https://github.com/pandas-dev/pandas/issues/57761 + pytest.importorskip("pyarrow", "11.0.0") + df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]") + result = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"] + assert result is None + + +def test_string_validity_buffer_no_missing() -> None: + # https://github.com/pandas-dev/pandas/issues/57762 + pytest.importorskip("pyarrow", "11.0.0") + df = pd.DataFrame({"a": ["x", None]}, dtype="large_string[pyarrow]") + validity = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"] + assert validity is not None + result = validity[1] + expected = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, "=") + assert result == expected + + def test_empty_dataframe(): # https://github.com/pandas-dev/pandas/issues/56700 df = pd.DataFrame({"a": []}, dtype="int8")