Skip to content

Commit

Permalink
TST: Fail on warning (pandas-dev#22699)
Browse files Browse the repository at this point in the history
  • Loading branch information
TomAugspurger authored and aeltanawy committed Sep 20, 2018
1 parent d64c0a8 commit 0ba7b16
Show file tree
Hide file tree
Showing 106 changed files with 2,677 additions and 2,298 deletions.
4 changes: 2 additions & 2 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ matrix:
# In allow_failures
- dist: trusty
env:
- JOB="3.6, NumPy dev" ENV_FILE="ci/travis-36-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
- JOB="3.7, NumPy dev" ENV_FILE="ci/travis-37-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network -W error" PANDAS_TESTING_MODE="deprecate"
addons:
apt:
packages:
Expand All @@ -79,7 +79,7 @@ matrix:
- JOB="3.6, slow" ENV_FILE="ci/travis-36-slow.yaml" SLOW=true
- dist: trusty
env:
- JOB="3.6, NumPy dev" ENV_FILE="ci/travis-36-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
- JOB="3.7, NumPy dev" ENV_FILE="ci/travis-37-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network -W error" PANDAS_TESTING_MODE="deprecate"
addons:
apt:
packages:
Expand Down
2 changes: 1 addition & 1 deletion ci/travis-36-numpydev.yaml → ci/travis-37-numpydev.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ name: pandas
channels:
- defaults
dependencies:
- python=3.6*
- python=3.7*
- pytz
- Cython>=0.28.2
# universal
Expand Down
57 changes: 57 additions & 0 deletions doc/source/contributing.rst
Original file line number Diff line number Diff line change
Expand Up @@ -632,6 +632,14 @@ Otherwise, you need to do it manually:
warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2)
new_func()
You'll also need to

1. write a new test that asserts a warning is issued when calling with the deprecated argument
2. Update all of pandas existing tests and code to use the new argument

See :ref:`contributing.warnings` for more.


.. _contributing.ci:

Testing With Continuous Integration
Expand Down Expand Up @@ -859,6 +867,55 @@ preferred if the inputs or logic are simple, with Hypothesis tests reserved
for cases with complex logic or where there are too many combinations of
options or subtle interactions to test (or think of!) all of them.

.. _contributing.warnings:

Testing Warnings
~~~~~~~~~~~~~~~~

By default, one of pandas CI workers will fail if any unhandled warnings are emitted.

If your change involves checking that a warning is actually emitted, use
``tm.assert_produces_warning(ExpectedWarning)``.


.. code-block:: python
with tm.assert_produces_warning(FutureWarning):
df.some_operation()
We prefer this to the ``pytest.warns`` context manager because ours checks that the warning's
stacklevel is set correctly. The stacklevel is what ensures that the *user's* file name and line number
is printed in the warning, rather than something internal to pandas. It represents the number of
function calls from user code (e.g. ``df.some_operation()``) to the function that actually emits
the warning. Our linter will fail the build if you use ``pytest.warns`` in a test.

If you have a test that would emit a warning, but you aren't actually testing the
warning itself (say because it's going to be removed in the future, or because we're
matching a 3rd-party library's behavior), then use ``pytest.mark.filterwarnings`` to
ignore the warning.

.. code-block:: python
@pytest.mark.filterwarnings("ignore:msg:category")
def test_thing(self):
...
If the test generates a warning of class ``category`` whose message starts
with ``msg``, the warning will be ignored and the test will pass.

If you need finer-grained control, you can use Python's usual
`warnings module <https://docs.python.org/3/library/warnings.html>`__
to control whether a warning is ignored / raised at different places within
a single test.

.. code-block:: python
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
# Or use warnings.filterwarnings(...)
Alternatively, consider breaking up the unit test.


Running the test suite
----------------------
Expand Down
12 changes: 12 additions & 0 deletions pandas/compat/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
import struct
import inspect
from collections import namedtuple
import collections

PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] >= 3
Expand Down Expand Up @@ -135,6 +136,11 @@ def lfilter(*args, **kwargs):

from importlib import reload
reload = reload
Hashable = collections.abc.Hashable
Iterable = collections.abc.Iterable
Mapping = collections.abc.Mapping
Sequence = collections.abc.Sequence
Sized = collections.abc.Sized

else:
# Python 2
Expand Down Expand Up @@ -190,6 +196,12 @@ def get_range_parameters(data):

reload = builtins.reload

Hashable = collections.Hashable
Iterable = collections.Iterable
Mapping = collections.Mapping
Sequence = collections.Sequence
Sized = collections.Sized

if PY2:
def iteritems(obj, **kw):
return obj.iteritems(**kw)
Expand Down
9 changes: 8 additions & 1 deletion pandas/compat/chainmap_impl.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,11 @@
from collections import MutableMapping
import sys

PY3 = sys.version_info[0] >= 3

if PY3:
from collections.abc import MutableMapping
else:
from collections import MutableMapping

try:
from thread import get_ident
Expand Down
5 changes: 3 additions & 2 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
intended for public consumption
"""
from __future__ import division
from warnings import warn, catch_warnings
from warnings import warn, catch_warnings, simplefilter
from textwrap import dedent

import numpy as np
Expand Down Expand Up @@ -91,7 +91,8 @@ def _ensure_data(values, dtype=None):

# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings(record=True):
with catch_warnings():
simplefilter("ignore", np.ComplexWarning)
values = ensure_float64(values)
return values, 'float64', 'float64'

Expand Down
1 change: 1 addition & 0 deletions pandas/core/arrays/datetimelike.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ def cmp_method(self, other):
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))

Expand Down
10 changes: 8 additions & 2 deletions pandas/core/arrays/integer.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

from pandas._libs.lib import infer_dtype
from pandas.util._decorators import cache_readonly
from pandas.compat import u, range
from pandas.compat import u, range, string_types
from pandas.compat import set_function_name

from pandas.core.dtypes.cast import astype_nansafe
Expand Down Expand Up @@ -147,6 +147,11 @@ def coerce_to_array(values, dtype, mask=None, copy=False):
dtype = values.dtype

if dtype is not None:
if (isinstance(dtype, string_types) and
(dtype.startswith("Int") or dtype.startswith("UInt"))):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = _dtypes[str(np.dtype(dtype))]
Expand Down Expand Up @@ -507,7 +512,8 @@ def cmp_method(self, other):

# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all='ignore'):
result = op(self._data, other)

Expand Down
2 changes: 1 addition & 1 deletion pandas/core/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -356,7 +356,7 @@ def standardize_mapping(into):
return partial(
collections.defaultdict, into.default_factory)
into = type(into)
if not issubclass(into, collections.Mapping):
if not issubclass(into, compat.Mapping):
raise TypeError('unsupported type: {into}'.format(into=into))
elif into == collections.defaultdict:
raise TypeError(
Expand Down
1 change: 1 addition & 0 deletions pandas/core/computation/eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,7 @@ def eval(expr, parser='pandas', engine=None, truediv=True,
# to use a non-numeric indexer
try:
with warnings.catch_warnings(record=True):
# TODO: Filter the warnings we actually care about here.
target[assigner] = ret
except (TypeError, IndexError):
raise ValueError("Cannot assign expression output to target")
Expand Down
7 changes: 3 additions & 4 deletions pandas/core/dtypes/inference.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
""" basic inference routines """

import collections
import re
import numpy as np
from collections import Iterable
from numbers import Number
from pandas import compat
from pandas.compat import (PY2, string_types, text_type,
string_and_binary_types, re_type)
from pandas._libs import lib
Expand Down Expand Up @@ -112,7 +111,7 @@ def _iterable_not_string(obj):
False
"""

return (isinstance(obj, collections.Iterable) and
return (isinstance(obj, compat.Iterable) and
not isinstance(obj, string_types))


Expand Down Expand Up @@ -284,7 +283,7 @@ def is_list_like(obj):
False
"""

return (isinstance(obj, Iterable) and
return (isinstance(obj, compat.Iterable) and
# we do not count strings/unicode/bytes as list-like
not isinstance(obj, string_and_binary_types) and
# exclude zero-dimensional numpy arrays, effectively scalars
Expand Down
6 changes: 3 additions & 3 deletions pandas/core/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -417,9 +417,9 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=copy)

# For data is list-like, or Iterable (will consume into list)
elif (isinstance(data, collections.Iterable)
elif (isinstance(data, compat.Iterable)
and not isinstance(data, string_and_binary_types)):
if not isinstance(data, collections.Sequence):
if not isinstance(data, compat.Sequence):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
Expand Down Expand Up @@ -7640,7 +7640,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
elif isinstance(data[0], compat.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/groupby/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -758,7 +758,7 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)

if isinstance(func_or_funcs, collections.Iterable):
if isinstance(func_or_funcs, compat.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
ret = self._aggregate_multiple_funcs(func_or_funcs,
Expand Down
1 change: 1 addition & 0 deletions pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@ def cmp_method(self, other):
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))

Expand Down
1 change: 1 addition & 0 deletions pandas/core/internals/blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -3490,6 +3490,7 @@ def _putmask_smart(v, m, n):

# we ignore ComplexWarning here
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", np.ComplexWarning)
nn_at = nn.astype(v.dtype)

# avoid invalid dtype comparisons
Expand Down
5 changes: 2 additions & 3 deletions pandas/core/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201

import collections
import warnings
from textwrap import dedent

Expand Down Expand Up @@ -240,8 +239,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
raise TypeError("{0!r} type is unordered"
"".format(data.__class__.__name__))
# If data is Iterable but not list-like, consume into list.
elif (isinstance(data, collections.Iterable)
and not isinstance(data, collections.Sized)):
elif (isinstance(data, compat.Iterable)
and not isinstance(data, compat.Sized)):
data = list(data)
else:

Expand Down
2 changes: 2 additions & 0 deletions pandas/core/window.py
Original file line number Diff line number Diff line change
Expand Up @@ -2387,11 +2387,13 @@ def dataframe_from_int_dict(data, frame_template):
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X

with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
Expand Down
2 changes: 2 additions & 0 deletions pandas/io/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -386,6 +386,8 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None,
# ZIP Compression
elif compression == 'zip':
zf = BytesZipFile(path_or_buf, mode)
# Ensure the container is closed as well.
handles.append(zf)
if zf.mode == 'w':
f = zf
elif zf.mode == 'r':
Expand Down
4 changes: 2 additions & 2 deletions pandas/io/html.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,14 @@
import os
import re
import numbers
import collections

from distutils.version import LooseVersion

from pandas.core.dtypes.common import is_list_like
from pandas.errors import EmptyDataError
from pandas.io.common import _is_url, urlopen, _validate_header_arg
from pandas.io.parsers import TextParser
from pandas import compat
from pandas.compat import (lrange, lmap, u, string_types, iteritems,
raise_with_traceback, binary_type)
from pandas import Series
Expand Down Expand Up @@ -859,7 +859,7 @@ def _validate_flavor(flavor):
flavor = 'lxml', 'bs4'
elif isinstance(flavor, string_types):
flavor = flavor,
elif isinstance(flavor, collections.Iterable):
elif isinstance(flavor, compat.Iterable):
if not all(isinstance(flav, string_types) for flav in flavor):
raise TypeError('Object of type {typ!r} is not an iterable of '
'strings'
Expand Down
3 changes: 2 additions & 1 deletion pandas/io/pickle.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,8 @@ def try_read(path, encoding=None):
# GH 6899
try:
with warnings.catch_warnings(record=True):
# We want to silencce any warnings about, e.g. moved modules.
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
return read_wrapper(lambda f: pkl.load(f))
except Exception:
# reg/patched pickle
Expand Down
Loading

0 comments on commit 0ba7b16

Please sign in to comment.