Merge remote-tracking branch 'upstream/master' into STY-repr-batch-5
MomIsBestFriend committed Dec 4, 2019
2 parents aa5ee8d + 21c93fc commit 8c504b8
Showing 39 changed files with 499 additions and 496 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/ci.yml
@@ -15,12 +15,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:

+    - name: Setting conda path
+      run: echo "::add-path::${HOME}/miniconda3/bin"
+
     - name: Checkout
       uses: actions/checkout@v1

-    - name: Setting conda path
-      run: echo "::set-env name=PATH::${HOME}/miniconda3/bin:${PATH}"
-
     - name: Looking for unwanted patterns
       run: ci/code_checks.sh patterns
       if: true
3 changes: 3 additions & 0 deletions ci/deps/azure-36-locale.yaml
@@ -27,6 +27,9 @@ dependencies:
   - xlrd=1.1.0
   - xlsxwriter=0.9.8
   - xlwt=1.2.0
+  # lowest supported version of pyarrow (putting it here instead of in
+  # azure-36-minimum_versions because it needs numpy >= 1.14)
+  - pyarrow=0.12
   - pip
   - pip:
     - html5lib==1.0b2
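The pyarrow=0.12 pin is described in the added comment as the lowest supported version; in pandas it backs Parquet/Feather IO. A rough sketch of the kind of call that depends on it (the file name is made up, not part of this commit):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})

# round-trip through Parquet with the pyarrow engine; needs pyarrow installed
df.to_parquet("example.parquet", engine="pyarrow")
print(pd.read_parquet("example.parquet", engine="pyarrow"))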
2 changes: 1 addition & 1 deletion ci/deps/azure-37-locale.yaml
@@ -1,6 +1,5 @@
 name: pandas-dev
 channels:
-  - defaults
   - conda-forge
 dependencies:
   - python=3.7.*
@@ -33,3 +32,4 @@ dependencies:
   - xlrd
   - xlsxwriter
   - xlwt
+  - pyarrow>=0.15
3 changes: 1 addition & 2 deletions ci/deps/travis-36-cov.yaml
@@ -33,8 +33,7 @@ dependencies:
   # https://github.com/pydata/pandas-gbq/issues/271
   - google-cloud-bigquery<=1.11
   - psycopg2
-  # pyarrow segfaults on load: https://github.com/pandas-dev/pandas/issues/26716
-  # - pyarrow=0.9.0
+  - pyarrow>=0.12.0
   - pymysql
   - pytables
   - python-snappy
9 changes: 4 additions & 5 deletions ci/print_skipped.py
@@ -5,12 +5,12 @@

 def main(filename):
     if not os.path.isfile(filename):
-        return
+        raise RuntimeError(f"Could not find junit file {filename!r}")

     tree = et.parse(filename)
     root = tree.getroot()
     current_class = ""
-    for el in root.findall("testcase"):
+    for el in root.iter("testcase"):
         cn = el.attrib["classname"]
         for sk in el.findall("skipped"):
             old_class = current_class
@@ -32,8 +32,7 @@ def main(filename):
                 print("-" * 80)
             else:
                 print(
-                    "#{i} {class_name}.{test_name}: {message}".format(
-                        **dict(test_data, i=i)
-                    )
+                    f"#{i} {test_data['class_name']}."
+                    f"{test_data['test_name']}: {test_data['message']}"
                 )
             i += 1
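The switch from root.findall("testcase") to root.iter("testcase") is behavioural, not cosmetic: findall only searches an element's direct children, while iter walks the whole subtree, so testcases nested under a testsuite element are now found too. A minimal sketch with the standard-library ElementTree, using a made-up junit-like snippet rather than real CI output:

import xml.etree.ElementTree as et

# a junit-like layout where testcases sit inside a testsuite
xml = """
<testsuites>
  <testsuite>
    <testcase classname="pkg.TestA" name="test_x"><skipped message="why"/></testcase>
  </testsuite>
</testsuites>
"""
root = et.fromstring(xml)

print(len(root.findall("testcase")))      # 0 -- only direct children are searched
print(len(list(root.iter("testcase"))))   # 1 -- iter() recurses into the tree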
1 change: 0 additions & 1 deletion doc/redirects.csv
@@ -1119,7 +1119,6 @@ generated/pandas.Series.pow,../reference/api/pandas.Series.pow
 generated/pandas.Series.prod,../reference/api/pandas.Series.prod
 generated/pandas.Series.product,../reference/api/pandas.Series.product
 generated/pandas.Series.ptp,../reference/api/pandas.Series.ptp
-generated/pandas.Series.put,../reference/api/pandas.Series.put
 generated/pandas.Series.quantile,../reference/api/pandas.Series.quantile
 generated/pandas.Series.radd,../reference/api/pandas.Series.radd
 generated/pandas.Series.rank,../reference/api/pandas.Series.rank
1 change: 0 additions & 1 deletion doc/source/reference/series.rst
@@ -39,7 +39,6 @@ Attributes
    Series.empty
    Series.dtypes
    Series.name
-   Series.put

 Conversion
 ----------
3 changes: 3 additions & 0 deletions doc/source/whatsnew/v1.0.0.rst
@@ -565,6 +565,9 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
 - Passing multiple axes to :meth:`DataFrame.dropna` is no longer supported (:issue:`20995`)
 - Removed previously deprecated :meth:`Series.nonzero`, use `to_numpy().nonzero()` instead (:issue:`24048`)
 - Passing floating dtype ``codes`` to :meth:`Categorical.from_codes` is no longer supported, pass ``codes.astype(np.int64)`` instead (:issue:`21775`)
+- :meth:`Series.str.partition` and :meth:`Series.str.rpartition` no longer accept "pat" keyword, use "sep" instead (:issue:`23767`)
+- Removed the previously deprecated :meth:`Series.put` (:issue:`27106`)
+- Removed the previously deprecated :attr:`Series.real`, :attr:`Series.imag` (:issue:`27106`)
 - Removed the previously deprecated :meth:`Series.to_dense`, :meth:`DataFrame.to_dense` (:issue:`26684`)
 - Removed the previously deprecated :meth:`Index.dtype_str`, use ``str(index.dtype)`` instead (:issue:`27106`)
 - :meth:`Categorical.ravel` returns a :class:`Categorical` instead of a ``ndarray`` (:issue:`27199`)
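For the removals listed in this whatsnew hunk, the replacements the notes point to look roughly like this (a sketch with throwaway data against a pandas 1.0-era API):

import pandas as pd

s = pd.Series([0, 3, 0, 4])

# Series.nonzero() is gone; the note suggests going through NumPy instead
print(s.to_numpy().nonzero())

# Series.str.partition()/rpartition() now take ``sep``, not ``pat``
print(pd.Series(["a_b_c"]).str.partition(sep="_"))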
12 changes: 6 additions & 6 deletions doc/sphinxext/announce.py
@@ -113,13 +113,13 @@ def build_string(revision_range, heading="Contributors"):
     components["authors"] = "* " + "\n* ".join(components["authors"])

     tpl = textwrap.dedent(
-        """\
-    {heading}
-    {uline}
+        f"""\
+    {components['heading']}
+    {components['uline']}

-    {author_message}
-    {authors}"""
-    ).format(**components)
+    {components['author_message']}
+    {components['authors']}"""
+    )
     return tpl

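The announce.py hunk follows the same pattern as the rest of this style batch: a plain template filled via .format(**components) becomes an f-string that indexes the dict directly. A tiny self-contained comparison (the dict contents here are invented):

components = {"heading": "Contributors", "uline": "============"}

# old style: named placeholders filled from the dict by .format(**components)
old = "{heading}\n{uline}".format(**components)

# new style: an f-string that looks the values up itself
new = f"{components['heading']}\n{components['uline']}"

assert old == new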
2 changes: 1 addition & 1 deletion doc/sphinxext/contributors.py
@@ -27,7 +27,7 @@ def run(self):
         except git.GitCommandError as exc:
             return [
                 self.state.document.reporter.warning(
-                    "Cannot find contributors for range '{}': {}".format(range_, exc),
+                    f"Cannot find contributors for range {repr(range_)}: {exc}",
                     line=self.lineno,
                 )
             ]
5 changes: 0 additions & 5 deletions pandas/_libs/parsers.pyx
@@ -63,11 +63,6 @@ from pandas.errors import (ParserError, DtypeWarning,

 lzma = _import_lzma()

-# Import CParserError as alias of ParserError for backwards compatibility.
-# Ultimately, we want to remove this import. See gh-12665 and gh-14479.
-CParserError = ParserError
-
-
 cdef:
     float64_t INF = <float64_t>np.inf
     float64_t NEGINF = -INF
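With the backwards-compatibility alias gone from parsers.pyx, the public name to catch remains pandas.errors.ParserError, which the C parser raises for malformed input. A minimal sketch (the CSV snippet is made up):

import io

import pandas as pd

# a ragged row makes the C parser fail while tokenizing
bad_csv = io.StringIO("a,b\n1,2,3\n")

try:
    pd.read_csv(bad_csv)
except pd.errors.ParserError as err:
    print("caught ParserError:", err)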
6 changes: 6 additions & 0 deletions pandas/compat/chainmap.py
@@ -10,6 +10,12 @@ def __setitem__(self, key, value):
         self.maps[0][key] = value

     def __delitem__(self, key):
+        """
+        Raises
+        ------
+        KeyError
+            If `key` doesn't exist.
+        """
         for mapping in self.maps:
             if key in mapping:
                 del mapping[key]
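The docstring added to __delitem__ documents existing behaviour: deleting a key that none of the maps contain raises KeyError. A minimal sketch using the stdlib collections.ChainMap, which pandas' DeepChainMap appears to build on given the self.maps usage above (the dicts are invented):

from collections import ChainMap

scope = ChainMap({"x": 1}, {"y": 2})

try:
    del scope["missing"]  # present in none of the maps
except KeyError as err:
    print("KeyError as documented:", err)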
27 changes: 8 additions & 19 deletions pandas/core/base.py
@@ -114,9 +114,7 @@ def __setattr__(self, key, value):
             or key in type(self).__dict__
             or getattr(self, key, None) is not None
         ):
-            raise AttributeError(
-                "You cannot add any new attribute '{key}'".format(key=key)
-            )
+            raise AttributeError(f"You cannot add any new attribute '{key}'")
         object.__setattr__(self, key, value)


@@ -220,28 +218,22 @@ def _obj_with_exclusions(self):

     def __getitem__(self, key):
         if self._selection is not None:
-            raise IndexError(
-                "Column(s) {selection} already selected".format(
-                    selection=self._selection
-                )
-            )
+            raise IndexError(f"Column(s) {self._selection} already selected")

         if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):
             if len(self.obj.columns.intersection(key)) != len(key):
                 bad_keys = list(set(key).difference(self.obj.columns))
-                raise KeyError(
-                    "Columns not found: {missing}".format(missing=str(bad_keys)[1:-1])
-                )
+                raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
             return self._gotitem(list(key), ndim=2)

         elif not getattr(self, "as_index", False):
             if key not in self.obj.columns:
-                raise KeyError("Column not found: {key}".format(key=key))
+                raise KeyError(f"Column not found: {key}")
             return self._gotitem(key, ndim=2)

         else:
             if key not in self.obj:
-                raise KeyError("Column not found: {key}".format(key=key))
+                raise KeyError(f"Column not found: {key}")
             return self._gotitem(key, ndim=1)

     def _gotitem(self, key, ndim, subset=None):
@@ -293,8 +285,7 @@ def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
             return f(self, *args, **kwargs)

         raise AttributeError(
-            "'{arg}' is not a valid function for "
-            "'{cls}' object".format(arg=arg, cls=type(self).__name__)
+            f"'{arg}' is not a valid function for '{type(self).__name__}' object"
         )

     def _aggregate(self, arg, *args, **kwargs):
@@ -359,7 +350,7 @@ def _aggregate(self, arg, *args, **kwargs):
                 elif isinstance(obj, ABCSeries):
                     raise SpecificationError("nested renamer is not supported")
                 elif isinstance(obj, ABCDataFrame) and k not in obj.columns:
-                    raise KeyError("Column '{col}' does not exist!".format(col=k))
+                    raise KeyError(f"Column '{k}' does not exist!")

             arg = new_arg

@@ -1101,9 +1092,7 @@ def _reduce(
         func = getattr(self, name, None)
         if func is None:
             raise TypeError(
-                "{klass} cannot perform the operation {op}".format(
-                    klass=type(self).__name__, op=name
-                )
+                f"{type(self).__name__} cannot perform the operation {name}"
             )
         return func(skipna=skipna, **kwds)

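The base.py hunks only reword existing error messages as f-strings; what users see is unchanged. For example, the "Columns not found" branch of __getitem__ above is what fires when a groupby selection names a missing column. A small sketch with throwaway data, assuming a pandas build from around this commit:

import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})

try:
    # "nonexistent" is not a column, so SelectionMixin.__getitem__ raises
    df.groupby("a")[["b", "nonexistent"]]
except KeyError as err:
    print(err)  # Columns not found: 'nonexistent'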
(Diffs for the remaining changed files were not loaded and are not shown here.)