CLN: replace %s syntax with .format in core.dtypes and core.sparse (#…
jschendel authored and jreback committed Aug 17, 2017
1 parent 57befd1 commit ecaac87
Showing 6 changed files with 52 additions and 42 deletions.
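Every change below follows the same pattern: old-style %-interpolation with positional values is replaced by str.format with named fields. A minimal sketch of the conversion, mirroring the first hunk in cast.py (the surrounding variable is illustrative):

import numpy as np

dtype = np.dtype('int64')

# Before: %-interpolation with a positional value
old_msg = "Unexpected dtype encountered: %s" % dtype

# After: str.format with a named field; the rendered message is unchanged
new_msg = "Unexpected dtype encountered: {dtype}".format(dtype=dtype)

assert old_msg == new_msg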
20 changes: 12 additions & 8 deletions pandas/core/dtypes/cast.py
@@ -516,7 +516,8 @@ def maybe_cast_item(obj, item, dtype):
         if dtype in (np.object_, np.bool_):
             obj[item] = chunk.astype(np.object_)
         elif not issubclass(dtype, (np.integer, np.bool_)):  # pragma: no cover
-            raise ValueError("Unexpected dtype encountered: %s" % dtype)
+            raise ValueError("Unexpected dtype encountered: {dtype}"
+                             .format(dtype=dtype))


 def invalidate_string_dtypes(dtype_set):
@@ -620,8 +621,9 @@ def astype_nansafe(arr, dtype, copy=True):
         elif dtype == np.int64:
             return arr.view(dtype)
         elif dtype != _NS_DTYPE:
-            raise TypeError("cannot astype a datetimelike from [%s] to [%s]" %
-                            (arr.dtype, dtype))
+            raise TypeError("cannot astype a datetimelike from [{from_dtype}] "
+                            "to [{to_dtype}]".format(from_dtype=arr.dtype,
+                                                     to_dtype=dtype))
         return arr.astype(_NS_DTYPE)
     elif is_timedelta64_dtype(arr):
         if dtype == np.int64:
@@ -640,8 +642,9 @@ def astype_nansafe(arr, dtype, copy=True):
                 result[mask] = np.nan
                 return result

-        raise TypeError("cannot astype a timedelta from [%s] to [%s]" %
-                        (arr.dtype, dtype))
+        raise TypeError("cannot astype a timedelta from [{from_dtype}] "
+                        "to [{to_dtype}]".format(from_dtype=arr.dtype,
+                                                 to_dtype=dtype))

         return arr.astype(_TD_DTYPE)
     elif (np.issubdtype(arr.dtype, np.floating) and
@@ -926,7 +929,7 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
                     dtype = _NS_DTYPE
                 else:
                     raise TypeError("cannot convert datetimelike to "
-                                    "dtype [%s]" % dtype)
+                                    "dtype [{dtype}]".format(dtype=dtype))
             elif is_datetime64tz:

                 # our NaT doesn't support tz's
@@ -943,7 +946,7 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
                     dtype = _TD_DTYPE
                 else:
                     raise TypeError("cannot convert timedeltalike to "
-                                    "dtype [%s]" % dtype)
+                                    "dtype [{dtype}]".format(dtype=dtype))

             if is_scalar(value):
                 if value == iNaT or isna(value):
Expand Down Expand Up @@ -982,7 +985,8 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
return tslib.ints_to_pydatetime(ints)

# we have a non-castable dtype that was passed
raise TypeError('Cannot cast datetime64 to %s' % dtype)
raise TypeError('Cannot cast datetime64 to {dtype}'
.format(dtype=dtype))

else:

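The multi-line raises in cast.py above lean on implicit concatenation of adjacent string literals, with .format attached to the last literal so long messages stay within the line-length limit. A sketch of that idea, assuming illustrative values:

import numpy as np

arr = np.array(['2017-08-17'], dtype='datetime64[ns]')
dtype = np.dtype('float64')

# Adjacent literals are joined at compile time into one template string,
# then .format fills both named fields at once.
msg = ("cannot astype a datetimelike from [{from_dtype}] "
       "to [{to_dtype}]".format(from_dtype=arr.dtype, to_dtype=dtype))
print(msg)  # cannot astype a datetimelike from [datetime64[ns]] to [float64]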
8 changes: 4 additions & 4 deletions pandas/core/dtypes/common.py
@@ -1854,10 +1854,10 @@ def _validate_date_like_dtype(dtype):
     try:
         typ = np.datetime_data(dtype)[0]
     except ValueError as e:
-        raise TypeError('%s' % e)
+        raise TypeError('{error}'.format(error=e))
     if typ != 'generic' and typ != 'ns':
-        raise ValueError('%r is too specific of a frequency, try passing %r' %
-                         (dtype.name, dtype.type.__name__))
+        msg = '{name!r} is too specific of a frequency, try passing {type!r}'
+        raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))


 _string_dtypes = frozenset(map(_get_dtype_from_object, (binary_type,
@@ -1924,6 +1924,6 @@ def pandas_dtype(dtype):
     if dtype in [object, np.object_, 'object', 'O']:
         return npdtype
     elif npdtype.kind == 'O':
-        raise TypeError('dtype {0} not understood'.format(dtype))
+        raise TypeError('dtype {dtype} not understood'.format(dtype=dtype))

     return npdtype
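The {name!r} and {type!r} fields above use str.format's conversion flag: !r applies repr() to the value, which is the .format counterpart of the old %r placeholder. A quick equivalence check with an illustrative dtype:

import numpy as np

dtype = np.dtype('datetime64[D]')

old = ('%r is too specific of a frequency, try passing %r' %
       (dtype.name, dtype.type.__name__))
new = ('{name!r} is too specific of a frequency, try passing {type!r}'
       .format(name=dtype.name, type=dtype.type.__name__))

# Both render the quoted repr of each value:
# 'datetime64[D]' is too specific of a frequency, try passing 'datetime64'
assert old == new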
39 changes: 21 additions & 18 deletions pandas/core/sparse/array.py
@@ -52,8 +52,8 @@ def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
     def wrapper(self, other):
         if isinstance(other, np.ndarray):
             if len(self) != len(other):
-                raise AssertionError("length mismatch: %d vs. %d" %
-                                     (len(self), len(other)))
+                raise AssertionError("length mismatch: {self} vs. {other}"
+                                     .format(self=len(self), other=len(other)))
             if not isinstance(other, ABCSparseArray):
                 dtype = getattr(other, 'dtype', None)
                 other = SparseArray(other, fill_value=self.fill_value,
@@ -66,7 +66,8 @@ def wrapper(self, other):

             return _wrap_result(name, result, self.sp_index, fill)
         else:  # pragma: no cover
-            raise TypeError('operation with %s not supported' % type(other))
+            raise TypeError('operation with {other} not supported'
+                            .format(other=type(other)))

     if name.startswith("__"):
         name = name[2:-2]
@@ -218,9 +219,9 @@ def __new__(cls, data, sparse_index=None, index=None, kind='integer',
         else:
             values = _sanitize_values(data)
             if len(values) != sparse_index.npoints:
-                raise AssertionError("Non array-like type {0} must have"
-                                     " the same length as the"
-                                     " index".format(type(values)))
+                raise AssertionError("Non array-like type {type} must "
+                                     "have the same length as the index"
+                                     .format(type=type(values)))
         # Create array, do *not* copy data by default
         if copy:
             subarr = np.array(values, dtype=dtype, copy=True)
@@ -330,9 +331,10 @@ def __len__(self):
             return 0

     def __unicode__(self):
-        return '%s\nFill: %s\n%s' % (printing.pprint_thing(self),
-                                     printing.pprint_thing(self.fill_value),
-                                     printing.pprint_thing(self.sp_index))
+        return '{self}\nFill: {fill}\n{index}'.format(
+            self=printing.pprint_thing(self),
+            fill=printing.pprint_thing(self.fill_value),
+            index=printing.pprint_thing(self.sp_index))

     def disable(self, other):
         raise NotImplementedError('inplace binary ops not supported')
@@ -377,8 +379,8 @@ def fill_value(self, value):
         if is_dtype_equal(self.dtype, new_dtype):
             self._fill_value = fill_value
         else:
-            msg = 'unable to set fill_value {0} to {1} dtype'
-            raise ValueError(msg.format(value, self.dtype))
+            msg = 'unable to set fill_value {fill} to {dtype} dtype'
+            raise ValueError(msg.format(fill=value, dtype=self.dtype))

     def get_values(self, fill=None):
         """ return a dense representation """
@@ -466,7 +468,8 @@ def take(self, indices, axis=0, allow_fill=True,
         nv.validate_take(tuple(), kwargs)

         if axis:
-            raise ValueError("axis must be 0, input was {0}".format(axis))
+            raise ValueError("axis must be 0, input was {axis}"
+                             .format(axis=axis))

         if is_integer(indices):
             # return scalar
@@ -482,12 +485,12 @@ def take(self, indices, axis=0, allow_fill=True,
                        'all indices must be >= -1')
                 raise ValueError(msg)
             elif (n <= indices).any():
-                msg = 'index is out of bounds for size {0}'
-                raise IndexError(msg.format(n))
+                msg = 'index is out of bounds for size {size}'.format(size=n)
+                raise IndexError(msg)
         else:
             if ((indices < -n) | (n <= indices)).any():
-                msg = 'index is out of bounds for size {0}'
-                raise IndexError(msg.format(n))
+                msg = 'index is out of bounds for size {size}'.format(size=n)
+                raise IndexError(msg)

         indices = indices.astype(np.int32)
         if not (allow_fill and fill_value is not None):
@@ -543,8 +546,8 @@ def astype(self, dtype=None, copy=True):
             else:
                 fill_value = dtype.type(self.fill_value)
         except ValueError:
-            msg = 'unable to coerce current fill_value {0} to {1} dtype'
-            raise ValueError(msg.format(self.fill_value, dtype))
+            msg = 'unable to coerce current fill_value {fill} to {dtype} dtype'
+            raise ValueError(msg.format(fill=self.fill_value, dtype=dtype))
         return self._simple_new(sp_values, self.sp_index,
                                 fill_value=fill_value)

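Several of the array.py messages are now fully formatted before the raise (the IndexError cases in take, for example), so the exception receives a finished string rather than a template that still needs .format. A sketch of that pattern with made-up values:

n = 5
indices = [0, 7]  # illustrative out-of-bounds request

try:
    if any(i >= n for i in indices):
        # Build the message first, then raise with the finished string.
        msg = 'index is out of bounds for size {size}'.format(size=n)
        raise IndexError(msg)
except IndexError as err:
    print(err)  # index is out of bounds for size 5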
16 changes: 8 additions & 8 deletions pandas/core/sparse/frame.py
@@ -214,11 +214,11 @@ def _prep_index(self, data, index, columns):
             columns = _default_index(K)

         if len(columns) != K:
-            raise ValueError('Column length mismatch: %d vs. %d' %
-                             (len(columns), K))
+            raise ValueError('Column length mismatch: {columns} vs. {K}'
+                             .format(columns=len(columns), K=K))
         if len(index) != N:
-            raise ValueError('Index length mismatch: %d vs. %d' %
-                             (len(index), N))
+            raise ValueError('Index length mismatch: {index} vs. {N}'
+                             .format(index=len(index), N=N))
         return index, columns

     def to_coo(self):
@@ -725,17 +725,17 @@ def _maybe_rename_join(self, other, lsuffix, rsuffix):
         to_rename = self.columns.intersection(other.columns)
         if len(to_rename) > 0:
             if not lsuffix and not rsuffix:
-                raise ValueError('columns overlap but no suffix specified: %s'
-                                 % to_rename)
+                raise ValueError('columns overlap but no suffix specified: '
+                                 '{to_rename}'.format(to_rename=to_rename))

             def lrenamer(x):
                 if x in to_rename:
-                    return '%s%s' % (x, lsuffix)
+                    return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
                 return x

             def rrenamer(x):
                 if x in to_rename:
-                    return '%s%s' % (x, rsuffix)
+                    return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
                 return x

             this = self.rename(columns=lrenamer)
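A commonly cited advantage of .format over % (not stated in the commit itself) is that %-interpolation treats a tuple operand as multiple arguments, which can break messages that interpolate arbitrary objects; .format has no such special case. A sketch of the gotcha with a made-up value (the real code passes an Index of overlapping columns):

overlap = ('A', 'B')  # stand-in for overlapping column labels

# %-interpolation unpacks the tuple into two arguments and fails:
try:
    'columns overlap but no suffix specified: %s' % overlap
except TypeError as err:
    print(err)  # not all arguments converted during string formatting

# .format passes the tuple through unchanged:
msg = ('columns overlap but no suffix specified: '
       '{to_rename}'.format(to_rename=overlap))
print(msg)  # columns overlap but no suffix specified: ('A', 'B')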
5 changes: 3 additions & 2 deletions pandas/core/sparse/list.py
@@ -35,15 +35,16 @@ def __init__(self, data=None, fill_value=np.nan):

     def __unicode__(self):
         contents = '\n'.join(repr(c) for c in self._chunks)
-        return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))
+        return '{self}\n{contents}'.format(self=object.__repr__(self),
+                                           contents=pprint_thing(contents))

     def __len__(self):
         return sum(len(c) for c in self._chunks)

     def __getitem__(self, i):
         if i < 0:
             if i + len(self) < 0:  # pragma: no cover
-                raise ValueError('%d out of range' % i)
+                raise ValueError('{index} out of range'.format(index=i))
             i += len(self)

         passed = 0
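One subtle point in the __getitem__ change above: the old '%d' placeholder required a number, while '{index}' formats any value via str(). For the integer index actually passed here the rendered message is identical, as this illustrative check shows:

i = -9

old = '%d out of range' % i
new = '{index} out of range'.format(index=i)
assert old == new  # both render as '-9 out of range'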
6 changes: 4 additions & 2 deletions pandas/core/sparse/series.py
@@ -65,7 +65,8 @@ def wrapper(self, other):
                                      index=self.index,
                                      name=self.name)
         else:  # pragma: no cover
-            raise TypeError('operation with %s not supported' % type(other))
+            raise TypeError('operation with {other} not supported'
+                            .format(other=type(other)))

     wrapper.__name__ = name
     if name.startswith("__"):
@@ -295,7 +296,8 @@ def shape(self):
     def __unicode__(self):
         # currently, unicode is same as repr...fixes infinite loop
         series_rep = Series.__unicode__(self)
-        rep = '%s\n%s' % (series_rep, repr(self.sp_index))
+        rep = '{series}\n{index!r}'.format(series=series_rep,
+                                           index=self.sp_index)
         return rep

     def __array_wrap__(self, result, context=None):
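In the SparseSeries.__unicode__ change above, the explicit repr(self.sp_index) call moves into the template as the !r conversion, so .format applies repr() itself. A sketch with stand-in values (the real method formats a SparseSeries and its sp_index):

series_rep = 'SparseSeries stub'
sp_index = [0, 2, 5]  # stand-in for the real sp_index object

old = '%s\n%s' % (series_rep, repr(sp_index))
new = '{series}\n{index!r}'.format(series=series_rep, index=sp_index)
assert old == new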
